[RFC PATCH v3 3/6] hv: vcpu: wait and signal vcpu event support


From: Shuo A Liu

Introduce two kinds of events for each vcpu:
  VCPU_EVENT_IOREQ: the vcpu waits for completion of an I/O request
  VCPU_EVENT_VIRTUAL_INTERRUPT: the vcpu waits for virtual interrupt events
A vcpu can wait for such an event and resume running when the event
is signalled.

This patch also switches I/O request waiting/notification over to this
mechanism.
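
A minimal sketch of the intended usage, assuming the init_event()/
wait_event()/signal_event() primitives on struct sched_event introduced
earlier in this series, together with the per-vcpu events[] array added
by this patch:

	/* vcpu creation: every per-vcpu event starts un-signalled */
	for (i = 0; i < VCPU_EVENT_NUM; i++) {
		init_event(&vcpu->events[i]);
	}

	/* vcpu side: block until the I/O request has been completed */
	wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);

	/* completion side (hcall_notify_ioreq_finish): wake the waiting vcpu */
	signal_event(&vcpu->events[VCPU_EVENT_IOREQ]);

VCPU_EVENT_VIRTUAL_INTERRUPT follows the same pattern for a vcpu
waiting on virtual interrupt events.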

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vcpu.c | 5 ++++-
hypervisor/common/hypercall.c | 8 +++-----
hypervisor/dm/io_req.c | 15 ++++-----------
hypervisor/include/arch/x86/guest/vcpu.h | 7 +++++++
4 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index ea6d0f4..6d0ecc5 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -795,7 +795,7 @@ void launch_vcpu(struct acrn_vcpu *vcpu)
/* help function for vcpu create */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
- int32_t ret;
+ int32_t ret, i;
struct acrn_vcpu *vcpu = NULL;
char thread_name[16];

@@ -811,6 +811,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in;
init_thread_data(&vcpu->thread_obj);
+ for (i = 0; i < VCPU_EVENT_NUM; i++) {
+ init_event(&vcpu->events[i]);
+ }
}

return ret;
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index c114040..394f405 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -553,12 +553,10 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
__func__, vcpu_id, target_vm->vm_id);
} else {
vcpu = vcpu_from_vid(target_vm, vcpu_id);
- if (vcpu->state == VCPU_PAUSED) {
- if (!vcpu->vm->sw.is_completion_polling) {
- resume_vcpu(vcpu);
- }
- ret = 0;
+ if (!vcpu->vm->sw.is_completion_polling) {
+ signal_event(&vcpu->events[VCPU_EVENT_IOREQ]);
}
+ ret = 0;
}
}

diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c
index d0ce332..daee03d 100644
--- a/hypervisor/dm/io_req.c
+++ b/hypervisor/dm/io_req.c
@@ -103,14 +103,6 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
}
clac();

- /* pause vcpu in notification mode , wait for VHM to handle the MMIO request.
- * TODO: when pause_vcpu changed to switch vcpu out directlly, we
- * should fix the race issue between req.processed update and vcpu pause
- */
- if (!is_polling) {
- pause_vcpu(vcpu, VCPU_PAUSED);
- }
-
/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
cpu_write_memory_barrier();

@@ -136,10 +128,11 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
schedule();
}
}
- } else if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
- schedule();
} else {
- ret = -EINVAL;
+ wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);
+ if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
+ schedule();
+ }
}
} else {
ret = -EINVAL;
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index bfee6e4..827ee6f 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -146,6 +146,11 @@ enum vm_cpu_mode {
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};

+enum vcpu_event_type {
+ VCPU_EVENT_IOREQ,
+ VCPU_EVENT_VIRTUAL_INTERRUPT,
+ VCPU_EVENT_NUM
+};

/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
@@ -260,6 +265,8 @@ struct acrn_vcpu {

uint64_t reg_cached;
uint64_t reg_updated;
+
+ struct sched_event events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);

struct vcpu_dump {
--
2.8.3
