[RFC PATCH v2 6/7] hv: HLT emulation in hypervisor


Shuo A Liu
 

HLT emulation is important for maximizing CPU resource utilization. A vcpu
executing HLT is idle and can give up the CPU proactively. Thus, we
pause the vcpu in HLT emulation and resume it when an event happens.

When a vcpu enters HLT, it will be paused.

VM ID PCPU ID VCPU ID VCPU ROLE VCPU STATE
===== ======= ======= ========= ==========
0 0 0 PRIMARY Paused
0 1 1 SECONDARY Paused

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vlapic.c | 2 ++
hypervisor/arch/x86/guest/vmcs.c | 2 +-
hypervisor/arch/x86/guest/vmexit.c | 11 ++++++++++-
hypervisor/common/hv_main.c | 1 +
4 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 1c7bdfb..63f6b01 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -556,6 +556,7 @@ static void apicv_basic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector,

static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
{
+
/* update TMR if interrupt trigger mode has changed */
vlapic_set_tmr(vlapic, vector, level);

@@ -590,6 +591,7 @@ static void vlapic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector, bool
if ((lapic->svr.v & APIC_SVR_ENABLE) == 0U) {
dev_dbg(ACRN_DBG_LAPIC, "vlapic is software disabled, ignoring interrupt %u", vector);
} else {
+ notify_vcpu_event(vlapic->vcpu, VCPU_EVENT_INTERRUPT);
vlapic->ops->accept_intr(vlapic, vector, level);
}
}
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 33be74f..fb0ba76 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -272,7 +272,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS,
VMX_PROCBASED_CTLS_TSC_OFF | VMX_PROCBASED_CTLS_TPR_SHADOW |
VMX_PROCBASED_CTLS_IO_BITMAP | VMX_PROCBASED_CTLS_MSR_BITMAP |
- VMX_PROCBASED_CTLS_SECONDARY);
+ VMX_PROCBASED_CTLS_HLT | VMX_PROCBASED_CTLS_SECONDARY);

/*Disable VM_EXIT for CR3 access*/
value32 &= ~(VMX_PROCBASED_CTLS_CR3_LOAD | VMX_PROCBASED_CTLS_CR3_STORE);
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index 4fb0478..ea576be 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -31,6 +31,7 @@ static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t pause_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu);

/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -59,7 +60,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
[VMX_EXIT_REASON_GETSEC] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_HLT] = {
- .handler = unhandled_vmexit_handler},
+ .handler = hlt_vmexit_handler},
[VMX_EXIT_REASON_INVD] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_INVLPG] = {
@@ -284,6 +285,14 @@ static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu)
return 0;
}

+static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu)
+{
+ if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
+ wait_vcpu_event(vcpu, VCPU_EVENT_INTERRUPT);
+ }
+ return 0;
+}
+
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;
diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index e949149..a6920f6 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -40,6 +40,7 @@ void vcpu_thread(struct thread_object *obj)
continue;
}

+ reset_vcpu_event(vcpu, VCPU_EVENT_INTERRUPT);
profiling_vmenter_handler(vcpu);

TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);
--
2.8.3

Join acrn-dev@lists.projectacrn.org to automatically receive all group messages.