[PATCH 12/14] hv: apicv: change apicv name to vmx_vapic
Yu Wang
Of the original names, vlapic means the HV LAPIC device model, and apicv means the
hardware vmx vapic. For the "apic", sometimes it means ioapic + lapic, sometimes it means lapic. To get these names more explicit, change apicv to vmx_vapic. And remain vlapic for HV lapic device model. Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/guest/vlapic.c | 81 ++++++++++++---------- hypervisor/arch/x86/guest/vlapic_priv.h | 16 +++-- hypervisor/arch/x86/virq.c | 4 +- hypervisor/arch/x86/vmx.c | 4 +- hypervisor/dm/vioapic.c | 2 +- hypervisor/include/arch/x86/guest/vlapic.h | 8 +-- 6 files changed, 62 insertions(+), 53 deletions(-) diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index 89ee37f..8c62ea2 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -75,19 +75,20 @@ static inline void vlapic_dump_isr(struct acrn_vlapic *vlapic, char *msg) #endif /*APIC-v APIC-access address */ -static void *apicv_apic_access_addr; +static void *vmx_vapic_apic_access_addr; static int -apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level); +vmx_vapic_set_intr_ready(struct acrn_vlapic *vlapic, + uint32_t vector, bool level); static int -apicv_pending_intr(struct acrn_vlapic *vlapic, __unused uint32_t *vecptr); +vmx_vapic_pending_intr(struct acrn_vlapic *vlapic, __unused uint32_t *vecptr); static void -apicv_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level); +vmx_vapic_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level); static void -apicv_batch_set_tmr(struct acrn_vlapic *vlapic); +vmx_vapic_batch_set_tmr(struct acrn_vlapic *vlapic); /* * Post an interrupt to the vcpu running on 'hostcpu'. This will use a @@ -461,8 +462,8 @@ vlapic_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level) return 1; } - if (vlapic->ops.apicv_set_intr_ready_fn != NULL) { - return vlapic->ops.apicv_set_intr_ready_fn + if (vlapic->ops.vmx_vapic_set_intr_ready_fn != NULL) { + return vlapic->ops.vmx_vapic_set_intr_ready_fn (vlapic, vector, level); } @@ -1198,8 +1199,8 @@ vlapic_pending_intr(struct acrn_vlapic *vlapic, uint32_t *vecptr) uint32_t i, vector, val, bitpos; struct lapic_reg *irrptr; - if (vlapic->ops.apicv_pending_intr_fn != NULL) { - return vlapic->ops.apicv_pending_intr_fn(vlapic, vecptr); + if (vlapic->ops.vmx_vapic_pending_intr_fn != NULL) { + return vlapic->ops.vmx_vapic_pending_intr_fn(vlapic, vecptr); } irrptr = &lapic->irr[0]; @@ -1230,8 +1231,8 @@ vlapic_intr_accepted(struct acrn_vlapic *vlapic, uint32_t vector) struct lapic_reg *irrptr, *isrptr; uint32_t idx, stk_top; - if (vlapic->ops.apicv_intr_accepted_fn != NULL) { - vlapic->ops.apicv_intr_accepted_fn(vlapic, vector); + if (vlapic->ops.vmx_vapic_intr_accepted_fn != NULL) { + vlapic->ops.vmx_vapic_intr_accepted_fn(vlapic, vector); return; } @@ -1721,18 +1722,19 @@ vlapic_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level) * to avoid unnecessary VMCS read/update. 
*/ void -vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic) +vlapic_vmx_vapic_batch_set_tmr(struct acrn_vlapic *vlapic) { - if (vlapic->ops.apicv_batch_set_tmr_fn != NULL) { - vlapic->ops.apicv_batch_set_tmr_fn(vlapic); + if (vlapic->ops.vmx_vapic_batch_set_tmr_fn != NULL) { + vlapic->ops.vmx_vapic_batch_set_tmr_fn(vlapic); } } static void -vlapic_apicv_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level) +vlapic_vmx_vapic_set_tmr(struct acrn_vlapic *vlapic, + uint32_t vector, bool level) { - if (vlapic->ops.apicv_set_tmr_fn != NULL) { - vlapic->ops.apicv_set_tmr_fn(vlapic, vector, level); + if (vlapic->ops.vmx_vapic_set_tmr_fn != NULL) { + vlapic->ops.vmx_vapic_set_tmr_fn(vlapic, vector, level); } } @@ -2046,22 +2048,22 @@ int vlapic_create(struct vcpu *vcpu) if (is_vapic_supported()) { if (is_vapic_intr_delivery_supported()) { - vlapic->ops.apicv_set_intr_ready_fn = - apicv_set_intr_ready; + vlapic->ops.vmx_vapic_set_intr_ready_fn = + vmx_vapic_set_intr_ready; - vlapic->ops.apicv_pending_intr_fn = - apicv_pending_intr; + vlapic->ops.vmx_vapic_pending_intr_fn = + vmx_vapic_pending_intr; - vlapic->ops.apicv_set_tmr_fn = apicv_set_tmr; - vlapic->ops.apicv_batch_set_tmr_fn = - apicv_batch_set_tmr; + vlapic->ops.vmx_vapic_set_tmr_fn = vmx_vapic_set_tmr; + vlapic->ops.vmx_vapic_batch_set_tmr_fn = + vmx_vapic_batch_set_tmr; vlapic->pir_desc = (struct vlapic_pir_desc *)(&(vlapic->pir)); } if (is_vcpu_bsp(vcpu)) { ept_mr_add(vcpu->vm, - apicv_get_apic_access_addr(vcpu->vm), + vmx_vapic_get_apic_access_addr(vcpu->vm), DEFAULT_APIC_BASE, CPU_PAGE_SIZE, IA32E_EPT_W_BIT | IA32E_EPT_R_BIT | IA32E_EPT_UNCACHED); @@ -2120,7 +2122,8 @@ void vlapic_free(struct vcpu *vcpu) * APIC-v functions * **/ static int -apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, __unused bool level) +vmx_vapic_set_intr_ready(struct acrn_vlapic *vlapic, + uint32_t vector, __unused bool level) { struct vlapic_pir_desc *pir_desc; uint64_t mask; @@ -2138,7 +2141,7 @@ apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, __unused bool } static int -apicv_pending_intr(struct acrn_vlapic *vlapic, __unused uint32_t *vecptr) +vmx_vapic_pending_intr(struct acrn_vlapic *vlapic, __unused uint32_t *vecptr) { struct vlapic_pir_desc *pir_desc; struct lapic_regs *lapic; @@ -2173,7 +2176,8 @@ apicv_pending_intr(struct acrn_vlapic *vlapic, __unused uint32_t *vecptr) } static void -apicv_set_tmr(__unused struct acrn_vlapic *vlapic, uint32_t vector, bool level) +vmx_vapic_set_tmr(__unused struct acrn_vlapic *vlapic, + uint32_t vector, bool level) { uint64_t mask, val; uint32_t field; @@ -2195,7 +2199,7 @@ apicv_set_tmr(__unused struct acrn_vlapic *vlapic, uint32_t vector, bool level) #define EOI_STEP_LEN (64U) #define TMR_STEP_LEN (32U) static void -apicv_batch_set_tmr(struct acrn_vlapic *vlapic) +vmx_vapic_batch_set_tmr(struct acrn_vlapic *vlapic) { struct lapic_regs *lapic = vlapic->apic_page; uint64_t val; @@ -2220,23 +2224,24 @@ apicv_batch_set_tmr(struct acrn_vlapic *vlapic) *APIC-v: Get the HPA to APIC-access page * **/ uint64_t -apicv_get_apic_access_addr(__unused struct vm *vm) +vmx_vapic_get_apic_access_addr(__unused struct vm *vm) { - if (apicv_apic_access_addr == NULL) { - apicv_apic_access_addr = alloc_page(); - ASSERT(apicv_apic_access_addr != NULL, - "apicv allocate failed."); + if (vmx_vapic_apic_access_addr == NULL) { + vmx_vapic_apic_access_addr = alloc_page(); + ASSERT(vmx_vapic_apic_access_addr != NULL, + "vmx_vapic allocate failed."); - (void)memset((void *)apicv_apic_access_addr, 
0U, CPU_PAGE_SIZE); + (void)memset((void *)vmx_vapic_apic_access_addr, + 0U, CPU_PAGE_SIZE); } - return HVA2HPA(apicv_apic_access_addr); + return HVA2HPA(vmx_vapic_apic_access_addr); } /** *APIC-v: Get the HPA to virtualized APIC registers page * **/ uint64_t -apicv_get_apic_page_addr(struct acrn_vlapic *vlapic) +vmx_vapic_get_apic_page_addr(struct acrn_vlapic *vlapic) { return HVA2HPA(vlapic->apic_page); } @@ -2247,7 +2252,7 @@ apicv_get_apic_page_addr(struct acrn_vlapic *vlapic) */ void -apicv_inject_pir(struct acrn_vlapic *vlapic) +vmx_vapic_inject_pir(struct acrn_vlapic *vlapic) { struct vlapic_pir_desc *pir_desc; struct lapic_regs *lapic; diff --git a/hypervisor/arch/x86/guest/vlapic_priv.h b/hypervisor/arch/x86/guest/vlapic_priv.h index 1388c4a..04bdfc2 100644 --- a/hypervisor/arch/x86/guest/vlapic_priv.h +++ b/hypervisor/arch/x86/guest/vlapic_priv.h @@ -97,13 +97,17 @@ struct vlapic_pir_desc { } __aligned(64); struct vlapic_ops { - int (*apicv_set_intr_ready_fn) + int (*vmx_vapic_set_intr_ready_fn) (struct acrn_vlapic *vlapic, uint32_t vector, bool level); - int (*apicv_pending_intr_fn)(struct acrn_vlapic *vlapic, uint32_t *vecptr); - void (*apicv_intr_accepted_fn)(struct acrn_vlapic *vlapic, uint32_t vector); - void (*apicv_post_intr_fn)(struct acrn_vlapic *vlapic, int hostcpu); - void (*apicv_set_tmr_fn)(struct acrn_vlapic *vlapic, uint32_t vector, bool level); - void (*apicv_batch_set_tmr_fn)(struct acrn_vlapic *vlapic); + int (*vmx_vapic_pending_intr_fn)(struct acrn_vlapic *vlapic, + uint32_t *vecptr); + void (*vmx_vapic_intr_accepted_fn)(struct acrn_vlapic *vlapic, + uint32_t vector); + void (*vmx_vapic_post_intr_fn)(struct acrn_vlapic *vlapic, + int hostcpu); + void (*vmx_vapic_set_tmr_fn)(struct acrn_vlapic *vlapic, + uint32_t vector, bool level); + void (*vmx_vapic_batch_set_tmr_fn)(struct acrn_vlapic *vlapic); void (*enable_x2apic_mode_fn)(struct acrn_vlapic *vlapic); }; diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c index 5f0d928..c351950 100644 --- a/hypervisor/arch/x86/virq.c +++ b/hypervisor/arch/x86/virq.c @@ -94,7 +94,7 @@ static int vcpu_do_pending_event(struct vcpu *vcpu) int ret = 0; if (is_vapic_intr_delivery_supported()) { - apicv_inject_pir(vlapic); + vmx_vapic_inject_pir(vlapic); return 0; } @@ -459,7 +459,7 @@ INTR_WIN: intr_pending = 1U; } - /* If the "virtual-interrupt delivery" is enabled for vmx apicv, + /* If the "virtual-interrupt delivery" is enabled for vmx vmx_vapic, * then need avoid to enable interrupt-window exiting. 
* * From SDM Vol3, 29.2.1, the evaluation of pending virtual diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c index 249525c..a9101fd 100644 --- a/hypervisor/arch/x86/vmx.c +++ b/hypervisor/arch/x86/vmx.c @@ -1032,12 +1032,12 @@ static void init_exec_ctrl(struct vcpu *vcpu) if (is_vapic_supported()) { /*APIC-v, config APIC-access address*/ - value64 = apicv_get_apic_access_addr(vcpu->vm); + value64 = vmx_vapic_get_apic_access_addr(vcpu->vm); exec_vmwrite64(VMX_APIC_ACCESS_ADDR_FULL, value64); /*APIC-v, config APIC virtualized page address*/ - value64 = apicv_get_apic_page_addr(vcpu->arch_vcpu.vlapic); + value64 = vmx_vapic_get_apic_page_addr(vcpu->arch_vcpu.vlapic); exec_vmwrite64(VMX_VIRTUAL_APIC_PAGE_ADDR_FULL, value64); diff --git a/hypervisor/dm/vioapic.c b/hypervisor/dm/vioapic.c index 4692094..e8f086c 100644 --- a/hypervisor/dm/vioapic.c +++ b/hypervisor/dm/vioapic.c @@ -234,7 +234,7 @@ vioapic_update_tmr(struct vcpu *vcpu) vector = rte.u.lo_32 & IOAPIC_RTE_LOW_INTVEC; vlapic_set_tmr_one_vec(vlapic, delmode, vector, level); } - vlapic_apicv_batch_set_tmr(vlapic); + vlapic_vmx_vapic_batch_set_tmr(vlapic); VIOAPIC_UNLOCK(vioapic); } diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h index 548e482..b2ce473 100644 --- a/hypervisor/include/arch/x86/guest/vlapic.h +++ b/hypervisor/include/arch/x86/guest/vlapic.h @@ -109,7 +109,7 @@ void vlapic_set_tmr_one_vec(struct acrn_vlapic *vlapic, uint32_t delmode, uint32_t vector, bool level); void -vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic); +vlapic_vmx_vapic_batch_set_tmr(struct acrn_vlapic *vlapic); int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req, __unused void *handler_private_data); @@ -123,9 +123,9 @@ void vlapic_init(struct acrn_vlapic *vlapic); void vlapic_reset(struct acrn_vlapic *vlapic); void vlapic_restore(struct acrn_vlapic *vlapic, struct lapic_regs *regs); bool vlapic_enabled(struct acrn_vlapic *vlapic); -uint64_t apicv_get_apic_access_addr(__unused struct vm *vm); -uint64_t apicv_get_apic_page_addr(struct acrn_vlapic *vlapic); -void apicv_inject_pir(struct acrn_vlapic *vlapic); +uint64_t vmx_vapic_get_apic_access_addr(__unused struct vm *vm); +uint64_t vmx_vapic_get_apic_page_addr(struct acrn_vlapic *vlapic); +void vmx_vapic_inject_pir(struct acrn_vlapic *vlapic); int apic_access_vmexit_handler(struct vcpu *vcpu); int apic_write_vmexit_handler(struct vcpu *vcpu); int veoi_vmexit_handler(struct vcpu *vcpu); -- 2.17.0
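
A note on the pattern being renamed: the vmx_vapic_*_fn members form a capability-dispatch table. vlapic_create() fills them in only when the hardware feature is detected, and every caller falls back to the software vlapic path on NULL. A minimal sketch of the dispatch shape, condensed from the hunks above (not the complete declarations):

    /* hardware-assisted path only when the callback was registered
     * at vlapic_create() time; otherwise the software IRR path runs
     */
    if (vlapic->ops.vmx_vapic_set_intr_ready_fn != NULL) {
        return vlapic->ops.vmx_vapic_set_intr_ready_fn(vlapic, vector, level);
    }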
[PATCH 11/14] hv: apicv: correct the INIT-IPI check condition
Yu Wang
From SDM Vol3 10.6.1, there are two types of INIT IPI in the Delivery Mode field.
One is "INIT" and the other is "INIT Level De-assert"; both reuse the 101 encoding of the Delivery Mode field. They are distinguished by the level flag and the trigger mode flag: only "level = 0" together with "trigger mode = 1" is treated as "INIT Level De-assert". The matrix is:

+-------+--------------+----------------------+
| level | trigger mode | INIT type            |
+-------+--------------+----------------------+
|   0   |      0       | INIT                 |
+-------+--------------+----------------------+
|   0   |      1       | INIT Level De-assert |
+-------+--------------+----------------------+
|   1   |      0       | INIT                 |
+-------+--------------+----------------------+
|   1   |      1       | INIT                 |
+-------+--------------+----------------------+

The old logic checked only the level flag, so it could not cover this matrix: level=0 with trigger=0 was also treated as "INIT Level De-assert".

Signed-off-by: Yu Wang <yu1.wang@...>
---
 hypervisor/arch/x86/guest/vlapic.c | 33 ++++++++++++++++--------------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index e2a4e26..89ee37f 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1140,22 +1140,25 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
 			dev_dbg(ACRN_DBG_LAPIC,
 				"vlapic send ipi nmi to vcpu_id %hu", vcpu_id);
 		} else if (mode == APIC_DELMODE_INIT) {
-			if ((icr_low & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT) {
-				continue;
-			}
+			/* Do not support INIT Level De-assert */
+			if (((icr_low & APIC_LEVEL_MASK) ==
+					APIC_LEVEL_ASSERT) ||
+				((icr_low & APIC_TRIGMOD_MASK) ==
+					APIC_TRIGMOD_EDGE)) {
 
-			dev_dbg(ACRN_DBG_LAPIC,
-				"Sending INIT from VCPU %hu to %hu",
-				vlapic->vcpu->vcpu_id, vcpu_id);
-
-			/* put target vcpu to INIT state and wait for SIPI */
-			pause_vcpu(target_vcpu, VCPU_PAUSED);
-			reset_vcpu(target_vcpu);
-			/* new cpu model only need one SIPI to kick AP run,
-			 * the second SIPI will be ignored as it move out of
-			 * wait-for-SIPI state.
-			 */
-			target_vcpu->arch_vcpu.nr_sipi = 1U;
+				dev_dbg(ACRN_DBG_LAPIC,
+					"Sending INIT from VCPU %hu to %hu",
+					vlapic->vcpu->vcpu_id, vcpu_id);
+
+				/* put vcpu to INIT state for wait SIPI */
+				pause_vcpu(target_vcpu, VCPU_PAUSED);
+				reset_vcpu(target_vcpu);
+				/* new cpu model only need one SIPI to kick
+				 * AP run, the second SIPI will be ignored
+				 * as it move out of wait-for-SIPI state.
+				 */
+				target_vcpu->arch_vcpu.nr_sipi = 1U;
+			}
 		} else if (mode == APIC_DELMODE_STARTUP) {
 			/* Ignore SIPIs in any state other than wait-for-SIPI */
 			if ((target_vcpu->state != VCPU_INIT) ||
-- 2.17.0
[PATCH 09/14] hv: apicv: explicit log for SMI IPI unsupported
Yu Wang
ACRN currently does not support SMM or SMI. Print an explicit warning
for it.

Signed-off-by: Yu Wang <yu1.wang@...>
---
 hypervisor/arch/x86/guest/vlapic.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index f244311..c05a834 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1178,6 +1178,8 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
 				target_vcpu->vcpu_id, target_vcpu->vm->vm_id);
 
 			schedule_vcpu(target_vcpu);
+		} else if (mode == APIC_DELMODE_SMI) {
+			pr_info("vmx vapic: SMI IPI do not support\n");
 		} else {
 			pr_err("Unhandled icrlo write with mode %u\n", mode);
 		}
-- 2.17.0
[PATCH 08/14] hv: apicv: remove APIC_OFFSET_SELF_IPI(0x3F0) register
Yu Wang
Per SDM Vol3 Table 10-1, Local APIC Register Address Map, offset 0x3F0 is
reserved. Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/guest/vlapic.c | 9 --------- hypervisor/arch/x86/guest/vlapic_priv.h | 1 - 2 files changed, 10 deletions(-) diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index ea1ed22..f244311 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -1415,12 +1415,6 @@ vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg, case APIC_OFFSET_TIMER_DCR: *data = lapic->dcr_timer; break; - case APIC_OFFSET_SELF_IPI: - /* - * XXX generate a GP fault if vlapic is in x2apic mode - */ - *data = 0UL; - break; case APIC_OFFSET_RRR: default: *data = 0UL; @@ -1510,9 +1504,6 @@ vlapic_write(struct acrn_vlapic *vlapic, uint32_t offset, vlapic_esr_write_handler(vlapic); break; - case APIC_OFFSET_SELF_IPI: - break; - case APIC_OFFSET_VER: case APIC_OFFSET_APR: case APIC_OFFSET_PPR: diff --git a/hypervisor/arch/x86/guest/vlapic_priv.h b/hypervisor/arch/x86/guest/vlapic_priv.h index 0e0be09..1388c4a 100644 --- a/hypervisor/arch/x86/guest/vlapic_priv.h +++ b/hypervisor/arch/x86/guest/vlapic_priv.h @@ -80,7 +80,6 @@ #define APIC_OFFSET_TIMER_ICR 0x380U /* Timer's Initial Count */ #define APIC_OFFSET_TIMER_CCR 0x390U /* Timer's Current Count */ #define APIC_OFFSET_TIMER_DCR 0x3E0U /* Timer's Divide Configuration */ -#define APIC_OFFSET_SELF_IPI 0x3F0U /* Self IPI register */ /* * 16 priority levels with at most one vector injected per level. -- 2.17.0
[PATCH 02/14] hv: virq: disable interrupt-window exiting in vmexit handler
Yu Wang
In the interrupt-window exiting handler, disable the exiting directly even when there are
pending interrupts. The later acrn_handle_pending_request will re-evaluate and re-enable it if needed.

Signed-off-by: Yu Wang <yu1.wang@...>
---
 hypervisor/arch/x86/virq.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c
index d006a35..54baa7f 100644
--- a/hypervisor/arch/x86/virq.c
+++ b/hypervisor/arch/x86/virq.c
@@ -339,19 +339,13 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
 	if (vcpu == NULL)
 		return -1;
 
-	if (vcpu_pending_request(vcpu)) {
-		/* Do nothing
-		 * acrn_handle_pending_request will continue for this vcpu
-		 */
-	} else {
-		/* No interrupts to inject.
-		 * Disable the interrupt window exiting
-		 */
-		vcpu->arch_vcpu.irq_window_enabled = 0U;
-		value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-		value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
-		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
-	}
+	/* Disable interrupt-window exiting first.
+	 * acrn_handle_pending_request will continue handle for this vcpu
+	 */
+	vcpu->arch_vcpu.irq_window_enabled = 0U;
+	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
+	value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
+	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
 
 	vcpu_retain_rip(vcpu);
 	return 0;
-- 2.17.0
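
Both this handler and acrn_handle_pending_request toggle the same control bit, so the operation is always a read-modify-write of the primary processor-based execution controls. A small helper sketch showing both directions; vmx_toggle_irq_window is a hypothetical name used for illustration, not an ACRN function:

    static void vmx_toggle_irq_window(bool enable)
    {
        uint32_t value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);

        if (enable) {
            value32 |= VMX_PROCBASED_CTLS_IRQ_WIN;  /* request the VM exit */
        } else {
            value32 &= ~VMX_PROCBASED_CTLS_IRQ_WIN; /* stop requesting it */
        }
        exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
    }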
[PATCH 07/14] hv: apicv: remove x2apic related code
Yu Wang
Currently, ACRN does not expose the x2apic capability through cpuid.
And x2apic related code in vlapic.c has no real functionality. This patch clear related code. Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/guest/vlapic.c | 65 ++++-------------------------- 1 file changed, 8 insertions(+), 57 deletions(-) diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index c331d9d..ea1ed22 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -1303,24 +1303,13 @@ vlapic_svr_write_handler(struct acrn_vlapic *vlapic) } static int -vlapic_read(struct acrn_vlapic *vlapic, int mmio_access, uint32_t offset_arg, +vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg, uint64_t *data) { struct lapic_regs *lapic = vlapic->apic_page; uint32_t i; uint32_t offset = offset_arg; - if (mmio_access == 0) { - /* - * XXX Generate GP fault for MSR accesses in xAPIC mode - */ - dev_dbg(ACRN_DBG_LAPIC, - "x2APIC MSR read from offset %#x in xAPIC mode", - offset); - *data = 0UL; - goto done; - } - if (offset > sizeof(*lapic)) { *data = 0UL; goto done; @@ -1444,7 +1433,7 @@ done: } static int -vlapic_write(struct acrn_vlapic *vlapic, int mmio_access, uint32_t offset, +vlapic_write(struct acrn_vlapic *vlapic, uint32_t offset, uint64_t data) { struct lapic_regs *lapic = vlapic->apic_page; @@ -1462,16 +1451,6 @@ vlapic_write(struct acrn_vlapic *vlapic, int mmio_access, uint32_t offset, return 0; } - /* - * XXX Generate GP fault for MSR accesses in xAPIC mode - */ - if (mmio_access == 0) { - dev_dbg(ACRN_DBG_LAPIC, - "x2APIC MSR write of %#lx to offset %#x in xAPIC mode", - data, offset); - return 0; - } - retval = 0; switch (offset) { case APIC_OFFSET_ID: @@ -1903,32 +1882,6 @@ vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg) return 0; } -static bool -is_x2apic_msr(uint32_t msr) -{ - if (msr >= 0x800U && msr <= 0xBFFU) { - return true; - } else { - return false; - } -} - -static uint32_t -x2apic_msr_to_regoff(uint32_t msr) -{ - return (msr - 0x800U) << 4U; -} - -bool -is_vlapic_msr(uint32_t msr) -{ - if (is_x2apic_msr(msr) || (msr == MSR_IA32_APIC_BASE)) { - return true; - } else { - return false; - } -} - /* interrupt context */ static int vlapic_timer_expired(void *data) { @@ -1955,7 +1908,6 @@ int vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval) { int error = 0; - uint32_t offset; struct acrn_vlapic *vlapic; dev_dbg(ACRN_DBG_LAPIC, "cpu[%hu] rdmsr: %x", vcpu->vcpu_id, msr); @@ -1971,8 +1923,8 @@ vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval) break; default: - offset = x2apic_msr_to_regoff(msr); - error = vlapic_read(vlapic, 0, offset, rval); + dev_dbg(ACRN_DBG_LAPIC, + "Invalid vmx vapic msr 0x%x access\n", msr); break; } @@ -1983,7 +1935,6 @@ int vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval) { int error = 0; - uint32_t offset; struct acrn_vlapic *vlapic; vlapic = vcpu->arch_vcpu.vlapic; @@ -1998,8 +1949,8 @@ vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval) break; default: - offset = x2apic_msr_to_regoff(msr); - error = vlapic_write(vlapic, 0, offset, wval); + dev_dbg(ACRN_DBG_LAPIC, + "Invalid vmx vapic msr 0x%x access\n", msr); break; } @@ -2027,7 +1978,7 @@ vlapic_write_mmio_reg(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, } vlapic = vcpu->arch_vcpu.vlapic; - error = vlapic_write(vlapic, 1, off, wval); + error = vlapic_write(vlapic, off, wval); return error; } @@ -2052,7 +2003,7 @@ vlapic_read_mmio_reg(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, } vlapic = vcpu->arch_vcpu.vlapic; - error = vlapic_read(vlapic, 1, off, 
rval); + error = vlapic_read(vlapic, off, rval); return error; } -- 2.17.0
[PATCH 06/14] hv: apicv: local APIC ID related clean up
Yu Wang
From SDM Vol3 10.4.6:
Some processors permit software to modify the APIC ID. However, the ability of software to modify the APIC ID is processor model specific. Because of this, operating system software should avoid writing to the local APIC ID register. So to permit change APIC ID is not one *must* feature. Keep it simple, we are also reject to modify it. This patch does code cleaning up for LAPIC ID related emulation. Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/guest/vlapic.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index 35be79c..c331d9d 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -212,19 +212,6 @@ vlapic_ldr_write_handler(struct acrn_vlapic *vlapic) dev_dbg(ACRN_DBG_LAPIC, "vlapic LDR set to %#x", lapic->ldr); } -static void -vlapic_id_write_handler(struct acrn_vlapic *vlapic) -{ - struct lapic_regs *lapic; - - /* - * We don't allow the ID register to be modified so reset it back to - * its default value. - */ - lapic = vlapic->apic_page; - lapic->id = vlapic_get_id(vlapic); -} - static inline uint32_t vlapic_timer_divisor_shift(uint32_t dcr) { @@ -1488,8 +1475,7 @@ vlapic_write(struct acrn_vlapic *vlapic, int mmio_access, uint32_t offset, retval = 0; switch (offset) { case APIC_OFFSET_ID: - lapic->id = data32; - vlapic_id_write_handler(vlapic); + /* Force APIC ID as readonly */ break; case APIC_OFFSET_TPR: vlapic_set_tpr(vlapic, data32 & 0xffU); @@ -2468,7 +2454,7 @@ int apic_write_vmexit_handler(struct vcpu *vcpu) switch (offset) { case APIC_OFFSET_ID: - vlapic_id_write_handler(vlapic); + /* Force APIC ID as readonly */ break; case APIC_OFFSET_EOI: vlapic_process_eoi(vlapic); -- 2.17.0
[PATCH 05/14] hv: apicv: improve the check conditions for apicv initial reset
Yu Wang
From SDM Vol3, 29.4.3.1:

If the "APIC-register virtualization" VM-execution control is 0 and the "virtual-interrupt delivery" VM-execution control is 1, a write access is virtualized if its page offset is 080H (task priority), 0B0H (end of interrupt), or 300H (interrupt command, low); otherwise, the access causes an APIC-access VM exit.

So enabling "virtual-interrupt delivery" does not mean that "APIC-register virtualization" must be enabled. Also, "posted-interrupt processing" is not yet enabled in ACRN; it should become one more check condition once it is enabled in the future.

Signed-off-by: Yu Wang <yu1.wang@...>
---
 hypervisor/arch/x86/vmx.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index f2778d6..249525c 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -1041,7 +1041,8 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 
 		exec_vmwrite64(VMX_VIRTUAL_APIC_PAGE_ADDR_FULL, value64);
 
-		if (is_vapic_intr_delivery_supported()) {
+		if (is_vapic_virt_reg_supported() &&
+			is_vapic_intr_delivery_supported()) {
 			/* Disable all EOI VMEXIT by default and
 			 * clear RVI and SVI.
 			 */
-- 2.17.0
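
In code, the quoted rule means virtual-interrupt delivery alone is not enough: programming the EOI-exit bitmap and clearing RVI/SVI is only meaningful when both controls are on. The corrected gate, as in the hunk above (a posted-interrupt check would join this condition once that feature is enabled):

    if (is_vapic_virt_reg_supported() &&
            is_vapic_intr_delivery_supported()) {
        /* full APIC-register virtualization is active, so it is
         * safe to disable all EOI VM exits and clear RVI/SVI
         */
    }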
[PATCH 04/14] hv: apicv: remove arch_vcpu->irq_window_enabled
Yu Wang
The arch_vcpu->irq_window_enabled flag is not really necessary; remove it.
Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/guest/vcpu.c | 1 - hypervisor/arch/x86/virq.c | 4 +--- hypervisor/include/arch/x86/guest/vcpu.h | 1 - 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index 9dc0241..c3cc6b5 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -415,7 +415,6 @@ void reset_vcpu(struct vcpu *vcpu) vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID; vcpu->arch_vcpu.cur_context = NORMAL_WORLD; - vcpu->arch_vcpu.irq_window_enabled = 0; vcpu->arch_vcpu.inject_event_pending = false; (void)memset(vcpu->arch_vcpu.vmcs, 0U, CPU_PAGE_SIZE); diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c index adc9d0c..5f0d928 100644 --- a/hypervisor/arch/x86/virq.c +++ b/hypervisor/arch/x86/virq.c @@ -321,7 +321,6 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu) /* Disable interrupt-window exiting first. * acrn_handle_pending_request will continue handle for this vcpu */ - vcpu->arch_vcpu.irq_window_enabled = 0U; value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS); value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN); exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32); @@ -482,8 +481,7 @@ INTR_WIN: } /* Enable interrupt window exiting if pending */ - if (intr_pending == 1U && arch_vcpu->irq_window_enabled == 0U) { - arch_vcpu->irq_window_enabled = 1U; + if (intr_pending == 1U) { tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS); tmp |= VMX_PROCBASED_CTLS_IRQ_WIN; exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp); diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h index 568ec66..871f252 100644 --- a/hypervisor/include/arch/x86/guest/vcpu.h +++ b/hypervisor/include/arch/x86/guest/vcpu.h @@ -181,7 +181,6 @@ struct vcpu_arch { } exception_info; uint8_t lapic_mask; - uint32_t irq_window_enabled; uint32_t nrexits; /* Auxiliary TSC value */ -- 2.17.0
[PATCH 03/14] hv: apicv: avoid trigger unnecessary IPI before VM ENTRY
Yu Wang
The "interrupt-window exiting" should be only enabled if current
pending ACRN_REQUEST_EVENT caused by injection fail, shouldn't enable if it caused by new coming ACRN_REQUEST_EVENT. These new coming events will be handled by the vmexit triggered by "external interrupt vmexit". For every new ACRN_REQUEST_EVENT, it was already triggered IPI. So needn't call vcpu_pending_request to trigger again. The vcpu_pending_request is useless now, remove it. Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/virq.c | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c index 54baa7f..adc9d0c 100644 --- a/hypervisor/arch/x86/virq.c +++ b/hypervisor/arch/x86/virq.c @@ -71,27 +71,6 @@ static bool is_guest_irq_enabled(struct vcpu *vcpu) return status; } -static bool vcpu_pending_request(struct vcpu *vcpu) -{ - struct acrn_vlapic *vlapic; - uint32_t vector = 0U; - int ret = 0; - - /* Query vLapic to get vector to inject */ - vlapic = vcpu->arch_vcpu.vlapic; - ret = vlapic_pending_intr(vlapic, &vector); - - /* we need to check and raise request if we have pending event - * in LAPIC IRR - */ - if (ret != 0) { - /* we have pending IRR */ - vcpu_make_request(vcpu, ACRN_REQUEST_EVENT); - } - - return vcpu->arch_vcpu.pending_req != 0UL; -} - void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid) { bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req); @@ -387,6 +366,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu) uint32_t intr_pending = 0U; uint32_t intr_info; uint32_t error_code; + uint32_t vector = 0U; struct vcpu_arch * arch_vcpu = &vcpu->arch_vcpu; uint64_t *pending_req_bits = &arch_vcpu->pending_req; @@ -496,7 +476,8 @@ INTR_WIN: * due to inject conditions are not satisfied. */ if ((intr_pending == 0U) && - (vcpu_pending_request(vcpu) != 0U)) { + (vlapic_pending_intr(vcpu->arch_vcpu.vlapic, &vector) != 0) && + (is_guest_irq_enabled(vcpu) == 0U)) { intr_pending = 1U; } -- 2.17.0
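
The replacement check enables the interrupt window only on a genuine injection failure: the vLAPIC holds a pending vector but the guest cannot take it yet. Condensed from the hunk above (vector is a scratch output here):

    uint32_t vector = 0U;

    /* window needed only if a vector is pending AND guest IRQs are
     * currently disabled, i.e. injection would fail right now
     */
    if ((intr_pending == 0U) &&
        (vlapic_pending_intr(vcpu->arch_vcpu.vlapic, &vector) != 0) &&
        (is_guest_irq_enabled(vcpu) == 0U)) {
        intr_pending = 1U;
    }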
[PATCH 01/14] hv: apicv: correct the interrupt-window exiting usage
Yu Wang
External interrupt events can be injected only if RFLAGS.IF = 1 and there is no
blocking by both STI and MOV SS. If met this scenario, we need to enable "interrupt-window exiting" for injection in next VMEXIT. But if the "virtual-interrupt delivery" is enabled for vmx apicv, then need avoid to enable interrupt-window exiting. From SDM Vol3, 29.2.1, the evaluation of pending virtual interrupts only be trigger if "interrupt-window exiting" is 0. Signed-off-by: Yu Wang <yu1.wang@...> --- hypervisor/arch/x86/virq.c | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c index e9c46f7..d006a35 100644 --- a/hypervisor/arch/x86/virq.c +++ b/hypervisor/arch/x86/virq.c @@ -390,7 +390,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu) { int ret = 0; uint32_t tmp; - bool intr_pending = false; + uint32_t intr_pending = 0U; uint32_t intr_info; uint32_t error_code; struct vcpu_arch * arch_vcpu = &vcpu->arch_vcpu; @@ -477,11 +477,37 @@ int acrn_handle_pending_request(struct vcpu *vcpu) goto INTR_WIN; INTR_WIN: - /* check if we have new interrupt pending for next VMExit */ - intr_pending = vcpu_pending_request(vcpu); + /* The pending ExtInt may hasn't inject due to blocking by + * RFLAGS.IF = 0 or STI or MOV SS. So need to enable interrupt- + * window exiting for injection in next VMExit. + */ + if (bitmap_test(ffs64(ACRN_REQUEST_EXTINT), + pending_req_bits) == 1U) { + intr_pending = 1U; + } + + /* If the "virtual-interrupt delivery" is enabled for vmx apicv, + * then need avoid to enable interrupt-window exiting. + * + * From SDM Vol3, 29.2.1, the evaluation of pending virtual + * interrupts only be trigger if "interrupt-window exiting" is 0. + */ + if ((intr_pending == 0U) && + (is_vapic_intr_delivery_supported() == 1U)) { + return ret; + } + + /* For "virtual-interrupt delivery" disabled case, enable + * interrupt-window exiting if this time injection failed + * due to inject conditions are not satisfied. + */ + if ((intr_pending == 0U) && + (vcpu_pending_request(vcpu) != 0U)) { + intr_pending = 1U; + } /* Enable interrupt window exiting if pending */ - if (intr_pending && arch_vcpu->irq_window_enabled == 0U) { + if (intr_pending == 1U && arch_vcpu->irq_window_enabled == 0U) { arch_vcpu->irq_window_enabled = 1U; tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS); tmp |= VMX_PROCBASED_CTLS_IRQ_WIN; -- 2.17.0
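
The resulting INTR_WIN logic makes two early decisions before the fallback check, sketched here from the hunks above: a blocked ExtInt always needs the window, and with virtual-interrupt delivery enabled nothing else does, since per 29.2.1 the CPU evaluates pending virtual interrupts only while interrupt-window exiting is 0:

    if (bitmap_test(ffs64(ACRN_REQUEST_EXTINT), pending_req_bits) == 1U) {
        intr_pending = 1U;   /* un-injected ExtInt: window required */
    }

    if ((intr_pending == 0U) &&
        (is_vapic_intr_delivery_supported() == 1U)) {
        return ret;          /* let the CPU evaluate RVI/SVI itself */
    }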
[PATCH 00/14] 2nd patch set for vioapic/vlapic/vpic reshuffle
Yu Wang
The 2nd patch set is mainly for the vmx vapic reshuffle. The lvt, nmi, and
split vmx_vapic are still WIP. -refine the interrupt-window related code. -fix several bugs. -remove x2apic, APIC ID register related code. -cleanup the APIC-access VM exit handler. -rename the apicv to vmx_vapic in vlapic.c. -rename the vapic to vmx_vapic in virq.c. Yu Wang (14): hv: apicv: correct the interrupt-window exiting usage hv: virq: disable interrupt-window exiting in vmexit handler hv: apicv: avoid trigger unnecessary IPI before VM ENTRY hv: apicv: remove arch_vcpu->irq_window_enabled hv: apicv: improve the check conditions for apicv initial reset hv: apicv: local APIC ID related clean up hv: apicv: remove x2apic related code hv: apicv: remove APIC_OFFSET_SELF_IPI(0x3F0) register hv: apicv: explicit log for SMI IPI unsupported hv: apicv: clean up the APIC-access VM exit handler hv: apicv: correct the INIT-IPI check condition hv: apicv: change apicv name to vmx_vapic hv: vmx_vapic: change the name of vapic to vmx_vapic hv: vmx_vapic: fix two build warnings hypervisor/arch/x86/cpu.c | 28 ++- hypervisor/arch/x86/guest/vcpu.c | 1 - hypervisor/arch/x86/guest/vlapic.c | 263 ++++++++------------- hypervisor/arch/x86/guest/vlapic_priv.h | 17 +- hypervisor/arch/x86/virq.c | 81 ++++--- hypervisor/arch/x86/vmx.c | 17 +- hypervisor/dm/vioapic.c | 2 +- hypervisor/include/arch/x86/cpu.h | 6 +- hypervisor/include/arch/x86/guest/vcpu.h | 1 - hypervisor/include/arch/x86/guest/vlapic.h | 8 +- 10 files changed, 177 insertions(+), 247 deletions(-) -- 2.17.0
ACRN Project Technical Community Meeting Minutes - 8/15
(Meeting Minutes: https://drive.google.com/drive/u/2/folders/111m6iU3PjSZebOOKzLagYPUWMvseGLeU)
ACRN Project TCM - 15th August 2018
Location
Attendees (Total 40, 8/15)
Agenda
1. Zhai, Edwin: ACPI Virtualization for ACRN
Download the foils from ACRN Presentation->ACRN_TCM->WW33’18.
Q: how is iasl called from the device model?
Q: do you link it with the acrn-dm binary, or do you call exec() to run the binary from /usr/bin?
When will this operation be performed? During SOS boot?
Q: What is your strategy for virtualizing ACPI global resources such as the ACPI Global Lock?
Q: what is the problem if you also pass the physical ACPI table to the UOS?
Q: It is a physical register access; what is the conflict in accessing registers or memory?
Q: The ACPI SCI interrupt will need to be virtualized for ACPI operations to occur in the guest. What issues/complications do you see virtualizing the SCI interrupt?
2. All: Community open discussion.
N/A
3. Next meeting agenda proposal:
Marketing/Events
Resources
Technical Mailing list: acrn-dev@...g
Best regards.
Hongbo
Tel: +86-21-6116 7445
MP: +86-1364 1793 689
Mail: hongbo.wang@...
[PATCH v2 7/7] hv: add gva check for the case gva is from instruction decode
Yin, Fengwei <fengwei.yin@...>
For instructions other than MOVS and STOS, the gva comes
from instruction decoding. Apply the gva check for this type of gva and inject exception to guest accordingly during instruction decoding phase. Signed-off-by: Yin Fengwei <fengwei.yin@...> --- hypervisor/arch/x86/guest/instr_emul.c | 87 ++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c index c3793d1a..85e7408e 100644 --- a/hypervisor/arch/x86/guest/instr_emul.c +++ b/hypervisor/arch/x86/guest/instr_emul.c @@ -2150,6 +2150,91 @@ static int instr_check_di_si(struct vcpu *vcpu, } } +static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt, + enum vm_cpu_mode cpu_mode) +{ + int ret = 0; + uint64_t base, segbase, idx, gva, gpa; + uint32_t err_code; + enum cpu_reg_name seg; + struct instr_emul_vie *vie = &emul_ctxt->vie; + + base = 0UL; + if (vie->base_register != CPU_REG_LAST) { + base = vm_get_register(vcpu, vie->base_register); + + /* RIP relative addressing starts from the + * following instruction + */ + if (vie->base_register == CPU_REG_RIP) + base += vie->num_processed; + + } + + idx = 0UL; + if (vie->index_register != CPU_REG_LAST) { + idx = vm_get_register(vcpu, vie->index_register); + } + + /* "Specifying a Segment Selector" of SDM Vol1 3.7.4 + * + * In legacy IA-32 mode, when ESP or EBP register is used as + * base, the SS segment is default segment. + * + * All data references, except when relative to stack or + * string destination, DS is default segment. + * + * segment override could overwrite the default segment + * + * 64bit mode, segmentation is generally disabled. The + * exception are FS and GS. + */ + if (vie->seg_override != 0U) { + seg = vie->segment_register; + } else if ((vie->base_register == CPU_REG_RSP) || + (vie->base_register == CPU_REG_RBP)) { + seg = CPU_REG_SS; + } else { + seg = CPU_REG_DS; + } + + if ((cpu_mode == CPU_MODE_64BIT) && (seg != CPU_REG_FS) && + (seg != CPU_REG_GS)) { + segbase = 0UL; + } else { + struct seg_desc desc; + + vm_get_seg_desc(seg, &desc); + + segbase = desc.base; + } + + gva = segbase + base + vie->scale * idx + vie->displacement; + + if (vie_canonical_check(cpu_mode, gva) != 0) { + if (seg == CPU_REG_SS) { + vcpu_inject_ss(vcpu); + } else { + vcpu_inject_gp(vcpu, 0U); + } + return -EFAULT; + } + + err_code = (vcpu->req.reqs.mmio.direction == REQUEST_WRITE) ? + PAGE_FAULT_WR_FLAG : 0U; + + ret = gva2gpa(vcpu, gva, &gpa, &err_code); + if (ret < 0) { + if (ret == -EFAULT) { + vcpu_inject_pf(vcpu, gva, + err_code); + } + return ret; + } + + return 0; +} + int decode_instruction(struct vcpu *vcpu) { struct instr_emul_ctxt *emul_ctxt; @@ -2196,6 +2281,8 @@ int decode_instruction(struct vcpu *vcpu) retval = instr_check_di_si(vcpu, emul_ctxt); if (retval < 0) return retval; + } else { + instr_check_gva(vcpu, emul_ctxt, cpu_mode); } return emul_ctxt->vie.opsize; -- 2.17.0
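
The decode-time check recomputes the guest linear address exactly as the emulator will later form it: segment base + base register (advanced past the instruction for RIP-relative bases) + scale * index + displacement. The canonical test then requires bits 63:47 to be a sign extension of bit 47. A standalone sketch of that test, assuming the 48-bit linear-address width this code targets:

    static int canonical_check_64(uint64_t gla)
    {
        uint64_t mask = ~((1UL << 47) - 1UL);  /* bits 63:47 */
        uint64_t high = gla & mask;

        /* canonical iff bits 63:47 are all zeros or all ones */
        return ((high == 0UL) || (high == mask)) ? 0 : -1;
    }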
[PATCH v2 6/7] hv: add failure case check for MOVS/STO
Yin, Fengwei <fengwei.yin@...>
The MOVS and STOS instructions get the operand linear address from DI
and SI. This patch adds line address check after instruction decoding is done. Inject exception accordingly if check fails. We add check during instruction decoding phase. So we could assume the following instruction emulation will always success. Signed-off-by: Yin Fengwei <fengwei.yin@...> --- hypervisor/arch/x86/guest/instr_emul.c | 52 ++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c index 1047a3c8..c3793d1a 100644 --- a/hypervisor/arch/x86/guest/instr_emul.c +++ b/hypervisor/arch/x86/guest/instr_emul.c @@ -55,7 +55,7 @@ #define VIE_OP_F_IMM8 (1U << 1) /* 8-bit immediate operand */ #define VIE_OP_F_MOFFSET (1U << 2) /* 16/32/64-bit immediate moffset */ #define VIE_OP_F_NO_MODRM (1U << 3) -#define VIE_OP_F_NO_GLA_VERIFICATION (1U << 4) +#define VIE_OP_F_GVA_IN_DI_SI (1U << 4) /* GVA from (R|E)SI/DI */ static const struct instr_emul_vie_op two_byte_opcodes[256] = { [0xB6] = { @@ -108,19 +108,19 @@ static const struct instr_emul_vie_op one_byte_opcodes[256] = { }, [0xA4] = { .op_type = VIE_OP_TYPE_MOVS, - .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION + .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_GVA_IN_DI_SI }, [0xA5] = { .op_type = VIE_OP_TYPE_MOVS, - .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION + .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_GVA_IN_DI_SI }, [0xAA] = { .op_type = VIE_OP_TYPE_STOS, - .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION + .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_GVA_IN_DI_SI }, [0xAB] = { .op_type = VIE_OP_TYPE_STOS, - .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION + .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_GVA_IN_DI_SI }, [0xC6] = { /* XXX Group 11 extended opcode - not just MOV */ @@ -953,7 +953,7 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie) { uint64_t dstaddr, srcaddr; uint64_t rcx, rdi, rsi, rflags; - int error, fault, repeat; + int error, repeat; uint8_t opsize; enum cpu_reg_name seg; @@ -1533,7 +1533,6 @@ static int emulate_bittest(struct vcpu *vcpu, struct instr_emul_vie *vie) static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt) { - struct vm_guest_paging *paging = &ctxt->paging; struct instr_emul_vie *vie = &ctxt->vie; struct vcpu *vcpu = ctxt->vcpu; int error; @@ -2123,6 +2122,34 @@ static int local_decode_instruction(enum vm_cpu_mode cpu_mode, return 0; } +/* for instruction MOVS/STO, check the gva gotten from DI/SI. */ +static int instr_check_di_si(struct vcpu *vcpu, + struct instr_emul_ctxt *emul_ctxt) +{ + int ret; + struct instr_emul_vie *vie = &emul_ctxt->vie; + uint64_t gva; + enum cpu_reg_name seg; + + ret = get_gva_di_si_check(vcpu, vie->addrsize, PROT_WRITE, + CPU_REG_ES, CPU_REG_RDI, &gva); + + if (ret < 0) { + return -EFAULT; + } + + if (vie->op.op_type == VIE_OP_TYPE_MOVS) { + seg = (vie->seg_override != 0U) ? + (vie->segment_register) : CPU_REG_DS; + ret = get_gva_di_si_check(vcpu, vie->addrsize, PROT_READ, + seg, CPU_REG_RSI, &gva); + + if (ret < 0) { + return -EFAULT; + } + } +} + int decode_instruction(struct vcpu *vcpu) { struct instr_emul_ctxt *emul_ctxt; @@ -2160,6 +2187,17 @@ int decode_instruction(struct vcpu *vcpu) return -EFAULT; } + /* + * We do operand check in instruction decode phase and + * inject exception accordingly. In late instruction + * emulation, it will always sucess. 
+ */ + if (emul_ctxt->vie.op.op_flags & VIE_OP_F_GVA_IN_DI_SI) { + retval = instr_check_di_si(vcpu, emul_ctxt); + if (retval < 0) + return retval; + } + return emul_ctxt->vie.opsize; } -- 2.17.0
[PATCH v2 5/7] hv: add new function to get gva for MOVS/STO instruction
Yin, Fengwei <fengwei.yin@...>
Drop the get_gla function and add
- get_gva_di_si_nocheck get gva from ES:DI and DS(other segment):SI without check faulure case - get_gva_di_si_check get gva from ES:DI and DS(other segment):SI with failure case checked TODO: - Save dst_gpa and src_gpa for instruction emulation phase use. Signed-off-by: Yin Fengwei <fengwei.yin@...> --- hypervisor/arch/x86/guest/instr_emul.c | 116 ++++++++++++++++--------- 1 file changed, 76 insertions(+), 40 deletions(-) diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c index 590ecc03..1047a3c8 100644 --- a/hypervisor/arch/x86/guest/instr_emul.c +++ b/hypervisor/arch/x86/guest/instr_emul.c @@ -454,7 +454,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg, uint64_t firstoff, segbase; uint64_t offset = offset_arg; uint8_t glasize; - uint32_t type; firstoff = offset; glasize = (cpu_mode == CPU_MODE_64BIT) ? 8U: 4U; @@ -860,52 +859,97 @@ static int emulate_movx(struct vcpu *vcpu, struct instr_emul_vie *vie) return error; } +/** + * @pre only called by instruction emulation and check was done during + * instruction decode + * + * @remark This function can only be called in instruction emulation and + * suppose always success because the check was done during instruction + * decode. + * + * It's only used by MOVS/STO + */ +static void get_gva_di_si_nocheck(struct vcpu *vcpu, uint8_t addrsize, + enum cpu_reg_name seg, enum cpu_reg_name gpr, uint64_t *gva) +{ + uint64_t val; + struct seg_desc desc; + enum vm_cpu_mode cpu_mode; + + val = vm_get_register(vcpu, gpr); + vm_get_seg_desc(seg, &desc); + cpu_mode = get_vcpu_mode(vcpu); + + (void)vie_calculate_gla(cpu_mode, seg, &desc, val, addrsize, gva); + + return; +} + /* - * Helper function to calculate and validate a linear address. + * @pre only called during instruction decode phase + * + * @remark This function get gva from ES:DI and DS(other segment):SI. And + * do check the failure condition and inject exception to guest accordingly. 
+ * + * It's only used by MOVS/STO */ -static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie, - struct vm_guest_paging *paging, - uint8_t opsize, uint8_t addrsize, uint32_t prot, enum cpu_reg_name seg, - enum cpu_reg_name gpr, uint64_t *gla, int *fault) +static int get_gva_di_si_check(struct vcpu *vcpu, uint8_t addrsize, + uint32_t prot, enum cpu_reg_name seg, enum cpu_reg_name gpr, + uint64_t *gva) { int ret; + uint32_t err_code; struct seg_desc desc; - uint64_t cr0, val, rflags; + enum vm_cpu_mode cpu_mode; + uint64_t val, gpa; - cr0 = vm_get_register(vcpu, CPU_REG_CR0); - rflags = vm_get_register(vcpu, CPU_REG_RFLAGS); val = vm_get_register(vcpu, gpr); vm_get_seg_desc(seg, &desc); + cpu_mode = get_vcpu_mode(vcpu); - if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, - addrsize, gla) != 0) { - if (seg == CPU_REG_SS) { - pr_err("TODO: inject ss exception"); - } else { - pr_err("TODO: inject gp exception"); + if (!is_desc_valid(&desc, prot)) { + goto exception_inject; + } + + if (cpu_mode == CPU_MODE_64BIT) { + if ((addrsize != 4U) && (addrsize != 8U)) { + goto exception_inject; + } + } else { + if ((addrsize != 2U) && (addrsize != 4U)) { + goto exception_inject; } - goto guest_fault; } - if (vie_canonical_check(paging->cpu_mode, *gla) != 0) { - if (seg == CPU_REG_SS) { - pr_err("TODO: inject ss exception"); - } else { - pr_err("TODO: inject gp exception"); + if (vie_calculate_gla(cpu_mode, seg, &desc, val, addrsize, gva) != 0) { + goto exception_inject; + } + + if (vie_canonical_check(cpu_mode, *gva) != 0) { + goto exception_inject; + } + + err_code = (prot == PROT_WRITE) ? PAGE_FAULT_WR_FLAG : 0U; + ret = gva2gpa(vcpu, (uint64_t)gva, &gpa, &err_code); + if (ret < 0) { + if (ret == -EFAULT) { + vcpu_inject_pf(vcpu, (uint64_t)gva, err_code); } - goto guest_fault; + return ret; } - *fault = 0; return 0; -guest_fault: - *fault = 1; - return ret; +exception_inject: + if (seg == CPU_REG_SS) { + vcpu_inject_ss(vcpu); + } else { + vcpu_inject_gp(vcpu, 0U); + } + return -EFAULT; } -static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie, - struct vm_guest_paging *paging) +static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie) { uint64_t dstaddr, srcaddr; uint64_t rcx, rdi, rsi, rflags; @@ -939,18 +983,10 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie, } seg = (vie->seg_override != 0U) ? (vie->segment_register) : CPU_REG_DS; - error = get_gla(vcpu, vie, paging, opsize, vie->addrsize, - PROT_READ, seg, CPU_REG_RSI, &srcaddr, &fault); - if ((error != 0) || (fault != 0)) { - goto done; - } - error = get_gla(vcpu, vie, paging, opsize, vie->addrsize, - PROT_WRITE, CPU_REG_ES, CPU_REG_RDI, &dstaddr, - &fault); - if ((error != 0) || (fault != 0)) { - goto done; - } + get_gva_di_si_nocheck(vcpu, vie->addrsize, seg, CPU_REG_RSI, &srcaddr); + get_gva_di_si_nocheck(vcpu, vie->addrsize, CPU_REG_ES, CPU_REG_RDI, + &dstaddr); (void)memcpy_s((void *)dstaddr, 16U, (void *)srcaddr, opsize); @@ -1520,7 +1556,7 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt) error = emulate_movx(vcpu, vie); break; case VIE_OP_TYPE_MOVS: - error = emulate_movs(vcpu, vie, paging); + error = emulate_movs(vcpu, vie); break; case VIE_OP_TYPE_STOS: error = emulate_stos(vcpu, vie); -- 2.17.0
[PATCH v2 4/7] hv: move check out of vie_calculate_gla
Yin, Fengwei <fengwei.yin@...>
We will do the check only during the instruction decode phase.
vie_calculate_gla will be called both in instruction emulation phase, we move the check out of vie_calculate_gla. Signed-off-by: Yin Fengwei <fengwei.yin@...> Acked-by: Anthony Xu <anthony.xu@...> --- hypervisor/arch/x86/guest/instr_emul.c | 82 ++++++++++++-------------- 1 file changed, 37 insertions(+), 45 deletions(-) diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c index 26c29587..590ecc03 100644 --- a/hypervisor/arch/x86/guest/instr_emul.c +++ b/hypervisor/arch/x86/guest/instr_emul.c @@ -405,6 +405,40 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla) } } +static bool is_desc_valid(struct seg_desc *desc, uint32_t prot) +{ + uint32_t type; + + /* The descriptor type must indicate a code/data segment. */ + type = SEG_DESC_TYPE(desc->access); + if (type < 16 || type > 31) { + return false; + } + + if ((prot & PROT_READ) != 0U) { + /* #GP on a read access to a exec-only code segment */ + if ((type & 0xAU) == 0x8U) { + return false; + } + } + + if ((prot & PROT_WRITE) != 0U) { + /* + * #GP on a write access to a code segment or a + * read-only data segment. + */ + if ((type & 0x8U) != 0U) { /* code segment */ + return false; + } + + if ((type & 0xAU) == 0U) { /* read-only data seg */ + return false; + } + } + + return true; +} + /* *@pre seg must be segment register index *@pre length_arg must be 1, 2, 4 or 8 @@ -415,7 +449,7 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla) */ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg, struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize, - uint32_t prot, uint64_t *gla) + uint64_t *gla) { uint64_t firstoff, segbase; uint64_t offset = offset_arg; @@ -423,49 +457,7 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg, uint32_t type; firstoff = offset; - if (cpu_mode == CPU_MODE_64BIT) { - if (addrsize != 4U && addrsize != 8U) { - pr_dbg("%s: invalid addr size %d for cpu mode %d", - __func__, addrsize, cpu_mode); - return -1; - } - glasize = 8U; - } else { - if (addrsize != 2U && addrsize != 4U) { - pr_dbg("%s: invalid addr size %d for cpu mode %d", - __func__, addrsize, cpu_mode); - return -1; - } - glasize = 4U; - - /* The descriptor type must indicate a code/data segment. */ - type = SEG_DESC_TYPE(desc->access); - if (type < 16 || type > 31) { - /*TODO: Inject #GP */ - return -1; - } - - if ((prot & PROT_READ) != 0U) { - /* #GP on a read access to a exec-only code segment */ - if ((type & 0xAU) == 0x8U) { - return -1; - } - } - - if ((prot & PROT_WRITE) != 0U) { - /* - * #GP on a write access to a code segment or a - * read-only data segment. - */ - if ((type & 0x8U) != 0U) { /* code segment */ - return -1; - } - - if ((type & 0xAU) == 0U) { /* read-only data seg */ - return -1; - } - } - } + glasize = (cpu_mode == CPU_MODE_64BIT) ? 8U: 4U; /* * In 64-bit mode all segments except %fs and %gs have a segment @@ -886,7 +878,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie, vm_get_seg_desc(seg, &desc); if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, - addrsize, prot, gla) != 0) { + addrsize, gla) != 0) { if (seg == CPU_REG_SS) { pr_err("TODO: inject ss exception"); } else { -- 2.17.0
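
For reference, the masks in is_desc_valid decode the segment descriptor type field (SDM Vol3 3.4.5): values 16-31 are the code/data descriptors (S bit set), bit 3 separates code from data, and bit 1 is "readable" for code or "writable" for data. Annotated with the same constants as the hunk above:

    uint32_t type = SEG_DESC_TYPE(desc->access);

    /* type outside 16..31   : not a code/data segment        */
    /* (type & 0x8U) != 0U   : code segment                   */
    /* (type & 0xAU) == 0x8U : execute-only code, reads fault */
    /* (type & 0xAU) == 0x0U : read-only data, writes fault   */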
[PATCH v2 3/7] hv: remove unnecessary check for gva
Yin, Fengwei <fengwei.yin@...>
According to SDM Vol3 25.1.1:
With VMX enabled, following exception will be handled by guest without trigger VM exit: - faults based on privilege level - general protection due to relevent segment being unusable - general protection due to offset beyond limit of relevent segment - alignment check exception ACRN always assume VMX is enabled. So we don't need to these check in instruction emulation. But we need to do page fault related check. Signed-off-by: Yin Fengwei <fengwei.yin@...> Acked-by: Anthony Xu <anthony.xu@...> --- hypervisor/arch/x86/guest/instr_emul.c | 79 +++----------------------- 1 file changed, 7 insertions(+), 72 deletions(-) diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c index e479c912..26c29587 100644 --- a/hypervisor/arch/x86/guest/instr_emul.c +++ b/hypervisor/arch/x86/guest/instr_emul.c @@ -385,18 +385,6 @@ static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emu emul_ctxt->paging.paging_mode = get_vcpu_paging_mode(vcpu); } -static int vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0, - uint64_t rflags, uint64_t gla) -{ - pr_dbg("Checking alignment with cpl: %hhu, addrsize: %hhu", cpl, size); - - if (cpl < 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) { - return 0; - } - - return ((gla & (size - 1U)) != 0UL) ? 1 : 0; -} - static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla) { uint64_t mask; @@ -426,12 +414,11 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla) *return -1 - on failure */ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg, - struct seg_desc *desc, uint64_t offset_arg, uint8_t length_arg, - uint8_t addrsize, uint32_t prot, uint64_t *gla) + struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize, + uint32_t prot, uint64_t *gla) { - uint64_t firstoff, low_limit, high_limit, segbase; + uint64_t firstoff, segbase; uint64_t offset = offset_arg; - uint8_t length = length_arg; uint8_t glasize; uint32_t type; @@ -450,25 +437,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg, return -1; } glasize = 4U; - /* - * If the segment selector is loaded with a NULL selector - * then the descriptor is unusable and attempting to use - * it results in a #GP(0). - */ - if (SEG_DESC_UNUSABLE(desc->access)) { - return -1; - } - - /* - * The processor generates a #NP exception when a segment - * register is loaded with a selector that points to a - * descriptor that is not present. If this was the case then - * it would have been checked before the VM-exit. - */ - if (SEG_DESC_PRESENT(desc->access) != 0) { - /* TODO: Inject #NP */ - return -1; - } /* The descriptor type must indicate a code/data segment. */ type = SEG_DESC_TYPE(desc->access); @@ -497,30 +465,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg, return -1; } } - - /* - * 'desc->limit' is fully expanded taking granularity into - * account. - */ - if ((type & 0xCU) == 0x4U) { - /* expand-down data segment */ - low_limit = desc->limit + 1U; - high_limit = SEG_DESC_DEF32(desc->access) ? 
- 0xffffffffU : 0xffffU; - } else { - /* code segment or expand-up data segment */ - low_limit = 0U; - high_limit = desc->limit; - } - - while (length > 0U) { - offset &= size2mask[addrsize]; - if (offset < low_limit || offset > high_limit) { - return -1; - } - offset++; - length--; - } } /* @@ -932,6 +876,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie, uint8_t opsize, uint8_t addrsize, uint32_t prot, enum cpu_reg_name seg, enum cpu_reg_name gpr, uint64_t *gla, int *fault) { + int ret; struct seg_desc desc; uint64_t cr0, val, rflags; @@ -940,13 +885,11 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie, val = vm_get_register(vcpu, gpr); vm_get_seg_desc(seg, &desc); - if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize, - addrsize, prot, gla) != 0) { + if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, + addrsize, prot, gla) != 0) { if (seg == CPU_REG_SS) { - /*vm_inject_ss(vcpu, 0);*/ pr_err("TODO: inject ss exception"); } else { - /*vm_inject_gp(vcpu);*/ pr_err("TODO: inject gp exception"); } goto guest_fault; @@ -954,27 +897,19 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie, if (vie_canonical_check(paging->cpu_mode, *gla) != 0) { if (seg == CPU_REG_SS) { - /*vm_inject_ss(vcpu, 0);*/ pr_err("TODO: inject ss exception"); } else { - /*vm_inject_gp(vcpu);*/ pr_err("TODO: inject gp exception"); } goto guest_fault; } - if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla) != 0) { - /*vm_inject_ac(vcpu, 0);*/ - pr_err("TODO: inject ac exception"); - goto guest_fault; - } - *fault = 0; return 0; guest_fault: *fault = 1; - return 0; + return ret; } static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie, -- 2.17.0
[PATCH v2 2/7] hv: fix use without initialization build error
Yin, Fengwei <fengwei.yin@...>
Fix two use-without-initialization build errors in instr_emul.c.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
---
 hypervisor/arch/x86/guest/instr_emul.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index 389745c1..e479c912 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -310,7 +310,7 @@ static uint32_t get_vmcs_field(enum cpu_reg_name ident)
  */
 static uint64_t vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg)
 {
-	uint64_t reg_val;
+	uint64_t reg_val = 0;
 
 	if ((reg >= CPU_REG_GENERAL_FIRST) && (reg <= CPU_REG_GENERAL_LAST)) {
 		reg_val = vcpu_get_gpreg(vcpu, reg);
@@ -363,7 +363,7 @@ static void vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg,
  */
 static void vm_get_seg_desc(enum cpu_reg_name seg, struct seg_desc *desc)
 {
-	struct seg_desc tdesc;
+	struct seg_desc tdesc = {0UL, 0U, 0U};
 
 	/* tdesc->access != 0xffffffffU in this function */
 	encode_vmcs_seg_desc(seg, &tdesc);
-- 2.17.0
[PATCH v2 1/7] hv: extend the decode_modrm
Yin, Fengwei <fengwei.yin@...>
If rm shows there is no SIB byte following the rm field, we should get
base_register info from rm. --- hypervisor/arch/x86/guest/instr_emul.c | 43 +++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c index 521f0d36..389745c1 100644 --- a/hypervisor/arch/x86/guest/instr_emul.c +++ b/hypervisor/arch/x86/guest/instr_emul.c @@ -1636,6 +1636,11 @@ static int vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu) (void)memset(vie, 0U, sizeof(struct instr_emul_vie)); + /* init register fields in vie. */ + vie->base_register = CPU_REG_LAST; + vie->index_register = CPU_REG_LAST; + vie->segment_register = CPU_REG_LAST; + err_code = PAGE_FAULT_ID_FLAG; ret = copy_from_gva(vcpu, vie->inst, guest_rip_gva, inst_len, &err_code, &fault_addr); @@ -1859,6 +1864,42 @@ static int decode_modrm(struct instr_emul_vie *vie, enum vm_cpu_mode cpu_mode) vie->reg |= (vie->rex_r << 3); + /* SIB */ + if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB) { + goto done; + } + + vie->base_register = vie->rm; + + switch (vie->mod) { + case VIE_MOD_INDIRECT_DISP8: + vie->disp_bytes = 1U; + break; + case VIE_MOD_INDIRECT_DISP32: + vie->disp_bytes = 4U; + break; + case VIE_MOD_INDIRECT: + if (vie->rm == VIE_RM_DISP32) { + vie->disp_bytes = 4U; + /* + * Table 2-7. RIP-Relative Addressing + * + * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32 + * whereas in compatibility mode it just implies disp32. + */ + + if (cpu_mode == CPU_MODE_64BIT) { + vie->base_register = CPU_REG_RIP; + pr_err("VM exit with RIP as indirect access"); + } + else { + vie->base_register = CPU_REG_LAST; + } + } + break; + } + +done: vie_advance(vie); return 0; @@ -1935,7 +1976,7 @@ static int decode_sib(struct instr_emul_vie *vie) } /* 'scale' makes sense only in the context of an index register */ - if (vie->index_register <= CPU_REG_LAST) { + if (vie->index_register < CPU_REG_LAST) { vie->scale = 1U << vie->ss; } -- 2.17.0
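
For reference, the ModRM byte decomposes as mod (bits 7:6), reg (bits 5:3), and rm (bits 2:0); a SIB byte follows only when mod != 3 and rm == 4, and in 64-bit mode mod == 0 with rm == 5 selects [rip] + disp32 (SDM Table 2-7), which is why the patch switches base_register to CPU_REG_RIP in that case. A minimal field-extraction sketch; modrm_fields is a hypothetical helper, not the ACRN decoder:

    static void modrm_fields(uint8_t modrm, uint8_t *mod, uint8_t *reg, uint8_t *rm)
    {
        *mod = (modrm >> 6) & 0x3U;  /* addressing mode */
        *reg = (modrm >> 3) & 0x7U;  /* register or opcode extension */
        *rm  = modrm & 0x7U;         /* base selector */
    }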