Date   

[PATCH v2 1/3] hv:Replace vpic pointer with instance in structure vm

Mingqiang Chi
 

From: Mingqiang Chi <mingqiang.chi@...>

replace MACRO with inline function for vm_pic
changed vpic_init to void type
removed vpic_cleanup
move struct acrn_vpic/i8259_reg_state to vpic.h
move vpic in struct vm to struct arch_vm

Signed-off-by: Mingqiang Chi <mingqiang.chi@...>
---
hypervisor/arch/x86/guest/vm.c | 9 +----
hypervisor/arch/x86/virq.c | 2 +-
hypervisor/debug/vuart.c | 9 ++---
hypervisor/dm/vpic.c | 60 ++++++--------------------------
hypervisor/include/arch/x86/guest/vm.h | 3 +-
hypervisor/include/arch/x86/guest/vpic.h | 32 +++++++++++++++--
hypervisor/include/arch/x86/hv_arch.h | 2 +-
7 files changed, 48 insertions(+), 69 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 813126c..e13af1e 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -200,7 +200,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
/* Create virtual uart */
vm->vuart = vuart_init(vm);
}
- vm->vpic = vpic_init(vm);
+ vpic_init(vm);

#ifdef CONFIG_PARTITION_MODE
/* Create virtual uart */
@@ -239,10 +239,6 @@ err:
vioapic_cleanup(vm->arch_vm.virt_ioapic);
}

- if (vm->vpic != NULL) {
- vpic_cleanup(vm);
- }
-
if (vm->arch_vm.m2p != NULL) {
free(vm->arch_vm.m2p);
}
@@ -313,9 +309,6 @@ int shutdown_vm(struct vm *vm)
free_vm_id(vm);
#endif

- if (vm->vpic != NULL) {
- vpic_cleanup(vm);
- }

#ifdef CONFIG_PARTITION_MODE
vpci_cleanup(vm);
diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c
index e11bfe3..43702a0 100644
--- a/hypervisor/arch/x86/virq.c
+++ b/hypervisor/arch/x86/virq.c
@@ -156,7 +156,7 @@ static int vcpu_do_pending_extint(struct vcpu *vcpu)
/* check if there is valid interrupt from vPIC, if yes just inject it */
/* PIC only connect with primary CPU */
primary = get_primary_vcpu(vm);
- if ((vm->vpic != NULL) && vcpu == primary) {
+ if (vcpu == primary) {

vpic_pending_intr(vcpu->vm, &vector);
if (vector <= NR_MAX_VECTOR) {
diff --git a/hypervisor/debug/vuart.c b/hypervisor/debug/vuart.c
index 73b7500..37b272e 100644
--- a/hypervisor/debug/vuart.c
+++ b/hypervisor/debug/vuart.c
@@ -125,14 +125,11 @@ static void vuart_toggle_intr(struct vuart *vu)
intr_reason = vuart_intr_reason(vu);

if (intr_reason != IIR_NOPEND) {
- if (vu->vm->vpic != NULL) {
- vpic_assert_irq(vu->vm, COM1_IRQ);
- }
+ vpic_assert_irq(vu->vm, COM1_IRQ);

vioapic_assert_irq(vu->vm, COM1_IRQ);
- if (vu->vm->vpic != NULL) {
- vpic_deassert_irq(vu->vm, COM1_IRQ);
- }
+
+ vpic_deassert_irq(vu->vm, COM1_IRQ);

vioapic_deassert_irq(vu->vm, COM1_IRQ);
}
diff --git a/hypervisor/dm/vpic.c b/hypervisor/dm/vpic.c
index 260f1f6..9075cc4 100644
--- a/hypervisor/dm/vpic.c
+++ b/hypervisor/dm/vpic.c
@@ -35,8 +35,6 @@
/* TODO: add spinlock_locked support? */
/*#define VPIC_LOCKED(vpic) spinlock_locked(&((vpic)->lock))*/

-#define vm_pic(vm) (vm->vpic)
-
#define ACRN_DBG_PIC 6U

enum irqstate {
@@ -45,34 +43,6 @@ enum irqstate {
IRQSTATE_PULSE
};

-struct i8259_reg_state {
- bool ready;
- uint8_t icw_num;
- uint8_t rd_cmd_reg;
-
- bool aeoi;
- bool poll;
- bool rotate;
- bool sfn; /* special fully-nested mode */
-
- uint32_t irq_base;
- uint8_t request; /* Interrupt Request Register (IIR) */
- uint8_t service; /* Interrupt Service (ISR) */
- uint8_t mask; /* Interrupt Mask Register (IMR) */
- uint8_t smm; /* special mask mode */
-
- int acnt[8]; /* sum of pin asserts and deasserts */
- uint8_t lowprio; /* lowest priority irq */
-
- bool intr_raised;
- uint8_t elc;
-};
-
-struct acrn_vpic {
- struct vm *vm;
- spinlock_t lock;
- struct i8259_reg_state i8259[2];
-};

#define NR_VPIC_PINS_PER_CHIP 8U
#define NR_VPIC_PINS_TOTAL 16U
@@ -88,6 +58,12 @@ struct acrn_vpic {

static void vpic_set_pinstate(struct acrn_vpic *vpic, uint8_t pin, bool newstate);

+static inline struct acrn_vpic *
+vm_pic(struct vm *vm)
+{
+ return (struct acrn_vpic *)&(vm->arch_vm.vpic);
+}
+
static inline bool master_pic(struct acrn_vpic *vpic, struct i8259_reg_state *i8259)
{

@@ -921,27 +897,13 @@ static void vpic_register_io_handler(struct vm *vm)
&vpic_elc_io_read, &vpic_elc_io_write);
}

-void *vpic_init(struct vm *vm)
+void vpic_init(struct vm *vm)
{
- struct acrn_vpic *vpic;
-
+ struct acrn_vpic *vpic = vm_pic(vm);
vpic_register_io_handler(vm);
-
- vpic = calloc(1U, sizeof(struct acrn_vpic));
- ASSERT(vpic != NULL, "");
- vpic->vm = vm;
- vpic->i8259[0].mask = 0xffU;
- vpic->i8259[1].mask = 0xffU;
+ vm->arch_vm.vpic.vm = vm;
+ vm->arch_vm.vpic.i8259[0].mask = 0xffU;
+ vm->arch_vm.vpic.i8259[1].mask = 0xffU;

VPIC_LOCK_INIT(vpic);
-
- return vpic;
-}
-
-void vpic_cleanup(struct vm *vm)
-{
- if (vm->vpic != NULL) {
- free(vm->vpic);
- vm->vpic = NULL;
- }
}
diff --git a/hypervisor/include/arch/x86/guest/vm.h b/hypervisor/include/arch/x86/guest/vm.h
index b2355eb..f852122 100644
--- a/hypervisor/include/arch/x86/guest/vm.h
+++ b/hypervisor/include/arch/x86/guest/vm.h
@@ -99,6 +99,7 @@ struct vm_arch {
void *iobitmap[2];/* IO bitmap page array base address for this VM */
void *msr_bitmap; /* MSR bitmap page base address for this VM */
void *virt_ioapic; /* Virtual IOAPIC base address */
+ struct acrn_vpic vpic; /* Virtual PIC */
/**
* A link to the IO handler of this VM.
* We only register io handle to this link
@@ -125,7 +126,6 @@ struct vcpuid_entry {
uint32_t padding;
};

-struct acrn_vpic;
struct vm {
uint16_t vm_id; /* Virtual machine identifier */
struct vm_hw_info hw; /* Reference to this VM's HW information */
@@ -134,7 +134,6 @@ struct vm {
struct vm_arch arch_vm; /* Reference to this VM's arch information */
enum vm_state state; /* VM state */
void *vuart; /* Virtual UART */
- struct acrn_vpic *vpic; /* Virtual PIC */
enum vpic_wire_mode wire_mode;
struct iommu_domain *iommu; /* iommu domain of this VM */
struct list_head list; /* list of VM */
diff --git a/hypervisor/include/arch/x86/guest/vpic.h b/hypervisor/include/arch/x86/guest/vpic.h
index a44bf8d..0b84f8f 100644
--- a/hypervisor/include/arch/x86/guest/vpic.h
+++ b/hypervisor/include/arch/x86/guest/vpic.h
@@ -90,8 +90,36 @@ enum vpic_trigger {
LEVEL_TRIGGER
};

-void *vpic_init(struct vm *vm);
-void vpic_cleanup(struct vm *vm);
+struct i8259_reg_state {
+ bool ready;
+ uint8_t icw_num;
+ uint8_t rd_cmd_reg;
+
+ bool aeoi;
+ bool poll;
+ bool rotate;
+ bool sfn; /* special fully-nested mode */
+
+ uint32_t irq_base;
+ uint8_t request; /* Interrupt Request Register (IIR) */
+ uint8_t service; /* Interrupt Service (ISR) */
+ uint8_t mask; /* Interrupt Mask Register (IMR) */
+ uint8_t smm; /* special mask mode */
+
+ int acnt[8]; /* sum of pin asserts and deasserts */
+ uint8_t lowprio; /* lowest priority irq */
+
+ bool intr_raised;
+ uint8_t elc;
+};
+
+struct acrn_vpic {
+ struct vm *vm;
+ spinlock_t lock;
+ struct i8259_reg_state i8259[2];
+};
+
+void vpic_init(struct vm *vm);

void vpic_assert_irq(struct vm *vm, uint32_t irq);
void vpic_deassert_irq(struct vm *vm, uint32_t irq);
diff --git a/hypervisor/include/arch/x86/hv_arch.h b/hypervisor/include/arch/x86/hv_arch.h
index 21d2973..9ed9901 100644
--- a/hypervisor/include/arch/x86/hv_arch.h
+++ b/hypervisor/include/arch/x86/hv_arch.h
@@ -21,6 +21,7 @@
#include <trusty.h>
#include <guest_pm.h>
#include <host_pm.h>
+#include <vpic.h>
#include <vm.h>
#include <cpuid.h>
#include <mmu.h>
@@ -32,7 +33,6 @@
#include <assign.h>
#include <vtd.h>

-#include <vpic.h>
#include <vlapic.h>
#include <vioapic.h>
#include <guest.h>
--
2.7.4


[PATCH v2 0/3]Replace vpic/vioapic/vuart pointer with instance

Mingqiang Chi
 

From: Mingqiang Chi <mingqiang.chi@...>

v1-->v2:
1) rename struct virt_uart --> struct acrn_vuart
struct virt_ioapic --> struct acrn_vioapic
2) Replace MACRO with inline function for vm_pic/vm_vuart/vm_ioapic
3) &vm->arch_vm.xxx --> &(vm->arch_vm.xxx)

Mingqiang Chi (3):
hv:Replace vpic pointer with instance in structure vm
hv:Replace vuart pointer with instance in structure vm
hv:Replace vioapic pointer with instance in structure vm

hypervisor/arch/x86/assign.c | 3 +-
hypervisor/arch/x86/guest/vm.c | 30 +++++---------
hypervisor/arch/x86/virq.c | 2 +-
hypervisor/debug/console.c | 2 +-
hypervisor/debug/shell.c | 30 +++++---------
hypervisor/debug/vuart.c | 62 ++++++++++++++---------------
hypervisor/dm/vioapic.c | 58 ++++++++++-----------------
hypervisor/dm/vpic.c | 60 +++++-----------------------
hypervisor/include/arch/x86/guest/vioapic.h | 19 +++++++--
hypervisor/include/arch/x86/guest/vm.h | 7 ++--
hypervisor/include/arch/x86/guest/vpic.h | 32 ++++++++++++++-
hypervisor/include/arch/x86/hv_arch.h | 5 ++-
hypervisor/include/debug/vuart.h | 19 +++++----
hypervisor/include/hv_debug.h | 1 -
14 files changed, 143 insertions(+), 187 deletions(-)

--
2.7.4


[PATCH] HV: Refine APICv capabilities detection

Yonghua Huang
 

- by default,following APICv features MUST be
supported on ACRN:
- Use TPR shadow
- APIC-register virtualization
- remove mmio emulation code

Signed-off-by: Yonghua Huang <yonghua.huang@...>
---
hypervisor/arch/x86/cpu.c | 27 +++----
hypervisor/arch/x86/guest/vlapic.c | 117 +++--------------------------
hypervisor/arch/x86/vmx.c | 89 +++++++++-------------
hypervisor/include/arch/x86/cpu.h | 2 -
hypervisor/include/arch/x86/guest/vlapic.h | 14 +---
5 files changed, 57 insertions(+), 192 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 5b640ab..e141025 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -790,26 +790,27 @@ static void apicv_cap_detect(void)
uint8_t features;
uint64_t msr_val;

- features = 0U;
-
msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS_TPR_SHADOW)) {
- cpu_caps.apicv_features = 0U;
+ pr_fatal("APICv: No APIC TPR virtualization support.");
return;
}
- features |= VAPIC_FEATURE_TPR_SHADOW;

msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC)) {
- cpu_caps.apicv_features = features;
+ pr_fatal("APICv: No APIC-access virtualization support.");
return;
}
- features |= VAPIC_FEATURE_VIRT_ACCESS;

- if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS)) {
- features |= VAPIC_FEATURE_VIRT_REG;
+ if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS)) {
+ pr_fatal("APICv: No APIC-register virtualization support.");
+ return;
}

+ features = (VAPIC_FEATURE_TPR_SHADOW
+ | VAPIC_FEATURE_VIRT_ACCESS
+ | VAPIC_FEATURE_VIRT_REG);
+
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VX2APIC)) {
features |= VAPIC_FEATURE_VX2APIC_MODE;
}
@@ -823,7 +824,6 @@ static void apicv_cap_detect(void)
features |= VAPIC_FEATURE_POST_INTR;
}
}
-
cpu_caps.apicv_features = features;
}

@@ -838,20 +838,11 @@ bool is_ept_supported(void)
return (cpu_caps.ept_features != 0U);
}

-bool is_apicv_supported(void)
-{
- return ((cpu_caps.apicv_features & VAPIC_FEATURE_VIRT_ACCESS) != 0U);
-}
-
bool is_apicv_intr_delivery_supported(void)
{
return ((cpu_caps.apicv_features & VAPIC_FEATURE_INTR_DELIVERY) != 0U);
}

-bool is_apicv_virt_reg_supported(void)
-{
- return ((cpu_caps.apicv_features & VAPIC_FEATURE_VIRT_REG) != 0U);
-}

static void cpu_xsave_init(void)
{
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 3da87a4..d5867a9 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1950,81 +1950,6 @@ vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval)
return error;
}

-int
-vlapic_write_mmio_reg(struct vcpu *vcpu, uint64_t gpa, uint64_t wval,
- uint8_t size)
-{
- int error;
- uint32_t off;
- struct acrn_vlapic *vlapic;
-
- off = (uint32_t)(gpa - DEFAULT_APIC_BASE);
-
- /*
- * Memory mapped local apic accesses must be 4 bytes wide and
- * aligned on a 16-byte boundary.
- */
- if ((size != 4U) || ((off & 0xfU) != 0U)) {
- return -EINVAL;
- }
-
- vlapic = vcpu->arch_vcpu.vlapic;
- error = vlapic_write(vlapic, off, wval);
- return error;
-}
-
-int
-vlapic_read_mmio_reg(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
- __unused uint8_t size)
-{
- int error;
- uint32_t off;
- struct acrn_vlapic *vlapic;
-
- off = (uint32_t)(gpa - DEFAULT_APIC_BASE);
-
- /*
- * Memory mapped local apic accesses should be aligned on a
- * 16-byte boundary. They are also suggested to be 4 bytes
- * wide, alas not all OSes follow suggestions.
- */
- off &= ~0x3U;
- if ((off & 0xfU) != 0U) {
- return -EINVAL;
- }
-
- vlapic = vcpu->arch_vcpu.vlapic;
- error = vlapic_read(vlapic, off, rval);
- return error;
-}
-
-int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
- __unused void *handler_private_data)
-{
- struct mmio_request *mmio_req = &io_req->reqs.mmio;
- uint64_t gpa = mmio_req->address;
- int ret = 0;
-
- /* Note all RW to LAPIC are 32-Bit in size */
- ASSERT(mmio_req->size == 4UL, "All RW to LAPIC must be 32-bits in size");
-
- if (mmio_req->direction == REQUEST_READ) {
- ret = vlapic_read_mmio_reg(vcpu,
- gpa,
- &mmio_req->value,
- mmio_req->size);
- } else if (mmio_req->direction == REQUEST_WRITE) {
- ret = vlapic_write_mmio_reg(vcpu,
- gpa,
- mmio_req->value,
- mmio_req->size);
- } else {
- /* Can never happen due to the range of mmio_req->direction. */
- }
-
- return ret;
-}
-
int vlapic_create(struct vcpu *vcpu)
{
struct acrn_vlapic *vlapic = calloc(1U, sizeof(struct acrn_vlapic));
@@ -2032,34 +1957,21 @@ int vlapic_create(struct vcpu *vcpu)
ASSERT(vlapic != NULL, "vlapic allocate failed");
vlapic->vm = vcpu->vm;
vlapic->vcpu = vcpu;
- if (is_apicv_supported()) {
- if (is_vcpu_bsp(vcpu)) {
- uint64_t *pml4_page =
- (uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
- ept_mr_del(vcpu->vm, pml4_page,
- DEFAULT_APIC_BASE, CPU_PAGE_SIZE);
-
- ept_mr_add(vcpu->vm, pml4_page,
- vlapic_apicv_get_apic_access_addr(vcpu->vm),
- DEFAULT_APIC_BASE, CPU_PAGE_SIZE,
- EPT_WR | EPT_RD | EPT_UNCACHED);
- }
- } else {
- /*No APICv support*/
- if (register_mmio_emulation_handler(vcpu->vm,
- vlapic_mmio_access_handler,
- (uint64_t)DEFAULT_APIC_BASE,
- (uint64_t)DEFAULT_APIC_BASE +
- CPU_PAGE_SIZE,
- (void *) 0) != 0) {
- free(vlapic);
- return -1;
- }
+
+ if (is_vcpu_bsp(vcpu)) {
+ uint64_t *pml4_page =
+ (uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
+ ept_mr_del(vcpu->vm, pml4_page,
+ DEFAULT_APIC_BASE, CPU_PAGE_SIZE);
+
+ ept_mr_add(vcpu->vm, pml4_page,
+ vlapic_apicv_get_apic_access_addr(vcpu->vm),
+ DEFAULT_APIC_BASE, CPU_PAGE_SIZE,
+ EPT_WR | EPT_RD | EPT_UNCACHED);
}

vcpu->arch_vcpu.vlapic = vlapic;
vlapic_init(vlapic);
-
return 0;
}

@@ -2075,15 +1987,8 @@ void vlapic_free(struct vcpu *vcpu)
if (vlapic == NULL) {
return;
}
-
del_timer(&vlapic->vtimer.timer);

- if (!is_apicv_supported()) {
- unregister_mmio_emulation_handler(vcpu->vm,
- (uint64_t)DEFAULT_APIC_BASE,
- (uint64_t)DEFAULT_APIC_BASE + CPU_PAGE_SIZE);
- }
-
free(vlapic);
}

diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index 8b02ccd..2c14adf 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -953,6 +953,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS,
VMX_PROCBASED_CTLS_TSC_OFF |
/* VMX_PROCBASED_CTLS_RDTSC | */
+ VMX_PROCBASED_CTLS_TPR_SHADOW|
VMX_PROCBASED_CTLS_IO_BITMAP |
VMX_PROCBASED_CTLS_MSR_BITMAP |
VMX_PROCBASED_CTLS_SECONDARY);
@@ -966,15 +967,6 @@ static void init_exec_ctrl(struct vcpu *vcpu)
*/
value32 &= ~VMX_PROCBASED_CTLS_INVLPG;

- if (is_apicv_supported()) {
- value32 |= VMX_PROCBASED_CTLS_TPR_SHADOW;
- } else {
- /* Add CR8 VMExit for vlapic */
- value32 |=
- (VMX_PROCBASED_CTLS_CR8_LOAD |
- VMX_PROCBASED_CTLS_CR8_STORE);
- }
-
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
pr_dbg("VMX_PROC_VM_EXEC_CONTROLS: 0x%x ", value32);

@@ -983,9 +975,11 @@ static void init_exec_ctrl(struct vcpu *vcpu)
* guest (optional)
*/
value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS2,
+ VMX_PROCBASED_CTLS2_VAPIC |
VMX_PROCBASED_CTLS2_EPT |
VMX_PROCBASED_CTLS2_RDTSCP |
- VMX_PROCBASED_CTLS2_UNRESTRICT);
+ VMX_PROCBASED_CTLS2_UNRESTRICT|
+ VMX_PROCBASED_CTLS2_VAPIC_REGS);

if (vcpu->arch_vcpu.vpid != 0U) {
value32 |= VMX_PROCBASED_CTLS2_VPID;
@@ -993,27 +987,18 @@ static void init_exec_ctrl(struct vcpu *vcpu)
value32 &= ~VMX_PROCBASED_CTLS2_VPID;
}

- if (is_apicv_supported()) {
- value32 |= VMX_PROCBASED_CTLS2_VAPIC;
-
- if (is_apicv_virt_reg_supported()) {
- value32 |= VMX_PROCBASED_CTLS2_VAPIC_REGS;
- }
-
- if (is_apicv_intr_delivery_supported()) {
- value32 |= VMX_PROCBASED_CTLS2_VIRQ;
- }
- else {
- /*
- * This field exists only on processors that support
- * the 1-setting of the "use TPR shadow"
- * VM-execution control.
- *
- * Set up TPR threshold for virtual interrupt delivery
- * - pg 2904 24.6.8
- */
- exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
- }
+ if (is_apicv_intr_delivery_supported()) {
+ value32 |= VMX_PROCBASED_CTLS2_VIRQ;
+ } else {
+ /*
+ * This field exists only on processors that support
+ * the 1-setting of the "use TPR shadow"
+ * VM-execution control.
+ *
+ * Set up TPR threshold for virtual interrupt delivery
+ * - pg 2904 24.6.8
+ */
+ exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
}

if (cpu_has_cap(X86_FEATURE_OSXSAVE)) {
@@ -1024,29 +1009,27 @@ static void init_exec_ctrl(struct vcpu *vcpu)
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);

- if (is_apicv_supported()) {
- /*APIC-v, config APIC-access address*/
- value64 = vlapic_apicv_get_apic_access_addr(vcpu->vm);
- exec_vmwrite64(VMX_APIC_ACCESS_ADDR_FULL,
- value64);
-
- /*APIC-v, config APIC virtualized page address*/
- value64 = vlapic_apicv_get_apic_page_addr(
- vcpu->arch_vcpu.vlapic);
- exec_vmwrite64(VMX_VIRTUAL_APIC_PAGE_ADDR_FULL,
- value64);
-
- if (is_apicv_intr_delivery_supported()) {
- /* Disable all EOI VMEXIT by default and
- * clear RVI and SVI.
- */
- exec_vmwrite64(VMX_EOI_EXIT0_FULL, 0UL);
- exec_vmwrite64(VMX_EOI_EXIT1_FULL, 0UL);
- exec_vmwrite64(VMX_EOI_EXIT2_FULL, 0UL);
- exec_vmwrite64(VMX_EOI_EXIT3_FULL, 0UL);
+ /*APIC-v, config APIC-access address*/
+ value64 = vlapic_apicv_get_apic_access_addr(vcpu->vm);
+ exec_vmwrite64(VMX_APIC_ACCESS_ADDR_FULL,
+ value64);

- exec_vmwrite16(VMX_GUEST_INTR_STATUS, 0);
- }
+ /*APIC-v, config APIC virtualized page address*/
+ value64 = vlapic_apicv_get_apic_page_addr(
+ vcpu->arch_vcpu.vlapic);
+ exec_vmwrite64(VMX_VIRTUAL_APIC_PAGE_ADDR_FULL,
+ value64);
+
+ if (is_apicv_intr_delivery_supported()) {
+ /* Disable all EOI VMEXIT by default and
+ * clear RVI and SVI.
+ */
+ exec_vmwrite64(VMX_EOI_EXIT0_FULL, 0UL);
+ exec_vmwrite64(VMX_EOI_EXIT1_FULL, 0UL);
+ exec_vmwrite64(VMX_EOI_EXIT2_FULL, 0UL);
+ exec_vmwrite64(VMX_EOI_EXIT3_FULL, 0UL);
+
+ exec_vmwrite16(VMX_GUEST_INTR_STATUS, 0);
}

/* Load EPTP execution control
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 0691fca..781096d 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -323,9 +323,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
void cpu_do_idle(__unused uint16_t pcpu_id);
void cpu_dead(uint16_t pcpu_id);
void trampoline_start16(void);
-bool is_apicv_supported(void);
bool is_apicv_intr_delivery_supported(void);
-bool is_apicv_virt_reg_supported(void);
bool is_ept_supported(void);
bool cpu_has_cap(uint32_t bit);
void load_cpu_state_data(void);
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h
index b93fc7f..0206d37 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -60,11 +60,6 @@ struct acrn_vlapic *vm_lapic_from_pcpuid(struct vm *vm, uint16_t pcpu_id);
int vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval);
int vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval);

-int vlapic_read_mmio_reg(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
- __unused uint8_t size);
-int vlapic_write_mmio_reg(struct vcpu *vcpu, uint64_t gpa,
- uint64_t wval, uint8_t size);
-
/*
* Signals to the LAPIC that an interrupt at 'vector' needs to be generated
* to the 'cpu', the state is recorded in IRR.
@@ -107,15 +102,9 @@ void vlapic_reset_tmr(struct acrn_vlapic *vlapic);
void vlapic_set_tmr_one_vec(struct acrn_vlapic *vlapic, uint32_t delmode,
uint32_t vector, bool level);

-void
-vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic);
-
-int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
- __unused void *handler_private_data);
-
+void vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic);
uint32_t vlapic_get_id(struct acrn_vlapic *vlapic);
uint8_t vlapic_get_apicid(struct acrn_vlapic *vlapic);
-
int vlapic_create(struct vcpu *vcpu);
void vlapic_free(struct vcpu *vcpu);
void vlapic_init(struct acrn_vlapic *vlapic);
@@ -129,6 +118,5 @@ int apic_access_vmexit_handler(struct vcpu *vcpu);
int apic_write_vmexit_handler(struct vcpu *vcpu);
int veoi_vmexit_handler(struct vcpu *vcpu);
int tpr_below_threshold_vmexit_handler(__unused struct vcpu *vcpu);
-
void calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
#endif /* _VLAPIC_H_ */
--
2.7.4


Re: [acrn-kernel PATCH 2/2] drivers/vhm/vhm_ioreq: fix client use-after-free

Li Zhijian
 

On 8/23/2018 10:54 AM, Chen, Jason CJ wrote:
On Thu, Aug 23, 2018 at 09:36:31AM +0800, Yu Wang wrote:
Please change the prefix of the title to vhm instead of
"drivers/vhm/vhm_ioreq".

On 18-08-22 17:22:14, Li Zhijian wrote:
free_client() will free the resources of the client

Signed-off-by: Li Zhijian <lizhijian@...>
---
drivers/vhm/vhm_ioreq.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c
index 03046a4..ba2daf1 100644
--- a/drivers/vhm/vhm_ioreq.c
+++ b/drivers/vhm/vhm_ioreq.c
@@ -260,6 +260,7 @@ static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client,
{
struct list_head *pos, *tmp;
unsigned long flags;
+ int id = client->id;
client->destroying = true;
acrn_ioreq_notify_client(client);
@@ -282,9 +283,9 @@ static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client,
spin_lock_irqsave(&vm->ioreq_client_lock, flags);
list_del(&client->list);
spin_unlock_irqrestore(&vm->ioreq_client_lock, flags);
- free_client(client->id);
+ free_client(id);
- if (client->id == vm->ioreq_fallback_client)
+ if (id == vm->ioreq_fallback_client)
vm->ioreq_fallback_client = -1;
Suggest that to move these two lines before free_client. Otherwise,
maybe still has race condition to index clients[] by
io_req_fallback_client but the client object already free..

For example: vhm_dev_poll...
normally, the poll thread should exit during ioreq destroy, but the race
condition could happen if user try to run poll during destroy. To avoid
it, we may need another lock..
I didn't notice that other thread could reference this client at the same time before.
maybe we could use rcu

Thanks



}
--
2.7.4






Re: [acrn-kernel PATCH 2/2] drivers/vhm/vhm_ioreq: fix client use-after-free

Chen, Jason CJ
 

On Thu, Aug 23, 2018 at 09:36:31AM +0800, Yu Wang wrote:
Please change the prefix of the title to vhm instead of
"drivers/vhm/vhm_ioreq".

On 18-08-22 17:22:14, Li Zhijian wrote:
free_client() will free the resources of the client

Signed-off-by: Li Zhijian <lizhijian@...>
---
drivers/vhm/vhm_ioreq.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c
index 03046a4..ba2daf1 100644
--- a/drivers/vhm/vhm_ioreq.c
+++ b/drivers/vhm/vhm_ioreq.c
@@ -260,6 +260,7 @@ static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client,
{
struct list_head *pos, *tmp;
unsigned long flags;
+ int id = client->id;

client->destroying = true;
acrn_ioreq_notify_client(client);
@@ -282,9 +283,9 @@ static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client,
spin_lock_irqsave(&vm->ioreq_client_lock, flags);
list_del(&client->list);
spin_unlock_irqrestore(&vm->ioreq_client_lock, flags);
- free_client(client->id);
+ free_client(id);

- if (client->id == vm->ioreq_fallback_client)
+ if (id == vm->ioreq_fallback_client)
vm->ioreq_fallback_client = -1;
Suggest that to move these two lines before free_client. Otherwise,
maybe still has race condition to index clients[] by
io_req_fallback_client but the client object already free..

For example: vhm_dev_poll...
normally, the poll thread should exit during ioreq destroy, but the race
condition could happen if user try to run poll during destroy. To avoid
it, we may need another lock..


}

--
2.7.4






--

Thanks

Jason


Re: [PATCH 2/3] hv:Replace vuart pointer with instance in structure vm

Eddie Dong
 

b/hypervisor/arch/x86/guest/vm.c index 8931b38..391fc3b 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -196,14 +196,14 @@ int create_vm(struct vm_description
*vm_desc,
struct vm **rtn_vm)
}

/* Create virtual uart */
- vm->vuart = vuart_init(vm);
+ vuart_init(vm);
This API is better to use &( vm->vuart), as parameter.
It will use vm in vuart_init, here should use vm as parameter
vm->vuart.vm = vm;
vuart_register_io_handler(vm);
OK

Eddie


Re: [acrn-kernel PATCH 1/2] drivers/vhm/vhm_ioreq: init client->kthread_exit true

Chen, Jason CJ
 

On Wed, Aug 22, 2018 at 05:22:13PM +0800, Li Zhijian wrote:
Previously, there is a deadlock at below case
- acrn-dm create gvt instance successfully
- acrn-dm open uos image failed(wrong image path), the acrn-dm does some cleanup,
like destroy gvt instance
then acrn-dm gets stuck.

when destroying gvt instance, it waits client->kthread_exit to be true
while client->kthread_exit is set to be 0 at initializing and the thread
is not created/started actually.

Signed-off-by: Li Zhijian <lizhijian@...>
Reviewed-by: Jason Chen CJ <jason.cj.chen@...>

LGTM.

---
drivers/vhm/vhm_ioreq.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c
index 960723b..03046a4 100644
--- a/drivers/vhm/vhm_ioreq.c
+++ b/drivers/vhm/vhm_ioreq.c
@@ -149,6 +149,7 @@ static int alloc_client(void)
if (!client)
return -ENOMEM;
client->id = i;
+ client->kthread_exit = true;
clients[i] = client;

return i;
@@ -531,7 +532,9 @@ int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop)
"for client %s\n", client->name);
return -ENOMEM;
}
+ client->kthread_exit = false;
} else {
+ client->kthread_exit = false;
might_sleep();

if (check_kthread_stop) {
--
2.7.4





--

Thanks

Jason


Re: [PATCH v3 0/6] instruction decoding refine

Yin, Fengwei <fengwei.yin@...>
 

On 08/23/2018 10:19 AM, Xu, Anthony wrote:
Acked-by: Anthony Xu <anthony.xu@...>
PR after fixing the comments.
OK. Thanks.

Regards
Yin, Fengwei
Anthony

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Yin, Fengwei
Sent: Wednesday, August 22, 2018 7:04 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v3 0/6] instruction decoding refine

According to SDM, we should check whether the gva access from
guest is valid. If it's not, correct exception should be injected.

We only need to emulate the instructions which access the
memory and could trigger EPT violation or APIC access VM exit.
It's not necessary to cover the instructions which don't access
memory.

To eliminate the side effect of access mmio, we move all gva check
to the instruction decoding phase, to make instruction emulation always
succeed.

There are two types of instructions:
- MOVS. The gva is from DI/SI
- Others. The gva is from instruction decoding
We cover both of them.

The TODO work for the next cycle of refinement:
- Fix issue in movs
- Optimize movs
- enable smep/smap check during gva2gpa
- cache the gpa during instruction decoding and avoid gva2gpa
during instruction emulation

ChangeLog:
v2 -> v3:
- Drop the warning fixing patch which was fixed already.
- Removing the src operand check for MOVs instruction. It's done
by VMX

v1 -> v2:
- Drop the lock prefix check.
- print message if MMIO is access with RIP as indirect access base

Yin Fengwei (6):
hv: extend the decode_modrm
hv: remove unnecessary check for gva
hv: move check out of vie_calculate_gla
hv: add new function to get gva for MOVS/STO instruction
hv: add GVA validation for MOVS
hv: add gva check for the case gva is from instruction decode

hypervisor/arch/x86/guest/instr_emul.c | 441 ++++++++++++++++---------
1 file changed, 282 insertions(+), 159 deletions(-)

--
2.17.0



Re: [PATCH v3 0/6] instruction decoding refine

Xu, Anthony
 

Acked-by: Anthony Xu <anthony.xu@...>

PR after fixing the comments.

Anthony

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Yin, Fengwei
Sent: Wednesday, August 22, 2018 7:04 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v3 0/6] instruction decoding refine

According to SDM, we should check whether the gva access from
guest is valid. If it's not, correct exception should be injected.

We only need to emulate the instructions which access the
memory and could trigger EPT violation or APIC access VM exit.
It's not necessary to cover the instructions which don't access
memory.

To eliminate the side effect of access mmio, we move all gva check
to the instruction decoding phase, to make instruction emulation always
succeed.

There are two types of instructions:
- MOVS. The gva is from DI/SI
- Others. The gva is from instruction decoding
We cover both of them.

The TODO work for the next cycle of refinement:
- Fix issue in movs
- Optimize movs
- enable smep/smap check during gva2gpa
- cache the gpa during instruction decoding and avoid gva2gpa
during instruction emulation

ChangeLog:
v2 -> v3:
- Drop the warning fixing patch which was fixed already.
- Removing the src operand check for MOVs instruction. It's done
by VMX

v1 -> v2:
- Drop the lock prefix check.
- print message if MMIO is access with RIP as indirect access base

Yin Fengwei (6):
hv: extend the decode_modrm
hv: remove unnecessary check for gva
hv: move check out of vie_calculate_gla
hv: add new function to get gva for MOVS/STO instruction
hv: add GVA validation for MOVS
hv: add gva check for the case gva is from instruction decode

hypervisor/arch/x86/guest/instr_emul.c | 441 ++++++++++++++++---------
1 file changed, 282 insertions(+), 159 deletions(-)

--
2.17.0



Re: [PATCH v3 3/6] hv: move check out of vie_calculate_gla

Yin, Fengwei <fengwei.yin@...>
 

On 08/23/2018 10:17 AM, Xu, Anthony wrote:

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Yin, Fengwei
Sent: Wednesday, August 22, 2018 7:04 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v3 3/6] hv: move check out of vie_calculate_gla

We will do check only during instruction decode phase.
vie_calculate_gla will be called both in instruction emulation
phase, we move the check out of vie_calculate_gla.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
Acked-by: Anthony Xu <anthony.xu@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 82 ++++++++++++--------------
1 file changed, 37 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c
b/hypervisor/arch/x86/guest/instr_emul.c
index bf733190..bb3817d2 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -405,6 +405,40 @@ static int vie_canonical_check(enum vm_cpu_mode
cpu_mode, uint64_t gla)
}
}

+static bool is_desc_valid(struct seg_desc *desc, uint32_t prot)
+{
+ uint32_t type;
+
+ /* The descriptor type must indicate a code/data segment. */
+ type = SEG_DESC_TYPE(desc->access);
+ if (type < 16 || type > 31) {
16U, 31U
Yes. Will address it in next version.

Regards
Yin, Fengwei


+ return false;
+ }
+
+ if ((prot & PROT_READ) != 0U) {
+ /* #GP on a read access to a exec-only code segment */
+ if ((type & 0xAU) == 0x8U) {
+ return false;
+ }
+ }
+
+ if ((prot & PROT_WRITE) != 0U) {
+ /*
+ * #GP on a write access to a code segment or a
+ * read-only data segment.
+ */
+ if ((type & 0x8U) != 0U) { /* code segment */
+ return false;
+ }
+
+ if ((type & 0xAU) == 0U) { /* read-only data seg */
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
*@pre seg must be segment register index
*@pre length_arg must be 1, 2, 4 or 8
@@ -415,7 +449,7 @@ static int vie_canonical_check(enum vm_cpu_mode
cpu_mode, uint64_t gla)
*/
static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum
cpu_reg_name seg,
struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize,
- uint32_t prot, uint64_t *gla)
+ uint64_t *gla)
{
uint64_t firstoff, segbase;
uint64_t offset = offset_arg;
@@ -423,49 +457,7 @@ static int vie_calculate_gla(enum vm_cpu_mode
cpu_mode, enum cpu_reg_name seg,
uint32_t type;

firstoff = offset;
- if (cpu_mode == CPU_MODE_64BIT) {
- if (addrsize != 4U && addrsize != 8U) {
- pr_dbg("%s: invalid addr size %d for cpu mode %d",
- __func__, addrsize, cpu_mode);
- return -1;
- }
- glasize = 8U;
- } else {
- if (addrsize != 2U && addrsize != 4U) {
- pr_dbg("%s: invalid addr size %d for cpu mode %d",
- __func__, addrsize, cpu_mode);
- return -1;
- }
- glasize = 4U;
-
- /* The descriptor type must indicate a code/data segment. */
- type = SEG_DESC_TYPE(desc->access);
- if (type < 16 || type > 31) {
- /*TODO: Inject #GP */
- return -1;
- }
-
- if ((prot & PROT_READ) != 0U) {
- /* #GP on a read access to a exec-only code segment
*/
- if ((type & 0xAU) == 0x8U) {
- return -1;
- }
- }
-
- if ((prot & PROT_WRITE) != 0U) {
- /*
- * #GP on a write access to a code segment or a
- * read-only data segment.
- */
- if ((type & 0x8U) != 0U) { /* code segment */
- return -1;
- }
-
- if ((type & 0xAU) == 0U) { /* read-only data seg
*/
- return -1;
- }
- }
- }
+ glasize = (cpu_mode == CPU_MODE_64BIT) ? 8U: 4U;

/*
* In 64-bit mode all segments except %fs and %gs have a segment
@@ -886,7 +878,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct
instr_emul_vie *vie,
vm_get_seg_desc(seg, &desc);

if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val,
- addrsize, prot, gla) != 0) {
+ addrsize, gla) != 0) {
if (seg == CPU_REG_SS) {
pr_err("TODO: inject ss exception");
} else {
--
2.17.0



Re: [PATCH v3 3/6] hv: move check out of vie_calculate_gla

Xu, Anthony
 

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Yin, Fengwei
Sent: Wednesday, August 22, 2018 7:04 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v3 3/6] hv: move check out of vie_calculate_gla

We will do the check only during the instruction decode phase.
Since vie_calculate_gla will be called in both the instruction decode
and emulation phases, we move the check out of vie_calculate_gla.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
Acked-by: Anthony Xu <anthony.xu@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 82 ++++++++++++--------------
1 file changed, 37 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c
b/hypervisor/arch/x86/guest/instr_emul.c
index bf733190..bb3817d2 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -405,6 +405,40 @@ static int vie_canonical_check(enum vm_cpu_mode
cpu_mode, uint64_t gla)
}
}

+static bool is_desc_valid(struct seg_desc *desc, uint32_t prot)
+{
+ uint32_t type;
+
+ /* The descriptor type must indicate a code/data segment. */
+ type = SEG_DESC_TYPE(desc->access);
+ if (type < 16 || type > 31) {
16U, 31U



+ return false;
+ }
+
+ if ((prot & PROT_READ) != 0U) {
+ /* #GP on a read access to a exec-only code segment */
+ if ((type & 0xAU) == 0x8U) {
+ return false;
+ }
+ }
+
+ if ((prot & PROT_WRITE) != 0U) {
+ /*
+ * #GP on a write access to a code segment or a
+ * read-only data segment.
+ */
+ if ((type & 0x8U) != 0U) { /* code segment */
+ return false;
+ }
+
+ if ((type & 0xAU) == 0U) { /* read-only data seg */
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
*@pre seg must be segment register index
*@pre length_arg must be 1, 2, 4 or 8
@@ -415,7 +449,7 @@ static int vie_canonical_check(enum vm_cpu_mode
cpu_mode, uint64_t gla)
*/
static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum
cpu_reg_name seg,
struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize,
- uint32_t prot, uint64_t *gla)
+ uint64_t *gla)
{
uint64_t firstoff, segbase;
uint64_t offset = offset_arg;
@@ -423,49 +457,7 @@ static int vie_calculate_gla(enum vm_cpu_mode
cpu_mode, enum cpu_reg_name seg,
uint32_t type;

firstoff = offset;
- if (cpu_mode == CPU_MODE_64BIT) {
- if (addrsize != 4U && addrsize != 8U) {
- pr_dbg("%s: invalid addr size %d for cpu mode %d",
- __func__, addrsize, cpu_mode);
- return -1;
- }
- glasize = 8U;
- } else {
- if (addrsize != 2U && addrsize != 4U) {
- pr_dbg("%s: invalid addr size %d for cpu mode %d",
- __func__, addrsize, cpu_mode);
- return -1;
- }
- glasize = 4U;
-
- /* The descriptor type must indicate a code/data segment. */
- type = SEG_DESC_TYPE(desc->access);
- if (type < 16 || type > 31) {
- /*TODO: Inject #GP */
- return -1;
- }
-
- if ((prot & PROT_READ) != 0U) {
- /* #GP on a read access to a exec-only code segment
*/
- if ((type & 0xAU) == 0x8U) {
- return -1;
- }
- }
-
- if ((prot & PROT_WRITE) != 0U) {
- /*
- * #GP on a write access to a code segment or a
- * read-only data segment.
- */
- if ((type & 0x8U) != 0U) { /* code segment */
- return -1;
- }
-
- if ((type & 0xAU) == 0U) { /* read-only data seg
*/
- return -1;
- }
- }
- }
+ glasize = (cpu_mode == CPU_MODE_64BIT) ? 8U: 4U;

/*
* In 64-bit mode all segments except %fs and %gs have a segment
@@ -886,7 +878,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct
instr_emul_vie *vie,
vm_get_seg_desc(seg, &desc);

if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val,
- addrsize, prot, gla) != 0) {
+ addrsize, gla) != 0) {
if (seg == CPU_REG_SS) {
pr_err("TODO: inject ss exception");
} else {
--
2.17.0



[PATCH v3 6/6] hv: add gva check for the case gva is from instruction decode

Yin, Fengwei <fengwei.yin@...>
 

For instructions other than MOVS, one operand is a register
and the other is the memory access which triggers the EPT violation.
In this case, there is one possibility that the EPT violation happens
before a guest fault:
the fault is triggered by a guest PTE access-bit
violation (like a write to a gva whose PTE has the R/W bit cleared).

So we do this kind of check and inject an exception to the guest
accordingly during the instruction decoding phase.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 87 ++++++++++++++++++++++++++
1 file changed, 87 insertions(+)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index 29015a20..dc7aab14 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -2138,6 +2138,91 @@ static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
}
}

+static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
+ enum vm_cpu_mode cpu_mode)
+{
+ int ret = 0;
+ uint64_t base, segbase, idx, gva, gpa;
+ uint32_t err_code;
+ enum cpu_reg_name seg;
+ struct instr_emul_vie *vie = &emul_ctxt->vie;
+
+ base = 0UL;
+ if (vie->base_register != CPU_REG_LAST) {
+ base = vm_get_register(vcpu, vie->base_register);
+
+ /* RIP relative addressing starts from the
+ * following instruction
+ */
+ if (vie->base_register == CPU_REG_RIP)
+ base += vie->num_processed;
+
+ }
+
+ idx = 0UL;
+ if (vie->index_register != CPU_REG_LAST) {
+ idx = vm_get_register(vcpu, vie->index_register);
+ }
+
+ /* "Specifying a Segment Selector" of SDM Vol1 3.7.4
+ *
+ * In legacy IA-32 mode, when ESP or EBP register is used as
+ * base, the SS segment is default segment.
+ *
+ * All data references, except when relative to stack or
+ * string destination, DS is default segment.
+ *
+ * segment override could overwrite the default segment
+ *
+ * 64bit mode, segmentation is generally disabled. The
+ * exception are FS and GS.
+ */
+ if (vie->seg_override != 0U) {
+ seg = vie->segment_register;
+ } else if ((vie->base_register == CPU_REG_RSP) ||
+ (vie->base_register == CPU_REG_RBP)) {
+ seg = CPU_REG_SS;
+ } else {
+ seg = CPU_REG_DS;
+ }
+
+ if ((cpu_mode == CPU_MODE_64BIT) && (seg != CPU_REG_FS) &&
+ (seg != CPU_REG_GS)) {
+ segbase = 0UL;
+ } else {
+ struct seg_desc desc;
+
+ vm_get_seg_desc(seg, &desc);
+
+ segbase = desc.base;
+ }
+
+ gva = segbase + base + vie->scale * idx + vie->displacement;
+
+ if (vie_canonical_check(cpu_mode, gva) != 0) {
+ if (seg == CPU_REG_SS) {
+ vcpu_inject_ss(vcpu);
+ } else {
+ vcpu_inject_gp(vcpu, 0U);
+ }
+ return -EFAULT;
+ }
+
+ err_code = (vcpu->req.reqs.mmio.direction == REQUEST_WRITE) ?
+ PAGE_FAULT_WR_FLAG : 0U;
+
+ ret = gva2gpa(vcpu, gva, &gpa, &err_code);
+ if (ret < 0) {
+ if (ret == -EFAULT) {
+ vcpu_inject_pf(vcpu, gva,
+ err_code);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
int decode_instruction(struct vcpu *vcpu)
{
struct instr_emul_ctxt *emul_ctxt;
@@ -2190,6 +2275,8 @@ int decode_instruction(struct vcpu *vcpu)
retval = instr_check_di(vcpu, emul_ctxt);
if (retval < 0)
return retval;
+ } else {
+ instr_check_gva(vcpu, emul_ctxt, cpu_mode);
}

return emul_ctxt->vie.opsize;
--
2.17.0


[PATCH v3 5/6] hv: add GVA validation for MOVS

Yin, Fengwei <fengwei.yin@...>
 

Unlike the other instructions we emulated, MOVS has two operands
which are both memory.

So we need to check whether each operand is a valid GVA. With VMX
enabled, the src operand is always checked first by VMX, which
means if the src operand is not a valid GVA, it will trigger a fault
in the guest before triggering an EPT violation. So we don't need to
check the src operand; we only need to check the dst operand here.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 46 ++++++++++++++++++++++----
1 file changed, 39 insertions(+), 7 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index 8857b3a7..29015a20 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -55,7 +55,7 @@
#define VIE_OP_F_IMM8 (1U << 1) /* 8-bit immediate operand */
#define VIE_OP_F_MOFFSET (1U << 2) /* 16/32/64-bit immediate moffset */
#define VIE_OP_F_NO_MODRM (1U << 3)
-#define VIE_OP_F_NO_GLA_VERIFICATION (1U << 4)
+#define VIE_OP_F_CHECK_GVA_DI (1U << 4) /* for movs, need to check DI */

static const struct instr_emul_vie_op two_byte_opcodes[256] = {
[0xB6] = {
@@ -108,19 +108,19 @@ static const struct instr_emul_vie_op one_byte_opcodes[256] = {
},
[0xA4] = {
.op_type = VIE_OP_TYPE_MOVS,
- .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+ .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_CHECK_GVA_DI
},
[0xA5] = {
.op_type = VIE_OP_TYPE_MOVS,
- .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+ .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_CHECK_GVA_DI
},
[0xAA] = {
.op_type = VIE_OP_TYPE_STOS,
- .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+ .op_flags = VIE_OP_F_NO_MODRM
},
[0xAB] = {
.op_type = VIE_OP_TYPE_STOS,
- .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
+ .op_flags = VIE_OP_F_NO_MODRM
},
[0xC6] = {
/* XXX Group 11 extended opcode - not just MOV */
@@ -953,7 +953,7 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie)
{
uint64_t dstaddr, srcaddr;
uint64_t rcx, rdi, rsi, rflags;
- int error, fault, repeat;
+ int error, repeat;
uint8_t opsize;
enum cpu_reg_name seg;

@@ -1533,7 +1533,6 @@ static int emulate_bittest(struct vcpu *vcpu, struct instr_emul_vie *vie)

static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
{
- struct vm_guest_paging *paging = &ctxt->paging;
struct instr_emul_vie *vie = &ctxt->vie;
struct vcpu *vcpu = ctxt->vcpu;
int error;
@@ -2123,6 +2122,22 @@ static int local_decode_instruction(enum vm_cpu_mode cpu_mode,
return 0;
}

+/* for instruction MOVS/STO, check the gva gotten from DI/SI. */
+static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
+{
+ int ret;
+ struct instr_emul_vie *vie = &emul_ctxt->vie;
+ uint64_t gva;
+ enum cpu_reg_name seg;
+
+ ret = get_gva_di_si_check(vcpu, vie->addrsize, PROT_WRITE,
+ CPU_REG_ES, CPU_REG_RDI, &gva);
+
+ if (ret < 0) {
+ return -EFAULT;
+ }
+}
+
int decode_instruction(struct vcpu *vcpu)
{
struct instr_emul_ctxt *emul_ctxt;
@@ -2160,6 +2175,23 @@ int decode_instruction(struct vcpu *vcpu)
return -EFAULT;
}

+ /*
+ * We do operand check in instruction decode phase and
+ * inject exception accordingly. In late instruction
+ * emulation, it will always sucess.
+ *
+ * We only need to do dst check for movs. For other instructions,
+ * they always has one register and one mmio which trigger EPT
+ * by access mmio. With VMX enabled, the related check is done
+ * by VMX itself before hit EPT violation.
+ *
+ */
+ if (emul_ctxt->vie.op.op_flags & VIE_OP_F_CHECK_GVA_DI) {
+ retval = instr_check_di(vcpu, emul_ctxt);
+ if (retval < 0)
+ return retval;
+ }
+
return emul_ctxt->vie.opsize;
}

--
2.17.0


[PATCH v3 4/6] hv: add new function to get gva for MOVS/STO instruction

Yin, Fengwei <fengwei.yin@...>
 

Drop the get_gla function and add
- get_gva_di_si_nocheck
get gva from ES:DI and DS(other segment):SI without
checking the failure case
- get_gva_di_si_check
get gva from ES:DI and DS(other segment):SI with the failure
case checked

TODO:
- Save dst_gpa and src_gpa for instruction emulation phase
use.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 116 ++++++++++++++++---------
1 file changed, 76 insertions(+), 40 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index bb3817d2..8857b3a7 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -454,7 +454,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
uint64_t firstoff, segbase;
uint64_t offset = offset_arg;
uint8_t glasize;
- uint32_t type;

firstoff = offset;
glasize = (cpu_mode == CPU_MODE_64BIT) ? 8U: 4U;
@@ -860,52 +859,97 @@ static int emulate_movx(struct vcpu *vcpu, struct instr_emul_vie *vie)
return error;
}

+/**
+ * @pre only called by instruction emulation and check was done during
+ * instruction decode
+ *
+ * @remark This function can only be called in instruction emulation and
+ * suppose always success because the check was done during instruction
+ * decode.
+ *
+ * It's only used by MOVS/STO
+ */
+static void get_gva_di_si_nocheck(struct vcpu *vcpu, uint8_t addrsize,
+ enum cpu_reg_name seg, enum cpu_reg_name gpr, uint64_t *gva)
+{
+ uint64_t val;
+ struct seg_desc desc;
+ enum vm_cpu_mode cpu_mode;
+
+ val = vm_get_register(vcpu, gpr);
+ vm_get_seg_desc(seg, &desc);
+ cpu_mode = get_vcpu_mode(vcpu);
+
+ (void)vie_calculate_gla(cpu_mode, seg, &desc, val, addrsize, gva);
+
+ return;
+}
+
/*
- * Helper function to calculate and validate a linear address.
+ * @pre only called during instruction decode phase
+ *
+ * @remark This function get gva from ES:DI and DS(other segment):SI. And
+ * do check the failure condition and inject exception to guest accordingly.
+ *
+ * It's only used by MOVS/STO
*/
-static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,
- struct vm_guest_paging *paging,
- uint8_t opsize, uint8_t addrsize, uint32_t prot, enum cpu_reg_name seg,
- enum cpu_reg_name gpr, uint64_t *gla, int *fault)
+static int get_gva_di_si_check(struct vcpu *vcpu, uint8_t addrsize,
+ uint32_t prot, enum cpu_reg_name seg, enum cpu_reg_name gpr,
+ uint64_t *gva)
{
int ret;
+ uint32_t err_code;
struct seg_desc desc;
- uint64_t cr0, val, rflags;
+ enum vm_cpu_mode cpu_mode;
+ uint64_t val, gpa;

- cr0 = vm_get_register(vcpu, CPU_REG_CR0);
- rflags = vm_get_register(vcpu, CPU_REG_RFLAGS);
val = vm_get_register(vcpu, gpr);
vm_get_seg_desc(seg, &desc);
+ cpu_mode = get_vcpu_mode(vcpu);

- if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val,
- addrsize, gla) != 0) {
- if (seg == CPU_REG_SS) {
- pr_err("TODO: inject ss exception");
- } else {
- pr_err("TODO: inject gp exception");
+ if (!is_desc_valid(&desc, prot)) {
+ goto exception_inject;
+ }
+
+ if (cpu_mode == CPU_MODE_64BIT) {
+ if ((addrsize != 4U) && (addrsize != 8U)) {
+ goto exception_inject;
+ }
+ } else {
+ if ((addrsize != 2U) && (addrsize != 4U)) {
+ goto exception_inject;
}
- goto guest_fault;
}

- if (vie_canonical_check(paging->cpu_mode, *gla) != 0) {
- if (seg == CPU_REG_SS) {
- pr_err("TODO: inject ss exception");
- } else {
- pr_err("TODO: inject gp exception");
+ if (vie_calculate_gla(cpu_mode, seg, &desc, val, addrsize, gva) != 0) {
+ goto exception_inject;
+ }
+
+ if (vie_canonical_check(cpu_mode, *gva) != 0) {
+ goto exception_inject;
+ }
+
+ err_code = (prot == PROT_WRITE) ? PAGE_FAULT_WR_FLAG : 0U;
+ ret = gva2gpa(vcpu, (uint64_t)gva, &gpa, &err_code);
+ if (ret < 0) {
+ if (ret == -EFAULT) {
+ vcpu_inject_pf(vcpu, (uint64_t)gva, err_code);
}
- goto guest_fault;
+ return ret;
}

- *fault = 0;
return 0;

-guest_fault:
- *fault = 1;
- return ret;
+exception_inject:
+ if (seg == CPU_REG_SS) {
+ vcpu_inject_ss(vcpu);
+ } else {
+ vcpu_inject_gp(vcpu, 0U);
+ }
+ return -EFAULT;
}

-static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
- struct vm_guest_paging *paging)
+static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie)
{
uint64_t dstaddr, srcaddr;
uint64_t rcx, rdi, rsi, rflags;
@@ -939,18 +983,10 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
}

seg = (vie->seg_override != 0U) ? (vie->segment_register) : CPU_REG_DS;
- error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
- PROT_READ, seg, CPU_REG_RSI, &srcaddr, &fault);
- if ((error != 0) || (fault != 0)) {
- goto done;
- }

- error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
- PROT_WRITE, CPU_REG_ES, CPU_REG_RDI, &dstaddr,
- &fault);
- if ((error != 0) || (fault != 0)) {
- goto done;
- }
+ get_gva_di_si_nocheck(vcpu, vie->addrsize, seg, CPU_REG_RSI, &srcaddr);
+ get_gva_di_si_nocheck(vcpu, vie->addrsize, CPU_REG_ES, CPU_REG_RDI,
+ &dstaddr);

(void)memcpy_s((void *)dstaddr, 16U, (void *)srcaddr, opsize);

@@ -1520,7 +1556,7 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
error = emulate_movx(vcpu, vie);
break;
case VIE_OP_TYPE_MOVS:
- error = emulate_movs(vcpu, vie, paging);
+ error = emulate_movs(vcpu, vie);
break;
case VIE_OP_TYPE_STOS:
error = emulate_stos(vcpu, vie);
--
2.17.0


[PATCH v3 3/6] hv: move check out of vie_calculate_gla

Yin, Fengwei <fengwei.yin@...>
 

We will do the check only during the instruction decode phase.
Since vie_calculate_gla will be called in both the instruction decode
and emulation phases, we move the check out of vie_calculate_gla.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
Acked-by: Anthony Xu <anthony.xu@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 82 ++++++++++++--------------
1 file changed, 37 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index bf733190..bb3817d2 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -405,6 +405,40 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
}
}

+static bool is_desc_valid(struct seg_desc *desc, uint32_t prot)
+{
+ uint32_t type;
+
+ /* The descriptor type must indicate a code/data segment. */
+ type = SEG_DESC_TYPE(desc->access);
+ if (type < 16 || type > 31) {
+ return false;
+ }
+
+ if ((prot & PROT_READ) != 0U) {
+ /* #GP on a read access to a exec-only code segment */
+ if ((type & 0xAU) == 0x8U) {
+ return false;
+ }
+ }
+
+ if ((prot & PROT_WRITE) != 0U) {
+ /*
+ * #GP on a write access to a code segment or a
+ * read-only data segment.
+ */
+ if ((type & 0x8U) != 0U) { /* code segment */
+ return false;
+ }
+
+ if ((type & 0xAU) == 0U) { /* read-only data seg */
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
*@pre seg must be segment register index
*@pre length_arg must be 1, 2, 4 or 8
@@ -415,7 +449,7 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
*/
static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize,
- uint32_t prot, uint64_t *gla)
+ uint64_t *gla)
{
uint64_t firstoff, segbase;
uint64_t offset = offset_arg;
@@ -423,49 +457,7 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
uint32_t type;

firstoff = offset;
- if (cpu_mode == CPU_MODE_64BIT) {
- if (addrsize != 4U && addrsize != 8U) {
- pr_dbg("%s: invalid addr size %d for cpu mode %d",
- __func__, addrsize, cpu_mode);
- return -1;
- }
- glasize = 8U;
- } else {
- if (addrsize != 2U && addrsize != 4U) {
- pr_dbg("%s: invalid addr size %d for cpu mode %d",
- __func__, addrsize, cpu_mode);
- return -1;
- }
- glasize = 4U;
-
- /* The descriptor type must indicate a code/data segment. */
- type = SEG_DESC_TYPE(desc->access);
- if (type < 16 || type > 31) {
- /*TODO: Inject #GP */
- return -1;
- }
-
- if ((prot & PROT_READ) != 0U) {
- /* #GP on a read access to a exec-only code segment */
- if ((type & 0xAU) == 0x8U) {
- return -1;
- }
- }
-
- if ((prot & PROT_WRITE) != 0U) {
- /*
- * #GP on a write access to a code segment or a
- * read-only data segment.
- */
- if ((type & 0x8U) != 0U) { /* code segment */
- return -1;
- }
-
- if ((type & 0xAU) == 0U) { /* read-only data seg */
- return -1;
- }
- }
- }
+ glasize = (cpu_mode == CPU_MODE_64BIT) ? 8U: 4U;

/*
* In 64-bit mode all segments except %fs and %gs have a segment
@@ -886,7 +878,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,
vm_get_seg_desc(seg, &desc);

if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val,
- addrsize, prot, gla) != 0) {
+ addrsize, gla) != 0) {
if (seg == CPU_REG_SS) {
pr_err("TODO: inject ss exception");
} else {
--
2.17.0


[PATCH v3 2/6] hv: remove unnecessary check for gva

Yin, Fengwei <fengwei.yin@...>
 

According to SDM vol3 25.1.1,
with VMX enabled, the following exceptions will be handled by the guest
without triggering a VM exit:
- faults based on privilege level
- general protection due to the relevant segment being unusable
- general protection due to an offset beyond the limit of the relevant segment
- alignment check exception

ACRN always assumes VMX is enabled. So we don't need to do these checks
in instruction emulation. But we still need to do the page-fault related checks.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
Acked-by: Anthony Xu <anthony.xu@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 79 +++-----------------------
1 file changed, 7 insertions(+), 72 deletions(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index 7fb67884..bf733190 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -385,18 +385,6 @@ static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emu
emul_ctxt->paging.paging_mode = get_vcpu_paging_mode(vcpu);
}

-static int vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0,
- uint64_t rflags, uint64_t gla)
-{
- pr_dbg("Checking alignment with cpl: %hhu, addrsize: %hhu", cpl, size);
-
- if (cpl < 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
- return 0;
- }
-
- return ((gla & (size - 1U)) != 0UL) ? 1 : 0;
-}
-
static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
uint64_t mask;
@@ -426,12 +414,11 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
*return -1 - on failure
*/
static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
- struct seg_desc *desc, uint64_t offset_arg, uint8_t length_arg,
- uint8_t addrsize, uint32_t prot, uint64_t *gla)
+ struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize,
+ uint32_t prot, uint64_t *gla)
{
- uint64_t firstoff, low_limit, high_limit, segbase;
+ uint64_t firstoff, segbase;
uint64_t offset = offset_arg;
- uint8_t length = length_arg;
uint8_t glasize;
uint32_t type;

@@ -450,25 +437,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
return -1;
}
glasize = 4U;
- /*
- * If the segment selector is loaded with a NULL selector
- * then the descriptor is unusable and attempting to use
- * it results in a #GP(0).
- */
- if (SEG_DESC_UNUSABLE(desc->access)) {
- return -1;
- }
-
- /*
- * The processor generates a #NP exception when a segment
- * register is loaded with a selector that points to a
- * descriptor that is not present. If this was the case then
- * it would have been checked before the VM-exit.
- */
- if (SEG_DESC_PRESENT(desc->access) != 0) {
- /* TODO: Inject #NP */
- return -1;
- }

/* The descriptor type must indicate a code/data segment. */
type = SEG_DESC_TYPE(desc->access);
@@ -497,30 +465,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
return -1;
}
}
-
- /*
- * 'desc->limit' is fully expanded taking granularity into
- * account.
- */
- if ((type & 0xCU) == 0x4U) {
- /* expand-down data segment */
- low_limit = desc->limit + 1U;
- high_limit = SEG_DESC_DEF32(desc->access) ?
- 0xffffffffU : 0xffffU;
- } else {
- /* code segment or expand-up data segment */
- low_limit = 0U;
- high_limit = desc->limit;
- }
-
- while (length > 0U) {
- offset &= size2mask[addrsize];
- if (offset < low_limit || offset > high_limit) {
- return -1;
- }
- offset++;
- length--;
- }
}

/*
@@ -932,6 +876,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,
uint8_t opsize, uint8_t addrsize, uint32_t prot, enum cpu_reg_name seg,
enum cpu_reg_name gpr, uint64_t *gla, int *fault)
{
+ int ret;
struct seg_desc desc;
uint64_t cr0, val, rflags;

@@ -940,13 +885,11 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,
val = vm_get_register(vcpu, gpr);
vm_get_seg_desc(seg, &desc);

- if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
- addrsize, prot, gla) != 0) {
+ if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val,
+ addrsize, prot, gla) != 0) {
if (seg == CPU_REG_SS) {
- /*vm_inject_ss(vcpu, 0);*/
pr_err("TODO: inject ss exception");
} else {
- /*vm_inject_gp(vcpu);*/
pr_err("TODO: inject gp exception");
}
goto guest_fault;
@@ -954,27 +897,19 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,

if (vie_canonical_check(paging->cpu_mode, *gla) != 0) {
if (seg == CPU_REG_SS) {
- /*vm_inject_ss(vcpu, 0);*/
pr_err("TODO: inject ss exception");
} else {
- /*vm_inject_gp(vcpu);*/
pr_err("TODO: inject gp exception");
}
goto guest_fault;
}

- if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla) != 0) {
- /*vm_inject_ac(vcpu, 0);*/
- pr_err("TODO: inject ac exception");
- goto guest_fault;
- }
-
*fault = 0;
return 0;

guest_fault:
*fault = 1;
- return 0;
+ return ret;
}

static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
--
2.17.0


[PATCH v3 1/6] hv: extend the decode_modrm

Yin, Fengwei <fengwei.yin@...>
 

If rm shows there is no SIB byte following the rm field, we should get
the base_register info from rm.
---
hypervisor/arch/x86/guest/instr_emul.c | 43 +++++++++++++++++++++++++-
1 file changed, 42 insertions(+), 1 deletion(-)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index 2f35e809..7fb67884 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -1636,6 +1636,11 @@ static int vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu)

(void)memset(vie, 0U, sizeof(struct instr_emul_vie));

+ /* init register fields in vie. */
+ vie->base_register = CPU_REG_LAST;
+ vie->index_register = CPU_REG_LAST;
+ vie->segment_register = CPU_REG_LAST;
+
err_code = PAGE_FAULT_ID_FLAG;
ret = copy_from_gva(vcpu, vie->inst, guest_rip_gva,
inst_len, &err_code, &fault_addr);
@@ -1859,6 +1864,42 @@ static int decode_modrm(struct instr_emul_vie *vie, enum vm_cpu_mode cpu_mode)

vie->reg |= (vie->rex_r << 3);

+ /* SIB */
+ if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB) {
+ goto done;
+ }
+
+ vie->base_register = vie->rm;
+
+ switch (vie->mod) {
+ case VIE_MOD_INDIRECT_DISP8:
+ vie->disp_bytes = 1U;
+ break;
+ case VIE_MOD_INDIRECT_DISP32:
+ vie->disp_bytes = 4U;
+ break;
+ case VIE_MOD_INDIRECT:
+ if (vie->rm == VIE_RM_DISP32) {
+ vie->disp_bytes = 4U;
+ /*
+ * Table 2-7. RIP-Relative Addressing
+ *
+ * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
+ * whereas in compatibility mode it just implies disp32.
+ */
+
+ if (cpu_mode == CPU_MODE_64BIT) {
+ vie->base_register = CPU_REG_RIP;
+ pr_err("VM exit with RIP as indirect access");
+ }
+ else {
+ vie->base_register = CPU_REG_LAST;
+ }
+ }
+ break;
+ }
+
+done:
vie_advance(vie);

return 0;
@@ -1935,7 +1976,7 @@ static int decode_sib(struct instr_emul_vie *vie)
}

/* 'scale' makes sense only in the context of an index register */
- if (vie->index_register <= CPU_REG_LAST) {
+ if (vie->index_register < CPU_REG_LAST) {
vie->scale = 1U << vie->ss;
}

--
2.17.0


[PATCH v3 0/6] instruction decoding refine

Yin, Fengwei <fengwei.yin@...>
 

According to the SDM, we should check whether a gva access from
the guest is valid. If it's not, the correct exception should be injected.

We only need to emulate the instructions which access
memory and could trigger an EPT violation or APIC access VM exit.
It's not necessary to cover the instructions which don't access
memory.

To eliminate the side effects of accessing mmio, we move all gva checks
to the instruction decoding phase, so that instruction emulation always
succeeds.

There are two types of instructions:
- MOVS. The gva is from DI/SI
- Others. The gva is from instruction decoding
We cover both of them.

The TODO work for the next cycle of refinement:
- Fix issue in movs
- Optimize movs
- enable smep/smap check during gva2gpa
- cache the gpa during instruction decoding and avoid gva2gpa
during instruction emulation

ChangeLog:
v2 -> v3:
- Drop the warning fixing patch which was fixed already.
- Remove the src operand check for the MOVS instruction. It's done
by VMX

v1 -> v2:
- Drop the lock prefix check.
- print message if MMIO is access with RIP as indirect access base

Yin Fengwei (6):
hv: extend the decode_modrm
hv: remove unnecessary check for gva
hv: move check out of vie_calculate_gla
hv: add new function to get gva for MOVS/STO instruction
hv: add GVA validation for MOVS
hv: add gva check for the case gva is from instruction decode

hypervisor/arch/x86/guest/instr_emul.c | 441 ++++++++++++++++---------
1 file changed, 282 insertions(+), 159 deletions(-)

--
2.17.0


Re: [PATCH 3/3] hv:Repalce vioapic pointer with instance in structure vm

Mingqiang Chi
 

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Xu, Anthony
Sent: Thursday, August 23, 2018 6:55 AM
To: acrn-dev@...
Subject: Re: [acrn-dev] [PATCH 3/3] hv:Repalce vioapic pointer with instance
in structure vm



-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...]
On Behalf Of Mingqiang Chi
Sent: Wednesday, August 22, 2018 3:57 AM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH 3/3] hv:Repalce vioapic pointer with
instance in structure vm

From: Mingqiang Chi <mingqiang.chi@...>

change vioapic_init to void type
rename struct vioapic --> struct virt_ioapic

Signed-off-by: Mingqiang Chi <mingqiang.chi@...>
---
hypervisor/arch/x86/guest/vm.c | 19 ++++------
hypervisor/dm/vioapic.c | 56
++++++++++-------------------
hypervisor/include/arch/x86/guest/vioapic.h | 19 +++++++---
hypervisor/include/arch/x86/guest/vm.h | 2 +-
hypervisor/include/arch/x86/hv_arch.h | 2 +-
5 files changed, 43 insertions(+), 55 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vm.c
b/hypervisor/arch/x86/guest/vm.c index 391fc3b..0f450de 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -212,12 +212,8 @@ int create_vm(struct vm_description *vm_desc,
struct vm **rtn_vm)
/* vpic wire_mode default is INTR */
vm->wire_mode = VPIC_WIRE_INTR;

- /* Allocate full emulated vIOAPIC instance */
- vm->arch_vm.virt_ioapic = vioapic_init(vm);
- if (vm->arch_vm.virt_ioapic == NULL) {
- status = -ENODEV;
- goto err;
- }
+ /* Init full emulated vIOAPIC instance */
+ vioapic_init(vm);

/* Populate return VM handle */
*rtn_vm = vm;
@@ -233,9 +229,8 @@ int create_vm(struct vm_description *vm_desc,
struct vm **rtn_vm)
return 0;

err:
- if (vm->arch_vm.virt_ioapic != NULL) {
- vioapic_cleanup(vm->arch_vm.virt_ioapic);
- }
+
+ vioapic_cleanup(&vm->arch_vm.vioapic);

if (vm->arch_vm.m2p != NULL) {
free(vm->arch_vm.m2p);
@@ -281,8 +276,8 @@ int shutdown_vm(struct vm *vm)

ptdev_release_all_entries(vm);

- /* cleanup and free vioapic */
- vioapic_cleanup(vm->arch_vm.virt_ioapic);
+ /* cleanup vioapic */
+ vioapic_cleanup(&vm->arch_vm.vioapic);

/* Destroy secure world */
if (vm->sworld_control.flag.active) { @@ -358,7 +353,7 @@ int
reset_vm(struct vm *vm)
vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
}

- vioapic_reset(vm->arch_vm.virt_ioapic);
+ vioapic_reset(&vm->arch_vm.vioapic);
destroy_secure_world(vm, false);
vm->sworld_control.flag.active = 0UL;

diff --git a/hypervisor/dm/vioapic.c b/hypervisor/dm/vioapic.c index
b3eb83e..57bf748 100644
--- a/hypervisor/dm/vioapic.c
+++ b/hypervisor/dm/vioapic.c
@@ -32,23 +32,12 @@

#include <hypervisor.h>

-#define REDIR_ENTRIES_HW 120U /* SOS align with native ioapic */
#define RTBL_RO_BITS (uint32_t)(IOAPIC_RTE_REM_IRR |
IOAPIC_RTE_DELIVS)
#define NEED_TMR_UPDATE (IOAPIC_RTE_TRGRMOD |
IOAPIC_RTE_DELMOD
| IOAPIC_RTE_INTVEC)

#define ACRN_DBG_IOAPIC 6U
#define ACRN_IOAPIC_VERSION 0x11U

-struct vioapic {
- struct vm *vm;
- spinlock_t mtx;
- uint32_t id;
- uint32_t ioregsel;
- union ioapic_rte rtbl[REDIR_ENTRIES_HW];
- /* sum of pin asserts (+1) and deasserts (-1) */
- int32_t acnt[REDIR_ENTRIES_HW];
-};
-
#define VIOAPIC_LOCK(vioapic) spinlock_obtain(&((vioapic)->mtx))
#define VIOAPIC_UNLOCK(vioapic) spinlock_release(&((vioapic)->mtx))
@@ -61,17 +50,17 @@ static inline const char *pinstate_str(bool
asserted)
return (asserted) ? "asserted" : "deasserted"; }

-static struct vioapic *
+static struct virt_ioapic *
vm_ioapic(struct vm *vm)
{
- return (struct vioapic *)vm->arch_vm.virt_ioapic;
+ return (struct virt_ioapic *)&vm->arch_vm.vioapic;
}

/**
* @pre pin < vioapic_pincount(vm)
*/
static void
-vioapic_send_intr(struct vioapic *vioapic, uint32_t pin)
+vioapic_send_intr(struct virt_ioapic *vioapic, uint32_t pin)
{
uint32_t vector, dest, delmode;
union ioapic_rte rte;
@@ -107,7 +96,7 @@ vioapic_send_intr(struct vioapic *vioapic, uint32_t
pin)
* @pre pin < vioapic_pincount(vm)
*/
static void
-vioapic_set_pinstate(struct vioapic *vioapic, uint32_t pin, bool
newstate)
+vioapic_set_pinstate(struct virt_ioapic *vioapic, uint32_t pin, bool
+newstate)
{
int oldcnt, newcnt;
bool needintr;
@@ -152,7 +141,7 @@ enum irqstate {
static void
vioapic_set_irqstate(struct vm *vm, uint32_t irq, enum irqstate
irqstate) {
- struct vioapic *vioapic;
+ struct virt_ioapic *vioapic;
uint32_t pin = irq;

vioapic = vm_ioapic(vm);
@@ -199,7 +188,7 @@ vioapic_pulse_irq(struct vm *vm, uint32_t irq)
void vioapic_update_tmr(struct vcpu *vcpu) {
- struct vioapic *vioapic;
+ struct virt_ioapic *vioapic;
struct acrn_vlapic *vlapic;
union ioapic_rte rte;
uint32_t vector, delmode;
@@ -231,7 +220,7 @@ vioapic_update_tmr(struct vcpu *vcpu) }

static uint32_t
-vioapic_indirect_read(struct vioapic *vioapic, uint32_t addr)
+vioapic_indirect_read(struct virt_ioapic *vioapic, uint32_t addr)
{
uint32_t regnum;
uint32_t pin, pincount = vioapic_pincount(vioapic->vm); @@ -275,7
+264,8 @@ vioapic_indirect_read(struct vioapic *vioapic, uint32_t
addr)
* VIOAPIC_UNLOCK(vioapic) by caller.
*/
static void
-vioapic_indirect_write(struct vioapic *vioapic, uint32_t addr,
uint32_t data)
+vioapic_indirect_write(struct virt_ioapic *vioapic, uint32_t addr,
+ uint32_t data)
{
union ioapic_rte last, new;
uint64_t changed;
@@ -408,7 +398,7 @@ vioapic_indirect_write(struct vioapic *vioapic,
uint32_t addr, uint32_t data) }

static void
-vioapic_mmio_rw(struct vioapic *vioapic, uint64_t gpa,
+vioapic_mmio_rw(struct virt_ioapic *vioapic, uint64_t gpa,
uint32_t *data, bool do_read)
{
uint32_t offset;
@@ -450,7 +440,7 @@ vioapic_mmio_rw(struct vioapic *vioapic, uint64_t
gpa, void vioapic_process_eoi(struct vm *vm, uint32_t vector) {
- struct vioapic *vioapic;
+ struct virt_ioapic *vioapic;
uint32_t pin, pincount = vioapic_pincount(vm);
union ioapic_rte rte;

@@ -496,7 +486,7 @@ vioapic_process_eoi(struct vm *vm, uint32_t
vector) }

void
-vioapic_reset(struct vioapic *vioapic)
+vioapic_reset(struct virt_ioapic *vioapic)
{
uint32_t pin, pincount;

@@ -509,35 +499,27 @@ vioapic_reset(struct vioapic *vioapic)
vioapic->ioregsel = 0U;
}

-struct vioapic *
+void
vioapic_init(struct vm *vm)
{
- struct vioapic *vioapic;
+ vm->arch_vm.vioapic.vm = vm;
+ spinlock_init(&vm->arch_vm.vioapic.mtx);

- vioapic = calloc(1U, sizeof(struct vioapic));
- ASSERT(vioapic != NULL, "");
-
- vioapic->vm = vm;
- spinlock_init(&vioapic->mtx);
-
- vioapic_reset(vioapic);
+ vioapic_reset(&vm->arch_vm.vioapic);

register_mmio_emulation_handler(vm,
vioapic_mmio_access_handler,
(uint64_t)VIOAPIC_BASE,
(uint64_t)VIOAPIC_BASE + VIOAPIC_SIZE,
NULL);
-
- return vioapic;
}

void
-vioapic_cleanup(struct vioapic *vioapic)
+vioapic_cleanup(struct virt_ioapic *vioapic)
{
unregister_mmio_emulation_handler(vioapic->vm,
(uint64_t)VIOAPIC_BASE,
(uint64_t)VIOAPIC_BASE + VIOAPIC_SIZE);
- free(vioapic);
}

uint32_t
@@ -554,7 +536,7 @@ int vioapic_mmio_access_handler(struct vcpu
*vcpu,
struct io_request *io_req,
__unused void *handler_private_data) {
struct vm *vm = vcpu->vm;
- struct vioapic *vioapic;
+ struct virt_ioapic *vioapic;
struct mmio_request *mmio = &io_req->reqs.mmio;
uint64_t gpa = mmio->address;
int ret = 0;
@@ -588,7 +570,7 @@ int vioapic_mmio_access_handler(struct vcpu
*vcpu,
struct io_request *io_req,
*/
void vioapic_get_rte(struct vm *vm, uint32_t pin, union ioapic_rte
*rte) {
- struct vioapic *vioapic;
+ struct virt_ioapic *vioapic;

vioapic = vm_ioapic(vm);
*rte = vioapic->rtbl[pin];
diff --git a/hypervisor/include/arch/x86/guest/vioapic.h
b/hypervisor/include/arch/x86/guest/vioapic.h
index 55254f9..fb3778f 100644
--- a/hypervisor/include/arch/x86/guest/vioapic.h
+++ b/hypervisor/include/arch/x86/guest/vioapic.h
@@ -32,14 +32,25 @@
#define _VIOAPIC_H_

#include <apicreg.h>
-#include <vm.h>

#define VIOAPIC_BASE 0xFEC00000UL
#define VIOAPIC_SIZE 4096UL

-struct vioapic *vioapic_init(struct vm *vm);
-void vioapic_cleanup(struct vioapic *vioapic);
-void vioapic_reset(struct vioapic *vioapic);
+#define REDIR_ENTRIES_HW 120U /* SOS align with native ioapic */
+
+struct virt_ioapic {
+ struct vm *vm;
+ spinlock_t mtx;
+ uint32_t id;
+ uint32_t ioregsel;
+ union ioapic_rte rtbl[REDIR_ENTRIES_HW];
+ /* sum of pin asserts (+1) and deasserts (-1) */
+ int32_t acnt[REDIR_ENTRIES_HW];
+};
+
+void vioapic_init(struct vm *vm);
+void vioapic_cleanup(struct virt_ioapic *vioapic);
+void vioapic_reset(struct virt_ioapic *vioapic);

void vioapic_assert_irq(struct vm *vm, uint32_t irq);
void vioapic_deassert_irq(struct vm *vm, uint32_t irq);
diff --git a/hypervisor/include/arch/x86/guest/vm.h
b/hypervisor/include/arch/x86/guest/vm.h
index 20598ee..bb31fdd 100644
--- a/hypervisor/include/arch/x86/guest/vm.h
+++ b/hypervisor/include/arch/x86/guest/vm.h
@@ -98,7 +98,7 @@ struct vm_arch {
void *tmp_pg_array; /* Page array for tmp guest paging struct */
void *iobitmap[2];/* IO bitmap page array base address for this VM */
void *msr_bitmap; /* MSR bitmap page base address for this VM
*/
- void *virt_ioapic; /* Virtual IOAPIC base address */
+ struct virt_ioapic vioapic; /* Virtual IOAPIC base address */
struct acrn_vpic vpic; /* Virtual PIC */
struct virt_ioapic, struct acrn_vpic.
We might want to have the same naming style.
Will rename struct acrn_vpic --> struct virt_pic







/**
* A link to the IO handler of this VM.
diff --git a/hypervisor/include/arch/x86/hv_arch.h
b/hypervisor/include/arch/x86/hv_arch.h
index 933a1fd..e1d65fd 100644
--- a/hypervisor/include/arch/x86/hv_arch.h
+++ b/hypervisor/include/arch/x86/hv_arch.h
@@ -23,6 +23,7 @@
#include <host_pm.h>
#include <vpic.h>
#include <vuart.h>
+#include <vioapic.h>
#include <vm.h>
#include <cpuid.h>
#include <mmu.h>
@@ -35,7 +36,6 @@
#include <vtd.h>

#include <vlapic.h>
-#include <vioapic.h>
#include <guest.h>
#include <vmexit.h>
#include <cpufeatures.h>
--
2.7.4




Re: [PATCH 2/3] hv:Replace vuart pointer with instance in structure vm

Mingqiang Chi
 

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Eddie Dong
Sent: Thursday, August 23, 2018 9:08 AM
To: acrn-dev@...
Subject: Re: [acrn-dev] [PATCH 2/3] hv:Replace vuart pointer with instance in
structure vm



-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Mingqiang Chi
Sent: Wednesday, August 22, 2018 6:57 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH 2/3] hv:Replace vuart pointer with instance
in structure vm

From: Mingqiang Chi <mingqiang.chi@...>

Change vuart_init to void type
rename struct vuart -->struct virt_uart

Signed-off-by: Mingqiang Chi <mingqiang.chi@...>
---
hypervisor/arch/x86/assign.c | 2 +-
hypervisor/arch/x86/guest/vm.c | 4 +--
hypervisor/debug/console.c | 2 +-
hypervisor/debug/shell.c | 4 +--
hypervisor/debug/vuart.c | 48
+++++++++++++++-------------------
hypervisor/include/arch/x86/guest/vm.h | 2 +-
hypervisor/include/arch/x86/hv_arch.h | 1 +
hypervisor/include/debug/vuart.h | 19 +++++++-------
hypervisor/include/hv_debug.h | 1 -
9 files changed, 38 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/assign.c
b/hypervisor/arch/x86/assign.c index fa7c9d3..7d0c79c 100644
--- a/hypervisor/arch/x86/assign.c
+++ b/hypervisor/arch/x86/assign.c
@@ -142,7 +142,7 @@ lookup_entry_by_vintx(struct vm *vm, uint8_t
vpin,
static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info
*info) {
/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
- if (is_vm0(vm) && (vm->vuart != NULL) && (info->virt_pin == 4U)) {
+ if (is_vm0(vm) && (info->virt_pin == 4U)) {
return true;
} else {
return false;
diff --git a/hypervisor/arch/x86/guest/vm.c
b/hypervisor/arch/x86/guest/vm.c index 8931b38..391fc3b 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -196,14 +196,14 @@ int create_vm(struct vm_description *vm_desc,
struct vm **rtn_vm)
}

/* Create virtual uart */
- vm->vuart = vuart_init(vm);
+ vuart_init(vm);
This API is better to use &( vm->vuart), as parameter.
It will use vm in vuart_init, here should use vm as parameter
vm->vuart.vm = vm;
vuart_register_io_handler(vm);



}
vpic_init(vm);

#ifdef CONFIG_PARTITION_MODE
/* Create virtual uart */
if (vm_desc->vm_vuart) {
- vm->vuart = vuart_init(vm);
+ vuart_init(vm);
}
vrtc_init(vm);
vpci_init(vm);
diff --git a/hypervisor/debug/console.c b/hypervisor/debug/console.c
index
88a2088..f6fe701 100644
--- a/hypervisor/debug/console.c
+++ b/hypervisor/debug/console.c
@@ -34,7 +34,7 @@ char console_getc(void)

static void console_timer_callback(__unused void *data) {
- struct vuart *vu;
+ struct virt_uart *vu;

/* Kick HV-Shell and Uart-Console tasks */
vu = vuart_console_active();
diff --git a/hypervisor/debug/shell.c b/hypervisor/debug/shell.c index
eeddaf9..cf3c234 100644
--- a/hypervisor/debug/shell.c
+++ b/hypervisor/debug/shell.c
@@ -721,7 +721,7 @@ static int shell_to_sos_console(__unused int argc,
__unused char **argv)
uint16_t guest_no = 0U;

struct vm *vm;
- struct vuart *vuart;
+ struct virt_uart *vuart;
vuart is an instance in "struct vm", not pointer now. When using as pointer,
how about we just use *vu here?
And check the entire file to be same style.
Will check the entire file, will rename"struct virt_uart *vu;"


#ifdef CONFIG_PARTITION_MODE
if (argc == 2U) {
@@ -735,7 +735,7 @@ static int shell_to_sos_console(__unused int argc,
__unused char **argv)
if (vm == NULL) {
return -EINVAL;
}
- vuart = vm->vuart;
+ vuart = &vm->vuart;
Let us use this style:
&(vm->vuart) to avoid confuse. This will be our additional rule. Please
change in entire file.
Will add ()
Also for macro: vm_vuart(vm)... Please check if it is a function like API that
we should avoid or not.
If it is OK, then use it here too (and more other places to be same).
Will replace it in entire file such as:
vuart = &vm->vuart; --> vu = vm_vuart(vm);





if (vuart == NULL) {
snprintf(temp_str, TEMP_STR_SIZE,
"\r\nError: serial console driver is not "
diff --git a/hypervisor/debug/vuart.c b/hypervisor/debug/vuart.c index
37b272e..7fe8f64 100644
--- a/hypervisor/debug/vuart.c
+++ b/hypervisor/debug/vuart.c
@@ -41,7 +41,7 @@
#define vuart_lock(vu) spinlock_obtain(&((vu)->lock))
#define vuart_unlock(vu) spinlock_release(&((vu)->lock))

-#define vm_vuart(vm) (vm->vuart)
+#define vm_vuart(vm) (&vm->vuart)

#ifdef CONFIG_PARTITION_MODE
int8_t vuart_vmid = - 1;
@@ -100,7 +100,7 @@ static uint32_t fifo_numchars(struct fifo *fifo)
*
* Return an interrupt reason if one is available.
*/
-static uint8_t vuart_intr_reason(struct vuart *vu)
+static uint8_t vuart_intr_reason(struct virt_uart *vu)
{
if (((vu->lsr & LSR_OE) != 0U) && ((vu->ier & IER_ELSI) != 0U)) {
return IIR_RLS;
@@ -118,7 +118,7 @@ static uint8_t vuart_intr_reason(struct vuart *vu)
* Toggle the COM port's intr pin depending on whether or not we have
an
* interrupt condition to report to the processor.
*/
-static void vuart_toggle_intr(struct vuart *vu)
+static void vuart_toggle_intr(struct virt_uart *vu)
{
uint8_t intr_reason;

@@ -139,7 +139,7 @@ static void vuart_write(__unused struct
vm_io_handler *hdlr, struct vm *vm,
uint16_t offset_arg, __unused size_t width, uint32_t value) {
uint16_t offset = offset_arg;
- struct vuart *vu = vm_vuart(vm);
+ struct virt_uart *vu = vm_vuart(vm);
uint8_t value_u8 = (uint8_t)value;

offset -= vu->base;
@@ -226,7 +226,7 @@ static uint32_t vuart_read(__unused struct
vm_io_handler *hdlr, struct vm *vm, {
uint16_t offset = offset_arg;
uint8_t iir, reg, intr_reason;
- struct vuart *vu = vm_vuart(vm);
+ struct virt_uart *vu = vm_vuart(vm);

offset -= vu->base;
vuart_lock(vu);
@@ -314,7 +314,7 @@ static void vuart_register_io_handler(struct vm
*vm)
/**
* @pre vu != NULL
*/
-void vuart_console_tx_chars(struct vuart *vu)
+void vuart_console_tx_chars(struct virt_uart *vu)
{
vuart_lock(vu);
while (fifo_numchars(&vu->txfifo) > 0U) { @@ -327,7 +327,7 @@ void
vuart_console_tx_chars(struct vuart *vu)
* @pre vu != NULL
* @pre vu->active == true
*/
-void vuart_console_rx_chars(struct vuart *vu)
+void vuart_console_rx_chars(struct virt_uart *vu)
{
char ch = -1;

@@ -348,7 +348,7 @@ void vuart_console_rx_chars(struct vuart *vu)
vuart_unlock(vu);
}

-struct vuart *vuart_console_active(void)
+struct virt_uart *vuart_console_active(void)
{
#ifdef CONFIG_PARTITION_MODE
struct vm *vm;
@@ -362,36 +362,30 @@ struct vuart *vuart_console_active(void)
struct vm *vm = get_vm_from_vmid(0U); #endif

- if ((vm != NULL) && (vm->vuart != NULL)) {
- struct vuart *vu = vm->vuart;
+ if (vm != NULL) {
+ struct virt_uart *vu = &vm->vuart;

if (vu->active) {
- return vm->vuart;
+ return &vm->vuart;
}
}
return NULL;
}

-void *vuart_init(struct vm *vm)
+void vuart_init(struct vm *vm)
{
- struct vuart *vu;
uint32_t divisor;

- vu = calloc(1U, sizeof(struct vuart));
- ASSERT(vu != NULL, "");
-
/* Set baud rate*/
divisor = (UART_CLOCK_RATE / BAUD_9600) >> 4U;
- vu->dll = (uint8_t)divisor;
- vu->dlh = (uint8_t)(divisor >> 8U);
-
- vu->active = false;
- vu->base = COM1_BASE;
- vu->vm = vm;
- fifo_init(&vu->rxfifo, RX_FIFO_SIZE);
- fifo_init(&vu->txfifo, TX_FIFO_SIZE);
- vuart_lock_init(vu);
+ vm->vuart.dll = (uint8_t)divisor;
+ vm->vuart.dlh = (uint8_t)(divisor >> 8U);
+
+ vm->vuart.active = false;
+ vm->vuart.base = COM1_BASE;
+ vm->vuart.vm = vm;
+ fifo_init(&vm->vuart.rxfifo, RX_FIFO_SIZE);
+ fifo_init(&vm->vuart.txfifo, TX_FIFO_SIZE);
+ vuart_lock_init(&vm->vuart);
vuart_register_io_handler(vm);
-
- return vu;
}
diff --git a/hypervisor/include/arch/x86/guest/vm.h
b/hypervisor/include/arch/x86/guest/vm.h
index f852122..20598ee 100644
--- a/hypervisor/include/arch/x86/guest/vm.h
+++ b/hypervisor/include/arch/x86/guest/vm.h
@@ -133,7 +133,7 @@ struct vm {
struct vm_pm_info pm; /* Reference to this VM's arch information */
struct vm_arch arch_vm; /* Reference to this VM's arch information
*/
enum vm_state state; /* VM state */
- void *vuart; /* Virtual UART */
+ struct virt_uart vuart; /* Virtual UART */
enum vpic_wire_mode wire_mode;
struct iommu_domain *iommu; /* iommu domain of this VM */
struct list_head list; /* list of VM */ diff --git
a/hypervisor/include/arch/x86/hv_arch.h
b/hypervisor/include/arch/x86/hv_arch.h
index 9ed9901..933a1fd 100644
--- a/hypervisor/include/arch/x86/hv_arch.h
+++ b/hypervisor/include/arch/x86/hv_arch.h
@@ -22,6 +22,7 @@
#include <guest_pm.h>
#include <host_pm.h>
#include <vpic.h>
+#include <vuart.h>
#include <vm.h>
#include <cpuid.h>
#include <mmu.h>
diff --git a/hypervisor/include/debug/vuart.h
b/hypervisor/include/debug/vuart.h
index 332be46..ac42f08 100644
--- a/hypervisor/include/debug/vuart.h
+++ b/hypervisor/include/debug/vuart.h
@@ -38,7 +38,7 @@ struct fifo {
uint32_t size; /* size of the fifo */
};

-struct vuart {
+struct virt_uart {
uint8_t data; /* Data register (R/W) */
uint8_t ier; /* Interrupt enable register (R/W) */
uint8_t lcr; /* Line control register (R/W) */
@@ -63,21 +63,20 @@ struct vuart {
extern int8_t vuart_vmid;
#endif
#ifdef HV_DEBUG
-void *vuart_init(struct vm *vm);
-struct vuart *vuart_console_active(void);
-void vuart_console_tx_chars(struct vuart *vu);
-void vuart_console_rx_chars(struct vuart *vu);
+void vuart_init(struct vm *vm);
+struct virt_uart *vuart_console_active(void);
+void vuart_console_tx_chars(struct virt_uart *vu);
+void vuart_console_rx_chars(struct virt_uart *vu);
#else
-static inline void *vuart_init(__unused struct vm *vm)
+static inline void vuart_init(__unused struct vm *vm)
{
- return NULL;
}
-static inline struct vuart *vuart_console_active(void)
+static inline struct virt_uart *vuart_console_active(void)
{
return NULL;
}
-static inline void vuart_console_tx_chars(__unused struct vuart *vu) {}
-static inline void vuart_console_rx_chars(__unused struct vuart *vu) {}
+static inline void vuart_console_tx_chars(__unused struct virt_uart *vu) {}
+static inline void vuart_console_rx_chars(__unused struct virt_uart *vu) {}
#endif /*HV_DEBUG*/

#endif
diff --git a/hypervisor/include/hv_debug.h
b/hypervisor/include/hv_debug.h index 7198132..896c7a2 100644
--- a/hypervisor/include/hv_debug.h
+++ b/hypervisor/include/hv_debug.h
@@ -8,7 +8,6 @@
#define HV_DEBUG_H

#include <logmsg.h>
-#include <vuart.h>
#include <console.h>
#include <dump.h>
#include <trace.h>
--
2.7.4