Re: [PATCH] HV: Modularize vlapic to reduce usage of acrn_vm


Arindam Roy <arindam.roy@...>
 

Hi Jason,
In future in case one needs locking, then vlapic just needs to know the hw info part, instead of the whole acrn_vm.
Just trying to reduce the dependency on acrn_vm.
If not ok, will drop the patch.
Arindam

On Jul 22, 2019, at 7:06 PM, Chen, Jason CJ <jason.cj.chen@...> wrote:

On Mon, Jul 15, 2019 at 03:09:54PM -0700, Arindam Roy wrote:
V1:
In order to modularize vlapic, reduce the usage of acrn_vm.
It's observed that most of the functionality in vlapic can be achieved
by using the substructure vm_hw_info inside acrn_vm, instead
of using the whole of acrn_vm.

This patch introduces initial changes to achieve the same.

V2:
Modified foreach_vcpu to accept vm_hw_info *hw as argument
instead of acrn_vm *vm.
Removed foreach_vcpu_lapic and replaced it with the modified
foreach_vcpu.

Signed-off-by: Arindam Roy <arindam.roy@...>
hi, Roy,

Thanks for the patch. I actually have a question here: what's the real difference between using acrn_vm and vm_hw_info
here? vm_hw_info still belongs to acrn_vm, and it does not belong to vlapic. Maybe it's not a real modularization effort
here :)

Thx
Jason

---
hypervisor/arch/x86/guest/assign.c | 4 +-
hypervisor/arch/x86/guest/ept.c | 9 ++--
hypervisor/arch/x86/guest/vlapic.c | 57 +++++++++++-----------
hypervisor/arch/x86/guest/vm.c | 16 ++++--
hypervisor/common/hypercall.c | 5 +-
hypervisor/debug/profiling.c | 4 +-
hypervisor/debug/shell.c | 4 +-
hypervisor/include/arch/x86/guest/vcpu.h | 8 +--
hypervisor/include/arch/x86/guest/vlapic.h | 6 ++-
hypervisor/include/arch/x86/guest/vm.h | 7 +--
10 files changed, 68 insertions(+), 52 deletions(-)

diff --git a/hypervisor/arch/x86/guest/assign.c b/hypervisor/arch/x86/guest/assign.c
index 23c14444..80c04149 100644
--- a/hypervisor/arch/x86/guest/assign.c
+++ b/hypervisor/arch/x86/guest/assign.c
@@ -107,7 +107,7 @@ static void ptirq_build_physical_msi(struct acrn_vm *vm, struct ptirq_msi_info *
dest = info->vmsi_addr.bits.dest_field;
phys = (info->vmsi_addr.bits.dest_mode == MSI_ADDR_DESTMODE_PHYS);

- vlapic_calc_dest(vm, &vdmask, false, dest, phys, false);
+ vlapic_calc_dest(&vm->hw, &vdmask, false, dest, phys, false);
pdmask = vcpumask2pcpumask(vm, vdmask);

/* get physical delivery mode */
@@ -201,7 +201,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
/* physical destination cpu mask */
phys = (virt_rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
dest = (uint32_t)virt_rte.bits.dest_field;
- vlapic_calc_dest(vm, &vdmask, false, dest, phys, false);
+ vlapic_calc_dest(&vm->hw, &vdmask, false, dest, phys, false);
pdmask = vcpumask2pcpumask(vm, vdmask);

/* physical delivery mode */
diff --git a/hypervisor/arch/x86/guest/ept.c b/hypervisor/arch/x86/guest/ept.c
index 8b5eeeea..a48f41b7 100644
--- a/hypervisor/arch/x86/guest/ept.c
+++ b/hypervisor/arch/x86/guest/ept.c
@@ -100,6 +100,7 @@ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,
uint16_t i;
struct acrn_vcpu *vcpu;
uint64_t prot = prot_orig;
+ struct vm_hw_info *hw = &vm->hw;

dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx size: 0x%016llx prot: 0x%016x\n",
__func__, vm->vm_id, hpa, gpa, size, prot);
@@ -114,7 +115,7 @@ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,

mmu_add(pml4_page, hpa, gpa, size, prot, &vm->arch_vm.ept_mem_ops);

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
}
}
@@ -126,6 +127,7 @@ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,
struct acrn_vcpu *vcpu;
uint16_t i;
uint64_t local_prot = prot_set;
+ struct vm_hw_info *hw = &vm->hw;

dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);

@@ -135,7 +137,7 @@ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,

mmu_modify_or_del(pml4_page, gpa, size, local_prot, prot_clr, &(vm->arch_vm.ept_mem_ops), MR_MODIFY);

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
}
}
@@ -146,12 +148,13 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t
{
struct acrn_vcpu *vcpu;
uint16_t i;
+ struct vm_hw_info *hw = &vm->hw;

dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);

mmu_modify_or_del(pml4_page, gpa, size, 0UL, 0UL, &vm->arch_vm.ept_mem_ops, MR_DEL);

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
}
}
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 97d61c47..2f01fc5a 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -115,22 +115,22 @@ static inline bool vlapic_enabled(const struct acrn_vlapic *vlapic)
}

static struct acrn_vlapic *
-vm_lapic_from_vcpu_id(struct acrn_vm *vm, uint16_t vcpu_id)
+vm_lapic_from_vcpu_id(struct vm_hw_info *hw, uint16_t vcpu_id)
{
struct acrn_vcpu *vcpu;

- vcpu = vcpu_from_vid(vm, vcpu_id);
+ vcpu = &hw->vcpu_array[vcpu_id];

return vcpu_vlapic(vcpu);
}

-static uint16_t vm_apicid2vcpu_id(struct acrn_vm *vm, uint32_t lapicid)
+static uint16_t vm_apicid2vcpu_id(struct vm_hw_info *hw, uint32_t lapicid)
{
uint16_t i;
struct acrn_vcpu *vcpu;
uint16_t cpu_id = INVALID_CPU_ID;

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
const struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
if (vlapic_get_apicid(vlapic) == lapicid) {
cpu_id = vcpu->vcpu_id;
@@ -970,12 +970,12 @@ vlapic_trigger_lvt(struct acrn_vlapic *vlapic, uint32_t lvt_index)
return ret;
}

-static inline void set_dest_mask_phys(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest)
+static inline void set_dest_mask_phys(struct vm_hw_info *hw, uint64_t *dmask, uint32_t dest)
{
uint16_t vcpu_id;

- vcpu_id = vm_apicid2vcpu_id(vm, dest);
- if (vcpu_id < vm->hw.created_vcpus) {
+ vcpu_id = vm_apicid2vcpu_id(hw, dest);
+ if (vcpu_id < hw->created_vcpus) {
bitmap_set_nolock(vcpu_id, dmask);
}
}
@@ -1038,27 +1038,26 @@ static inline bool is_dest_field_matched(const struct acrn_vlapic *vlapic, uint3
* addressing specified by the (dest, phys, lowprio) tuple.
*/
void
-vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
+vlapic_calc_dest(struct vm_hw_info *hw, uint64_t *dmask, bool is_broadcast,
uint32_t dest, bool phys, bool lowprio)
{
struct acrn_vlapic *vlapic, *lowprio_dest = NULL;
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;
-
*dmask = 0UL;
if (is_broadcast) {
/* Broadcast in both logical and physical modes. */
- *dmask = vm_active_cpus(vm);
+ *dmask = vm_active_cpus(hw);
} else if (phys) {
/* Physical mode: "dest" is local APIC ID. */
- set_dest_mask_phys(vm, dmask, dest);
+ set_dest_mask_phys(hw, dmask, dest);
} else {
/*
* Logical mode: "dest" is message destination addr
* to be compared with the logical APIC ID in LDR.
*/
- foreach_vcpu(vcpu_id, vm, vcpu) {
- vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
+ foreach_vcpu(vcpu_id, hw, vcpu) {
+ vlapic = vm_lapic_from_vcpu_id(hw, vcpu_id);
if (!is_dest_field_matched(vlapic, dest)) {
continue;
}
@@ -1094,7 +1093,7 @@ vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
* @pre is_x2apic_enabled(vlapic) == true
*/
void
-vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
+vlapic_calc_dest_lapic_pt(struct vm_hw_info *hw, uint64_t *dmask, bool is_broadcast,
uint32_t dest, bool phys)
{
struct acrn_vlapic *vlapic;
@@ -1104,17 +1103,17 @@ vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast
*dmask = 0UL;
if (is_broadcast) {
/* Broadcast in both logical and physical modes. */
- *dmask = vm_active_cpus(vm);
+ *dmask = vm_active_cpus(hw);
} else if (phys) {
/* Physical mode: "dest" is local APIC ID. */
- set_dest_mask_phys(vm, dmask, dest);
+ set_dest_mask_phys(hw, dmask, dest);
} else {
/*
* Logical mode: "dest" is message destination addr
* to be compared with the logical APIC ID in LDR.
*/
- foreach_vcpu(vcpu_id, vm, vcpu) {
- vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
+ foreach_vcpu(vcpu_id, hw, vcpu) {
+ vlapic = vm_lapic_from_vcpu_id(hw, vcpu_id);
if (!is_dest_field_matched(vlapic, dest)) {
continue;
}
@@ -1211,16 +1210,16 @@ static void vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)

switch (shorthand) {
case APIC_DEST_DESTFLD:
- vlapic_calc_dest(vlapic->vm, &dmask, is_broadcast, dest, phys, false);
+ vlapic_calc_dest(&vlapic->vm->hw, &dmask, is_broadcast, dest, phys, false);
break;
case APIC_DEST_SELF:
bitmap_set_nolock(vlapic->vcpu->vcpu_id, &dmask);
break;
case APIC_DEST_ALLISELF:
- dmask = vm_active_cpus(vlapic->vm);
+ dmask = vm_active_cpus(&vlapic->vm->hw);
break;
case APIC_DEST_ALLESELF:
- dmask = vm_active_cpus(vlapic->vm);
+ dmask = vm_active_cpus(&vlapic->vm->hw);
bitmap_clear_nolock(vlapic->vcpu->vcpu_id, &dmask);
break;
default:
@@ -1760,7 +1759,7 @@ vlapic_receive_intr(struct acrn_vm *vm, bool level, uint32_t dest, bool phys,
* all interrupts originating from the ioapic or MSI specify the
* 'dest' in the legacy xAPIC format.
*/
- vlapic_calc_dest(vm, &dmask, false, dest, phys, lowprio);
+ vlapic_calc_dest(&vm->hw, &dmask, false, dest, phys, lowprio);

for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
struct acrn_vlapic *vlapic;
@@ -1825,14 +1824,14 @@ vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t lvt_ind
error = -EINVAL;
} else {
if (vcpu_id == BROADCAST_CPU_ID) {
- dmask = vm_active_cpus(vm);
+ dmask = vm_active_cpus(&vm->hw);
} else {
bitmap_set_nolock(vcpu_id, &dmask);
}
error = 0;
for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
if ((dmask & (1UL << vcpu_id)) != 0UL) {
- vlapic = vm_lapic_from_vcpu_id(vm, vcpu_id);
+ vlapic = vm_lapic_from_vcpu_id(&(vm->hw), vcpu_id);
error = vlapic_trigger_lvt(vlapic, lvt_index);
if (error != 0) {
break;
@@ -1961,7 +1960,7 @@ static inline uint32_t x2apic_msr_to_regoff(uint32_t msr)
*/

static int32_t
-vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
+vlapic_x2apic_pt_icr_access(struct vm_hw_info *hw, uint64_t val)
{
uint32_t papic_id, vapic_id = (uint32_t)(val >> 32U);
uint32_t icr_low = (uint32_t)val;
@@ -1982,9 +1981,9 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
* TODO: To support logical destination and shorthand modes
*/
} else {
- vcpu_id = vm_apicid2vcpu_id(vm, vapic_id);
- if ((vcpu_id < vm->hw.created_vcpus) && (vm->hw.vcpu_array[vcpu_id].state != VCPU_OFFLINE)) {
- target_vcpu = vcpu_from_vid(vm, vcpu_id);
+ vcpu_id = vm_apicid2vcpu_id(hw, vapic_id);
+ if ((vcpu_id < hw->created_vcpus) && (hw->vcpu_array[vcpu_id].state != VCPU_OFFLINE)) {
+ target_vcpu = &(hw->vcpu_array[vcpu_id]);

switch (mode) {
case APIC_DELMODE_INIT:
@@ -2080,7 +2079,7 @@ int32_t vlapic_x2apic_write(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val)
if (is_lapic_pt_configured(vcpu->vm)) {
switch (msr) {
case MSR_IA32_EXT_APIC_ICR:
- error = vlapic_x2apic_pt_icr_access(vcpu->vm, val);
+ error = vlapic_x2apic_pt_icr_access(&vcpu->vm->hw, val);
break;
default:
pr_err("%s: unexpected MSR[0x%x] write with lapic_pt", __func__, msr);
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 1c80b4c9..a34fa173 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -549,6 +549,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
struct acrn_vcpu *vcpu = NULL;
struct acrn_vm_config *vm_config = NULL;
int32_t ret = 0;
+ struct vm_hw_info *hw = NULL;

pause_vm(vm);

@@ -556,7 +557,8 @@ int32_t shutdown_vm(struct acrn_vm *vm)
if (vm->state == VM_PAUSED) {
vm->state = VM_POWERED_OFF;

- foreach_vcpu(i, vm, vcpu) {
+ hw = &vm->hw;
+ foreach_vcpu(i, hw, vcpu) {
reset_vcpu(vcpu);
offline_vcpu(vcpu);

@@ -625,9 +627,10 @@ int32_t reset_vm(struct acrn_vm *vm)
uint16_t i;
struct acrn_vcpu *vcpu = NULL;
int32_t ret;
+ struct vm_hw_info *hw = &vm->hw;

if (vm->state == VM_PAUSED) {
- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
reset_vcpu(vcpu);
}
/*
@@ -661,6 +664,8 @@ void pause_vm(struct acrn_vm *vm)
{
uint16_t i;
struct acrn_vcpu *vcpu = NULL;
+ struct vm_hw_info *hw = &vm->hw;
+

if (vm->state != VM_PAUSED) {
if (is_rt_vm(vm)) {
@@ -670,14 +675,14 @@ void pause_vm(struct acrn_vm *vm)
* - It is created but doesn't start
*/
if ((vm->state == VM_POWERING_OFF) || (vm->state == VM_CREATED)) {
- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
pause_vcpu(vcpu, VCPU_ZOMBIE);
}

vm->state = VM_PAUSED;
}
} else {
- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
pause_vcpu(vcpu, VCPU_ZOMBIE);
}

@@ -805,11 +810,12 @@ void update_vm_vlapic_state(struct acrn_vm *vm)
struct acrn_vcpu *vcpu;
uint16_t vcpus_in_x2apic, vcpus_in_xapic;
enum vm_vlapic_state vlapic_state = VM_VLAPIC_XAPIC;
+ struct vm_hw_info *hw = &vm->hw;

vcpus_in_x2apic = 0U;
vcpus_in_xapic = 0U;
spinlock_obtain(&vm->vm_lock);
- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
if (is_x2apic_enabled(vcpu_vlapic(vcpu))) {
vcpus_in_x2apic++;
} else if (is_xapic_enabled(vcpu_vlapic(vcpu))) {
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 45353727..c98ce825 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -53,10 +53,11 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid)
struct acrn_vcpu *vcpu;
uint16_t i;
int32_t ret = 0;
+ struct vm_hw_info *hw = &vm->hw;

pr_info("sos offline cpu with lapicid %lld", lapicid);

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
if (vlapic_get_apicid(vcpu_vlapic(vcpu)) == lapicid) {
/* should not offline BSP */
if (vcpu->vcpu_id == BOOT_CPU_ID) {
@@ -439,7 +440,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
* the delivery mode of vmsi will be forwarded to ICR delievry field
* and handled by hardware.
*/
- vlapic_calc_dest_lapic_pt(vm, &vdmask, false, vdest, phys);
+ vlapic_calc_dest_lapic_pt(&vm->hw, &vdmask, false, vdest, phys);
dev_dbg(ACRN_DBG_LAPICPT, "%s: vcpu destination mask 0x%016llx", __func__, vdmask);

vcpu_id = ffs64(vdmask);
diff --git a/hypervisor/debug/profiling.c b/hypervisor/debug/profiling.c
index 84a54e98..c3d050bb 100644
--- a/hypervisor/debug/profiling.c
+++ b/hypervisor/debug/profiling.c
@@ -851,6 +851,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
uint16_t i, j;
struct profiling_vm_info_list vm_info_list;
uint16_t pcpu_nums = get_pcpu_nums();
+ struct vm_hw_info *hw = NULL;

(void)memset((void *)&vm_info_list, 0U, sizeof(vm_info_list));

@@ -888,7 +889,8 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
tmp_vm->vm_id, 16U);
vm_info_list.vm_list[vm_idx].num_vcpus = 0;
i = 0U;
- foreach_vcpu(i, tmp_vm, vcpu) {
+ hw = &tmp_vm->hw;
+ foreach_vcpu(i, hw, vcpu) {
vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id
= vcpu->vcpu_id;
vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id
diff --git a/hypervisor/debug/shell.c b/hypervisor/debug/shell.c
index 65350f5f..4db3c705 100644
--- a/hypervisor/debug/shell.c
+++ b/hypervisor/debug/shell.c
@@ -629,6 +629,7 @@ static int32_t shell_list_vcpu(__unused int32_t argc, __unused char **argv)
char temp_str[MAX_STR_SIZE];
struct acrn_vm *vm;
struct acrn_vcpu *vcpu;
+ struct vm_hw_info *hw = NULL;
char state[32];
uint16_t i;
uint16_t idx;
@@ -641,7 +642,8 @@ static int32_t shell_list_vcpu(__unused int32_t argc, __unused char **argv)
if (is_poweroff_vm(vm)) {
continue;
}
- foreach_vcpu(i, vm, vcpu) {
+ hw = &vm->hw;
+ foreach_vcpu(i, hw, vcpu) {
switch (vcpu->state) {
case VCPU_INIT:
(void)strncpy_s(state, 32U, "Init", 32U);
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index aead6ef8..3f5dc9b0 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -153,10 +153,10 @@
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */

-#define foreach_vcpu(idx, vm, vcpu) \
- for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
- (idx) < (vm)->hw.created_vcpus; \
- (idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
+#define foreach_vcpu(idx, hw, vcpu) \
+ for ((idx) = 0U, (vcpu) = &(hw->vcpu_array[(idx)]); \
+ (idx) < hw->created_vcpus; \
+ (idx)++, (vcpu) = &(hw->vcpu_array[(idx)])) \
if (vcpu->state != VCPU_OFFLINE)

enum vcpu_state {
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h
index d3a7e197..1e6a0e1b 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -44,6 +44,8 @@

#define VLAPIC_MAXLVT_INDEX APIC_LVT_CMCI

+struct vm_hw_info;
+
struct vlapic_pir_desc {
uint64_t pir[4];
uint64_t pending;
@@ -191,9 +193,9 @@ int32_t apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu);
void vlapic_update_tpr_threshold(const struct acrn_vlapic *vlapic);
int32_t tpr_below_threshold_vmexit_handler(struct acrn_vcpu *vcpu);
-void vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
+void vlapic_calc_dest(struct vm_hw_info *hw, uint64_t *dmask, bool is_broadcast,
uint32_t dest, bool phys, bool lowprio);
-void vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
+void vlapic_calc_dest_lapic_pt(struct vm_hw_info *hw, uint64_t *dmask, bool is_broadcast,
uint32_t dest, bool phys);
bool is_x2apic_enabled(const struct acrn_vlapic *vlapic);
bool is_xapic_enabled(const struct acrn_vlapic *vlapic);
diff --git a/hypervisor/include/arch/x86/guest/vm.h b/hypervisor/include/arch/x86/guest/vm.h
index 23a9a206..6f59cd60 100644
--- a/hypervisor/include/arch/x86/guest/vm.h
+++ b/hypervisor/include/arch/x86/guest/vm.h
@@ -151,13 +151,13 @@ struct acrn_vm {
/*
* @pre vlapic != NULL
*/
-static inline uint64_t vm_active_cpus(const struct acrn_vm *vm)
+static inline uint64_t vm_active_cpus(const struct vm_hw_info *hw)
{
uint64_t dmask = 0UL;
uint16_t i;
const struct acrn_vcpu *vcpu;

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
bitmap_set_nolock(vcpu->vcpu_id, &dmask);
}

@@ -177,8 +177,9 @@ static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_
{
uint16_t i;
struct acrn_vcpu *vcpu, *target_vcpu = NULL;
+ struct vm_hw_info *hw = &vm->hw;

- foreach_vcpu(i, vm, vcpu) {
+ foreach_vcpu(i, hw, vcpu) {
if (vcpu->pcpu_id == pcpu_id) {
target_vcpu = vcpu;
break;
--
2.17.1



--

Thanks

Jason

Join acrn-dev@lists.projectacrn.org to automatically receive all group messages.