
[RFC PATCH v3 4/6] hv: Add vlapic_has_pending_intr of apicv to check pending interrupts

Shuo A Liu
 

Sometimes the HV wants to know whether there are pending interrupts for a vcpu.
Add a .has_pending_intr interface to acrn_apicv_ops and report the
pending-interrupt status by checking the IRRs of apicv.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vlapic.c | 29 +++++++++++++++++++++++++++++
hypervisor/include/arch/x86/guest/vlapic.h | 2 ++
2 files changed, 31 insertions(+)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 8c79a4d..1c7bdfb 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1745,6 +1745,11 @@ static bool ptapic_has_pending_delivery_intr(__unused struct acrn_vcpu *vcpu)
return false;
}

+static bool ptapic_has_pending_intr(__unused struct acrn_vcpu *vcpu)
+{
+ return false;
+}
+
static bool ptapic_invalid(__unused uint32_t offset)
{
return false;
@@ -1754,6 +1759,7 @@ static const struct acrn_apicv_ops ptapic_ops = {
.accept_intr = ptapic_accept_intr,
.inject_intr = ptapic_inject_intr,
.has_pending_delivery_intr = ptapic_has_pending_delivery_intr,
+ .has_pending_intr = ptapic_has_pending_intr,
.apic_read_access_may_valid = ptapic_invalid,
.apic_write_access_may_valid = ptapic_invalid,
.x2apic_read_msr_may_valid = ptapic_invalid,
@@ -2379,6 +2385,27 @@ bool vlapic_has_pending_delivery_intr(struct acrn_vcpu *vcpu)
return vlapic->ops->has_pending_delivery_intr(vcpu);
}

+static bool apicv_basic_has_pending_intr(struct acrn_vcpu *vcpu)
+{
+ struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
+ uint32_t vector;
+
+ vector = vlapic_find_highest_irr(vlapic);
+
+ return vector != 0UL;
+}
+
+static bool apicv_advanced_has_pending_intr(struct acrn_vcpu *vcpu)
+{
+ return apicv_basic_has_pending_intr(vcpu);
+}
+
+bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu)
+{
+ struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
+ return vlapic->ops->has_pending_intr(vcpu);
+}
+
static bool apicv_basic_apic_read_access_may_valid(__unused uint32_t offset)
{
return true;
@@ -2592,6 +2619,7 @@ static const struct acrn_apicv_ops apicv_basic_ops = {
.accept_intr = apicv_basic_accept_intr,
.inject_intr = apicv_basic_inject_intr,
.has_pending_delivery_intr = apicv_basic_has_pending_delivery_intr,
+ .has_pending_intr = apicv_basic_has_pending_intr,
.apic_read_access_may_valid = apicv_basic_apic_read_access_may_valid,
.apic_write_access_may_valid = apicv_basic_apic_write_access_may_valid,
.x2apic_read_msr_may_valid = apicv_basic_x2apic_read_msr_may_valid,
@@ -2602,6 +2630,7 @@ static const struct acrn_apicv_ops apicv_advanced_ops = {
.accept_intr = apicv_advanced_accept_intr,
.inject_intr = apicv_advanced_inject_intr,
.has_pending_delivery_intr = apicv_advanced_has_pending_delivery_intr,
+ .has_pending_intr = apicv_advanced_has_pending_intr,
.apic_read_access_may_valid = apicv_advanced_apic_read_access_may_valid,
.apic_write_access_may_valid = apicv_advanced_apic_write_access_may_valid,
.x2apic_read_msr_may_valid = apicv_advanced_x2apic_read_msr_may_valid,
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h
index f271a4e..8feea92 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -100,6 +100,7 @@ struct acrn_apicv_ops {
void (*accept_intr)(struct acrn_vlapic *vlapic, uint32_t vector, bool level);
bool (*inject_intr)(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
bool (*has_pending_delivery_intr)(struct acrn_vcpu *vcpu);
+ bool (*has_pending_intr)(struct acrn_vcpu *vcpu);
bool (*apic_read_access_may_valid)(uint32_t offset);
bool (*apic_write_access_may_valid)(uint32_t offset);
bool (*x2apic_read_msr_may_valid)(uint32_t offset);
@@ -118,6 +119,7 @@ void vlapic_set_apicv_ops(void);

bool vlapic_inject_intr(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
bool vlapic_has_pending_delivery_intr(struct acrn_vcpu *vcpu);
+bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu);

/**
* @brief Get physical address to PIR description.
--
2.8.3
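
For context, a minimal sketch of the intended consumer, modeled on the HLT-emulation patch later in this series (the v2 6/7 handler below used wait_vcpu_event()/VCPU_EVENT_INTERRUPT; the v3 spellings wait_event()/VCPU_EVENT_VIRTUAL_INTERRUPT are assumptions based on the v3 changelog, since the v3 HLT patch body is not in this digest):

static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu)
{
	/* Sleep only when nothing is pending; a pending request or a set
	 * bit in the vIRR means the vcpu should keep running so the
	 * interrupt is delivered on the next VM entry. */
	if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
		wait_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
	}

	return 0;
}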


[RFC PATCH v3 3/6] hv: vcpu: wait and signal vcpu event support

Shuo A Liu
 

Introduce two kinds of events for each vcpu:
VCPU_EVENT_IOREQ: the vcpu waits for IO request completion
VCPU_EVENT_VIRTUAL_INTERRUPT: the vcpu waits for virtual interrupt events
A vcpu can wait for such events and resume running when the event
gets signalled.

This patch also changes IO request waiting/notifying to use this mechanism.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vcpu.c | 5 ++++-
hypervisor/common/hypercall.c | 8 +++-----
hypervisor/dm/io_req.c | 15 ++++-----------
hypervisor/include/arch/x86/guest/vcpu.h | 7 +++++++
4 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index ea6d0f4..6d0ecc5 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -795,7 +795,7 @@ void launch_vcpu(struct acrn_vcpu *vcpu)
/* help function for vcpu create */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
- int32_t ret;
+ int32_t ret, i;
struct acrn_vcpu *vcpu = NULL;
char thread_name[16];

@@ -811,6 +811,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in;
init_thread_data(&vcpu->thread_obj);
+ for (i = 0; i < VCPU_EVENT_NUM; i++) {
+ init_event(&vcpu->events[i]);
+ }
}

return ret;
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index c114040..394f405 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -553,12 +553,10 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
__func__, vcpu_id, target_vm->vm_id);
} else {
vcpu = vcpu_from_vid(target_vm, vcpu_id);
- if (vcpu->state == VCPU_PAUSED) {
- if (!vcpu->vm->sw.is_completion_polling) {
- resume_vcpu(vcpu);
- }
- ret = 0;
+ if (!vcpu->vm->sw.is_completion_polling) {
+ signal_event(&vcpu->events[VCPU_EVENT_IOREQ]);
}
+ ret = 0;
}
}

diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c
index d0ce332..daee03d 100644
--- a/hypervisor/dm/io_req.c
+++ b/hypervisor/dm/io_req.c
@@ -103,14 +103,6 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
}
clac();

- /* pause vcpu in notification mode , wait for VHM to handle the MMIO request.
- * TODO: when pause_vcpu changed to switch vcpu out directlly, we
- * should fix the race issue between req.processed update and vcpu pause
- */
- if (!is_polling) {
- pause_vcpu(vcpu, VCPU_PAUSED);
- }
-
/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
cpu_write_memory_barrier();

@@ -136,10 +128,11 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
schedule();
}
}
- } else if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
- schedule();
} else {
- ret = -EINVAL;
+ wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);
+ if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
+ schedule();
+ }
}
} else {
ret = -EINVAL;
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index bfee6e4..827ee6f 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -146,6 +146,11 @@ enum vm_cpu_mode {
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};

+enum vcpu_event_type {
+ VCPU_EVENT_IOREQ,
+ VCPU_EVENT_VIRTUAL_INTERRUPT,
+ VCPU_EVENT_NUM
+};

/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
@@ -260,6 +265,8 @@ struct acrn_vcpu {

uint64_t reg_cached;
uint64_t reg_updated;
+
+ struct sched_event events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);

struct vcpu_dump {
--
2.8.3
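
The two sides of the IOREQ hand-off pair up as below; this is a condensed sketch of the hunks above, not additional code:

/* vcpu side, in acrn_insert_request(): block until the request is handled */
wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);

/* service-OS side, in hcall_notify_ioreq_finish(): wake the requester */
signal_event(&vcpu->events[VCPU_EVENT_IOREQ]);

Because signal_event() latches the signal in event->done (patch 2/6), the wake-up is not lost even if it arrives before the vcpu thread actually reaches wait_event().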


[RFC PATCH v3 2/6] hv: sched: simple event implementation

Shuo A Liu
 

This simple event implementation supports only one exclusive waiter
at a time. It is mainly used by a thread that wants to wait for a specific
event to happen.
A thread that wants to wait for an event calls
wait_event(struct sched_event *);

A thread that can signal the event calls
signal_event(struct sched_event *);

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/common/schedule.c | 45 ++++++++++++++++++++++++++++++++++++
hypervisor/include/common/schedule.h | 11 +++++++++
2 files changed, 56 insertions(+)

diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 3ce6a41..2f72bc9 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -274,3 +274,48 @@ void run_thread(struct thread_object *obj)
obj->thread_entry(obj);
}
}
+
+void init_event(struct sched_event *event)
+{
+ spinlock_init(&event->lock);
+ event->done = 0UL;
+ event->waiting_thread = NULL;
+}
+
+void reset_event(struct sched_event *event)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&event->lock, &rflag);
+ event->done = 0UL;
+ event->waiting_thread = NULL;
+ spinlock_irqrestore_release(&event->lock, rflag);
+}
+
+/* support exclusive waiting only */
+void wait_event(struct sched_event *event)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&event->lock, &rflag);
+ ASSERT((event->waiting_thread == NULL), "only support exclusive waiting");
+ if (event->done == 0UL) {
+ event->waiting_thread = sched_get_current(get_pcpu_id());
+ sleep_thread(event->waiting_thread);
+ }
+ spinlock_irqrestore_release(&event->lock, rflag);
+}
+
+void signal_event(struct sched_event *event)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&event->lock, &rflag);
+ event->done++;
+ if (event->waiting_thread != NULL) {
+ wake_thread(event->waiting_thread);
+ event->done = 0UL;
+ event->waiting_thread = NULL;
+ }
+ spinlock_irqrestore_release(&event->lock, rflag);
+}
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index 1526865..630f130 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -90,6 +90,17 @@ struct sched_iorr_control {
struct hv_timer tick_timer;
};

+struct sched_event {
+ spinlock_t lock;
+ uint32_t done;
+ struct thread_object* waiting_thread;
+};
+
+void init_event(struct sched_event *event);
+void reset_event(struct sched_event *event);
+void wait_event(struct sched_event *event);
+void signal_event(struct sched_event *event);
+
bool is_idle_thread(const struct thread_object *obj);
uint16_t sched_get_pcpuid(const struct thread_object *obj);
struct thread_object *sched_get_current(uint16_t pcpu_id);
--
2.8.3
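
A usage sketch (hypothetical caller code, assuming a struct sched_event embedded in some object shared by the two threads):

static struct sched_event ev;

/* setup, once */
init_event(&ev);

/* Thread A (waiter): sleeps unless a signal already arrived */
wait_event(&ev);

/* Thread B (signaler): may run before or after wait_event() */
signal_event(&ev);

The done counter is what avoids the sleep-after-wakeup race mentioned in the cover letter: if signal_event() runs first, done becomes non-zero, so a later wait_event() returns without sleeping instead of missing the wake-up.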


[RFC PATCH v3 1/6] hv: PAUSE-loop exiting support in hypervisor

Shuo A Liu
 

With cpu sharing enabled, PAUSE-loop exiting helps a vcpu release its
pcpu proactively, which is good for performance.

VMX_PLE_GAP: upper bound on the amount of time between two successive
executions of PAUSE in a loop.
VMX_PLE_WINDOW: upper bound on the amount of time a guest is allowed to
execute in a PAUSE loop.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
Acked-by: Eddie Dong <eddie.dong@...>
---
hypervisor/arch/x86/guest/vmcs.c | 7 ++++++-
hypervisor/arch/x86/guest/vmexit.c | 9 ++++++++-
2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index a125e2d..33be74f 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -297,7 +297,8 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
*/
value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS2,
VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT |
- VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT);
+ VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT |
+ VMX_PROCBASED_CTLS2_PAUSE_LOOP);

if (vcpu->arch.vpid != 0U) {
value32 |= VMX_PROCBASED_CTLS2_VPID;
@@ -422,6 +423,10 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
exec_vmwrite(VMX_CR3_TARGET_1, 0UL);
exec_vmwrite(VMX_CR3_TARGET_2, 0UL);
exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
+
+ /* Setup PAUSE-loop exiting - 24.6.13 */
+ exec_vmwrite(VMX_PLE_GAP, 128U);
+ exec_vmwrite(VMX_PLE_WINDOW, 4096U);
}

static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index ac73f4c..4fb0478 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -30,6 +30,7 @@ static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t pause_vmexit_handler(struct acrn_vcpu *vcpu);

/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -113,7 +114,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
[VMX_EXIT_REASON_MONITOR] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_PAUSE] = {
- .handler = unhandled_vmexit_handler},
+ .handler = pause_vmexit_handler},
[VMX_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_TPR_BELOW_THRESHOLD] = {
@@ -277,6 +278,12 @@ static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu)
return 0;
}

+static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu)
+{
+ yield_current();
+ return 0;
+}
+
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;
--
2.8.3
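
To see what PLE catches, consider a hypothetical kernel-mode spin-wait in the guest ('flag' is a made-up variable, not ACRN code):

/* guest-side waiter executing PAUSE in a tight loop */
while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0U) {
	__builtin_ia32_pause();	/* the PAUSE instruction */
}

Once the guest stays in such a loop longer than VMX_PLE_WINDOW TSC cycles, with successive PAUSEs no more than VMX_PLE_GAP cycles apart, the processor forces a VM exit with reason VMX_EXIT_REASON_PAUSE, and the handler above yields the pcpu instead of letting the vcpu burn its time slice.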


[RFC PATCH v3 0/6] Enable PAUSE-Loop exiting and HLT emulation in hypervisor

Shuo A Liu
 

When multiple vcpus run on the same pcpu, it is good for performance
if a vcpu can yield the pcpu proactively. When vcpus are idle, they should
release the pcpu and let the scheduler pick other vcpus to run.

To avoid the sleep-after-wakeup race issue, HLT emulation originally
introduced a block_flags and a per-vcpu lock (vcpu_lock); that approach
was dropped in v2 in favor of the event mechanism below.

v3:
1) concept: completion -> event
2) Drop the vcpu-layer abstraction of events. Just use the wait_event/signal_event
provided by the scheduler.

v2:
Drop the block_flags & vcpu_lock approach, and introduce an event completion
mechanism in the scheduling and vcpu layers.


Shuo A Liu (6):
hv: PAUSE-loop exiting support in hypervisor
hv: sched: simple event implementation
hv: vcpu: wait and signal vcpu event support
hv: Add vlapic_has_pending_intr of apicv to check pending interrupts
hv: HLT emulation in hypervisor
hv: Use HLT as the default idle action of service OS

hypervisor/arch/x86/guest/vcpu.c | 5 ++-
hypervisor/arch/x86/guest/vlapic.c | 31 ++++++++++++++++
hypervisor/arch/x86/guest/vmcs.c | 9 ++++-
hypervisor/arch/x86/guest/vmexit.c | 20 +++++++++-
hypervisor/common/hv_main.c | 1 +
hypervisor/common/hypercall.c | 8 ++--
hypervisor/common/schedule.c | 45 +++++++++++++++++++++++
hypervisor/dm/io_req.c | 15 ++------
hypervisor/include/arch/x86/guest/vcpu.h | 7 ++++
hypervisor/include/arch/x86/guest/vlapic.h | 2 +
hypervisor/include/common/schedule.h | 11 ++++++
hypervisor/scenarios/industry/vm_configurations.h | 2 +-
hypervisor/scenarios/sdc/vm_configurations.h | 2 +-
hypervisor/scenarios/sdc2/vm_configurations.h | 2 +-
14 files changed, 136 insertions(+), 24 deletions(-)

--
2.8.3


Re: [RFC PATCH v2 2/7] hv: sched: simple completion implementation

Eddie Dong
 

Hi Shuo:
It seems to be simple :)
From an abstraction p.o.v., the APIs are still coupled with the event we are using now. If we can have neutral event wait/signal APIs, plus 2 specific events, that will be great!

Here we may 1) use a neutral name, i.e. wait_event rather than wait_vcpu_event; actually these APIs are for threads, not VCPU specific. 2) The event mechanism may couple with the scheduler, but not with the specific usage/EVENT we use here.

Thx, Eddie

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On Behalf Of Shuo A Liu
Sent: Tuesday, December 31, 2019 2:52 PM
To: acrn-dev@...
Cc: Liu, Shuo A <shuo.a.liu@...>
Subject: [acrn-dev] [RFC PATCH v2 2/7] hv: sched: simple completion implementation

This simple completion implementation supports only one exclusive waiter at
a time. It is mainly used by a thread that wants to wait for a specific event
to happen.
A thread that wants to wait for an event calls
wait_for_completion(struct sched_completion *,
void *action(void *), void *data);
where 'action' is the callback invoked when waiting.

A thread that can signal the completion calls
complete(struct sched_completion *,
void *action(void *), void *data);
where 'action' is the callback invoked when completing.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/common/schedule.c | 45 ++++++++++++++++++++++++
hypervisor/include/common/schedule.h | 13 +++++++++++
2 files changed, 58 insertions(+)

diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 3ce6a41..e74f739 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -274,3 +274,48 @@ void run_thread(struct thread_object *obj)
obj->thread_entry(obj);
}
}
+
+void init_completion(struct sched_completion *completion)
+{
+ spinlock_init(&completion->lock);
+ completion->done = 0UL;
+ completion->waiting_thread = NULL;
+}
+
+void reset_completion(struct sched_completion *completion)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&completion->lock, &rflag);
+ completion->done = 0UL;
+ completion->waiting_thread = NULL;
+ spinlock_irqrestore_release(&completion->lock, rflag);
+}
+
+/* support exclusive waiting only */
+void wait_for_completion(struct sched_completion *completion, wait_fn action, void *data)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&completion->lock, &rflag);
+ ASSERT((completion->waiting_thread == NULL), "only support exclusive waiting");
+ if (completion->done == 0UL) {
+ completion->waiting_thread = sched_get_current(get_pcpu_id());
+ action(data);
+ }
+ spinlock_irqrestore_release(&completion->lock, rflag);
+}
+
+void complete(struct sched_completion *completion, complete_fn action, void *data)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&completion->lock, &rflag);
+ completion->done++;
+ if (completion->waiting_thread != NULL) {
+ action(data);
+ completion->done = 0UL;
+ completion->waiting_thread = NULL;
+ }
+ spinlock_irqrestore_release(&completion->lock, rflag);
+}
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index 1526865..224ad48 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -90,6 +90,19 @@ struct sched_iorr_control {
struct hv_timer tick_timer;
};

+struct sched_completion {
+ spinlock_t lock;
+ uint32_t done;
+ struct thread_object* waiting_thread;
+};
+
+typedef void (*wait_fn)(void *data);
+typedef void (*complete_fn)(void *data);
+void init_completion(struct sched_completion *completion);
+void reset_completion(struct sched_completion *completion);
+void wait_for_completion(struct sched_completion *completion, wait_fn action, void *data);
+void complete(struct sched_completion *completion, complete_fn action, void *data);
+
bool is_idle_thread(const struct thread_object *obj);
uint16_t sched_get_pcpuid(const struct thread_object *obj);
struct thread_object *sched_get_current(uint16_t pcpu_id);
--
2.8.3



[RFC PATCH v2 7/7] hv: Use HLT as the default idle action of service OS

Shuo A Liu
 

This patch overrides the idle driver of the service OS for the industry, sdc,
and sdc2 scenarios. HLT will be used as the default idle action.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/scenarios/industry/vm_configurations.h | 2 +-
hypervisor/scenarios/sdc/vm_configurations.h | 2 +-
hypervisor/scenarios/sdc2/vm_configurations.h | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hypervisor/scenarios/industry/vm_configurations.h b/hypervisor/scenarios/industry/vm_configurations.h
index 482801a..633dab4 100644
--- a/hypervisor/scenarios/industry/vm_configurations.h
+++ b/hypervisor/scenarios/industry/vm_configurations.h
@@ -25,7 +25,7 @@
"i915.nuclear_pageflip=1 " \
"i915.avail_planes_per_pipe=0x01010F " \
"i915.domain_plane_owners=0x011111110000 " \
- "i915.enable_gvt=1 " \
+ "i915.enable_gvt=1 idle=halt " \
SOS_BOOTARGS_DIFF

#define VM1_CONFIG_VCPU_AFFINITY {AFFINITY_CPU(1U)}
diff --git a/hypervisor/scenarios/sdc/vm_configurations.h b/hypervisor/scenarios/sdc/vm_configurations.h
index 436e931..2814259 100644
--- a/hypervisor/scenarios/sdc/vm_configurations.h
+++ b/hypervisor/scenarios/sdc/vm_configurations.h
@@ -25,7 +25,7 @@
"i915.nuclear_pageflip=1 " \
"i915.avail_planes_per_pipe=0x01010F " \
"i915.domain_plane_owners=0x011111110000 " \
- "i915.enable_gvt=1 " \
+ "i915.enable_gvt=1 idle=halt " \
SOS_BOOTARGS_DIFF

#if CONFIG_MAX_KATA_VM_NUM > 0
diff --git a/hypervisor/scenarios/sdc2/vm_configurations.h b/hypervisor/scenarios/sdc2/vm_configurations.h
index 96e069d..5701a41 100644
--- a/hypervisor/scenarios/sdc2/vm_configurations.h
+++ b/hypervisor/scenarios/sdc2/vm_configurations.h
@@ -25,7 +25,7 @@
"i915.nuclear_pageflip=1 " \
"i915.avail_planes_per_pipe=0x01010F " \
"i915.domain_plane_owners=0x011111110000 " \
- "i915.enable_gvt=1 " \
+ "i915.enable_gvt=1 idle=halt " \
SOS_BOOTARGS_DIFF

#define VM1_CONFIG_VCPU_AFFINITY {AFFINITY_CPU(1U)}
--
2.8.3


[RFC PATCH v2 6/7] hv: HLT emulation in hypervisor

Shuo A Liu
 

HLT emulation is important for maximizing CPU resource utilization. A vcpu
executing HLT is idle and can give up its CPU proactively. Thus, we
pause the vcpu in HLT emulation and resume it when an event happens.

When a vcpu enters HLT, it will be paused:

VM ID PCPU ID VCPU ID VCPU ROLE VCPU STATE
===== ======= ======= ========= ==========
0 0 0 PRIMARY Paused
0 1 1 SECONDARY Paused

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vlapic.c | 2 ++
hypervisor/arch/x86/guest/vmcs.c | 2 +-
hypervisor/arch/x86/guest/vmexit.c | 11 ++++++++++-
hypervisor/common/hv_main.c | 1 +
4 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 1c7bdfb..63f6b01 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -556,6 +556,7 @@ static void apicv_basic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector,

static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
{
+
/* update TMR if interrupt trigger mode has changed */
vlapic_set_tmr(vlapic, vector, level);

@@ -590,6 +591,7 @@ static void vlapic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector, bool
if ((lapic->svr.v & APIC_SVR_ENABLE) == 0U) {
dev_dbg(ACRN_DBG_LAPIC, "vlapic is software disabled, ignoring interrupt %u", vector);
} else {
+ notify_vcpu_event(vlapic->vcpu, VCPU_EVENT_INTERRUPT);
vlapic->ops->accept_intr(vlapic, vector, level);
}
}
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 33be74f..fb0ba76 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -272,7 +272,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS,
VMX_PROCBASED_CTLS_TSC_OFF | VMX_PROCBASED_CTLS_TPR_SHADOW |
VMX_PROCBASED_CTLS_IO_BITMAP | VMX_PROCBASED_CTLS_MSR_BITMAP |
- VMX_PROCBASED_CTLS_SECONDARY);
+ VMX_PROCBASED_CTLS_HLT | VMX_PROCBASED_CTLS_SECONDARY);

/*Disable VM_EXIT for CR3 access*/
value32 &= ~(VMX_PROCBASED_CTLS_CR3_LOAD | VMX_PROCBASED_CTLS_CR3_STORE);
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index 4fb0478..ea576be 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -31,6 +31,7 @@ static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t pause_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu);

/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -59,7 +60,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
[VMX_EXIT_REASON_GETSEC] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_HLT] = {
- .handler = unhandled_vmexit_handler},
+ .handler = hlt_vmexit_handler},
[VMX_EXIT_REASON_INVD] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_INVLPG] = {
@@ -284,6 +285,14 @@ static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu)
return 0;
}

+static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu)
+{
+ if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
+ wait_vcpu_event(vcpu, VCPU_EVENT_INTERRUPT);
+ }
+ return 0;
+}
+
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;
diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index e949149..a6920f6 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -40,6 +40,7 @@ void vcpu_thread(struct thread_object *obj)
continue;
}

+ reset_vcpu_event(vcpu, VCPU_EVENT_INTERRUPT);
profiling_vmenter_handler(vcpu);

TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);
--
2.8.3
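
Putting the three hunks together, the per-iteration ordering is what keeps the handler race-free; a condensed sketch using the names from this series:

/* vcpu_thread(), every iteration before VM entry: drop stale signals */
reset_vcpu_event(vcpu, VCPU_EVENT_INTERRUPT);

/* any injection path, e.g. vlapic_accept_intr(), at any time: */
notify_vcpu_event(vlapic->vcpu, VCPU_EVENT_INTERRUPT);

/* HLT VM exit: sleeps only if no signal arrived since the reset */
if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
	wait_vcpu_event(vcpu, VCPU_EVENT_INTERRUPT);
}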


[RFC PATCH v2 5/7] hv: Add vlapic_has_pending_intr of apicv to check pending interrupts

Shuo A Liu
 

Sometimes the HV wants to know whether there are pending interrupts for a vcpu.
Add a .has_pending_intr interface to acrn_apicv_ops and report the
pending-interrupt status by checking the IRRs of apicv.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vlapic.c | 29 +++++++++++++++++++++++++++++
hypervisor/include/arch/x86/guest/vlapic.h | 2 ++
2 files changed, 31 insertions(+)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 8c79a4d..1c7bdfb 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1745,6 +1745,11 @@ static bool ptapic_has_pending_delivery_intr(__unused struct acrn_vcpu *vcpu)
return false;
}

+static bool ptapic_has_pending_intr(__unused struct acrn_vcpu *vcpu)
+{
+ return false;
+}
+
static bool ptapic_invalid(__unused uint32_t offset)
{
return false;
@@ -1754,6 +1759,7 @@ static const struct acrn_apicv_ops ptapic_ops = {
.accept_intr = ptapic_accept_intr,
.inject_intr = ptapic_inject_intr,
.has_pending_delivery_intr = ptapic_has_pending_delivery_intr,
+ .has_pending_intr = ptapic_has_pending_intr,
.apic_read_access_may_valid = ptapic_invalid,
.apic_write_access_may_valid = ptapic_invalid,
.x2apic_read_msr_may_valid = ptapic_invalid,
@@ -2379,6 +2385,27 @@ bool vlapic_has_pending_delivery_intr(struct acrn_vcpu *vcpu)
return vlapic->ops->has_pending_delivery_intr(vcpu);
}

+static bool apicv_basic_has_pending_intr(struct acrn_vcpu *vcpu)
+{
+ struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
+ uint32_t vector;
+
+ vector = vlapic_find_highest_irr(vlapic);
+
+ return vector != 0UL;
+}
+
+static bool apicv_advanced_has_pending_intr(struct acrn_vcpu *vcpu)
+{
+ return apicv_basic_has_pending_intr(vcpu);
+}
+
+bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu)
+{
+ struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
+ return vlapic->ops->has_pending_intr(vcpu);
+}
+
static bool apicv_basic_apic_read_access_may_valid(__unused uint32_t offset)
{
return true;
@@ -2592,6 +2619,7 @@ static const struct acrn_apicv_ops apicv_basic_ops = {
.accept_intr = apicv_basic_accept_intr,
.inject_intr = apicv_basic_inject_intr,
.has_pending_delivery_intr = apicv_basic_has_pending_delivery_intr,
+ .has_pending_intr = apicv_basic_has_pending_intr,
.apic_read_access_may_valid = apicv_basic_apic_read_access_may_valid,
.apic_write_access_may_valid = apicv_basic_apic_write_access_may_valid,
.x2apic_read_msr_may_valid = apicv_basic_x2apic_read_msr_may_valid,
@@ -2602,6 +2630,7 @@ static const struct acrn_apicv_ops apicv_advanced_ops = {
.accept_intr = apicv_advanced_accept_intr,
.inject_intr = apicv_advanced_inject_intr,
.has_pending_delivery_intr = apicv_advanced_has_pending_delivery_intr,
+ .has_pending_intr = apicv_advanced_has_pending_intr,
.apic_read_access_may_valid = apicv_advanced_apic_read_access_may_valid,
.apic_write_access_may_valid = apicv_advanced_apic_write_access_may_valid,
.x2apic_read_msr_may_valid = apicv_advanced_x2apic_read_msr_may_valid,
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h
index f271a4e..8feea92 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -100,6 +100,7 @@ struct acrn_apicv_ops {
void (*accept_intr)(struct acrn_vlapic *vlapic, uint32_t vector, bool level);
bool (*inject_intr)(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
bool (*has_pending_delivery_intr)(struct acrn_vcpu *vcpu);
+ bool (*has_pending_intr)(struct acrn_vcpu *vcpu);
bool (*apic_read_access_may_valid)(uint32_t offset);
bool (*apic_write_access_may_valid)(uint32_t offset);
bool (*x2apic_read_msr_may_valid)(uint32_t offset);
@@ -118,6 +119,7 @@ void vlapic_set_apicv_ops(void);

bool vlapic_inject_intr(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
bool vlapic_has_pending_delivery_intr(struct acrn_vcpu *vcpu);
+bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu);

/**
* @brief Get physical address to PIR description.
--
2.8.3


[RFC PATCH v2 4/7] hv: use vcpu event to sync the IO request waiting and completion

Shuo A Liu
 

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/common/hypercall.c | 2 +-
hypervisor/dm/io_req.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index c114040..0a649ea 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -555,7 +555,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
vcpu = vcpu_from_vid(target_vm, vcpu_id);
if (vcpu->state == VCPU_PAUSED) {
if (!vcpu->vm->sw.is_completion_polling) {
- resume_vcpu(vcpu);
+ notify_vcpu_event(vcpu, VCPU_EVENT_IOREQ);
}
ret = 0;
}
diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c
index d0ce332..dafc828 100644
--- a/hypervisor/dm/io_req.c
+++ b/hypervisor/dm/io_req.c
@@ -108,7 +108,7 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
* should fix the race issue between req.processed update and vcpu pause
*/
if (!is_polling) {
- pause_vcpu(vcpu, VCPU_PAUSED);
+ wait_vcpu_event(vcpu, VCPU_EVENT_IOREQ);
}

/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
--
2.8.3


[RFC PATCH v2 3/7] hv: vcpu: wait and signal vcpu event support

Shuo A Liu
 

Introduce two kinds of events for each vcpu:
VCPU_EVENT_IOREQ: the vcpu waits for IO request completion
VCPU_EVENT_INTERRUPT: the vcpu waits for interrupt events
A vcpu can wait (moving to PAUSED) for such events and resume to RUNNING
when the event gets signalled.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/arch/x86/guest/vcpu.c | 32 +++++++++++++++++++++++-
hypervisor/include/arch/x86/guest/vcpu.h | 43 ++++++++++++++++++++++++++++++++
2 files changed, 74 insertions(+), 1 deletion(-)

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index ea6d0f4..1619bde 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -722,6 +722,33 @@ void resume_vcpu(struct acrn_vcpu *vcpu)
}
}

+static void wait_event(void *data)
+{
+ struct acrn_vcpu *vcpu = (struct acrn_vcpu *)data;
+ pause_vcpu(vcpu, VCPU_PAUSED);
+}
+
+static void complete_event(void *data)
+{
+ struct acrn_vcpu *vcpu = (struct acrn_vcpu *)data;
+ resume_vcpu(vcpu);
+}
+
+void wait_vcpu_event(struct acrn_vcpu *vcpu, enum vcpu_event_type ev)
+{
+ wait_for_completion(&vcpu->events[ev], wait_event, vcpu);
+}
+
+void notify_vcpu_event(struct acrn_vcpu *vcpu, enum vcpu_event_type ev)
+{
+ complete(&vcpu->events[ev], complete_event, vcpu);
+}
+
+void reset_vcpu_event(struct acrn_vcpu *vcpu, enum vcpu_event_type ev)
+{
+ reset_completion(&vcpu->events[ev]);
+}
+
void save_xsave_area(struct ext_context *ectx)
{
ectx->xcr0 = read_xcr(0);
@@ -795,7 +822,7 @@ void launch_vcpu(struct acrn_vcpu *vcpu)
/* help function for vcpu create */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
- int32_t ret;
+ int32_t ret, i;
struct acrn_vcpu *vcpu = NULL;
char thread_name[16];

@@ -811,6 +838,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in;
init_thread_data(&vcpu->thread_obj);
+ for (i = 0; i < VCPU_EVENT_NUM; i++) {
+ init_completion(&vcpu->events[i]);
+ }
}

return ret;
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index bfee6e4..66f0e0f 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -146,6 +146,11 @@ enum vm_cpu_mode {
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};

+enum vcpu_event_type {
+ VCPU_EVENT_IOREQ,
+ VCPU_EVENT_INTERRUPT,
+ VCPU_EVENT_NUM
+};

/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
@@ -260,6 +265,8 @@ struct acrn_vcpu {

uint64_t reg_cached;
uint64_t reg_updated;
+
+ struct sched_completion events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);

struct vcpu_dump {
@@ -629,6 +636,42 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state);
void resume_vcpu(struct acrn_vcpu *vcpu);

/**
+ * @brief pause a vcpu to wait for an event
+ *
+ * Pause a vCPU to VCPU_PAUSED state and wait for an event.
+ *
+ * @param[inout] vcpu pointer to vcpu data structure
+ * @param[in] ev indicates the type of event the vcpu is waiting on
+ *
+ * @return None
+ */
+void wait_vcpu_event(struct acrn_vcpu *vcpu, enum vcpu_event_type ev);
+
+/**
+ * @brief notify a vcpu that is waiting on an event
+ *
+ * Resume a vCPU to VCPU_RUNNING state that is waiting on a specific event.
+ *
+ * @param[inout] vcpu pointer to vcpu data structure
+ * @param[in] ev indicates the type of event the vcpu is waiting on
+ *
+ * @return None
+ */
+void notify_vcpu_event(struct acrn_vcpu *vcpu, enum vcpu_event_type ev);
+
+/**
+ * @brief reset a vcpu event status
+ *
+ * Reset a vcpu's event status.
+ *
+ * @param[inout] vcpu pointer to vcpu data structure
+ * @param[in] ev indicates the type of event of the vcpu
+ *
+ * @return None
+ */
+void reset_vcpu_event(struct acrn_vcpu *vcpu, enum vcpu_event_type ev);
+
+/**
* @brief set the vcpu to running state, then it will be scheduled.
*
* Adds a vCPU into the run queue and make a reschedule request for it. It sets the vCPU state to VCPU_RUNNING.
--
2.8.3


[RFC PATCH v2 2/7] hv: sched: simple completion implementation

Shuo A Liu
 

This simple completion implementation supports only one exclusive waiter at
a time. It is mainly used by a thread that wants to wait for a specific event
to happen.
A thread that wants to wait for an event calls
wait_for_completion(struct sched_completion *,
void *action(void *), void *data);
where 'action' is the callback invoked when waiting.

A thread that can signal the completion calls
complete(struct sched_completion *,
void *action(void *), void *data);
where 'action' is the callback invoked when completing.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
---
hypervisor/common/schedule.c | 45 ++++++++++++++++++++++++++++++++++++
hypervisor/include/common/schedule.h | 13 +++++++++++
2 files changed, 58 insertions(+)

diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 3ce6a41..e74f739 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -274,3 +274,48 @@ void run_thread(struct thread_object *obj)
obj->thread_entry(obj);
}
}
+
+void init_completion(struct sched_completion *completion)
+{
+ spinlock_init(&completion->lock);
+ completion->done = 0UL;
+ completion->waiting_thread = NULL;
+}
+
+void reset_completion(struct sched_completion *completion)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&completion->lock, &rflag);
+ completion->done = 0UL;
+ completion->waiting_thread = NULL;
+ spinlock_irqrestore_release(&completion->lock, rflag);
+}
+
+/* support exclusive waiting only */
+void wait_for_completion(struct sched_completion *completion, wait_fn action, void *data)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&completion->lock, &rflag);
+ ASSERT((completion->waiting_thread == NULL), "only support exclusive waiting");
+ if (completion->done == 0UL) {
+ completion->waiting_thread = sched_get_current(get_pcpu_id());
+ action(data);
+ }
+ spinlock_irqrestore_release(&completion->lock, rflag);
+}
+
+void complete(struct sched_completion *completion, complete_fn action, void *data)
+{
+ uint64_t rflag;
+
+ spinlock_irqsave_obtain(&completion->lock, &rflag);
+ completion->done++;
+ if (completion->waiting_thread != NULL) {
+ action(data);
+ completion->done = 0UL;
+ completion->waiting_thread = NULL;
+ }
+ spinlock_irqrestore_release(&completion->lock, rflag);
+}
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index 1526865..224ad48 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -90,6 +90,19 @@ struct sched_iorr_control {
struct hv_timer tick_timer;
};

+struct sched_completion {
+ spinlock_t lock;
+ uint32_t done;
+ struct thread_object* waiting_thread;
+};
+
+typedef void (*wait_fn)(void *data);
+typedef void (*complete_fn)(void *data);
+void init_completion(struct sched_completion *completion);
+void reset_completion(struct sched_completion *completion);
+void wait_for_completion(struct sched_completion *completion, wait_fn action, void *data);
+void complete(struct sched_completion *completion, complete_fn action, void *data);
+
bool is_idle_thread(const struct thread_object *obj);
uint16_t sched_get_pcpuid(const struct thread_object *obj);
struct thread_object *sched_get_current(uint16_t pcpu_id);
--
2.8.3


[RFC PATCH v2 1/7] hv: PAUSE-loop exiting support in hypervisor

Shuo A Liu
 

With cpu sharing enabled, PAUSE-loop exiting helps a vcpu release its
pcpu proactively, which is good for performance.

VMX_PLE_GAP: upper bound on the amount of time between two successive
executions of PAUSE in a loop.
VMX_PLE_WINDOW: upper bound on the amount of time a guest is allowed to
execute in a PAUSE loop.

Signed-off-by: Shuo A Liu <shuo.a.liu@...>
Acked-by: Eddie Dong <eddie.dong@...>
---
hypervisor/arch/x86/guest/vmcs.c | 7 ++++++-
hypervisor/arch/x86/guest/vmexit.c | 9 ++++++++-
2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index a125e2d..33be74f 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -297,7 +297,8 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
*/
value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS2,
VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT |
- VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT);
+ VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT |
+ VMX_PROCBASED_CTLS2_PAUSE_LOOP);

if (vcpu->arch.vpid != 0U) {
value32 |= VMX_PROCBASED_CTLS2_VPID;
@@ -422,6 +423,10 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
exec_vmwrite(VMX_CR3_TARGET_1, 0UL);
exec_vmwrite(VMX_CR3_TARGET_2, 0UL);
exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
+
+ /* Setup PAUSE-loop exiting - 24.6.13 */
+ exec_vmwrite(VMX_PLE_GAP, 128U);
+ exec_vmwrite(VMX_PLE_WINDOW, 4096U);
}

static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index ac73f4c..4fb0478 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -30,6 +30,7 @@ static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t pause_vmexit_handler(struct acrn_vcpu *vcpu);

/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -113,7 +114,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
[VMX_EXIT_REASON_MONITOR] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_PAUSE] = {
- .handler = unhandled_vmexit_handler},
+ .handler = pause_vmexit_handler},
[VMX_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_TPR_BELOW_THRESHOLD] = {
@@ -277,6 +278,12 @@ static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu)
return 0;
}

+static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu)
+{
+ yield_current();
+ return 0;
+}
+
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;
--
2.8.3


[RFC PATCH v2 0/7] Enable PAUSE-Loop exiting and HLT emulation in hypervisor

Shuo A Liu
 

When multiple vcpus run on the same pcpu, it is good for performance
if a vcpu can yield the pcpu proactively. When vcpus are idle, they should
release the pcpu and let the scheduler pick other vcpus to run.

To avoid the sleep-after-wakeup race issue, HLT emulation originally
introduced a block_flags and a per-vcpu lock (vcpu_lock); that approach
is dropped here in favor of an event completion mechanism.

v2:
Drop the block_flags & vcpu_lock approach, and introduce an event completion
mechanism in the scheduling and vcpu layers.

Shuo A Liu (7):
hv: PAUSE-loop exiting support in hypervisor
hv: sched: simple completion implementation
hv: vcpu: wait and signal vcpu event support
hv: use vcpu event to sync the IO request waiting and completion
hv: Add vlapic_has_pending_intr of apicv to check pending interrupts
hv: HLT emulation in hypervisor
hv: Use HLT as the default idle action of service OS

hypervisor/arch/x86/guest/vcpu.c | 32 +++++++++++++++-
hypervisor/arch/x86/guest/vlapic.c | 31 ++++++++++++++++
hypervisor/arch/x86/guest/vmcs.c | 9 ++++-
hypervisor/arch/x86/guest/vmexit.c | 20 +++++++++-
hypervisor/common/hv_main.c | 1 +
hypervisor/common/hypercall.c | 2 +-
hypervisor/common/schedule.c | 45 +++++++++++++++++++++++
hypervisor/dm/io_req.c | 2 +-
hypervisor/include/arch/x86/guest/vcpu.h | 43 ++++++++++++++++++++++
hypervisor/include/arch/x86/guest/vlapic.h | 2 +
hypervisor/include/common/schedule.h | 13 +++++++
hypervisor/scenarios/industry/vm_configurations.h | 2 +-
hypervisor/scenarios/sdc/vm_configurations.h | 2 +-
hypervisor/scenarios/sdc2/vm_configurations.h | 2 +-
14 files changed, 196 insertions(+), 10 deletions(-)

--
2.8.3


[PATCH v3] HV: wait pcpus offline only when lapic pt enabled

Victor Sun
 

When doing shutdown_vm() and reset_vm(), we don't need to wait for pcpus
to go offline if lapic_pt is not enabled for the vcpus of the VM.

The patch provides an offline_lapic_pt_enabled_pcpus() API for shutdown_vm()
and reset_vm() to use.

Signed-off-by: Victor Sun <victor.sun@...>
---
hypervisor/arch/x86/guest/vm.c | 92 +++++++++++++++++++++---------------------
1 file changed, 47 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index cb2cf5c..32bc892 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -464,7 +464,7 @@ static uint64_t lapic_pt_enabled_pcpu_bitmap(struct acrn_vm *vm)

if (is_lapic_pt_configured(vm)) {
foreach_vcpu(i, vm, vcpu) {
- if (is_lapic_pt_enabled(vcpu)) {
+ if (is_x2apic_enabled(vcpu_vlapic(vcpu))) {
bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &bitmap);
}
}
@@ -619,13 +619,53 @@ static bool is_ready_for_system_shutdown(void)
return ret;
}

+static int32_t offline_lapic_pt_enabled_pcpus(struct acrn_vm *vm, uint64_t pcpu_mask)
+{
+ int32_t ret = 0;
+ uint16_t i;
+ uint64_t mask = pcpu_mask;
+ struct acrn_vcpu *vcpu = NULL;
+ uint16_t this_pcpu_id = get_pcpu_id();
+
+ if (bitmap_test(this_pcpu_id, &mask)) {
+ bitmap_clear_nolock(this_pcpu_id, &mask);
+ if (vm->state == VM_POWERED_OFF) {
+ /*
+ * If the current pcpu needs to offline itself,
+ * it will be done after shutdown_vm() completes
+ * in the idle thread.
+ */
+ make_pcpu_offline(this_pcpu_id);
+ } else {
+ /*
+ * The current pcpu can't reset itself
+ */
+ pr_warn("%s: cannot offline self(%u)",
+ __func__, this_pcpu_id);
+ ret = -EINVAL;
+ }
+ }
+
+ foreach_vcpu(i, vm, vcpu) {
+ if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
+ make_pcpu_offline(pcpuid_from_vcpu(vcpu));
+ }
+ }
+
+ wait_pcpus_offline(mask);
+ if (!start_pcpus(mask)) {
+ pr_fatal("Failed to start all cpus in mask(0x%lx)", mask);
+ ret = -ETIMEDOUT;
+ }
+ return ret;
+}
+
/*
* @pre vm != NULL
*/
int32_t shutdown_vm(struct acrn_vm *vm)
{
uint16_t i;
- uint16_t this_pcpu_id;
uint64_t mask;
struct acrn_vcpu *vcpu = NULL;
struct acrn_vm_config *vm_config = NULL;
@@ -636,32 +676,14 @@ int32_t shutdown_vm(struct acrn_vm *vm)
/* Only allow shutdown paused vm */
if (vm->state == VM_PAUSED) {
vm->state = VM_POWERED_OFF;
- this_pcpu_id = get_pcpu_id();
- mask = lapic_pt_enabled_pcpu_bitmap(vm);

- /*
- * If the current pcpu needs to offline itself,
- * it will be done after shutdown_vm() completes
- * in the idle thread.
- */
- if (bitmap_test(this_pcpu_id, &mask)) {
- bitmap_clear_nolock(this_pcpu_id, &mask);
- make_pcpu_offline(this_pcpu_id);
+ mask = lapic_pt_enabled_pcpu_bitmap(vm);
+ if (mask != 0UL) {
+ ret = offline_lapic_pt_enabled_pcpus(vm, mask);
}

foreach_vcpu(i, vm, vcpu) {
offline_vcpu(vcpu);
-
- if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
- make_pcpu_offline(pcpuid_from_vcpu(vcpu));
- }
- }
-
- wait_pcpus_offline(mask);
-
- if ((mask != 0UL) && (!start_pcpus(mask))) {
- pr_fatal("Failed to start all cpus in mask(0x%lx)", mask);
- ret = -ETIMEDOUT;
}

vm_config = get_vm_config(vm->vm_id);
@@ -716,38 +738,18 @@ void start_vm(struct acrn_vm *vm)
int32_t reset_vm(struct acrn_vm *vm)
{
uint16_t i;
- uint16_t this_pcpu_id;
uint64_t mask;
struct acrn_vcpu *vcpu = NULL;
int32_t ret = 0;

if (vm->state == VM_PAUSED) {
- this_pcpu_id = get_pcpu_id();
mask = lapic_pt_enabled_pcpu_bitmap(vm);
-
- /*
- * The current pcpu can't reset itself
- */
- if (bitmap_test(this_pcpu_id, &mask)) {
- pr_warn("%s: cannot offline self(%u)",
- __func__, this_pcpu_id);
- bitmap_clear_nolock(this_pcpu_id, &mask);
- ret = -EINVAL;
+ if (mask != 0UL) {
+ ret = offline_lapic_pt_enabled_pcpus(vm, mask);
}

foreach_vcpu(i, vm, vcpu) {
reset_vcpu(vcpu, COLD_RESET);
-
- if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
- make_pcpu_offline(pcpuid_from_vcpu(vcpu));
- }
- }
-
- wait_pcpus_offline(mask);
-
- if ((mask != 0UL) && (!start_pcpus(mask))) {
- pr_fatal("Failed to start all cpus in mask(0x%lx)", mask);
- ret = -ETIMEDOUT;
}

/*
--
2.7.4


Re: I set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=6, acrn can not boot.

Zhang, XuepengX
 

Yes, I set the default value and it is OK.


Re: [PATCH] HV: init local variable before it is used.

Li, Fei1
 

On Thu, Dec 05, 2019 at 03:31:32PM +0800, Minggui Cao wrote:
it is better to init bdfs_from_drhds.pci_bdf_map_count
before it is passed to another function that does:
bdfs_from_drhds->pci_bdf_map_count++
LGTM.

Tracked-On: #3875
Signed-off-by: Minggui Cao <minggui.cao@...>
---
hypervisor/hw/pci.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hypervisor/hw/pci.c b/hypervisor/hw/pci.c
index af395f0..02c4694 100644
--- a/hypervisor/hw/pci.c
+++ b/hypervisor/hw/pci.c
@@ -357,7 +357,7 @@ static void pci_parse_iommu_devscopes(struct pci_bdf_set *const bdfs_from_drhds,
void init_pci_pdev_list(void)
{
uint64_t buses_visited[BUSES_BITMAP_LEN] = {0UL};
- struct pci_bdf_set bdfs_from_drhds;
+ struct pci_bdf_set bdfs_from_drhds = {.pci_bdf_map_count = 0U};
uint32_t drhd_idx_pci_all = INVALID_DRHD_INDEX;
uint16_t bus;
bool was_visited = false;
--
2.7.4
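
For reference, a stand-alone illustration of why the initializer matters (hypothetical types, not the ACRN structures): an automatic variable starts with indeterminate contents, so incrementing an uninitialized counter reads garbage, while a designated initializer zero-fills every member not explicitly named (C99 6.7.8 / C11 6.7.9):

#include <stdio.h>

struct bdf_set {
	unsigned int map_count;
	unsigned short bdfs[16];
};

static void add_bdf(struct bdf_set *s, unsigned short bdf)
{
	s->bdfs[s->map_count] = bdf;	/* uses map_count as an index... */
	s->map_count++;			/* ...then advances it */
}

int main(void)
{
	/* Without the initializer, map_count would be stack garbage. */
	struct bdf_set set = {.map_count = 0U};	/* bdfs[] is zeroed too */

	add_bdf(&set, 0x00f8U);
	printf("count=%u first=0x%04x\n", set.map_count, set.bdfs[0]);
	return 0;
}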




Re: I set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=6, acrn can not boot.

Yin, Fengwei <fengwei.yin@...>
 

Hi Geoff,

On Fri, Dec 27, 2019 at 09:03:16AM +0000, Geoffroy Van Cutsem wrote:
Do we understand why too much log output would prevent a board from booting?
The board is running, but too many logs are output to the serial port. That
prevents people from using the system.

Regards
Yin, Fengwei


 

From: acrn-dev@... <acrn-dev@...> On Behalf Of Minggui Cao
Sent: Friday, December 27, 2019 3:41 AM
To: acrn-dev@...
Subject: Re: [acrn-dev] I set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=6, acrn can not boot.

 

Hi, Xuepeng,

CONFIG_CONSOLE_LOGLEVEL_DEFAULT is used for debugging purposes; we just set it to a proper value matching the board's capability. If it is set to 6 and the board can't boot normally, there could be too many logs to output.

So please lower its level and have a try.

 

Thanks!

Minggui

 

From: acrn-dev@... <acrn-dev@...> On Behalf Of Zhang, XuepengX
Sent: Friday, December 27, 2019 10:25 AM
To: acrn-dev@...
Subject: [acrn-dev] I set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=6, acrn can not boot.

 

When I set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=6, acrn cannot boot; the log looks like this:
[80168926us][cpu=0][vm0:vcpu0][sev=4][seq=8024]:Spurious vector: 0x6c.
[80176485us][cpu=0][vm0:vcpu0][sev=6][seq=8025]:IO write on port 0070, data 0000008a
[80185503us][cpu=0][vm0:vcpu0][sev=6][seq=8026]:vlapic: vlapic_update_ppr 0x00
[80193889us][cpu=0][vm0:vcpu0][sev=6][seq=8027]:Exit Reason: 0x000000000000001e
[80202493us][cpu=0][vm0:vcpu0][sev=6][seq=8028]:IO read on port 0071, data 00000026
[80211394us][cpu=0][vm0:vcpu0][sev=6][seq=8029]:Exit Reason: 0x000000000000001e
[80219991us][cpu=0][vm0:vcpu0][sev=6][seq=8030]:IO write on port 0070, data 000000b2
[80229011us][cpu=0][vm0:vcpu0][sev=6][seq=8031]:Exit Reason: 0x000000000000001e
[80237600us][cpu=0][vm0:vcpu0][sev=6][seq=8032]:IO write on port 0cf8, data 00000000
[80246614us][cpu=0][vm0:vcpu0][sev=6][seq=8033]:Exit Reason: 0x0000000000000007
[80255207us][cpu=0][vm0:vcpu0][sev=6][seq=8034]:vlapic: vlapic_update_ppr 0x00
[80263605us][cpu=0][vm0:vcpu0][sev=6][seq=8035]:Exit Reason: 0x0000000000000007
[80272195us][cpu=0][vm0:vcpu0][sev=6][seq=8036]:vlapic: vlapic_update_ppr 0x00
[80280585us][cpu=0][vm0:vcpu0][sev=6][seq=8037]:vlapic: vlapic_update_ppr 0x60
[80288976us][cpu=0][vm0:vcpu0][sev=6][seq=8038]:vlapic: vlapic_update_ppr 0x60
[80297369us][cpu=0][vm0:vcpu0][sev=6][seq=8039]:Exit Reason: 0x000000000000001e
[80305970us][cpu=0][vm0:vcpu0][sev=6][seq=8040]:IO read on port 0cf8, data 00000000
[80314873us][cpu=0][vm0:vcpu0][sev=6][seq=8041]:vlapic: vlapic_update_ppr 0x60
[80323262us][cpu=0][vm0:vcpu0][sev=6][seq=8042]:Exit Reason: 0x000000000000001e
[80331859us][cpu=0][vm0:vcpu0][sev=6][seq=8043]:IO read on port 0070, data 00000032
[80340767us][cpu=0][vm0:vcpu0][sev=6][seq=8044]:vlapic: vlapic_update_ppr 0x60
[80349160us][cpu=0][vm0:vcpu0][sev=6][seq=8045]:Exit Reason: 0x0000000000000001
[80357760us][cpu=0][vm0:vcpu0][sev=6][seq=8046]:vlapic: vlapic_update_ppr 0x60
[80366147us][cpu=0][vm0:vcpu0][sev=6][seq=8047]:Exit Reason: 0x000000000000000a
Has anyone ever had a situation like this?





Re: [PATCH] acrn-config: override MAX_KATA_VM_NUM when use dual-core board

Liu, WeiX W
 

OK, got it. This will be fixed in the PR.

Thanks,
Liu,wei

-----Original Message-----
From: VanCutsem, Geoffroy
Sent: Friday, December 27, 2019 4:57 PM
To: acrn-dev@...; Liu, WeiX W <weix.w.liu@...>
Cc: Zou, Terry <terry.zou@...>; Wu, Binbin <binbin.wu@...>
Subject: RE: [acrn-dev] [PATCH] acrn-config: override MAX_KATA_VM_NUM when use dual-core board

I would actually suggest to rephrase to: "Kata VM is not supported on dual-core systems"

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On Behalf Of Victor Sun
Sent: Friday, December 27, 2019 9:49 AM
To: Liu, WeiX W <weix.w.liu@...>; acrn-dev@...
Cc: Zou, Terry <terry.zou@...>; Wu, Binbin <binbin.wu@...>
Subject: Re: [acrn-dev] [PATCH] acrn-config: override MAX_KATA_VM_NUM when use dual-core board

support -> supported

On 12/27/2019 4:39 PM, Wei Liu wrote:
KATA VM will not be support on a dual-core system, override the
MAX_KATA_VM_NUM to 0.

Signed-off-by: Wei Liu <weix.w.liu@...>
---
misc/acrn-config/board_config/new_board_kconfig.py | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/misc/acrn-config/board_config/new_board_kconfig.py b/misc/acrn-config/board_config/new_board_kconfig.py
index 2c93dc06..7fcd4c50 100644
--- a/misc/acrn-config/board_config/new_board_kconfig.py
+++ b/misc/acrn-config/board_config/new_board_kconfig.py
@@ -145,4 +145,9 @@ def generate_file(config):
print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
print("CONFIG_HV_RAM_SIZE={}".format(hex(hv_ram_size)), file=config)

+ cpu_core_num = len(board_cfg_lib.get_processor_info())
+ if cpu_core_num == 2:
+ print("# KATA VM will not be support on a dual-core system", file=config)
+ print("CONFIG_MAX_KATA_VM_NUM=0", file=config)
+
return err_dic


Re: [PATCH] acrn-config: avoid conflict slot for launch config

Liu, WeiX W
 

OK, I will modify them to the b:d:f format for the -s option in the next version.

Thanks,
Liu,wei

-----Original Message-----
From: Sun, Victor
Sent: Friday, December 27, 2019 5:07 PM
To: Liu, WeiX W <weix.w.liu@...>; acrn-dev@...
Cc: Zou, Terry <terry.zou@...>; Wu, Binbin <binbin.wu@...>
Subject: Re: [PATCH] acrn-config: avoid conflict slot for launch config

Then how do we pass through the same slot:func but on a different bus?

On 12/27/2019 11:15 AM, Wei Liu wrote:
The slot in launch config would be conflicted when the same bdf is
selected from webUI.
This patch do the below condition to avoid such case:
1. Add the bdf check for pass-through devices.
2. Use the dev_num:func as '-s' slot option in launch config.

Signed-off-by: Wei Liu <weix.w.liu@...>
---
misc/acrn-config/launch_config/launch_item.py | 1 +
misc/acrn-config/library/launch_cfg_lib.py | 30 ++++++++++++++++---
2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/misc/acrn-config/launch_config/launch_item.py b/misc/acrn-config/launch_config/launch_item.py
index c495cdca..b65c2457 100644
--- a/misc/acrn-config/launch_config/launch_item.py
+++ b/misc/acrn-config/launch_config/launch_item.py
@@ -145,6 +145,7 @@ class PthruSelected():

# check connections between several pass-through devices
launch_cfg_lib.pt_devs_check_audio(self.bdf['audio'], self.bdf['audio_codec'])
+ launch_cfg_lib.bdf_duplicate_check(self.bdf)


class VirtioDeviceSelect():
diff --git a/misc/acrn-config/library/launch_cfg_lib.py b/misc/acrn-config/library/launch_cfg_lib.py
index af1e6c47..aebc3c72 100644
--- a/misc/acrn-config/library/launch_cfg_lib.py
+++ b/misc/acrn-config/library/launch_cfg_lib.py
@@ -575,10 +575,10 @@ def get_slot(bdf_list, dev):
slot_list[p_id] = ''
else:
slot = int(bdf_list[p_id][3:5], 16)
- # re-allocate virtual slot while slot is 0
- if slot == 0:
- slot = virtual_dev_slot(dev)
- slot_list[p_id] = slot
+ fun = int(bdf_list[p_id][6:7], 16)
+ slot_fun = str(slot) + ":" + str(fun)
+ slot_list[p_id] = slot_fun
+ # add already used slot for pass-through devices to avoid conflict with virtio devices
PT_SLOT[dev] = slot

return slot_list
@@ -683,3 +683,25 @@ def cpu_sharing_check(cpu_sharing, item):
key = "uos:id={},{}".format(vm_i, item)
ERR_LIST[key] = "The same pcpu was configurated in scenario config, and not allow to set the cpu_sharing to 'Disabled'!"
return
+
+
+def bdf_duplicate_check(bdf_dic):
+ """
+ Check if a duplicate slot exists
+ :param bdf_dic: contains all selected pass-through devices
+ :return: None
+ """
+ bdf_used = []
+ for dev in bdf_dic.keys():
+ dev_bdf_dic = bdf_dic[dev]
+ for vm_i in dev_bdf_dic.keys():
+ dev_bdf = dev_bdf_dic[vm_i]
+ if not dev_bdf:
+ continue
+
+ if dev_bdf in bdf_used:
+ key = "uos:id={},{},{}".format(vm_i, 'passthrough_devices', dev)
+ ERR_LIST[key] = "You select the same device for {} pass-through !".format(dev)
+ return
+ else:
+ bdf_used.append(dev_bdf)
