[PATCH 1/3 V2] hv: use kick-mode in per-cpu to control kick pcpu


Minggui Cao
 

An INIT signal is used to kick a partitioned pCPU (e.g. one running an RTVM,
whose LAPIC is passed through); a notification IPI is used to kick a shared pCPU.

Add a per-CPU field, mode_to_kick_pcpu, to select the way each
pCPU is kicked.

Signed-off-by: Minggui Cao <minggui.cao@...>
---
hypervisor/arch/x86/cpu.c | 2 +-
hypervisor/arch/x86/guest/vcpu.c | 20 ++++++++-----------
hypervisor/arch/x86/guest/vm.c | 2 +-
hypervisor/arch/x86/guest/vmcs.c | 6 ------
hypervisor/arch/x86/lapic.c | 10 ++++++++++
hypervisor/common/sched_bvt.c | 4 ++--
hypervisor/common/sched_iorr.c | 2 +-
hypervisor/common/schedule.c | 24 +++++------------------
hypervisor/include/arch/x86/asm/lapic.h | 2 ++
hypervisor/include/arch/x86/asm/per_cpu.h | 1 +
hypervisor/include/common/schedule.h | 8 +-------
11 files changed, 32 insertions(+), 49 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 0b51fb9c8..e57a118ba 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -415,7 +415,7 @@ void make_pcpu_offline(uint16_t pcpu_id)
{
bitmap_set_lock(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
if (get_pcpu_id() != pcpu_id) {
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+ kick_pcpu(pcpu_id);
}
}

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index e07f0dafb..76babb891 100755
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -255,9 +255,6 @@ static void vcpu_reset_internal(struct acrn_vcpu *vcpu, enum reset_mode mode)
sizeof(struct run_context));
}

- /* TODO: we may need to add one scheduler->reset_data to reset the thread_obj */
- vcpu->thread_obj.notify_mode = SCHED_NOTIFY_IPI;
-
vlapic = vcpu_vlapic(vcpu);
vlapic_reset(vlapic, apicv_ops, mode);

@@ -529,6 +526,12 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
vcpu->vcpu_id = vcpu_id;
per_cpu(ever_run_vcpu, pcpu_id) = vcpu;

+ if (is_lapic_pt_configured(vm)) {
+ per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_INIT;
+ } else {
+ per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_IPI;
+ }
+
/* Initialize the parent VM reference */
vcpu->vm = vm;

@@ -790,14 +793,8 @@ void kick_vcpu(struct acrn_vcpu *vcpu)
{
uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);

- if ((get_pcpu_id() != pcpu_id) &&
- (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
- if (is_lapic_pt_enabled(vcpu)) {
- /* For lapic-pt vCPUs */
- send_single_init(pcpu_id);
- } else {
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
- }
+ if ((get_pcpu_id() != pcpu_id) && (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
+ kick_pcpu(pcpu_id);
}
}

@@ -970,7 +967,6 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
vcpu->thread_obj.sched_ctl = &per_cpu(sched_ctl, pcpu_id);
vcpu->thread_obj.thread_entry = vcpu_thread;
vcpu->thread_obj.pcpu_id = pcpu_id;
- /* vcpu->thread_obj.notify_mode is initialized in vcpu_reset_internal() when create vcpu */
vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in;
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index bdd31d3fa..fb0109ab0 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -1245,7 +1245,7 @@ void make_shutdown_vm_request(uint16_t pcpu_id)
{
bitmap_set_lock(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
if (get_pcpu_id() != pcpu_id) {
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+ kick_pcpu(pcpu_id);
}
}

diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 684507d7d..cc1ad7297 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -629,12 +629,6 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)

update_msr_bitmap_x2apic_passthru(vcpu);

- /*
- * After passthroughing lapic to guest, we should use INIT signal to
- * notify vcpu thread instead of IPI. Because the IPI will be delivered
- * the guest directly without vmexit.
- */
- vcpu->thread_obj.notify_mode = SCHED_NOTIFY_INIT;
} else {
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
value32 &= ~VMX_PROCBASED_CTLS2_VAPIC;
diff --git a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c
index 2835299b4..1a91de34c 100644
--- a/hypervisor/arch/x86/lapic.c
+++ b/hypervisor/arch/x86/lapic.c
@@ -12,6 +12,7 @@
#include <asm/cpu_caps.h>
#include <asm/lapic.h>
#include <asm/apicreg.h>
+#include <asm/irq.h>
#include <delay.h>

/* intr_lapic_icr_delivery_mode */
@@ -294,3 +295,12 @@ void send_single_init(uint16_t pcpu_id)
msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);

}
+
+void kick_pcpu(uint16_t pcpu_id)
+{
+ if (per_cpu(mode_to_kick_pcpu, pcpu_id) == DEL_MODE_INIT) {
+ send_single_init(pcpu_id);
+ } else {
+ send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+ }
+}
diff --git a/hypervisor/common/sched_bvt.c b/hypervisor/common/sched_bvt.c
index 56470953a..fc570e46c 100644
--- a/hypervisor/common/sched_bvt.c
+++ b/hypervisor/common/sched_bvt.c
@@ -139,11 +139,11 @@ static void sched_tick_handler(void *param)
if (!is_idle_thread(current)) {
data->run_countdown -= 1U;
if (data->run_countdown == 0U) {
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
} else {
if (!list_empty(&bvt_ctl->runqueue)) {
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
}
}
diff --git a/hypervisor/common/sched_iorr.c b/hypervisor/common/sched_iorr.c
index 538b9ffec..929e59fad 100644
--- a/hypervisor/common/sched_iorr.c
+++ b/hypervisor/common/sched_iorr.c
@@ -94,7 +94,7 @@ static void sched_tick_handler(void *param)
}
/* make reschedule request if current ran out of its cycles */
if (is_idle_thread(current) || data->left_cycles <= 0) {
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
}
}
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index e0a123247..422fca744 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -127,23 +127,13 @@ struct thread_object *sched_get_current(uint16_t pcpu_id)
-/**
- * @pre delmode == DEL_MODE_IPI || delmode == DEL_MODE_INIT
- */
-void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode)
+/**
+ * @brief Request a reschedule on the given pCPU; kick it if it is remote.
+ */
+void make_reschedule_request(uint16_t pcpu_id)
{
struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

bitmap_set_lock(NEED_RESCHEDULE, &ctl->flags);
if (get_pcpu_id() != pcpu_id) {
- switch (delmode) {
- case DEL_MODE_IPI:
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
- break;
- case DEL_MODE_INIT:
- send_single_init(pcpu_id);
- break;
- default:
- ASSERT(false, "Unknown delivery mode %u for pCPU%u", delmode, pcpu_id);
- break;
- }
+ kick_pcpu(pcpu_id);
}
}

@@ -202,11 +192,7 @@ void sleep_thread(struct thread_object *obj)
scheduler->sleep(obj);
}
if (is_running(obj)) {
- if (obj->notify_mode == SCHED_NOTIFY_INIT) {
- make_reschedule_request(pcpu_id, DEL_MODE_INIT);
- } else {
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
- }
+ make_reschedule_request(pcpu_id);
obj->be_blocking = true;
} else {
set_thread_status(obj, THREAD_STS_BLOCKED);
@@ -236,7 +222,7 @@ void wake_thread(struct thread_object *obj)
}
if (is_blocked(obj)) {
set_thread_status(obj, THREAD_STS_RUNNABLE);
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
obj->be_blocking = false;
}
@@ -245,7 +231,7 @@ void wake_thread(struct thread_object *obj)

void yield_current(void)
{
- make_reschedule_request(get_pcpu_id(), DEL_MODE_IPI);
+ make_reschedule_request(get_pcpu_id());
}

void run_thread(struct thread_object *obj)
diff --git a/hypervisor/include/arch/x86/asm/lapic.h b/hypervisor/include/arch/x86/asm/lapic.h
index cf5cad925..2251b2602 100644
--- a/hypervisor/include/arch/x86/asm/lapic.h
+++ b/hypervisor/include/arch/x86/asm/lapic.h
@@ -126,4 +126,6 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector);
*/
void send_single_init(uint16_t pcpu_id);

+void kick_pcpu(uint16_t pcpu_id);
+
#endif /* ARCH_X86_LAPIC_H */
diff --git a/hypervisor/include/arch/x86/asm/per_cpu.h b/hypervisor/include/arch/x86/asm/per_cpu.h
index 1c7c83d80..f05b253d0 100644
--- a/hypervisor/include/arch/x86/asm/per_cpu.h
+++ b/hypervisor/include/arch/x86/asm/per_cpu.h
@@ -54,6 +54,7 @@ struct per_cpu_region {
uint32_t lapic_id;
uint32_t lapic_ldr;
uint32_t softirq_servicing;
+ uint32_t mode_to_kick_pcpu;
struct smp_call_info_data smp_call_info;
struct list_head softirq_dev_entry_list;
#ifdef PROFILING_ON
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index abac7b8bb..04bb6c872 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -23,11 +23,6 @@ enum thread_object_state {
THREAD_STS_BLOCKED
};

-enum sched_notify_mode {
- SCHED_NOTIFY_INIT,
- SCHED_NOTIFY_IPI
-};
-
/* Tools can configure a VM to use PRIO_LOW or PRIO_HIGH */
enum thread_priority {
PRIO_IDLE = 0,
@@ -46,7 +41,6 @@ struct thread_object {
thread_entry_t thread_entry;
volatile enum thread_object_state status;
bool be_blocking;
- enum sched_notify_mode notify_mode;

uint64_t host_sp;
switch_t switch_out;
@@ -126,7 +120,7 @@ void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
void init_thread_data(struct thread_object *obj);
void deinit_thread_data(struct thread_object *obj);

-void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode);
+void make_reschedule_request(uint16_t pcpu_id);
bool need_reschedule(uint16_t pcpu_id);

void run_thread(struct thread_object *obj);
--
2.25.1


Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On
Behalf Of Minggui Cao
Sent: Friday, September 23, 2022 12:08 AM
To: acrn-dev@...
Cc: Cao, Minggui <minggui.cao@...>
Subject: [acrn-dev] [PATCH 1/3 V2] hv: use kick-mode in per-cpu to control
kick pcpu

INIT signal has been used to kick off the partitioned pCPU, like RTVM, whose
LAPIC is pass-through. IPI is used to kick off sharing pCPU.

Add mode_to_kick_pcpu in per-cpu to control the way of kicking pCPU.

Signed-off-by: Minggui Cao <minggui.cao@...>
---
hypervisor/arch/x86/cpu.c | 2 +-
hypervisor/arch/x86/guest/vcpu.c | 20 ++++++++-----------
hypervisor/arch/x86/guest/vm.c | 2 +-
hypervisor/arch/x86/guest/vmcs.c | 6 ------
hypervisor/arch/x86/lapic.c | 10 ++++++++++
hypervisor/common/sched_bvt.c | 4 ++--
hypervisor/common/sched_iorr.c | 2 +-
hypervisor/common/schedule.c | 24 +++++------------------
hypervisor/include/arch/x86/asm/lapic.h | 2 ++
hypervisor/include/arch/x86/asm/per_cpu.h | 1 +
hypervisor/include/common/schedule.h | 8 +-------
11 files changed, 32 insertions(+), 49 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index
0b51fb9c8..e57a118ba 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -415,7 +415,7 @@ void make_pcpu_offline(uint16_t pcpu_id) {
bitmap_set_lock(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
if (get_pcpu_id() != pcpu_id) {
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+ kick_pcpu(pcpu_id);
}
}

diff --git a/hypervisor/arch/x86/guest/vcpu.c
b/hypervisor/arch/x86/guest/vcpu.c
index e07f0dafb..76babb891 100755
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -255,9 +255,6 @@ static void vcpu_reset_internal(struct acrn_vcpu
*vcpu, enum reset_mode mode)
sizeof(struct run_context));
}

- /* TODO: we may need to add one scheduler->reset_data to reset the
thread_obj */
- vcpu->thread_obj.notify_mode = SCHED_NOTIFY_IPI;
-
vlapic = vcpu_vlapic(vcpu);
vlapic_reset(vlapic, apicv_ops, mode);

@@ -529,6 +526,12 @@ int32_t create_vcpu(uint16_t pcpu_id, struct
acrn_vm *vm, struct acrn_vcpu **rtn
vcpu->vcpu_id = vcpu_id;
per_cpu(ever_run_vcpu, pcpu_id) = vcpu;

+ if (is_lapic_pt_configured(vm)) {
+ per_cpu(mode_to_kick_pcpu, pcpu_id) =
DEL_MODE_INIT;
+ } else {
+ per_cpu(mode_to_kick_pcpu, pcpu_id) =
DEL_MODE_IPI;
+ }
+
/* Initialize the parent VM reference */
vcpu->vm = vm;

@@ -790,14 +793,8 @@ void kick_vcpu(struct acrn_vcpu *vcpu) {
uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);

- if ((get_pcpu_id() != pcpu_id) &&
- (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
- if (is_lapic_pt_enabled(vcpu)) {
- /* For lapic-pt vCPUs */
- send_single_init(pcpu_id);
- } else {
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
- }
+ if ((get_pcpu_id() != pcpu_id) && (per_cpu(vmcs_run, pcpu_id) ==
vcpu->arch.vmcs)) {
+ kick_pcpu(pcpu_id);
}
}

@@ -970,7 +967,6 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t
pcpu_id)
vcpu->thread_obj.sched_ctl = &per_cpu(sched_ctl, pcpu_id);
vcpu->thread_obj.thread_entry = vcpu_thread;
vcpu->thread_obj.pcpu_id = pcpu_id;
- /* vcpu->thread_obj.notify_mode is initialized in
vcpu_reset_internal() when create vcpu */
vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in; diff --git
a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c index
bdd31d3fa..fb0109ab0 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -1245,7 +1245,7 @@ void make_shutdown_vm_request(uint16_t
pcpu_id) {
bitmap_set_lock(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag,
pcpu_id));
if (get_pcpu_id() != pcpu_id) {
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+ kick_pcpu(pcpu_id);
}
}

diff --git a/hypervisor/arch/x86/guest/vmcs.c
b/hypervisor/arch/x86/guest/vmcs.c
index 684507d7d..cc1ad7297 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -629,12 +629,6 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu
*vcpu)

update_msr_bitmap_x2apic_passthru(vcpu);

- /*
- * After passthroughing lapic to guest, we should use INIT
signal to
- * notify vcpu thread instead of IPI. Because the IPI will be
delivered
- * the guest directly without vmexit.
- */
- vcpu->thread_obj.notify_mode = SCHED_NOTIFY_INIT;
} else {
value32 =
exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
value32 &= ~VMX_PROCBASED_CTLS2_VAPIC; diff --git
a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c index
2835299b4..1a91de34c 100644
--- a/hypervisor/arch/x86/lapic.c
+++ b/hypervisor/arch/x86/lapic.c
@@ -12,6 +12,7 @@
#include <asm/cpu_caps.h>
#include <asm/lapic.h>
#include <asm/apicreg.h>
+#include <asm/irq.h>
#include <delay.h>

/* intr_lapic_icr_delivery_mode */
@@ -294,3 +295,12 @@ void send_single_init(uint16_t pcpu_id)
msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);

}
+
+void kick_pcpu(uint16_t pcpu_id)
+{
+ if (per_cpu(mode_to_kick_pcpu, pcpu_id) == DEL_MODE_INIT) {
+ send_single_init(pcpu_id);
+ } else {
+ send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+ }
+}
diff --git a/hypervisor/common/sched_bvt.c
b/hypervisor/common/sched_bvt.c index 56470953a..fc570e46c 100644
--- a/hypervisor/common/sched_bvt.c
+++ b/hypervisor/common/sched_bvt.c
@@ -139,11 +139,11 @@ static void sched_tick_handler(void *param)
if (!is_idle_thread(current)) {
data->run_countdown -= 1U;
if (data->run_countdown == 0U) {
- make_reschedule_request(pcpu_id,
DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
} else {
if (!list_empty(&bvt_ctl->runqueue)) {
- make_reschedule_request(pcpu_id,
DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
}
}
diff --git a/hypervisor/common/sched_iorr.c
b/hypervisor/common/sched_iorr.c index 538b9ffec..929e59fad 100644
--- a/hypervisor/common/sched_iorr.c
+++ b/hypervisor/common/sched_iorr.c
@@ -94,7 +94,7 @@ static void sched_tick_handler(void *param)
}
/* make reschedule request if current ran out of its
cycles */
if (is_idle_thread(current) || data->left_cycles <= 0) {
- make_reschedule_request(pcpu_id,
DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
}
}
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index e0a123247..422fca744 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -127,23 +127,13 @@ struct thread_object *sched_get_current(uint16_t
pcpu_id)
/**
* @pre delmode == DEL_MODE_IPI || delmode == DEL_MODE_INIT
*/
-void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode)
+void make_reschedule_request(uint16_t pcpu_id)
{
struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

bitmap_set_lock(NEED_RESCHEDULE, &ctl->flags);
if (get_pcpu_id() != pcpu_id) {
- switch (delmode) {
- case DEL_MODE_IPI:
- send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
- break;
- case DEL_MODE_INIT:
- send_single_init(pcpu_id);
- break;
- default:
- ASSERT(false, "Unknown delivery mode %u for
pCPU%u", delmode, pcpu_id);
- break;
- }
+ kick_pcpu(pcpu_id);
}
}

@@ -202,11 +192,7 @@ void sleep_thread(struct thread_object *obj)
scheduler->sleep(obj);
}
if (is_running(obj)) {
- if (obj->notify_mode == SCHED_NOTIFY_INIT) {
- make_reschedule_request(pcpu_id,
DEL_MODE_INIT);
- } else {
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
- }
+ make_reschedule_request(pcpu_id);
obj->be_blocking = true;
} else {
set_thread_status(obj, THREAD_STS_BLOCKED); @@ -236,7
+222,7 @@ void wake_thread(struct thread_object *obj)
}
if (is_blocked(obj)) {
set_thread_status(obj, THREAD_STS_RUNNABLE);
- make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+ make_reschedule_request(pcpu_id);
}
obj->be_blocking = false;
}
@@ -245,7 +231,7 @@ void wake_thread(struct thread_object *obj)

void yield_current(void)
{
- make_reschedule_request(get_pcpu_id(), DEL_MODE_IPI);
+ make_reschedule_request(get_pcpu_id());
}

void run_thread(struct thread_object *obj) diff --git
a/hypervisor/include/arch/x86/asm/lapic.h
b/hypervisor/include/arch/x86/asm/lapic.h
index cf5cad925..2251b2602 100644
--- a/hypervisor/include/arch/x86/asm/lapic.h
+++ b/hypervisor/include/arch/x86/asm/lapic.h
@@ -126,4 +126,6 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t
vector);
*/
void send_single_init(uint16_t pcpu_id);

+void kick_pcpu(uint16_t pcpu_id);
+
#endif /* ARCH_X86_LAPIC_H */
diff --git a/hypervisor/include/arch/x86/asm/per_cpu.h
b/hypervisor/include/arch/x86/asm/per_cpu.h
index 1c7c83d80..f05b253d0 100644
--- a/hypervisor/include/arch/x86/asm/per_cpu.h
+++ b/hypervisor/include/arch/x86/asm/per_cpu.h
@@ -54,6 +54,7 @@ struct per_cpu_region {
uint32_t lapic_id;
uint32_t lapic_ldr;
uint32_t softirq_servicing;
+ uint32_t mode_to_kick_pcpu;
struct smp_call_info_data smp_call_info;
struct list_head softirq_dev_entry_list; #ifdef PROFILING_ON diff --git
a/hypervisor/include/common/schedule.h
b/hypervisor/include/common/schedule.h
index abac7b8bb..04bb6c872 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -23,11 +23,6 @@ enum thread_object_state {
THREAD_STS_BLOCKED
};

-enum sched_notify_mode {
- SCHED_NOTIFY_INIT,
- SCHED_NOTIFY_IPI
-};
-
/* Tools can configure a VM to use PRIO_LOW or PRIO_HIGH */ enum
thread_priority {
PRIO_IDLE = 0,
@@ -46,7 +41,6 @@ struct thread_object {
thread_entry_t thread_entry;
volatile enum thread_object_state status;
bool be_blocking;
- enum sched_notify_mode notify_mode;

uint64_t host_sp;
switch_t switch_out;
@@ -126,7 +120,7 @@ void release_schedule_lock(uint16_t pcpu_id,
uint64_t rflag); void init_thread_data(struct thread_object *obj); void
deinit_thread_data(struct thread_object *obj);

-void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode);
+void make_reschedule_request(uint16_t pcpu_id);
bool need_reschedule(uint16_t pcpu_id);

void run_thread(struct thread_object *obj);
--
2.25.1