Date   

[PATCH v2 0/3] HV: Add the support of 0x16 CPUID for HV and guest emulation

Zhao, Yakui
 

This is the patch set that adds support for 0x16 CPUID emulation for the guest
and calculates the tsc based on 0x16.

0x16 CPUID emulation can help the guest OS to obtain the cpu_hz from CPUID.
0x16 CPUID on HV can help to calculate the tsc if the zero tsc is returned
from 0x15 CPUID.


Zhao Yakui (3):
HV: Add the emulation of CPUID with 0x16 leaf
HV: Use the pre-defined value to calculate tsc when cpuid(0x15)
returns zero ecx
HV: Use the CPUID(0x16) to obtain tsc_hz when zero tsc_hz is returned
by 0x15 cpuid

hypervisor/arch/x86/cpuid.c | 53 ++++++++++++++++++++++++++++++++++++++++++---
hypervisor/arch/x86/timer.c | 26 +++++++++++++++++++---
2 files changed, 73 insertions(+), 6 deletions(-)

--
2.7.4


[PATCH] HV: add pcpu id check before send IPI

Minggui Cao
 

To avoid sending an IPI to self; also improve the related code:
1. get_cpu_id is uint16_t now
2. MISRA-C requirement, e.g. add {}

Signed-off-by: Minggui Cao <minggui.cao@...>
---
hypervisor/arch/x86/virq.c | 3 ++-
hypervisor/arch/x86/vmexit.c | 2 +-
hypervisor/common/schedule.c | 4 +++-
3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c
index e9c46f7..4d3cf35 100644
--- a/hypervisor/arch/x86/virq.c
+++ b/hypervisor/arch/x86/virq.c
@@ -104,8 +104,9 @@ void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
* scheduling, we need change here to determine it target vcpu is
* VMX non-root or root mode
*/
- if ((int)get_cpu_id() != vcpu->pcpu_id)
+ if (get_cpu_id() != vcpu->pcpu_id) {
send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
+ }
}

static int vcpu_do_pending_event(struct vcpu *vcpu)
diff --git a/hypervisor/arch/x86/vmexit.c b/hypervisor/arch/x86/vmexit.c
index 7f6eb40..ac908d1 100644
--- a/hypervisor/arch/x86/vmexit.c
+++ b/hypervisor/arch/x86/vmexit.c
@@ -157,7 +157,7 @@ int vmexit_handler(struct vcpu *vcpu)
uint16_t basic_exit_reason;
int ret;

- if ((int)get_cpu_id() != vcpu->pcpu_id) {
+ if (get_cpu_id() != vcpu->pcpu_id) {
pr_fatal("vcpu is not running on its pcpu!");
return -EINVAL;
}
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 1670d5f..ead3435 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -101,7 +101,9 @@ void make_reschedule_request(struct vcpu *vcpu)
struct sched_context *ctx = &per_cpu(sched_ctx, vcpu->pcpu_id);

bitmap_set_lock(NEED_RESCHEDULE, &ctx->flags);
- send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
+ if (get_cpu_id() != vcpu->pcpu_id) {
+ send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
+ }
}

int need_reschedule(uint16_t pcpu_id)
--
2.7.4


question about is_ept_supported()

Kaige Fu
 

Hi all,

Can acrn run on the platform which does not support EPT?

If no, seems the following code can be removed:
...
/* Check for EPT support */
if (is_ept_supported()) {
pr_dbg("EPT is supported");
}
else {
pr_err("Error: EPT is not supported");
}
...


--
Thanks
Kaige Fu


[PATCH v4 2/2] dump vcpu registers on correct vcpu

Chen, Jason CJ
 

After the update of the cpu context get/set method, the vcpu_dumpreg cmd
is no longer correct, as the registers may be read from the VMCS while
the hv shell may not be running on the target vcpu.

This patch makes use of smp_call_function for vcpu dumpreg so that the
dump always comes from the correct vcpu.

Signed-off-by: Jason Chen CJ <jason.cj.chen@...>
Acked-by: Eddie Dong <eddie.dong@...>
---
hypervisor/arch/x86/guest/vcpu.c | 71 +++++++++++++++++++++++++++
hypervisor/debug/shell.c | 84 ++------------------------------
hypervisor/include/arch/x86/guest/vcpu.h | 1 +
3 files changed, 76 insertions(+), 80 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 9dc0241..021797a 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -529,3 +529,74 @@ void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id)
{
bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work);
}
+
+#ifdef HV_DEBUG
+#define DUMPREG_SP_SIZE 32
+/* the input 'data' must != NULL and indicate a vcpu structure pointer */
+void vcpu_dumpreg(void *data)
+{
+ int status;
+ uint64_t i, fault_addr, tmp[DUMPREG_SP_SIZE];
+ uint32_t err_code = 0;
+ struct vcpu *vcpu = data;
+
+ printf("= VM ID %d ==== CPU ID %hu========================\r\n",
+ vcpu->vm->vm_id, vcpu->vcpu_id);
+ printf("= RIP=0x%016llx RSP=0x%016llx "
+ "RFLAGS=0x%016llx\r\n", vcpu_get_rip(vcpu),
+ vcpu_get_gpreg(vcpu, CPU_REG_RSP),
+ vcpu_get_rflags(vcpu));
+ printf("= CR0=0x%016llx CR2=0x%016llx\r\n",
+ vcpu_get_cr0(vcpu), vcpu_get_cr2(vcpu));
+ printf("= CR3=0x%016llx CR4=0x%016llx\r\n",
+ exec_vmread(VMX_GUEST_CR3), vcpu_get_cr4(vcpu));
+ printf("= RAX=0x%016llx RBX=0x%016llx "
+ "RCX=0x%016llx\r\n",
+ vcpu_get_gpreg(vcpu, CPU_REG_RAX),
+ vcpu_get_gpreg(vcpu, CPU_REG_RBX),
+ vcpu_get_gpreg(vcpu, CPU_REG_RCX));
+ printf("= RDX=0x%016llx RDI=0x%016llx "
+ "RSI=0x%016llx\r\n",
+ vcpu_get_gpreg(vcpu, CPU_REG_RDX),
+ vcpu_get_gpreg(vcpu, CPU_REG_RDI),
+ vcpu_get_gpreg(vcpu, CPU_REG_RSI));
+ printf("= RBP=0x%016llx R8=0x%016llx "
+ "R9=0x%016llx\r\n",
+ vcpu_get_gpreg(vcpu, CPU_REG_RBP),
+ vcpu_get_gpreg(vcpu, CPU_REG_R8),
+ vcpu_get_gpreg(vcpu, CPU_REG_R9));
+ printf("= R10=0x%016llx R11=0x%016llx "
+ "R12=0x%016llx\r\n",
+ vcpu_get_gpreg(vcpu, CPU_REG_R10),
+ vcpu_get_gpreg(vcpu, CPU_REG_R11),
+ vcpu_get_gpreg(vcpu, CPU_REG_R12));
+ printf("= R13=0x%016llx R14=0x%016llx R15=0x%016llx\r\n",
+ vcpu_get_gpreg(vcpu, CPU_REG_R13),
+ vcpu_get_gpreg(vcpu, CPU_REG_R14),
+ vcpu_get_gpreg(vcpu, CPU_REG_R15));
+
+ /* dump sp */
+ status = copy_from_gva(vcpu, tmp, vcpu_get_gpreg(vcpu, CPU_REG_RSP),
+ DUMPREG_SP_SIZE*sizeof(uint64_t), &err_code,
+ &fault_addr);
+ if (status < 0) {
+ /* copy_from_gva fail */
+ printf("Cannot handle user gva yet!\r\n");
+ } else {
+ printf("\r\nDump RSP for vm %hu, from gva 0x%016llx\r\n",
+ vcpu->vm->vm_id, vcpu_get_gpreg(vcpu, CPU_REG_RSP));
+
+ for (i = 0UL; i < 8UL; i++) {
+ printf("= 0x%016llx 0x%016llx "
+ "0x%016llx 0x%016llx\r\n",
+ tmp[i*4UL], tmp[(i*4UL)+1UL],
+ tmp[(i*4UL)+2UL], tmp[(i*4UL)+3UL]);
+ }
+ }
+}
+#else
+void vcpu_dumpreg(__unused struct vcpu *vcpu)
+{
+ return;
+}
+#endif /* HV_DEBUG */
diff --git a/hypervisor/debug/shell.c b/hypervisor/debug/shell.c
index 5e1dbe6..ca7fe9f 100644
--- a/hypervisor/debug/shell.c
+++ b/hypervisor/debug/shell.c
@@ -583,18 +583,14 @@ int shell_list_vcpu(__unused int argc, __unused char **argv)
return 0;
}

-#define DUMPREG_SP_SIZE 32
int shell_vcpu_dumpreg(int argc, char **argv)
{
int status = 0;
uint16_t vm_id;
uint16_t vcpu_id;
- char temp_str[MAX_STR_SIZE];
struct vm *vm;
struct vcpu *vcpu;
- uint64_t i, fault_addr;
- uint64_t tmp[DUMPREG_SP_SIZE];
- uint32_t err_code = 0;
+ uint64_t mask = 0UL;

/* User input invalidation */
if (argc != 3) {
@@ -623,82 +619,10 @@ int shell_vcpu_dumpreg(int argc, char **argv)
return -EINVAL;
}

- if (vcpu->state != VCPU_PAUSED) {
- shell_puts("NOTE: VCPU unPAUSEed, regdump "
- "may not be accurate\r\n");
- }
-
- snprintf(temp_str, MAX_STR_SIZE,
- "= VM ID %d ==== CPU ID %hu========================\r\n",
- vm->vm_id, vcpu->vcpu_id);
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= RIP=0x%016llx RSP=0x%016llx "
- "RFLAGS=0x%016llx\r\n", vcpu_get_rip(vcpu),
- vcpu_get_gpreg(vcpu, CPU_REG_RSP),
- vcpu_get_rflags(vcpu));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= CR0=0x%016llx CR2=0x%016llx\r\n",
- vcpu_get_cr0(vcpu), vcpu_get_cr2(vcpu));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= CR3=0x%016llx CR4=0x%016llx\r\n",
- exec_vmread(VMX_GUEST_CR3), vcpu_get_cr4(vcpu));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= RAX=0x%016llx RBX=0x%016llx "
- "RCX=0x%016llx\r\n",
- vcpu_get_gpreg(vcpu, CPU_REG_RAX),
- vcpu_get_gpreg(vcpu, CPU_REG_RBX),
- vcpu_get_gpreg(vcpu, CPU_REG_RCX));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= RDX=0x%016llx RDI=0x%016llx "
- "RSI=0x%016llx\r\n",
- vcpu_get_gpreg(vcpu, CPU_REG_RDX),
- vcpu_get_gpreg(vcpu, CPU_REG_RDI),
- vcpu_get_gpreg(vcpu, CPU_REG_RSI));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= RBP=0x%016llx R8=0x%016llx "
- "R9=0x%016llx\r\n",
- vcpu_get_gpreg(vcpu, CPU_REG_RBP),
- vcpu_get_gpreg(vcpu, CPU_REG_R8),
- vcpu_get_gpreg(vcpu, CPU_REG_R9));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE, "= R10=0x%016llx R11=0x%016llx "
- "R12=0x%016llx\r\n",
- vcpu_get_gpreg(vcpu, CPU_REG_R10),
- vcpu_get_gpreg(vcpu, CPU_REG_R11),
- vcpu_get_gpreg(vcpu, CPU_REG_R12));
- shell_puts(temp_str);
- snprintf(temp_str, MAX_STR_SIZE,
- "= R13=0x%016llx R14=0x%016llx R15=0x%016llx\r\n",
- vcpu_get_gpreg(vcpu, CPU_REG_R13),
- vcpu_get_gpreg(vcpu, CPU_REG_R14),
- vcpu_get_gpreg(vcpu, CPU_REG_R15));
- shell_puts(temp_str);
-
- /* dump sp */
- status = copy_from_gva(vcpu, tmp, vcpu_get_gpreg(vcpu, CPU_REG_RSP),
- DUMPREG_SP_SIZE*sizeof(uint64_t), &err_code,
- &fault_addr);
- if (status < 0) {
- /* copy_from_gva fail */
- shell_puts("Cannot handle user gva yet!\r\n");
- } else {
- snprintf(temp_str, MAX_STR_SIZE,
- "\r\nDump RSP for vm %hu, from "
- "gva 0x%016llx\r\n",
- vm_id, vcpu_get_gpreg(vcpu, CPU_REG_RSP));
- shell_puts(temp_str);
+ bitmap_set_lock(vcpu->pcpu_id, &mask);
+ smp_call_function(mask, vcpu_dumpreg, vcpu);

- for (i = 0UL; i < 8UL; i++) {
- snprintf(temp_str, MAX_STR_SIZE,
- "= 0x%016llx 0x%016llx "
- "0x%016llx 0x%016llx\r\n",
- tmp[i*4UL], tmp[(i*4UL)+1UL],
- tmp[(i*4UL)+2UL], tmp[(i*4UL)+3UL]);
- shell_puts(temp_str);
- }
- }
-
- return status;
+ return 0;
}

#define MAX_MEMDUMP_LEN (32U*8U)
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 568ec66..4709f72 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -289,6 +289,7 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id);

void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id);

+void vcpu_dumpreg(void *data);
#endif

#endif
--
2.7.4


[PATCH v4 1/2] add smp_call_function support

Chen, Jason CJ
 

Make use of the VCPU_NOTIFY vector to add smp_call_function support.
Added a per_cpu field smp_call_info, and ensure that each smp_call_function
call is completed one by one.

v3:
- remove per_cpu lock in smp_call_info
- use a global lock to ensure smp_call_function sequence
- use pcpu_sync_sleep to wait IPI complete

v2:
- after a new smp function comes, if an old one exists, changed from
overwriting it with the new one to ignoring the new one.

Signed-off-by: Jason Chen CJ <jason.cj.chen@...>
---
hypervisor/arch/x86/cpu.c | 3 +--
hypervisor/arch/x86/notify.c | 51 ++++++++++++++++++++++++++++++++---
hypervisor/include/arch/x86/cpu.h | 1 +
hypervisor/include/arch/x86/irq.h | 7 +++++
hypervisor/include/arch/x86/per_cpu.h | 1 +
5 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 02cbaec..bd5c2fc 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -53,7 +53,6 @@ static void cpu_xsave_init(void);
static void set_current_cpu_id(uint16_t pcpu_id);
static void print_hv_banner(void);
static uint16_t get_cpu_id_from_lapic_id(uint8_t lapic_id);
-static void pcpu_sync_sleep(uint64_t *sync, uint64_t mask_bit);
int ibrs_type;
static uint64_t start_tsc __attribute__((__section__(".bss_noinit")));

@@ -797,7 +796,7 @@ static void print_hv_banner(void)
printf(boot_msg);
}

-static void pcpu_sync_sleep(uint64_t *sync, uint64_t mask_bit)
+void pcpu_sync_sleep(uint64_t *sync, uint64_t mask_bit)
{
uint64_t wake_sync = (1UL << mask_bit);

diff --git a/hypervisor/arch/x86/notify.c b/hypervisor/arch/x86/notify.c
index 2d31b36..d2bc587 100644
--- a/hypervisor/arch/x86/notify.c
+++ b/hypervisor/arch/x86/notify.c
@@ -8,15 +8,59 @@

static struct dev_handler_node *notification_node;

+static uint64_t pcpu_sync_smp_call = 0UL;
+
+spinlock_t smp_call_spinlock = {
+ .head = 0U,
+ .tail = 0U
+};
+
/* run in interrupt context */
static int kick_notification(__unused int irq, __unused void *data)
{
- /* Notification vector does not require handling here, it's just used
- * to kick taget cpu out of non-root mode.
+ /* Notification vector is used to kick taget cpu out of non-root mode.
+ * And it also serves for smp call.
*/
+ uint16_t pcpu_id = get_cpu_id();
+
+ if (bitmap_test(pcpu_id, &pcpu_sync_smp_call)) {
+ struct smp_call_info_data *smp_call =
+ &per_cpu(smp_call_info, pcpu_id);
+
+ if (smp_call->func)
+ smp_call->func(smp_call->data);
+ bitmap_clear_lock(pcpu_id, &pcpu_sync_smp_call);
+ }
+
return 0;
}

+void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
+{
+ uint16_t pcpu_id;
+ struct smp_call_info_data *smp_call;
+
+ spinlock_obtain(&smp_call_spinlock);
+ while ((pcpu_id = ffs64(mask)) != INVALID_BIT_INDEX) {
+ bitmap_clear_lock(pcpu_id, &mask);
+ if (pcpu_id == get_cpu_id()) {
+ if (func)
+ func(data);
+ } else if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+ smp_call = &per_cpu(smp_call_info, pcpu_id);
+ smp_call->func = func;
+ smp_call->data = data;
+ bitmap_set_lock(pcpu_id, &pcpu_sync_smp_call);
+ send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
+ pcpu_sync_sleep(&pcpu_sync_smp_call, (uint64_t)pcpu_id);
+ } else {
+ /* pcpu is not in active, print error */
+ pr_err("pcpu_id %d not in active!", pcpu_id);
+ }
+ }
+ spinlock_release(&smp_call_spinlock);
+}
+
static int request_notification_irq(dev_handler_t func, void *data,
const char *name)
{
@@ -42,10 +86,9 @@ static int request_notification_irq(dev_handler_t func, void *data,

void setup_notification(void)
{
- uint16_t cpu;
+ uint16_t cpu = get_cpu_id();
char name[32] = {0};

- cpu = get_cpu_id();
if (cpu > 0U) {
return;
}
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index e56f443..9ae13f6 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -330,6 +330,7 @@ void bsp_boot_init(void);
void cpu_secondary_init(void);
void start_cpus();
void stop_cpus();
+void pcpu_sync_sleep(uint64_t *sync, uint64_t mask_bit);

/* Read control register */
#define CPU_CR_READ(cr, result_ptr) \
diff --git a/hypervisor/include/arch/x86/irq.h b/hypervisor/include/arch/x86/irq.h
index a83df53..47ab85f 100644
--- a/hypervisor/include/arch/x86/irq.h
+++ b/hypervisor/include/arch/x86/irq.h
@@ -44,6 +44,13 @@ struct intr_excp_ctx {
uint64_t ss;
};

+typedef void (*smp_call_func_t)(void *data);
+struct smp_call_info_data {
+ smp_call_func_t func;
+ void *data;
+};
+void smp_call_function(uint64_t mask, smp_call_func_t func, void *data);
+
int handle_level_interrupt_common(struct irq_desc *desc,
__unused void *handler_data);
int common_handler_edge(struct irq_desc *desc, __unused void *handler_data);
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index 4076e27..8ce5659 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -44,6 +44,7 @@ struct per_cpu_region {
uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
char logbuf[LOG_MESSAGE_MAX_SIZE];
uint8_t lapic_id;
+ struct smp_call_info_data smp_call_info;
} __aligned(CPU_PAGE_SIZE); //per_cpu_region size aligned with CPU_PAGE_SIZE

extern struct per_cpu_region *per_cpu_data_base_ptr;
--
2.7.4


[PATCH v4 0/2] fix vcpu dump_reg cmd

Chen, Jason CJ
 

After the update of the cpu context get/set method, the vcpu_dumpreg cmd
is no longer correct, as the registers may be read from the VMCS while
the hv shell may not be running on the target vcpu.

This patch series adds smp_call_function support and makes use of it for
vcpu dumpreg so that the dump always comes from the correct vcpu.

v4:
- remove per_cpu lock in smp_call_info
- use a global lock to ensure smp_call_function sequence
- use pcpu_sync_sleep to wait IPI complete

v3:
- after a new smp function comes, if an old one exists, changed from
overwriting it with the new one to ignoring the new one.

v2:
- use smp_call_function instead of a request

Jason Chen CJ (2):
add smp_call_function support
dump vcpu registers on correct vcpu

hypervisor/arch/x86/cpu.c | 3 +-
hypervisor/arch/x86/guest/vcpu.c | 71 +++++++++++++++++++++++++++
hypervisor/arch/x86/notify.c | 51 +++++++++++++++++--
hypervisor/debug/shell.c | 84 ++------------------------------
hypervisor/include/arch/x86/cpu.h | 1 +
hypervisor/include/arch/x86/guest/vcpu.h | 1 +
hypervisor/include/arch/x86/irq.h | 7 +++
hypervisor/include/arch/x86/per_cpu.h | 1 +
8 files changed, 133 insertions(+), 86 deletions(-)

--
2.7.4


[PATCH] hv: treewide: fix 'Empty parameter list to procedure/function'

Shiqing Gao
 

Use func(void) rather than func() for the function declaration and
definition based on MISRAC requirement.

Signed-off-by: Shiqing Gao <shiqing.gao@...>
---
hypervisor/arch/x86/cpu.c | 4 ++--
hypervisor/include/arch/x86/cpu.h | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 02cbaec..84a8fcf 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -667,7 +667,7 @@ static uint16_t get_cpu_id_from_lapic_id(uint8_t lapic_id)
/*
* Start all secondary CPUs.
*/
-void start_cpus()
+void start_cpus(void)
{
uint32_t timeout;
uint16_t expected_up;
@@ -710,7 +710,7 @@ void start_cpus()
}
}

-void stop_cpus()
+void stop_cpus(void)
{
uint16_t pcpu_id, expected_up;
uint32_t timeout;
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index e56f443..a29a7c5 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -328,8 +328,8 @@ bool cpu_has_cap(uint32_t bit);
void load_cpu_state_data(void);
void bsp_boot_init(void);
void cpu_secondary_init(void);
-void start_cpus();
-void stop_cpus();
+void start_cpus(void);
+void stop_cpus(void);

/* Read control register */
#define CPU_CR_READ(cr, result_ptr) \
--
1.9.1


[RFC PATCH 2/2] hv: vtd: alloc iommu domain for UOS when create_vm

Li, Fei1
 

Another minor modification: rename host_domain to dom0_domain

Signed-off-by: Li, Fei1 <fei1.li@...>
---
hypervisor/arch/x86/guest/vm.c | 6 ++++++
hypervisor/arch/x86/vtd.c | 11 ++++++-----
hypervisor/common/hypercall.c | 26 +++++++++-----------------
3 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 0210d9b..6c20f8c 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -141,6 +141,12 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
goto err;
}

+ /* create a iommu domain for UOS */
+ if (!is_vm0(vm)) {
+ vm->iommu = create_iommu_domain(
+ HVA2HPA(vm->arch_vm.nworld_eptp), 48U);
+ }
+
/* Only for SOS: Configure VM software information */
/* For UOS: This VM software information is configure in DM */
if (is_vm0(vm)) {
diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index b178521..8b4e634 100644
--- a/hypervisor/arch/x86/vtd.c
+++ b/hypervisor/arch/x86/vtd.c
@@ -169,7 +169,7 @@ static uint32_t dmar_hdrh_unit_count;
static uint32_t max_domain_id = 63U;
static uint64_t domain_bitmap;
static spinlock_t domain_lock;
-static struct iommu_domain *host_domain;
+static struct iommu_domain *dom0_domain;
static struct list_head iommu_domains;

static void dmar_register_hrhd(struct dmar_drhd_rt *dmar_uint);
@@ -1126,7 +1126,7 @@ int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,

/* TODO: check if the device assigned */
if (domain->dom_id != ACRN_DOM0_IOMMU_DOM_ID) {
- remove_iommu_device(host_domain, 0U, bus, devfun);
+ remove_iommu_device(dom0_domain, 0U, bus, devfun);
}
add_iommu_device(domain, 0U, bus, devfun);
return 0;
@@ -1142,7 +1142,7 @@ int unassign_iommu_device(struct iommu_domain *domain, uint8_t bus,
/* TODO: check if the device assigned */
remove_iommu_device(domain, 0U, bus, devfun);
if (domain->dom_id != ACRN_DOM0_IOMMU_DOM_ID) {
- add_iommu_device(host_domain, 0U, bus, devfun);
+ add_iommu_device(dom0_domain, 0U, bus, devfun);
}
return 0;
}
@@ -1272,11 +1272,12 @@ void init_iommu(void)

register_hrhd_units();

- host_domain = create_iommu_domain(0UL, 48U);
+ dom0_domain = create_iommu_domain(0UL, 48U);

+ /*TODO: remove me if we could assign a device dynamically */
for (bus = 0U; bus <= IOMMU_INIT_BUS_LIMIT; bus++) {
for (devfun = 0U; devfun <= 255U; devfun++) {
- add_iommu_device(host_domain, 0U,
+ add_iommu_device(dom0_domain, 0U,
(uint8_t)bus, (uint8_t)devfun);
}
}
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index c1c2e76..e763082 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -668,8 +668,15 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
uint16_t bdf;
struct vm *target_vm = get_vm_from_vmid(vmid);

- if (target_vm == NULL) {
- pr_err("%s, vm is null\n", __func__);
+ if ((target_vm == NULL) || (vmid == ACRN_DOM0_VMID)) {
+ pr_err("%s, invalid paremeter: vm[%d]\n", __func__, vmid);
+ return -EINVAL;
+ }
+
+ if ((target_vm->iommu == NULL) ||
+ (target_vm->arch_vm.nworld_eptp == NULL)) {
+ pr_err("%s, EPT of VM not set!\n", __func__,
+ target_vm->vm_id);
return -EINVAL;
}

@@ -679,21 +686,6 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
return -EIO;
}

- /* create a iommu domain for target VM if not created */
- if (target_vm->iommu == NULL) {
- if (target_vm->arch_vm.nworld_eptp == NULL) {
- pr_err("%s, EPT of VM not set!\n",
- __func__, target_vm->vm_id);
- return -EPERM;
- }
- /* TODO: how to get vm's address width? */
- target_vm->iommu = create_iommu_domain(
- HVA2HPA(target_vm->arch_vm.nworld_eptp), 48U);
- if (target_vm->iommu == NULL) {
- return -ENODEV;
- }
-
- }
ret = assign_iommu_device(target_vm->iommu,
(uint8_t)(bdf >> 8), (uint8_t)(bdf & 0xffU));

--
2.7.4


[RFC PATCH 1/2] hv: vtd: use create_iommu_domain to allocate domain 0 iommu domain

Li, Fei1
 

Modify create_iommu_domain to be a common API which can also allocate
an iommu domain for domain 0.
Remove create_host_domain.

Signed-off-by: Li, Fei1 <fei1.li@...>
---
hypervisor/arch/x86/vtd.c | 73 +++++++++++++++------------------------
hypervisor/common/hypercall.c | 2 +-
hypervisor/include/arch/x86/vtd.h | 4 +--
3 files changed, 30 insertions(+), 49 deletions(-)

diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index 62387fa..b178521 100644
--- a/hypervisor/arch/x86/vtd.c
+++ b/hypervisor/arch/x86/vtd.c
@@ -17,6 +17,8 @@
#define ACRN_DBG_IOMMU 6U
#endif

+#define ACRN_DOM0_IOMMU_DOM_ID (1U)
+
/* set an appropriate bus limitation when iommu init,
* to reduce memory & time cost
*/
@@ -150,10 +152,8 @@ struct dmar_context_entry {

struct iommu_domain {
struct list_head list;
- bool is_host;
bool is_tt_ept; /* if reuse EPT of the domain */
uint16_t dom_id;
- uint16_t vm_id;
uint32_t addr_width; /* address width of the domain */
uint64_t trans_table_ptr;
};
@@ -496,7 +496,7 @@ static uint8_t alloc_domain_id(void)
/* domain id 0 is reserved, when CM = 1.
* so domain id allocation start from 1
*/
- for (i = 1U; i < 64U; i++) {
+ for (i = ACRN_DOM0_IOMMU_DOM_ID; i < 64U; i++) {
mask = (1UL << i);
if ((domain_bitmap & mask) == 0UL) {
domain_bitmap |= mask;
@@ -516,20 +516,6 @@ static void free_domain_id(uint16_t dom_id)
spinlock_release(&domain_lock);
}

-static struct iommu_domain *create_host_domain(void)
-{
- struct iommu_domain *domain = calloc(1U, sizeof(struct iommu_domain));
-
- ASSERT(domain != NULL, "");
- domain->is_host = true;
- domain->dom_id = alloc_domain_id();
- /* dmar uint need to support translation passthrough */
- domain->trans_table_ptr = 0UL;
- domain->addr_width = 48U;
-
- return domain;
-}
-
static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_uint)
{
uint32_t status;
@@ -867,21 +853,18 @@ static void dmar_disable(struct dmar_drhd_rt *dmar_uint)
dmar_fault_event_mask(dmar_uint);
}

-struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_table,
+/*
+ * @pre: UOS must make sure that translation_table is not 0 and SOS
+ * is the first one to create its iommu domain.
+ */
+struct iommu_domain *create_iommu_domain(uint64_t translation_table,
uint32_t addr_width)
{
struct iommu_domain *domain;
- uint16_t domain_id;
-
- /* TODO: check if a domain with the vm_id exists */
-
- if (translation_table == 0UL) {
- pr_err("translation table is NULL");
- return NULL;
- }
+ uint16_t dom_id;

- domain_id = alloc_domain_id();
- if (domain_id > max_domain_id) {
+ dom_id = alloc_domain_id();
+ if (dom_id > max_domain_id) {
pr_err("domain id is exhausted");
return NULL;
}
@@ -889,22 +872,18 @@ struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_ta
domain = calloc(1U, sizeof(struct iommu_domain));

ASSERT(domain != NULL, "");
- domain->is_host = false;
- domain->dom_id = domain_id;
- domain->vm_id = vm_id;
- domain->trans_table_ptr = translation_table;
+ domain->dom_id = dom_id;
+ domain->trans_table_ptr = (dom_id = ACRN_DOM0_IOMMU_DOM_ID) ?
+ 0UL : translation_table;
domain->addr_width = addr_width;
- domain->is_tt_ept = true;
-
+ domain->is_tt_ept = (dom_id == ACRN_DOM0_IOMMU_DOM_ID) ? false : true;

spinlock_obtain(&domain_lock);
list_add(&domain->list, &iommu_domains);
spinlock_release(&domain_lock);

- dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
- domain->dom_id,
- domain->vm_id,
- domain->trans_table_ptr);
+ dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: ept@0x%x",
+ domain->dom_id, domain->trans_table_ptr);

return domain;
}
@@ -1031,7 +1010,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
/* setup context entry for the devfun */
upper = 0UL;
lower = 0UL;
- if (domain->is_host) {
+ if (domain->dom_id == ACRN_DOM0_IOMMU_DOM_ID) {
if (iommu_ecap_pt(dmar_uint->ecap) != 0U) {
/* When the Translation-type (T) field indicates
* pass-through processing (10b), AW field must be
@@ -1142,12 +1121,13 @@ int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
uint8_t devfun)
{
if (domain == NULL) {
- return 1;
+ return -EINVAL;
}

/* TODO: check if the device assigned */
-
- remove_iommu_device(host_domain, 0U, bus, devfun);
+ if (domain->dom_id != ACRN_DOM0_IOMMU_DOM_ID) {
+ remove_iommu_device(host_domain, 0U, bus, devfun);
+ }
add_iommu_device(domain, 0U, bus, devfun);
return 0;
}
@@ -1156,13 +1136,14 @@ int unassign_iommu_device(struct iommu_domain *domain, uint8_t bus,
uint8_t devfun)
{
if (domain == NULL) {
- return 1;
+ return -EINVAL;
}

/* TODO: check if the device assigned */
-
remove_iommu_device(domain, 0U, bus, devfun);
- add_iommu_device(host_domain, 0U, bus, devfun);
+ if (domain->dom_id != ACRN_DOM0_IOMMU_DOM_ID) {
+ add_iommu_device(host_domain, 0U, bus, devfun);
+ }
return 0;
}

@@ -1291,7 +1272,7 @@ void init_iommu(void)

register_hrhd_units();

- host_domain = create_host_domain();
+ host_domain = create_iommu_domain(0UL, 48U);

for (bus = 0U; bus <= IOMMU_INIT_BUS_LIMIT; bus++) {
for (devfun = 0U; devfun <= 255U; devfun++) {
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index f8efb35..c1c2e76 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -687,7 +687,7 @@ int32_t hcall_assign_ptdev(struct vm *vm, uint16_t vmid, uint64_t param)
return -EPERM;
}
/* TODO: how to get vm's address width? */
- target_vm->iommu = create_iommu_domain(vmid,
+ target_vm->iommu = create_iommu_domain(
HVA2HPA(target_vm->arch_vm.nworld_eptp), 48U);
if (target_vm->iommu == NULL) {
return -ENODEV;
diff --git a/hypervisor/include/arch/x86/vtd.h b/hypervisor/include/arch/x86/vtd.h
index 981ba7a..75a736d 100644
--- a/hypervisor/include/arch/x86/vtd.h
+++ b/hypervisor/include/arch/x86/vtd.h
@@ -475,8 +475,8 @@ int assign_iommu_device(struct iommu_domain *domain,
int unassign_iommu_device(struct iommu_domain *domain,
uint8_t bus, uint8_t devfun);

-/* Create a iommu domain for a VM specified by vm_id */
-struct iommu_domain *create_iommu_domain(uint16_t vm_id,
+/* Create a iommu domain for a VM specified */
+struct iommu_domain *create_iommu_domain(
uint64_t translation_table, uint32_t addr_width);

/* Destroy the iommu domain */
--
2.7.4


[RFC PATCH 0/2] Revisit Dom 0 adding a device to an iommu domain

Li, Fei1
 

v1-v2:
allocate iommu domain for UOS when create_vm

v1:
We would let the SOS call the hcall_assign_ptdev hypercall to assign
a device to an iommu domain rather than adding it statically.

This series is just a preparation.

Li, Fei1 (2):
hv: vtd: use create_iommu_domain to allocate domain 0 iommu domain
hv: vtd: alloc iommu domain for UOS when create_vm

hypervisor/arch/x86/guest/vm.c | 6 +++
hypervisor/arch/x86/vtd.c | 78 +++++++++++++++------------------------
hypervisor/common/hypercall.c | 26 +++++--------
hypervisor/include/arch/x86/vtd.h | 4 +-
4 files changed, 47 insertions(+), 67 deletions(-)

--
2.7.4


Re: [PATCH] dm: virtio_rnd: use delayed blocking IO to make virtio_rnd works on Linux based SOS

Shuo A Liu
 

On Fri 10.Aug'18 at 11:29:43 +0800, Wang, Yu1 wrote:
On 18-08-08 02:41:49, Jie Deng wrote:
Randomness sourced from /dev/random which does not block
once it has been seeded at bootup and you will always get
something when you read from that file. This is true on
Freebsd but unfortunately things are not the same on Linux.
Most cases, you can't read anything from /dev/random especially
on current acrn platform which lacking random events.
virtio_rnd, inherited from FreeBSD, doesn't work anymore.

This patch makes virtio_rnd work on Linux based SOS. It uses
blocking IO to service the front-end random driver and delays the
read operation into a new thread to avoid blocking the main
notify thread.

Signed-off-by: Jie Deng <jie.deng@...>
---
devicemodel/hw/pci/virtio/virtio_rnd.c | 91 ++++++++++++++++++++--------------
1 file changed, 55 insertions(+), 36 deletions(-)

diff --git a/devicemodel/hw/pci/virtio/virtio_rnd.c b/devicemodel/hw/pci/virtio/virtio_rnd.c
index 618aa66..81f4a8a 100644
--- a/devicemodel/hw/pci/virtio/virtio_rnd.c
+++ b/devicemodel/hw/pci/virtio/virtio_rnd.c
@@ -27,8 +27,6 @@

/*
* virtio entropy device emulation.
- * Randomness is sourced from /dev/random which does not block
- * once it has been seeded at bootup.
*/

#include <fcntl.h>
@@ -57,6 +55,10 @@ struct virtio_rnd {
pthread_mutex_t mtx;
uint64_t cfg;
int fd;
+ int in_progress;
+ pthread_t rx_tid;
+ pthread_mutex_t rx_mtx;
+ pthread_cond_t rx_cond;
/* VBS-K variables */
struct {
enum VBS_K_STATUS status;
@@ -297,37 +299,52 @@ virtio_rnd_reset(void *base)
}
}

-static void
-virtio_rnd_notify(void *base, struct virtio_vq_info *vq)
+static void *
+virtio_rnd_get_entropy(void *param)
{
+ struct virtio_rnd *rnd = param;
+ struct virtio_vq_info *vq = &rnd->vq;
struct iovec iov;
- struct virtio_rnd *rnd;
- int len;
uint16_t idx;
+ int len, error;

- rnd = base;
-
- if (rnd->fd < 0) {
- vq_endchains(vq, 0);
- return;
- }
+ for (;;) {
+ pthread_mutex_lock(&rnd->rx_mtx);
+ rnd->in_progress = 0;
Is the in_process is necessary? We call pthread_cond_signal when thread
already awake, what is the impact? I see net does the same logic, but
heci not. Let's keep it first.

@Shuo, please help evaluate HECI in future.
I think the _in_progress is mainly for virtio_net_txwait and
virtio_net_txwait in virtio-net. We have no such requirement in HECI.


Others looks good to me.

Acked-by: Yu Wang <yu1.wang@...>


+ error = pthread_cond_wait(&rnd->rx_cond, &rnd->rx_mtx);
+ assert(error == 0);

- while (vq_has_descs(vq)) {
- vq_getchain(vq, &idx, &iov, 1, NULL);
+ rnd->in_progress = 1;
+ pthread_mutex_unlock(&rnd->rx_mtx);

- len = read(rnd->fd, iov.iov_base, iov.iov_len);
+ while(vq_has_descs(vq)) {
+ vq_getchain(vq, &idx, &iov, 1, NULL);

- DPRINTF(("%s: %d\r\n", __func__, len));
+ len = read(rnd->fd, iov.iov_base, iov.iov_len);
+ assert(len > 0);

- /* Catastrophe if unable to read from /dev/random */
- assert(len > 0);
+ /* release this chain and handle more */
+ vq_relchain(vq, idx, len);
+ }

- /*
- * Release this chain and handle more
- */
- vq_relchain(vq, idx, len);
+ vq_endchains(vq, 1);
}
- vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
+}
+
+static void
+virtio_rnd_notify(void *base, struct virtio_vq_info *vq)
+{
+ struct virtio_rnd *rnd = base;
+
+ /* Any ring entries to process */
+ if (!vq_has_descs(vq))
+ return;
+
+ /* Signal the tx thread for processing */
+ pthread_mutex_lock(&rnd->rx_mtx);
+ if (rnd->in_progress == 0)
+ pthread_cond_signal(&rnd->rx_cond);
+ pthread_mutex_unlock(&rnd->rx_mtx);
}

static int
@@ -335,13 +352,12 @@ virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_rnd *rnd = NULL;
int fd;
- int len;
- uint8_t v;
pthread_mutexattr_t attr;
int rc;
char *opt;
char *vbs_k_opt = NULL;
enum VBS_K_STATUS kstat = VIRTIO_DEV_INITIAL;
+ char tname[MAXCOMLEN + 1];

while ((opt = strsep(&opts, ",")) != NULL) {
/* vbs_k_opt should be kernel=on */
@@ -357,19 +373,9 @@ virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
/*
* Should always be able to open /dev/random.
*/
- fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
-
+ fd = open("/dev/random", O_RDONLY);
assert(fd >= 0);

- /*
- * Check that device is seeded and non-blocking.
- */
- len = read(fd, &v, sizeof(v));
- if (len <= 0) {
- WPRINTF(("virtio_rnd: /dev/random not ready, read(): %d", len));
- goto fail;
- }
-
rnd = calloc(1, sizeof(struct virtio_rnd));
if (!rnd) {
WPRINTF(("virtio_rnd: calloc returns NULL\n"));
@@ -436,6 +442,15 @@ virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)

virtio_set_io_bar(&rnd->base, 0);

+ rnd->in_progress = 0;
+ pthread_mutex_init(&rnd->rx_mtx, NULL);
+ pthread_cond_init(&rnd->rx_cond, NULL);
+ pthread_create(&rnd->rx_tid, NULL, virtio_rnd_get_entropy,
+ (void *)rnd);
+ snprintf(tname, sizeof(tname), "vtrnd-%d:%d tx", dev->slot,
+ dev->func);
+ pthread_setname_np(rnd->rx_tid, tname);
+
return 0;

fail:
@@ -454,6 +469,7 @@ static void
virtio_rnd_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_rnd *rnd;
+ void *jval;

rnd = dev->arg;
if (rnd == NULL) {
@@ -461,6 +477,9 @@ virtio_rnd_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
return;
}

+ pthread_cancel(rnd->rx_tid);
+ pthread_join(rnd->rx_tid, &jval);
+
if (rnd->vbs_k.status == VIRTIO_DEV_STARTED) {
DPRINTF(("%s: deinit virtio_rnd_k!\n", __func__));
virtio_rnd_kernel_stop(rnd);
--
2.7.4


Re: [PATCH] HV: Add the emulation of CPUID with 0x16 leaf

Eddie Dong
 

Thanks for the review.
Currently, zero is returned for an unsupported CPUID leaf when
the supported level is less than 0x16.
In theory we should emulate the unsupported CPUID leaves before 0x16. But if
the supported level is 0x12, it is quite complex to emulate the
corresponding CPUID leaves (0x13, 0x14). For example, the 0x14 CPUID has
sub-leaves.
So zero is returned for the unsupported CPUID leaves before 0x16.
We don't want to support legacy hardware, so we don't need to solve the
theoretical issue. Can you check the platform to see how many CPUID
leaf it supports?
We stop execution for the platform without minimal HW capability.
OK. Currently the CPU on APL can support up to 0x15 CPUID.
If it is required that it runs on the CPU of APL+, the code logic can be
simplified.
Yes :)


Re: [PATCH] hv: treewide: fix 'No default case in switch statement'

Eddie Dong
 

LGTM

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Shiqing Gao
Sent: Friday, August 10, 2018 10:48 AM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH] hv: treewide: fix 'No default case in switch
statement'

MISRAC requires that a switch statement shall contain a default clause.

This patch adds the default clause and some comments for the ones that
violated the rule.

Signed-off-by: Shiqing Gao <shiqing.gao@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 13 +++++++++++++
hypervisor/arch/x86/guest/vlapic.c | 6 ++++++
hypervisor/arch/x86/guest/vpic.c | 12 ++++++++++++
3 files changed, 31 insertions(+)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c
b/hypervisor/arch/x86/guest/instr_emul.c
index 1098208..cf2438a 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -1893,6 +1893,19 @@ static int decode_sib(struct instr_emul_vie *vie)
case VIE_MOD_INDIRECT_DISP32:
vie->disp_bytes = 4U;
break;
+ default:
+ /*
+ * All possible values of 'vie->mod':
+ * 1. VIE_MOD_DIRECT
+ * has been handled at the start of this function
+ * 2. VIE_MOD_INDIRECT_DISP8
+ * has been handled in prior case clauses
+ * 3. VIE_MOD_INDIRECT_DISP32
+ * has been handled in prior case clauses
+ * 4. VIE_MOD_INDIRECT
+ * will be handled later after this switch statement
+ */
+ break;
}

if (vie->mod == VIE_MOD_INDIRECT &&
diff --git a/hypervisor/arch/x86/guest/vlapic.c
b/hypervisor/arch/x86/guest/vlapic.c
index 194fd5b..35be79c 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1127,6 +1127,12 @@ vlapic_icrlo_write_handler(struct acrn_vlapic
*vlapic)
dmask = vm_active_cpus(vlapic->vm);
bitmap_clear_lock(vlapic->vcpu->vcpu_id, &dmask);
break;
+ default:
+ /*
+ * All possible values of 'shorthand' has been handled in prior
+ * case clauses.
+ */
+ break;
}

while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) { diff --git
a/hypervisor/arch/x86/guest/vpic.c b/hypervisor/arch/x86/guest/vpic.c
index c0a7411..10534f2 100644
--- a/hypervisor/arch/x86/guest/vpic.c
+++ b/hypervisor/arch/x86/guest/vpic.c
@@ -579,6 +579,18 @@ int vpic_set_irq_trigger(struct vm *vm, uint32_t irq,
enum vpic_trigger trigger)
case 8U:
case 13U:
return -EINVAL;
+ default:
+ /*
+ * The IRQs handled earlier are the ones that could only
+ * support edge trigger, while the input parameter
+ * 'trigger' is set as LEVEL_TRIGGER. So, an error code
+ * (-EINVAL) shall be returned due to the invalid
+ * operation.
+ *
+ * All the other IRQs will be handled properly after
+ * this switch statement.
+ */
+ break;
}
}

--
1.9.1



Re: [PATCH] HV: Add the emulation of CPUID with 0x16 leaf

Zhao, Yakui
 

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Eddie Dong
Sent: Friday, August 10, 2018 10:45 AM
To: acrn-dev@...; Xu, Anthony <anthony.xu@...>
Subject: Re: [acrn-dev] [PATCH] HV: Add the emulation of CPUID with 0x16
leaft



-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Zhao, Yakui
Sent: Friday, August 10, 2018 8:32 AM
To: Xu, Anthony <anthony.xu@...>; acrn-dev@...
Subject: Re: [acrn-dev] [PATCH] HV: Add the emulation of CPUID with
0x16 leaft



-----Original Message-----
From: Xu, Anthony
Sent: Friday, August 10, 2018 6:18 AM
To: acrn-dev@...
Cc: Zhao, Yakui <yakui.zhao@...>
Subject: RE: [acrn-dev] [PATCH] HV: Add the emulation of CPUID with
0x16 leaft

Yakui,

If you want to emulate CPUID 0x16, you have to emulate all CPUID
before
0x16 if they don't exist in Host.
Thanks for the review.
Currently, zero is returned for an unsupported CPUID leaf when the
supported level is less than 0x16.
In theory we should emulate the unsupported CPUID leaves before 0x16. But if
the supported level is 0x12, it is quite complex to emulate the corresponding
CPUID leaves (0x13, 0x14). For example, the 0x14 CPUID has sub-leaves.
So zero is returned for the unsupported CPUID leaves before 0x16.
We don't want to support legacy hardware, so we don't need to solve the
theoretical issue. Can you check the platform to see how many CPUID leaf it
supports?
We stop execution for the platform without minimal HW capability.
OK. Currently the CPU on APL can support up to 0x15 CPUID.
If it is required that it runs on the CPU of APL+, the code logic can be simplified.




See the embedded comments.

And, ACRN doesn't check host CPUID 0x16 to get TSC frequency, can you
please add the logic in a separate patch.
The TSC frequency uses the 0x15. This is already added.
Of course the current code has one problem. If the ecx for 0x15 CPUID
is zero, it should use one derived crystal frequency to calculate the TSC.



Re: [PATCH] dm: virtio_rnd: use delayed blocking IO to make virtio_rnd works on Linux based SOS

Yu Wang
 

On 18-08-08 02:41:49, Jie Deng wrote:
Randomness is sourced from /dev/random, which does not block
once it has been seeded at bootup, so you will always get
something when you read from that file. This is true on
FreeBSD, but unfortunately things are not the same on Linux.
In most cases you can't read anything from /dev/random, especially
on the current ACRN platform, which lacks random events.
The virtio_rnd inherited from FreeBSD doesn't work anymore.

This patch makes virtio_rnd work on a Linux based SOS. It uses
blocking IO to service the front-end random driver and delays the
read operation into a new thread to avoid blocking the main
notify thread.

Signed-off-by: Jie Deng <jie.deng@...>
---
devicemodel/hw/pci/virtio/virtio_rnd.c | 91 ++++++++++++++++++++--------------
1 file changed, 55 insertions(+), 36 deletions(-)

diff --git a/devicemodel/hw/pci/virtio/virtio_rnd.c b/devicemodel/hw/pci/virtio/virtio_rnd.c
index 618aa66..81f4a8a 100644
--- a/devicemodel/hw/pci/virtio/virtio_rnd.c
+++ b/devicemodel/hw/pci/virtio/virtio_rnd.c
@@ -27,8 +27,6 @@

/*
* virtio entropy device emulation.
- * Randomness is sourced from /dev/random which does not block
- * once it has been seeded at bootup.
*/

#include <fcntl.h>
@@ -57,6 +55,10 @@ struct virtio_rnd {
pthread_mutex_t mtx;
uint64_t cfg;
int fd;
+ int in_progress;
+ pthread_t rx_tid;
+ pthread_mutex_t rx_mtx;
+ pthread_cond_t rx_cond;
/* VBS-K variables */
struct {
enum VBS_K_STATUS status;
@@ -297,37 +299,52 @@ virtio_rnd_reset(void *base)
}
}

-static void
-virtio_rnd_notify(void *base, struct virtio_vq_info *vq)
+static void *
+virtio_rnd_get_entropy(void *param)
{
+ struct virtio_rnd *rnd = param;
+ struct virtio_vq_info *vq = &rnd->vq;
struct iovec iov;
- struct virtio_rnd *rnd;
- int len;
uint16_t idx;
+ int len, error;

- rnd = base;
-
- if (rnd->fd < 0) {
- vq_endchains(vq, 0);
- return;
- }
+ for (;;) {
+ pthread_mutex_lock(&rnd->rx_mtx);
+ rnd->in_progress = 0;
Is the in_progress flag really necessary? We call pthread_cond_signal even when the
thread is already awake — what is the impact? I see virtio-net uses the same logic, but
HECI does not. Let's keep it for now.

@Shuo, please help evaluate HECI in future.

Others looks good to me.

Acked-by: Yu Wang <yu1.wang@...>


+ error = pthread_cond_wait(&rnd->rx_cond, &rnd->rx_mtx);
+ assert(error == 0);

- while (vq_has_descs(vq)) {
- vq_getchain(vq, &idx, &iov, 1, NULL);
+ rnd->in_progress = 1;
+ pthread_mutex_unlock(&rnd->rx_mtx);

- len = read(rnd->fd, iov.iov_base, iov.iov_len);
+ while(vq_has_descs(vq)) {
+ vq_getchain(vq, &idx, &iov, 1, NULL);

- DPRINTF(("%s: %d\r\n", __func__, len));
+ len = read(rnd->fd, iov.iov_base, iov.iov_len);
+ assert(len > 0);

- /* Catastrophe if unable to read from /dev/random */
- assert(len > 0);
+ /* release this chain and handle more */
+ vq_relchain(vq, idx, len);
+ }

- /*
- * Release this chain and handle more
- */
- vq_relchain(vq, idx, len);
+ vq_endchains(vq, 1);
}
- vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
+}
+
+static void
+virtio_rnd_notify(void *base, struct virtio_vq_info *vq)
+{
+ struct virtio_rnd *rnd = base;
+
+ /* Any ring entries to process */
+ if (!vq_has_descs(vq))
+ return;
+
+ /* Signal the tx thread for processing */
+ pthread_mutex_lock(&rnd->rx_mtx);
+ if (rnd->in_progress == 0)
+ pthread_cond_signal(&rnd->rx_cond);
+ pthread_mutex_unlock(&rnd->rx_mtx);
}

static int
@@ -335,13 +352,12 @@ virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_rnd *rnd = NULL;
int fd;
- int len;
- uint8_t v;
pthread_mutexattr_t attr;
int rc;
char *opt;
char *vbs_k_opt = NULL;
enum VBS_K_STATUS kstat = VIRTIO_DEV_INITIAL;
+ char tname[MAXCOMLEN + 1];

while ((opt = strsep(&opts, ",")) != NULL) {
/* vbs_k_opt should be kernel=on */
@@ -357,19 +373,9 @@ virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
/*
* Should always be able to open /dev/random.
*/
- fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
-
+ fd = open("/dev/random", O_RDONLY);
assert(fd >= 0);

- /*
- * Check that device is seeded and non-blocking.
- */
- len = read(fd, &v, sizeof(v));
- if (len <= 0) {
- WPRINTF(("virtio_rnd: /dev/random not ready, read(): %d", len));
- goto fail;
- }
-
rnd = calloc(1, sizeof(struct virtio_rnd));
if (!rnd) {
WPRINTF(("virtio_rnd: calloc returns NULL\n"));
@@ -436,6 +442,15 @@ virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)

virtio_set_io_bar(&rnd->base, 0);

+ rnd->in_progress = 0;
+ pthread_mutex_init(&rnd->rx_mtx, NULL);
+ pthread_cond_init(&rnd->rx_cond, NULL);
+ pthread_create(&rnd->rx_tid, NULL, virtio_rnd_get_entropy,
+ (void *)rnd);
+ snprintf(tname, sizeof(tname), "vtrnd-%d:%d tx", dev->slot,
+ dev->func);
+ pthread_setname_np(rnd->rx_tid, tname);
+
return 0;

fail:
@@ -454,6 +469,7 @@ static void
virtio_rnd_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_rnd *rnd;
+ void *jval;

rnd = dev->arg;
if (rnd == NULL) {
@@ -461,6 +477,9 @@ virtio_rnd_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
return;
}

+ pthread_cancel(rnd->rx_tid);
+ pthread_join(rnd->rx_tid, &jval);
+
if (rnd->vbs_k.status == VIRTIO_DEV_STARTED) {
DPRINTF(("%s: deinit virtio_rnd_k!\n", __func__));
virtio_rnd_kernel_stop(rnd);
--
2.7.4


[PATCH v3] HV: fix "missing for discarded return value" for vm related api

Victor Sun
 

- add handler if prepare_vm0() failed;

- remove assert in start_vm() and return -1 if failed;

changelog:
v2 -> v3: replace panic with pr_fatal and return directly
if prepare_vm0() failed;
v1 -> v2: panic if prepare_vm0() failed instead of reboot system;

Signed-off-by: Victor Sun <victor.sun@...>
---
hypervisor/arch/x86/cpu.c | 9 ++++++---
hypervisor/arch/x86/guest/vm.c | 7 ++++---
hypervisor/common/hypercall.c | 3 +--
3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 02cbaec..6944592 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -563,9 +563,12 @@ static void bsp_boot_post(void)

exec_vmxon_instr(BOOT_CPU_ID);

- prepare_vm(BOOT_CPU_ID);
-
- default_idle();
+ if (prepare_vm0() == 0) {
+ default_idle();
+ } else {
+ pr_fatal("Prepare VM0 failed!");
+ return;
+ }

/* Control should not come here */
cpu_dead(BOOT_CPU_ID);
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 09ceadb..09d9fc0 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -331,7 +331,9 @@ int start_vm(struct vm *vm)

/* Only start BSP (vid = 0) and let BSP start other APs */
vcpu = vcpu_from_vid(vm, 0U);
- ASSERT(vcpu != NULL, "vm%d, vcpu0", vm->vm_id);
+ if (vcpu == NULL) {
+ return -1;
+ }
schedule_vcpu(vcpu);

return 0;
@@ -358,8 +360,7 @@ int reset_vm(struct vm *vm)

vioapic_reset(vm->arch_vm.virt_ioapic);

- start_vm(vm);
- return 0;
+ return (start_vm(vm));
}

/**
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index f8efb35..7489b99 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -280,8 +280,7 @@ int32_t hcall_reset_vm(uint16_t vmid)
if ((target_vm == NULL) || is_vm0(target_vm))
return -1;

- reset_vm(target_vm);
- return 0;
+ return (reset_vm(target_vm));
}

int32_t hcall_assert_irqline(struct vm *vm, uint16_t vmid, uint64_t param)
--
2.7.4


[PATCH] hv: treewide: fix 'No default case in switch statement'

Shiqing Gao
 

MISRAC requires that a switch statement shall contain a default clause.

This patch adds the default clause and some comments for the ones
that violated the rule.

Signed-off-by: Shiqing Gao <shiqing.gao@...>
---
hypervisor/arch/x86/guest/instr_emul.c | 13 +++++++++++++
hypervisor/arch/x86/guest/vlapic.c | 6 ++++++
hypervisor/arch/x86/guest/vpic.c | 12 ++++++++++++
3 files changed, 31 insertions(+)

diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index 1098208..cf2438a 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -1893,6 +1893,19 @@ static int decode_sib(struct instr_emul_vie *vie)
case VIE_MOD_INDIRECT_DISP32:
vie->disp_bytes = 4U;
break;
+ default:
+ /*
+ * All possible values of 'vie->mod':
+ * 1. VIE_MOD_DIRECT
+ * has been handled at the start of this function
+ * 2. VIE_MOD_INDIRECT_DISP8
+ * has been handled in prior case clauses
+ * 3. VIE_MOD_INDIRECT_DISP32
+ * has been handled in prior case clauses
+ * 4. VIE_MOD_INDIRECT
+ * will be handled later after this switch statement
+ */
+ break;
}

if (vie->mod == VIE_MOD_INDIRECT &&
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 194fd5b..35be79c 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1127,6 +1127,12 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
dmask = vm_active_cpus(vlapic->vm);
bitmap_clear_lock(vlapic->vcpu->vcpu_id, &dmask);
break;
+ default:
+ /*
+ * All possible values of 'shorthand' has been handled in prior
+ * case clauses.
+ */
+ break;
}

while ((vcpu_id = ffs64(dmask)) != INVALID_BIT_INDEX) {
diff --git a/hypervisor/arch/x86/guest/vpic.c b/hypervisor/arch/x86/guest/vpic.c
index c0a7411..10534f2 100644
--- a/hypervisor/arch/x86/guest/vpic.c
+++ b/hypervisor/arch/x86/guest/vpic.c
@@ -579,6 +579,18 @@ int vpic_set_irq_trigger(struct vm *vm, uint32_t irq, enum vpic_trigger trigger)
case 8U:
case 13U:
return -EINVAL;
+ default:
+ /*
+ * The IRQs handled earlier are the ones that could only
+ * support edge trigger, while the input parameter
+ * 'trigger' is set as LEVEL_TRIGGER. So, an error code
+ * (-EINVAL) shall be returned due to the invalid
+ * operation.
+ *
+ * All the other IRQs will be handled properly after
+ * this switch statement.
+ */
+ break;
}
}

--
1.9.1


Re: [PATCH] DM: allow IOC overwrite the power state request from UOS

shuo.liu@...
 

I think one of the key reasons is to make sure the UOS can treat this in a fully transparent way; if not, this change will be tricky.
Or maybe it is modifiable in Android, but for other OSes the situation will be different.
Your comments?

-----Original Message-----
From: Wang, Yu1
Sent: Thursday, August 9, 2018 7:58 AM
To: acrn-dev@...; Liu, Yuan1 <yuan1.liu@...>; Yao, Yipeng <yipeng.yao@...>; Liu, Shuo <shuo.liu@...>
Cc: Yin, Fengwei <fengwei.yin@...>
Subject: RE: [acrn-dev] [PATCH] DM: allow IOC overwrite the power state request from UOS

Loop Yipeng, Shuo to help answer the question..


Hi Yuan, Fengwei,

On 18-08-07 20:26:29, Yuan Liu wrote:
From: Yin Fengwei <fengwei.yin@...>

On the platform with IOC, IOC could overwrite the power state change
request from UOS. For example, UOS request to enter S5.
But IOC could make it actually S3.
From my perspective, this is not what the IOC wants to do; this is the
request from the UOS. The UOS is clear about which IOC lifecycle messages
are sent: if it wants to enter S3 it should send self-refresh, otherwise heartbeat inactive.

I need Yipeng to help clarify the detail reason why send self-refresh
but with S5
PM1 setting.


Signed-off-by: Yin Fengwei <fengwei.yin@...>
Signed-off-by: Yuan Liu <yuan1.liu@...>
---
devicemodel/arch/x86/pm.c | 25 +++++++++++++++++++------
devicemodel/hw/platform/ioc.c | 17 +++++++++++++++--
devicemodel/include/ioc.h | 1 +
3 files changed, 35 insertions(+), 8 deletions(-)

diff --git a/devicemodel/arch/x86/pm.c b/devicemodel/arch/x86/pm.c
index 37d3b87..56695ed 100644
--- a/devicemodel/arch/x86/pm.c
+++ b/devicemodel/arch/x86/pm.c
@@ -41,6 +41,7 @@
#include "mevent.h"
#include "irq.h"
#include "lpc.h"
+#include "ioc.h"

static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER; static
struct mevent *power_button; @@ -261,15 +262,27 @@
pm1_control_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
* says that '5' should be stored in SLP_TYP for S5.
*/
if (*eax & PM1_SLP_EN) {
- if ((pm1_control & PM1_SLP_TYP) >> 10 == 5) {
- error = vm_suspend(ctx,
VM_SUSPEND_POWEROFF);
- assert(error == 0 || errno == EALREADY);
+ uint32_t power_state = 0;
+
+ if (ctx->ioc_dev) {
+ power_state =
+ get_ioc_overwritten_power_state();
I suggest that to not involve any IOC code in common pm dm. The IOC is
platform specific hardware module, maybe other platform also has
similar module cause overwritten power state case.

How about create one API likes register_overwrite_power_state_cb which
can be registered by IOC DM. And change the
get_ioc_overwritten_power_state to get_overwritten_power_state, the
get_overwritten_power_state can invoke the cb registered by AIOC to return the power state.

@Fengwei, your suggestion?

}

- if ((pm1_control & PM1_SLP_TYP) >> 10 == 3) {
- error = vm_suspend(ctx,
VM_SUSPEND_SUSPEND);
- assert(error == 0 || errno == EALREADY);
+ /*
+ * If IOC doesn't exist on the platform or the
+ * power_state is invalid, then the power_state
+ * setting is based on pm1_control.
+ */
+ if (!power_state) {
+ if ((pm1_control & PM1_SLP_TYP) >> 10 == 5)
+ power_state =
VM_SUSPEND_POWEROFF;
+ else if ((pm1_control & PM1_SLP_TYP) >> 10 ==
3)
+ power_state =
VM_SUSPEND_SUSPEND;
}
+
+ error = vm_suspend(ctx, power_state);
+ assert(error == 0 || errno == EALREADY);
}
}
return 0;
diff --git a/devicemodel/hw/platform/ioc.c
b/devicemodel/hw/platform/ioc.c index dfdc3e7..3877c50 100644
--- a/devicemodel/hw/platform/ioc.c
+++ b/devicemodel/hw/platform/ioc.c
@@ -638,6 +638,17 @@ static struct wlist_group
wlist_rx_group_table[] = { static struct wlist_group
wlist_tx_group_table[] = { };

+static uint32_t ioc_overwritten_power_state; uint32_t
+get_ioc_overwritten_power_state(void)
+{
+ uint32_t ret = 0;
+
+ ret = ioc_overwritten_power_state;
+ ioc_overwritten_power_state = 0;
+
+ return ret;
+}
+
/*
* Read data from the native CBC cdevs and virtual UART based on
* IOC channel ID.
@@ -990,8 +1001,9 @@ process_ram_refresh_event(struct ioc_dev *ioc)
rc = send_tx_request(ioc, CBC_REQ_T_UOS_INACTIVE);

/*
- * TODO: set suspend to PM DM
+ * set suspend to PM DM
*/
+ ioc_overwritten_power_state = VM_SUSPEND_SUSPEND;

return rc;
}
@@ -1014,8 +1026,9 @@ process_hb_inactive_event(struct ioc_dev *ioc)
rc = send_tx_request(ioc, CBC_REQ_T_UOS_INACTIVE);

/*
- * TODO: set shutdown to PM DM
+ * set shutdown to PM DM
*/
+ ioc_overwritten_power_state = VM_SUSPEND_POWEROFF;

return rc;
}
diff --git a/devicemodel/include/ioc.h b/devicemodel/include/ioc.h
index 26a8120..cb52857 100644
--- a/devicemodel/include/ioc.h
+++ b/devicemodel/include/ioc.h
@@ -805,6 +805,7 @@ struct vmctx;
/* IOC mediator common ops */
int ioc_init(struct vmctx *ctx);
void ioc_deinit(struct vmctx *ctx);
+uint32_t get_ioc_overwritten_power_state(void);

/* Build a cbc_request and send it to CBC protocol stack */ void
ioc_build_request(struct ioc_dev *ioc, int32_t link_len, int32_t
srv_len);
--
2.7.4


Re: [PATCH] HV: Add the emulation of CPUID with 0x16 leaf

Eddie Dong
 

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Zhao, Yakui
Sent: Friday, August 10, 2018 8:32 AM
To: Xu, Anthony <anthony.xu@...>; acrn-dev@...
Subject: Re: [acrn-dev] [PATCH] HV: Add the emulation of CPUID with 0x16
leaft



-----Original Message-----
From: Xu, Anthony
Sent: Friday, August 10, 2018 6:18 AM
To: acrn-dev@...
Cc: Zhao, Yakui <yakui.zhao@...>
Subject: RE: [acrn-dev] [PATCH] HV: Add the emulation of CPUID with
0x16 leaft

Yakui,

If you want to emulate CPUID 0x16, you have to emulate all CPUID before
0x16 if they don't exist in Host.
Thanks for the review.
Currently the zero is returned for the unsupported CPUID leaf when the
supported level is less than 0x16.
In theory we should emulate the unsupported CPUID before 0x16. But if the
supported level is 0x12, it is quite complex to emulate the corresponding
CPUID(0x13, 0x14). For example: the 0x14 CPUID has the sub-leaf.
So the zero is returned for the unsupported CPUID before 0x16.
We don't want to support legacy hardware, so we don't need to solve the theoretical issue. Can you check the platform to see how many CPUID leaf it supports?
We stop execution for the platform without minimal HW capability.



See the embedded comments.

And, ACRN doesn't check host CPUID 0x16 to get TSC frequency, can you
please add the logic in a separate patch.
The TSC frequency uses the 0x15. This is already added.
Of course the current code has one problem. If the ecx for 0x15 CPUID is
zero, it should use one derived crystal frequency to calculate the TSC.


[PATCH] tools: vm_resume() requires wakeup reason

Tao, Yuhong
 

The DM needs to know the wakeup reason when resuming the VM, so
vm_resume(char *name) is updated to vm_resume(char *name, int reason)
in acrn_vm_ops.c.

Signed-off-by: Tao Yuhong <yuhong.tao@...>
---
tools/acrn-manager/acrn_vm_ops.c | 4 +++-
tools/acrn-manager/acrnctl.c | 33 ++++++++++++++++++---------------
tools/acrn-manager/acrnctl.h | 2 +-
tools/acrn-manager/acrnd.c | 9 +++++++--
4 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/tools/acrn-manager/acrn_vm_ops.c b/tools/acrn-manager/acrn_vm_ops.c
index cd8ad29..2f08e5c 100644
--- a/tools/acrn-manager/acrn_vm_ops.c
+++ b/tools/acrn-manager/acrn_vm_ops.c
@@ -392,7 +392,7 @@ int suspend_vm(char *vmname)
return ack.data.err;
}

-int resume_vm(char *vmname)
+int resume_vm(char *vmname, int reason)
{
struct mngr_msg req;
struct mngr_msg ack;
@@ -401,6 +401,8 @@ int resume_vm(char *vmname)
req.msgid = DM_RESUME;
req.timestamp = time(NULL);

+ req.data.reason = reason;
+
send_msg(vmname, &req, &ack);

if (ack.data.err) {
diff --git a/tools/acrn-manager/acrnctl.c b/tools/acrn-manager/acrnctl.c
index 84465a2..001eae7 100644
--- a/tools/acrn-manager/acrnctl.c
+++ b/tools/acrn-manager/acrnctl.c
@@ -466,23 +466,26 @@ static int acrnctl_do_suspend(int argc, char *argv[])
static int acrnctl_do_resume(int argc, char *argv[])
{
struct vmmngr_struct *s;
- int i;
+ int reason = 0;

- for (i = 1; i < argc; i++) {
- s = vmmngr_find(argv[i]);
- if (!s) {
- printf("Can't find vm %s\n", argv[i]);
- continue;
- }
+ s = vmmngr_find(argv[1]);
+ if (!s) {
+ printf("Can't find vm %s\n", argv[1]);
+ return -1;
+ }

- switch (s->state) {
- case VM_PAUSED:
- resume_vm(argv[i]);
- break;
- default:
- printf("%s current state %s, can't resume\n",
- argv[i], state_str[s->state]);
- }
+ if (argc == 3) {
+ reason = atoi(argv[2]);
+ reason = (reason > 0) ? reason: 0;
+ }
+
+ switch (s->state) {
+ case VM_PAUSED:
+ resume_vm(argv[1], reason);
+ break;
+ default:
+ printf("%s current state %s, can't resume\n",
+ argv[1], state_str[s->state]);
}

return 0;
diff --git a/tools/acrn-manager/acrnctl.h b/tools/acrn-manager/acrnctl.h
index 721a637..7a5a9d7 100644
--- a/tools/acrn-manager/acrnctl.h
+++ b/tools/acrn-manager/acrnctl.h
@@ -56,6 +56,6 @@ int start_vm(char *vmname);
int pause_vm(char *vmname);
int continue_vm(char *vmname);
int suspend_vm(char *vmname);
-int resume_vm(char *vmname);
+int resume_vm(char *vmname, int reason);

#endif /* _ACRNCTL_H_ */
diff --git a/tools/acrn-manager/acrnd.c b/tools/acrn-manager/acrnd.c
index 65783fd..94ed4ef 100644
--- a/tools/acrn-manager/acrnd.c
+++ b/tools/acrn-manager/acrnd.c
@@ -105,11 +105,13 @@ static void try_do_works(void)
}

static void acrnd_run_vm(char *name);
+unsigned get_sos_wakeup_reason(void);

/* Time to run/resume VM */
void acrnd_vm_timer_func(struct work_arg *arg)
{
struct vmmngr_struct *vm;
+ int reason;

if (!arg) {
pdebug();
@@ -128,7 +130,8 @@ void acrnd_vm_timer_func(struct work_arg *arg)
acrnd_run_vm(arg->name);
break;
case VM_PAUSED:
- resume_vm(arg->name);
+ reason = get_sos_wakeup_reason();
+ resume_vm(arg->name, reason);
break;
default:
pdebug();
@@ -235,6 +238,7 @@ static int active_all_vms(void)
struct vmmngr_struct *vm;
int ret = 0;
pid_t pid;
+ int reason;

vmmngr_update();

@@ -246,7 +250,8 @@ static int active_all_vms(void)
acrnd_run_vm(vm->name);
break;
case VM_PAUSED:
- ret += resume_vm(vm->name);
+ reason = get_sos_wakeup_reason();
+ ret += resume_vm(vm->name, reason);
break;
default:
pdebug();
--
2.7.4