
[PATCH] dm: fix possible null pointer dereference in pci_gvt_deinit

Yonghua Huang
 

pci_gvt_deinit() will dereference a null pointer if 'gvt' is null.

Signed-off-by: Yonghua Huang <yonghua.huang@...>
---
devicemodel/hw/pci/gvt.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/devicemodel/hw/pci/gvt.c b/devicemodel/hw/pci/gvt.c
index c34b5e19..75cff57d 100644
--- a/devicemodel/hw/pci/gvt.c
+++ b/devicemodel/hw/pci/gvt.c
@@ -262,18 +262,20 @@ pci_gvt_deinit(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
int ret;
struct pci_gvt *gvt = pi->arg;

- if (gvt && gvt->host_config) {
- /* Free the allocated host_config */
- free(gvt->host_config);
- gvt->host_config = NULL;
+ if (gvt != NULL) {
+ if (gvt->host_config) {
+ /* Free the allocated host_config */
+ free(gvt->host_config);
+ gvt->host_config = NULL;
+ }
+
+ ret = gvt_destroy_instance(gvt);
+ if (ret)
+ WPRINTF(("GVT: %s: failed: errno=%d\n", __func__, ret));
+
+ free(gvt);
+ pi->arg = NULL;
}
-
- ret = gvt_destroy_instance(gvt);
- if (ret)
- WPRINTF(("GVT: %s: failed: errno=%d\n", __func__, ret));
-
- free(gvt);
- pi->arg = NULL;
}

struct pci_vdev_ops pci_ops_gvt = {
--
2.17.1

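For context, the hazard here is that pi->arg may still be NULL when deinit runs (for example, if init failed early), in which case the old code would hand that NULL to gvt_destroy_instance(). Below is a minimal, self-contained sketch of the guarded cleanup pattern the patch applies; the struct and the destroy stub are placeholders for illustration, not the real GVT code.

```c
#include <stdio.h>
#include <stdlib.h>

/* Placeholder device state; the real struct pci_gvt has more fields. */
struct fake_gvt {
	void *host_config;
};

/* Stand-in for gvt_destroy_instance(); the real one dereferences its argument. */
static int fake_destroy_instance(struct fake_gvt *gvt)
{
	return (gvt->host_config == NULL) ? 0 : -1;
}

/* Guarded teardown: nothing is touched when the device was never set up. */
static void fake_deinit(struct fake_gvt **arg)
{
	struct fake_gvt *gvt = *arg;

	if (gvt != NULL) {
		free(gvt->host_config);
		gvt->host_config = NULL;

		if (fake_destroy_instance(gvt) != 0)
			fprintf(stderr, "destroy failed\n");

		free(gvt);
		*arg = NULL;   /* avoid a dangling pointer / double free later */
	}
}

int main(void)
{
	struct fake_gvt *arg = NULL;   /* simulates init having failed */

	fake_deinit(&arg);             /* safe: the NULL guard skips everything */
	printf("deinit completed without dereferencing NULL\n");
	return 0;
}
```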

Re: [PATCH v2 1/1] HV: fix the issue of ACRN_REQUEST_EXCP flag is not cleared.

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>
But MISRA C doesn't allow multiple returns in one function.
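
For reference, a single-return variant of the new check in vcpu_inject_exception() that would satisfy the MISRA C rule mentioned above. This is only a sketch reusing the names from the patch (it reshapes the patched function body, not a standalone program):

```c
static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
{
	/* Only proceed when the pending-request bit was actually set;
	 * wrapping the body in the if keeps a single exit point. */
	if (bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP, &vcpu->arch.pending_req)) {
		if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
			exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
					vcpu->arch.exception_info.error);
		}
		/* ... remainder of the original injection path ... */
	}
}
```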

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Li, BingX
Sent: Monday, April 22, 2019 2:26 PM
To: acrn-dev@...
Cc: Li, BingX <bingx.li@...>
Subject: [acrn-dev] [PATCH v2 1/1] HV: fix the issue of ACRN_REQUEST_EXCP
flag is not cleared.

From: li bing <bingx.li@...>

The problem: the system will crash when running crashme.
The root cause is that when the ACRN_REQUEST_EXCP flag is set by calling
the vcpu_make_request function, the flag is never cleared.
Add the following statement to the vcpu_inject_exception function to fix the
problem:
bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP, &vcpu->arch.pending_req);
Tested overnight; there was no crash.

Tracked-On: #2527
Signed-off-by: bing.li <bingx.li@...>

changelog:

v2:
1) changed bitmap_clear_lock to bitmap_test_and_clear_lock in the
vcpu_inject_exception function
2) added the statement that sets the ACRN_REQUEST_EXCP flag in
vcpu_queue_exception

v1:
used bitmap_clear_lock(ACRN_REQUEST_EXCP,
&vcpu->arch.pending_req) to clear the ACRN_REQUEST_EXCP flag in the
vcpu_inject_exception function
---
hypervisor/arch/x86/guest/virq.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/hypervisor/arch/x86/guest/virq.c
b/hypervisor/arch/x86/guest/virq.c
index ab5c2f9c..cdf656d3 100644
--- a/hypervisor/arch/x86/guest/virq.c
+++ b/hypervisor/arch/x86/guest/virq.c
@@ -268,6 +268,7 @@ int32_t vcpu_queue_exception(struct acrn_vcpu
*vcpu, uint32_t vector_arg, uint32
} else {
arch->exception_info.error = 0U;
}
+ vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
}

@@ -276,6 +277,10 @@ int32_t vcpu_queue_exception(struct acrn_vcpu
*vcpu, uint32_t vector_arg, uint32

static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector) {
+ if (!bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP,
&vcpu->arch.pending_req)) {
+ return;
+ }
+
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
vcpu->arch.exception_info.error);
@@ -345,7 +350,6 @@ void vcpu_inject_nmi(struct acrn_vcpu *vcpu) void
vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code) {
(void)vcpu_queue_exception(vcpu, IDT_GP, err_code);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject page fault exception(#PF) to guest */ @@ -353,28 +357,24 @@ void
vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code) {
vcpu_set_cr2(vcpu, addr);
(void)vcpu_queue_exception(vcpu, IDT_PF, err_code);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject invalid opcode exception(#UD) to guest */ void
vcpu_inject_ud(struct acrn_vcpu *vcpu) {
(void)vcpu_queue_exception(vcpu, IDT_UD, 0);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject alignment check exception(#AC) to guest */ void
vcpu_inject_ac(struct acrn_vcpu *vcpu) {
(void)vcpu_queue_exception(vcpu, IDT_AC, 0);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject stack fault exception(#SS) to guest */ void vcpu_inject_ss(struct
acrn_vcpu *vcpu) {
(void)vcpu_queue_exception(vcpu, IDT_SS, 0);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
--
2.17.1



[PATCH] hv: fix possible buffer overflow in vlapic.c

Yonghua Huang
 

A possible buffer overflow can happen in vlapic_set_tmr()
and vlapic_update_ppr(); this patch fixes them.

Signed-off-by: Yonghua Huang <yonghua.huang@...>
---
hypervisor/arch/x86/guest/vlapic.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 36ccf8bc..288076ac 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -462,11 +462,11 @@ vlapic_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
lapic = &(vlapic->apic_page);
tmrptr = &lapic->tmr[0];
if (level) {
- if (!bitmap32_test_and_set_lock((uint16_t)(vector & 0x1fU), &tmrptr[vector >> 5U].v)) {
+ if (!bitmap32_test_and_set_lock((uint16_t)(vector & 0x1fU), &tmrptr[(vector & 0xffU) >> 5U].v)) {
vcpu_set_eoi_exit_bitmap(vlapic->vcpu, vector);
}
} else {
- if (bitmap32_test_and_clear_lock((uint16_t)(vector & 0x1fU), &tmrptr[vector >> 5U].v)) {
+ if (bitmap32_test_and_clear_lock((uint16_t)(vector & 0x1fU), &tmrptr[(vector & 0xffU) >> 5U].v)) {
vcpu_clear_eoi_exit_bitmap(vlapic->vcpu, vector);
}
}
@@ -875,12 +875,10 @@ vlapic_update_ppr(struct acrn_vlapic *vlapic)
isrptr = &(vlapic->apic_page.isr[0]);
for (vector = 0U; vector < 256U; vector++) {
idx = vector >> 5U;
- if ((isrptr[idx].v & (1U << (vector & 0x1fU)))
- != 0U) {
+ if (((isrptr[idx].v & (1U << (vector & 0x1fU))) != 0U)
+ && (i < ISRVEC_STK_SIZE)) {
isrvec = (uint32_t)vlapic->isrvec_stk[i];
- if ((i > vlapic->isrvec_stk_top) ||
- ((i < ISRVEC_STK_SIZE) &&
- (isrvec != vector))) {
+ if ((i > vlapic->isrvec_stk_top) || (isrvec != vector)) {
dump_isrvec_stk(vlapic);
panic("ISR and isrvec_stk out of sync");
}
--
2.17.1
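
The bound being enforced is easy to check with numbers: the LAPIC TMR is 256 bits stored as eight 32-bit registers, so a valid index into tmr[] is vector >> 5 in the range 0..7. Masking the vector to 8 bits first guarantees that even a corrupted vector value cannot index past the array. A standalone sketch of the arithmetic (the array size and names mirror the patch; this is illustration only):

```c
#include <stdint.h>
#include <stdio.h>

#define TMR_REGS 8U    /* 256 vectors / 32 bits per register */

int main(void)
{
	uint32_t tmr[TMR_REGS] = {0};
	uint32_t vector = 0x1ffU;                       /* out-of-range value, e.g. from a bug */

	uint32_t bad_idx  = vector >> 5U;               /* 15: walks off the 8-entry array */
	uint32_t safe_idx = (vector & 0xffU) >> 5U;     /* 7: always within 0..7 */
	uint32_t bit      = vector & 0x1fU;             /* bit position inside the register */

	printf("bad_idx=%u safe_idx=%u bit=%u\n", bad_idx, safe_idx, bit);
	tmr[safe_idx] |= (1U << bit);                   /* bounded access */
	return 0;
}
```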


[PATCH v2 1/1] HV: fix the issue of ACRN_REQUEST_EXCP flag is not cleared.

Li, BingX
 

From: li bing <bingx.li@...>

The problem: the system will crash when running crashme.
The root cause is that when the ACRN_REQUEST_EXCP flag is set by calling
the vcpu_make_request function, the flag is never cleared.
Add the following statement to the vcpu_inject_exception function to fix the problem:
bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP, &vcpu->arch.pending_req);
Tested overnight; there was no crash.

Tracked-On: #2527
Signed-off-by: bing.li <bingx.li@...>

changelog:

v2:
1) changed bitmap_clear_lock to bitmap_test_and_clear_lock in the vcpu_inject_exception function
2) added the statement that sets the ACRN_REQUEST_EXCP flag in vcpu_queue_exception

v1:
used bitmap_clear_lock(ACRN_REQUEST_EXCP, &vcpu->arch.pending_req) to clear the ACRN_REQUEST_EXCP flag in the vcpu_inject_exception function
---
hypervisor/arch/x86/guest/virq.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/hypervisor/arch/x86/guest/virq.c b/hypervisor/arch/x86/guest/virq.c
index ab5c2f9c..cdf656d3 100644
--- a/hypervisor/arch/x86/guest/virq.c
+++ b/hypervisor/arch/x86/guest/virq.c
@@ -268,6 +268,7 @@ int32_t vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector_arg, uint32
} else {
arch->exception_info.error = 0U;
}
+ vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
}

@@ -276,6 +277,10 @@ int32_t vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector_arg, uint32

static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
{
+ if (!bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP, &vcpu->arch.pending_req)) {
+ return;
+ }
+
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
vcpu->arch.exception_info.error);
@@ -345,7 +350,6 @@ void vcpu_inject_nmi(struct acrn_vcpu *vcpu)
void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code)
{
(void)vcpu_queue_exception(vcpu, IDT_GP, err_code);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject page fault exception(#PF) to guest */
@@ -353,28 +357,24 @@ void vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code)
{
vcpu_set_cr2(vcpu, addr);
(void)vcpu_queue_exception(vcpu, IDT_PF, err_code);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject invalid opcode exception(#UD) to guest */
void vcpu_inject_ud(struct acrn_vcpu *vcpu)
{
(void)vcpu_queue_exception(vcpu, IDT_UD, 0);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject alignment check exception(#AC) to guest */
void vcpu_inject_ac(struct acrn_vcpu *vcpu)
{
(void)vcpu_queue_exception(vcpu, IDT_AC, 0);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

/* Inject stack fault exception(#SS) to guest */
void vcpu_inject_ss(struct acrn_vcpu *vcpu)
{
(void)vcpu_queue_exception(vcpu, IDT_SS, 0);
- vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}

int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
--
2.17.1
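
The shape of the fix is a producer/consumer pairing on a pending-request bitmap: queueing an exception sets the ACRN_REQUEST_EXCP bit, and the injector consumes (tests and clears) it exactly once, so a request can no longer be left set across VM entries. A minimal userspace sketch of that pairing using GCC atomic builtins; the hypervisor's own bitmap_*_lock helpers are the lock-prefixed equivalents, so this is illustration only:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACRN_REQUEST_EXCP 0U

/* Sketch of vcpu_make_request(): atomically set the request bit. */
static void make_request(uint64_t *pending, uint64_t bit)
{
	__atomic_fetch_or(pending, 1UL << bit, __ATOMIC_SEQ_CST);
}

/* Sketch of the injector side: atomically test-and-clear, inject only if it was set. */
static bool consume_request(uint64_t *pending, uint64_t bit)
{
	uint64_t mask = 1UL << bit;
	uint64_t old = __atomic_fetch_and(pending, ~mask, __ATOMIC_SEQ_CST);

	return (old & mask) != 0UL;
}

int main(void)
{
	uint64_t pending_req = 0UL;

	make_request(&pending_req, ACRN_REQUEST_EXCP);        /* vcpu_queue_exception() path */
	printf("first inject: %d\n", consume_request(&pending_req, ACRN_REQUEST_EXCP));  /* 1 */
	printf("second inject: %d\n", consume_request(&pending_req, ACRN_REQUEST_EXCP)); /* 0: already consumed */
	return 0;
}
```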


Re: [PATCH 0/3] Completely remove enable_bar()/disable_bar()

Yin, Fengwei <fengwei.yin@...>
 

On 4/17/2019 3:13 PM, Peter Fang wrote:
Following up on d648df766c263512c93356a55ff97cb3e23fe3e4, surgically
remove all the functions related to enable_bar()/disable_bar() that got
introduced in 8787b65fdee622639925ad75fe920913e0e7f102.
Also, make accesses to mmio_hint in mem.c thread-safe because it can
potentially be accessed concurrently in emulate_mem().
Peter Fang (3):
dm: pci: change return type to bool
dm: completely remove enable_bar()/disable_bar() functions
dm: safely access MMIO hint in MMIO emulation
Acked-by: Yin Fengwei <fengwei.yin@...>

Regards
Yin, Fengwei

devicemodel/core/inout.c | 44 -----------------
devicemodel/core/mem.c | 98 ++++++-------------------------------
devicemodel/hw/pci/core.c | 17 ++++---
devicemodel/include/inout.h | 2 -
devicemodel/include/mem.h | 5 +-
5 files changed, 24 insertions(+), 142 deletions(-)


Re: [PATCH 2/3] dm: completely remove enable_bar()/disable_bar() functions

Peter Fang
 

-----Original Message-----
From: Liu, Shuo A
Sent: Saturday, April 20, 2019 7:53 PM
To: Fang, Peter <peter.fang@...>
Cc: acrn-dev@...
Subject: Re: [acrn-dev] [PATCH 2/3] dm: completely remove
enable_bar()/disable_bar() functions

On Wed 17.Apr'19 at 0:13:44 -0700, Peter Fang wrote:
Following up on d648df766c263512c93356a55ff97cb3e23fe3e4, surgically
remove all the functions related to enable_bar()/disable_bar() that got
introduced in 8787b65fdee622639925ad75fe920913e0e7f102.

Tracked-On: #2902
Signed-off-by: Peter Fang <peter.fang@...>
Minor comment inline.

Reviewed-by: Shuo A Liu <shuo.a.liu@...>

---
devicemodel/core/inout.c | 44 -------------------
devicemodel/core/mem.c | 88 ++++---------------------------------
devicemodel/include/inout.h | 2 -
devicemodel/include/mem.h | 5 +--
4 files changed, 9 insertions(+), 130 deletions(-)

diff --git a/devicemodel/core/inout.c b/devicemodel/core/inout.c index
daa8add9..a16f3db5 100644
--- a/devicemodel/core/inout.c
+++ b/devicemodel/core/inout.c
@@ -26,7 +26,6 @@
* $FreeBSD$
*/

-#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
@@ -45,7 +44,6 @@ static struct {
int flags;
inout_func_t handler;
void *arg;
- bool enabled;
} inout_handlers[MAX_IOPORTS];

static int
@@ -115,11 +113,6 @@ emulate_inout(struct vmctx *ctx, int *pvcpu,
struct pio_request *pio_request)
if (!(flags & IOPORT_F_OUT))
return -1;
}
-
- if (inout_handlers[port].enabled == false) {
- return -1;
- }
-
retval = handler(ctx, *pvcpu, in, port, bytes,
(uint32_t *)&(pio_request->value), arg);
return retval;
@@ -148,42 +141,6 @@ init_inout(void)
}
}

-int
-disable_inout(struct inout_port *iop)
-{
- int i;
-
- if (!VERIFY_IOPORT(iop->port, iop->size)) {
- printf("invalid input: port:0x%x, size:%d",
- iop->port, iop->size);
- return -1;
- }
-
- for (i = iop->port; i < iop->port + iop->size; i++) {
- inout_handlers[i].enabled = false;
- }
-
- return 0;
-}
-
-int
-enable_inout(struct inout_port *iop)
-{
- int i;
-
- if (!VERIFY_IOPORT(iop->port, iop->size)) {
- printf("invalid input: port:0x%x, size:%d",
- iop->port, iop->size);
- return -1;
- }
-
- for (i = iop->port; i < iop->port + iop->size; i++) {
- inout_handlers[i].enabled = true;
- }
-
- return 0;
-}
-
int
register_inout(struct inout_port *iop) { @@ -211,7 +168,6 @@
register_inout(struct inout_port *iop)
inout_handlers[i].flags = iop->flags;
inout_handlers[i].handler = iop->handler;
inout_handlers[i].arg = iop->arg;
- inout_handlers[i].enabled = true;
}

return 0;
diff --git a/devicemodel/core/mem.c b/devicemodel/core/mem.c index
4994eb22..fffaeda9 100644
--- a/devicemodel/core/mem.c
+++ b/devicemodel/core/mem.c
@@ -33,7 +33,6 @@
*/

#include <errno.h>
-#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
@@ -51,7 +50,6 @@ struct mmio_rb_range {
struct mem_range mr_param;
uint64_t mr_base;
uint64_t mr_end;
- bool enabled;
};

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root,
mmio_rb_fallback; @@ -168,25 +166,18 @@ emulate_mem(struct vmctx
*ctx, struct mmio_request *mmio_req)
if (mmio_hint && paddr >= mmio_hint->mr_base &&
paddr <= mmio_hint->mr_end)
entry = mmio_hint;
-
- if (entry == NULL) {
- if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
- /* Update the per-VMU cache */
- mmio_hint = entry;
- else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry))
{
- pthread_rwlock_unlock(&mmio_rwlock);
- return -ESRCH;
- }
- }
-
- assert(entry != NULL);
-
- if (entry->enabled == false) {
+ else if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
+ /* Update the per-VMU cache */
per-VM?
Thanks for catching that. It'll be fixed in the PR. :-)


+ mmio_hint = entry;
+ else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
pthread_rwlock_unlock(&mmio_rwlock);
- return -1;
+ return -ESRCH;
}
+
pthread_rwlock_unlock(&mmio_rwlock);

+ assert(entry != NULL);
+
if (mmio_req->direction == REQUEST_READ)
err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req-
value,
size, &entry->mr_param);


Re: [PATCH v8 5/5] HV: Remove dead loop in stop_cpus

Kaige Fu
 

Hi Eddie,

On 04-21 Sun 05:18, Eddie Dong wrote:
Acked-by: Eddie Dong <eddie.dong@...>
BTW, please update the design doc in RST to state our policy.
Sure. Will check and update it.

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Kaige Fu
Sent: Sunday, April 21, 2019 7:58 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v8 5/5] HV: Remove dead loop in stop_cpus

This patch removes the dead loop in stop_cpus because a timeout never occurs.
If the target cpu received an NMI and panicked, it has already called cpu_dead, so
stop_cpus succeeds.
If the target cpu is running, an IPI will be delivered to it, which then calls cpu_dead.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 38 +++++++++-----------------------------
1 file changed, 9 insertions(+), 29 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index
d341f16f..9bea1a47 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -345,44 +345,24 @@ void wait_pcpus_offline(uint64_t mask)

void stop_cpus(void)
{
- uint16_t pcpu_id, expected_up;
- uint32_t timeout;
+ uint16_t pcpu_id;
+ uint64_t mask = 0UL;

for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
if (get_cpu_id() == pcpu_id) { /* avoid offline itself */
continue;
}

+ bitmap_set_nolock(pcpu_id, &mask);
make_pcpu_offline(pcpu_id);
}

- expected_up = 1U;
- timeout = CPU_DOWN_TIMEOUT * 1000U;
- while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
- /* Delay 10us */
- udelay(10U);
-
- /* Decrement timeout value */
- timeout -= 10U;
- }
-
- if (atomic_load16(&up_count) != expected_up) {
- pr_fatal("Can't make all APs offline");
-
- /* if partial APs is down, it's not easy to recover
- * per our current implementation (need make up dead
- * APs one by one), just print error mesage and dead
- * loop here.
- *
- * FIXME:
- * We need to refine here to handle the AP offline
- * failure for release/debug version. Ideally, we should
- * define how to handle general unrecoverable error and
- * follow it here.
- */
- do {
- } while (1);
- }
+ /**
+ * Timeout never occurs here:
+ * If target cpu received a NMI and panic, it has called cpu_dead and
make_pcpu_offline success.
+ * If target cpu is running, an IPI will be delivered to it and then call
cpu_dead.
+ */
+ wait_pcpus_offline(mask);
}

void cpu_do_idle(void)
--
2.20.0





Re: [PATCH V3] hv: allocate vpid based on vm_id and vcpu_id mapping

Eddie Dong
 


+/* This is to make sure the 16 bits vpid won't overflow */
+#if ((CONFIG_MAX_VM_NUM * CONFIG_MAX_VCPUS_PER_VM) > 0xffffU)
+#error "VM number or VCPU number are too big"
+#endif
Maybe we'd better move this check into "arch/x86/static_checks.c"
Agree!
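
A compile-time variant of the same check, in the spirit of moving it into arch/x86/static_checks.c. This is only a sketch: the assertion macro name and the placeholder Kconfig values below are illustrative, and ACRN's static_checks.c may use a different helper.

```c
/* Values normally come from Kconfig; defined here only to make the sketch compile. */
#define CONFIG_MAX_VM_NUM          8U
#define CONFIG_MAX_VCPUS_PER_VM    8U

/* Hypothetical compile-time assertion helper: the array size becomes negative
 * (a compile error) when the condition is false. */
#define STATIC_CHECK(expr) typedef char static_check_failed[(expr) ? 1 : -1]

/* The 16-bit VPID space must not overflow (same condition as the #if/#error check). */
STATIC_CHECK((CONFIG_MAX_VM_NUM * CONFIG_MAX_VCPUS_PER_VM) <= 0xffffU);
```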


Re: [PATCH v8 5/5] HV: Remove dead loop in stop_cpus

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>
BTW, please update the design doc in RST to state our policy.

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Kaige Fu
Sent: Sunday, April 21, 2019 7:58 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v8 5/5] HV: Remove dead loop in stop_cpus

This patch removes the dead loop in stop_cpus because a timeout never occurs.
If the target cpu received an NMI and panicked, it has already called cpu_dead, so
stop_cpus succeeds.
If the target cpu is running, an IPI will be delivered to it, which then calls cpu_dead.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 38 +++++++++-----------------------------
1 file changed, 9 insertions(+), 29 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index
d341f16f..9bea1a47 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -345,44 +345,24 @@ void wait_pcpus_offline(uint64_t mask)

void stop_cpus(void)
{
- uint16_t pcpu_id, expected_up;
- uint32_t timeout;
+ uint16_t pcpu_id;
+ uint64_t mask = 0UL;

for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
if (get_cpu_id() == pcpu_id) { /* avoid offline itself */
continue;
}

+ bitmap_set_nolock(pcpu_id, &mask);
make_pcpu_offline(pcpu_id);
}

- expected_up = 1U;
- timeout = CPU_DOWN_TIMEOUT * 1000U;
- while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
- /* Delay 10us */
- udelay(10U);
-
- /* Decrement timeout value */
- timeout -= 10U;
- }
-
- if (atomic_load16(&up_count) != expected_up) {
- pr_fatal("Can't make all APs offline");
-
- /* if partial APs is down, it's not easy to recover
- * per our current implementation (need make up dead
- * APs one by one), just print error mesage and dead
- * loop here.
- *
- * FIXME:
- * We need to refine here to handle the AP offline
- * failure for release/debug version. Ideally, we should
- * define how to handle general unrecoverable error and
- * follow it here.
- */
- do {
- } while (1);
- }
+ /**
+ * Timeout never occurs here:
+ * If target cpu received a NMI and panic, it has called cpu_dead and
make_pcpu_offline success.
+ * If target cpu is running, an IPI will be delivered to it and then call
cpu_dead.
+ */
+ wait_pcpus_offline(mask);
}

void cpu_do_idle(void)
--
2.20.0



Re: [PATCH v8 3/5] HV: Reshuffle start_cpus and start_cpu

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Kaige Fu
Sent: Sunday, April 21, 2019 7:58 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v8 3/5] HV: Reshuffle start_cpus and start_cpu

This patch makes the following changes:
- Add one parameter 'mask' to start_cpus for later use.
- Set the cpu state to DEAD instead of dead-looping when a cpu fails to start.
- Panic on any failure when starting cpus in init_cpu_post and
host_enter_s3.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 39 ++++++++++++++++++++++---------
hypervisor/arch/x86/pm.c | 6 ++++-
hypervisor/include/arch/x86/cpu.h | 2 +-
3 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index
e3bdd0e2..4453e1b1 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -30,6 +30,8 @@
#define CPU_UP_TIMEOUT 100U /* millisecond */
#define CPU_DOWN_TIMEOUT 100U /* millisecond */

+#define AP_MASK (((1UL << phys_cpu_num) - 1UL) & ~(1UL << 0U))
+
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM]
__aligned(PAGE_SIZE); static uint16_t phys_cpu_num = 0U; static uint64_t
pcpu_sync = 0UL; @@ -229,7 +231,9 @@ void init_cpu_post(uint16_t
pcpu_id)

/* Start all secondary cores */
startup_paddr = prepare_trampoline();
- start_cpus();
+ if (!start_cpus(AP_MASK)) {
+ panic("Failed to start all secondary cores!");
+ }

ASSERT(get_cpu_id() == BOOT_CPU_ID, "");
} else {
@@ -287,32 +291,45 @@ static void start_cpu(uint16_t pcpu_id)

/* Check to see if expected CPU is actually up */
if (!is_pcpu_active(pcpu_id)) {
- /* Print error */
- pr_fatal("Secondary CPUs failed to come up");
-
- /* Error condition - loop endlessly for now */
- do {
- } while (1);
+ pr_fatal("Secondary CPU%hu failed to come up", pcpu_id);
+ cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
}
}

-void start_cpus(void)
+
+/**
+ * @brief Start all cpus if the bit is set in mask except itself
+ *
+ * @param[in] mask bits mask of cpus which should be started
+ *
+ * @return true if all cpus set in mask are started
+ * @return false if there are any cpus set in mask aren't started */
+bool start_cpus(uint64_t mask)
{
uint16_t i;
+ uint16_t pcpu_id = get_cpu_id();
+ uint64_t expected_start_mask = mask;

/* secondary cpu start up will wait for pcpu_sync -> 0UL */
atomic_store64(&pcpu_sync, 1UL);

- for (i = 0U; i < phys_cpu_num; i++) {
- if (get_cpu_id() == i) {
- continue;
+ i = ffs64(expected_start_mask);
+ while (i != INVALID_BIT_INDEX) {
+ bitmap_clear_nolock(i, &expected_start_mask);
+
+ if (pcpu_id == i) {
+ continue; /* Avoid start itself */
}

start_cpu(i);
+ i = ffs64(expected_start_mask);
}

/* Trigger event to allow secondary CPUs to continue */
atomic_store64(&pcpu_sync, 0UL);
+
+ return ((pcpu_active_bitmap & mask) == mask);
}

void stop_cpus(void)
diff --git a/hypervisor/arch/x86/pm.c b/hypervisor/arch/x86/pm.c index
3a0d1b2c..019d8cdd 100644
--- a/hypervisor/arch/x86/pm.c
+++ b/hypervisor/arch/x86/pm.c
@@ -20,6 +20,8 @@
#include <lapic.h>
#include <vcpu.h>

+#define AP_MASK (((1UL << get_pcpu_nums()) - 1UL) & ~(1UL << 0U))
+
struct cpu_context cpu_ctx;

/* The values in this structure should come from host ACPI table */ @@
-186,5 +188,7 @@ void host_enter_s3(struct pm_s_state_data *sstate_data,
uint32_t pm1a_cnt_val, u
clac();

/* online all APs again */
- start_cpus();
+ if (!start_cpus(AP_MASK)) {
+ panic("Failed to start all APs!");
+ }
}
diff --git a/hypervisor/include/arch/x86/cpu.h
b/hypervisor/include/arch/x86/cpu.h
index 13c2fd9e..323b0d17 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -259,7 +259,7 @@ void trampoline_start16(void); void
load_cpu_state_data(void); void init_cpu_pre(uint16_t pcpu_id_args); void
init_cpu_post(uint16_t pcpu_id); -void start_cpus(void);
+bool start_cpus(uint64_t mask);
void stop_cpus(void);
void wait_sync_change(uint64_t *sync, uint64_t wake_sync);

--
2.20.0



Re: [PATCH v2] vlapic: refine IPI broadcast to support x2APIC mode

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Yin, Fengwei
Sent: Saturday, April 20, 2019 10:55 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v2] vlapic: refine IPI broadcast to support x2APIC
mode

According to SDM 10.6.1, if the dest field is 0xffU for an ICR register operation,
the IPI is a broadcast.

According to SDM 10.12.9, a dest field of 0xffffffffU in x2APIC mode means the IPI is a
broadcast.

We add a new parameter to vlapic_calc_dest() to indicate whether the dest is a
broadcast. For IPI, we set it according to the dest field. For ioapic and MSI, we
hardcode it to false because there is no broadcast for ioapic and MSI.

Signed-off-by: Yin Fengwei <fengwei.yin@...>
---
ChangeLog:
v1 -> v2:
- Have vlapic_calc_dest_lapic_pt also covered
- Remove if else for is_broadcast in vlapic_icrlo_write_handler

hypervisor/arch/x86/guest/assign.c | 4 ++--
hypervisor/arch/x86/guest/vlapic.c | 18 +++++++++++-------
hypervisor/common/hypercall.c | 2 +-
hypervisor/include/arch/x86/guest/vlapic.h | 6 ++++--
4 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/hypervisor/arch/x86/guest/assign.c
b/hypervisor/arch/x86/guest/assign.c
index 11a7bdb1..c34ebf72 100644
--- a/hypervisor/arch/x86/guest/assign.c
+++ b/hypervisor/arch/x86/guest/assign.c
@@ -89,7 +89,7 @@ static void ptirq_build_physical_msi(struct acrn_vm *vm,
struct ptirq_msi_info *
dest = info->vmsi_addr.bits.dest_field;
phys = (info->vmsi_addr.bits.dest_mode ==
MSI_ADDR_DESTMODE_PHYS);

- vlapic_calc_dest(vm, &vdmask, dest, phys, false);
+ vlapic_calc_dest(vm, &vdmask, false, dest, phys, false);
pdmask = vcpumask2pcpumask(vm, vdmask);

/* get physical delivery mode */
@@ -183,7 +183,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct
ptirq_remapping_info *entry)
/* physical destination cpu mask */
phys = (virt_rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
dest = (uint32_t)virt_rte.bits.dest_field;
- vlapic_calc_dest(vm, &vdmask, dest, phys, false);
+ vlapic_calc_dest(vm, &vdmask, false, dest, phys, false);
pdmask = vcpumask2pcpumask(vm, vdmask);

/* physical delivery mode */
diff --git a/hypervisor/arch/x86/guest/vlapic.c
b/hypervisor/arch/x86/guest/vlapic.c
index 36ccf8bc..eb6f3f74 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -1095,14 +1095,15 @@ static inline bool is_dest_field_matched(const
struct acrn_vlapic *vlapic, uint3
* addressing specified by the (dest, phys, lowprio) tuple.
*/
void
-vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool
phys, bool lowprio)
+vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
+ uint32_t dest, bool phys, bool lowprio)
{
struct acrn_vlapic *vlapic, *lowprio_dest = NULL;
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;

*dmask = 0UL;
- if (dest == 0xffU) {
+ if (is_broadcast) {
/* Broadcast in both logical and physical modes. */
*dmask = vm_active_cpus(vm);
} else if (phys) {
@@ -1150,14 +1151,15 @@ vlapic_calc_dest(struct acrn_vm *vm, uint64_t
*dmask, uint32_t dest, bool phys,
* @pre is_x2apic_enabled(vlapic) == true
*/
void
-vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, uint32_t
dest, bool phys)
+vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool
is_broadcast,
+ uint32_t dest, bool phys)
{
struct acrn_vlapic *vlapic;
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;

*dmask = 0UL;
- if (dest == 0xffU) {
+ if (is_broadcast) {
/* Broadcast in both logical and physical modes. */
*dmask = vm_active_cpus(vm);
} else if (phys) {
@@ -1229,7 +1231,7 @@ static int32_t
vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic) {
uint16_t vcpu_id;
- bool phys;
+ bool phys = false, is_broadcast = false;
int32_t ret = 0;
uint64_t dmask = 0UL;
uint32_t icr_low, icr_high, dest;
@@ -1244,8 +1246,10 @@ vlapic_icrlo_write_handler(struct acrn_vlapic
*vlapic)
icr_high = lapic->icr_hi.v;
if (is_x2apic_enabled(vlapic)) {
dest = icr_high;
+ is_broadcast = (dest == 0xffffffffU);
} else {
dest = icr_high >> APIC_ID_SHIFT;
+ is_broadcast = (dest == 0xffU);
}
vec = icr_low & APIC_VECTOR_MASK;
mode = icr_low & APIC_DELMODE_MASK;
@@ -1267,7 +1271,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic
*vlapic)

switch (shorthand) {
case APIC_DEST_DESTFLD:
- vlapic_calc_dest(vlapic->vm, &dmask, dest, phys, false);
+ vlapic_calc_dest(vlapic->vm, &dmask, is_broadcast, dest, phys,
+false);
break;
case APIC_DEST_SELF:
bitmap_set_nolock(vlapic->vcpu->vcpu_id, &dmask); @@
-1824,7 +1828,7 @@ vlapic_receive_intr(struct acrn_vm *vm, bool level,
uint32_t dest, bool phys,
* all interrupts originating from the ioapic or MSI specify the
* 'dest' in the legacy xAPIC format.
*/
- vlapic_calc_dest(vm, &dmask, dest, phys, lowprio);
+ vlapic_calc_dest(vm, &dmask, false, dest, phys, lowprio);

for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
struct acrn_vlapic *vlapic;
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index d26ace3a..3f1e723b 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -438,7 +438,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm,
const struct acrn_msi_entry
* the delivery mode of vmsi will be forwarded to ICR delievry field
* and handled by hardware.
*/
- vlapic_calc_dest_lapic_pt(vm, &vdmask, vdest, phys);
+ vlapic_calc_dest_lapic_pt(vm, &vdmask, false, vdest, phys);
dev_dbg(ACRN_DBG_LAPICPT, "%s: vcpu destination mask
0x%016llx", __func__, vdmask);

vcpu_id = ffs64(vdmask);
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h
b/hypervisor/include/arch/x86/guest/vlapic.h
index f6b74f84..cc856f8a 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -236,8 +236,10 @@ int32_t apic_write_vmexit_handler(struct acrn_vcpu
*vcpu); int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu); void
vlapic_update_tpr_threshold(const struct acrn_vlapic *vlapic); int32_t
tpr_below_threshold_vmexit_handler(struct acrn_vcpu *vcpu); -void
vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool
phys, bool lowprio); -void vlapic_calc_dest_lapic_pt(struct acrn_vm *vm,
uint64_t *dmask, uint32_t dest, bool phys);
+void vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool
is_broadcast,
+ uint32_t dest, bool phys, bool lowprio); void
+vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool
is_broadcast,
+ uint32_t dest, bool phys);

/**
* @}
--
2.17.1

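The decoding difference the patch handles can be summarized in a few lines: in xAPIC mode the destination lives in ICR bits 63:56 and 0xFF is the broadcast value, while in x2APIC mode the whole upper 32 bits are the destination and 0xFFFFFFFF is the broadcast value. A small standalone sketch of that decode step, mirroring the names used in vlapic_icrlo_write_handler (illustration only):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_ID_SHIFT 24U

static void decode_icr_dest(uint32_t icr_high, bool x2apic_enabled,
			    uint32_t *dest, bool *is_broadcast)
{
	if (x2apic_enabled) {
		*dest = icr_high;                       /* full 32-bit destination */
		*is_broadcast = (*dest == 0xffffffffU);
	} else {
		*dest = icr_high >> APIC_ID_SHIFT;      /* 8-bit destination in bits 31:24 */
		*is_broadcast = (*dest == 0xffU);
	}
}

int main(void)
{
	uint32_t dest;
	bool bcast;

	decode_icr_dest(0xff000000U, false, &dest, &bcast);   /* xAPIC broadcast */
	printf("xAPIC:  dest=0x%x broadcast=%d\n", dest, bcast);

	decode_icr_dest(0xffffffffU, true, &dest, &bcast);     /* x2APIC broadcast */
	printf("x2APIC: dest=0x%x broadcast=%d\n", dest, bcast);
	return 0;
}
```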


Re: [PATCH v8 4/5] HV: Reset physical core of lapic_pt vm when shutdown

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Kaige Fu
Sent: Sunday, April 21, 2019 7:58 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v8 4/5] HV: Reset physical core of lapic_pt vm
when shutdown

The physical cores of a lapic_pt vm should be reset for security and correctness
when the vm is shut down.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 14 +++++++++++++-
hypervisor/arch/x86/guest/vm.c | 15 ++++++++++++++-
hypervisor/include/arch/x86/cpu.h | 1 +
hypervisor/include/lib/errno.h | 2 ++
4 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index
4453e1b1..d341f16f 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -332,6 +332,17 @@ bool start_cpus(uint64_t mask)
return ((pcpu_active_bitmap & mask) == mask); }

+void wait_pcpus_offline(uint64_t mask)
+{
+ uint32_t timeout;
+
+ timeout = CPU_DOWN_TIMEOUT * 1000U;
+ while (((pcpu_active_bitmap & mask) != 0UL) && (timeout != 0U)) {
+ udelay(10U);
+ timeout -= 10U;
+ }
+}
+
void stop_cpus(void)
{
uint16_t pcpu_id, expected_up;
@@ -390,13 +401,14 @@ void cpu_dead(void)
int32_t halt = 1;
uint16_t pcpu_id = get_cpu_id();

- if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap)) {
+ if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
/* clean up native stuff */
vmx_off();
cache_flush_invalidate_all();

/* Set state to show CPU is dead */
cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
+ bitmap_clear_nolock(pcpu_id, &pcpu_active_bitmap);

/* Halt the CPU */
do {
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index cebbc9cc..43ab72b0 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -446,9 +446,10 @@ int32_t create_vm(uint16_t vm_id, struct
acrn_vm_config *vm_config, struct acrn_ int32_t shutdown_vm(struct
acrn_vm *vm) {
uint16_t i;
+ uint64_t mask = 0UL;
struct acrn_vcpu *vcpu = NULL;
struct acrn_vm_config *vm_config = NULL;
- int32_t ret;
+ int32_t ret = 0;

pause_vm(vm);

@@ -459,6 +460,18 @@ int32_t shutdown_vm(struct acrn_vm *vm)
foreach_vcpu(i, vm, vcpu) {
reset_vcpu(vcpu);
offline_vcpu(vcpu);
+
+ if (is_lapic_pt(vm)) {
+ bitmap_set_nolock(vcpu->pcpu_id, &mask);
+ make_pcpu_offline(vcpu->pcpu_id);
+ }
+ }
+
+ wait_pcpus_offline(mask);
+
+ if (is_lapic_pt(vm) && !start_cpus(mask)) {
+ pr_fatal("Failed to start all cpus in mask(0x%llx)", mask);
+ ret = -ETIMEDOUT;
}

vm_config = get_vm_config(vm->vm_id); diff --git
a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 323b0d17..276a4edc 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -260,6 +260,7 @@ void load_cpu_state_data(void); void
init_cpu_pre(uint16_t pcpu_id_args); void init_cpu_post(uint16_t pcpu_id);
bool start_cpus(uint64_t mask);
+void wait_pcpus_offline(uint64_t mask);
void stop_cpus(void);
void wait_sync_change(uint64_t *sync, uint64_t wake_sync);

diff --git a/hypervisor/include/lib/errno.h b/hypervisor/include/lib/errno.h
index b97112f5..bc8c0db7 100644
--- a/hypervisor/include/lib/errno.h
+++ b/hypervisor/include/lib/errno.h
@@ -23,5 +23,7 @@
#define ENODEV 19
/** Indicates that argument is not valid. */
#define EINVAL 22
+/** Indicates that timeout occurs. */
+#define ETIMEDOUT 110

#endif /* ERRNO_H */
--
2.20.0



[PATCH v8 5/5] HV: Remove dead loop in stop_cpus

Kaige Fu
 

This patch removes the dead loop in stop_cpus because a timeout never occurs.
If the target cpu received an NMI and panicked, it has already called cpu_dead, so stop_cpus succeeds.
If the target cpu is running, an IPI will be delivered to it, which then calls cpu_dead.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 38 +++++++++-----------------------------
1 file changed, 9 insertions(+), 29 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index d341f16f..9bea1a47 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -345,44 +345,24 @@ void wait_pcpus_offline(uint64_t mask)

void stop_cpus(void)
{
- uint16_t pcpu_id, expected_up;
- uint32_t timeout;
+ uint16_t pcpu_id;
+ uint64_t mask = 0UL;

for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
if (get_cpu_id() == pcpu_id) { /* avoid offline itself */
continue;
}

+ bitmap_set_nolock(pcpu_id, &mask);
make_pcpu_offline(pcpu_id);
}

- expected_up = 1U;
- timeout = CPU_DOWN_TIMEOUT * 1000U;
- while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
- /* Delay 10us */
- udelay(10U);
-
- /* Decrement timeout value */
- timeout -= 10U;
- }
-
- if (atomic_load16(&up_count) != expected_up) {
- pr_fatal("Can't make all APs offline");
-
- /* if partial APs is down, it's not easy to recover
- * per our current implementation (need make up dead
- * APs one by one), just print error mesage and dead
- * loop here.
- *
- * FIXME:
- * We need to refine here to handle the AP offline
- * failure for release/debug version. Ideally, we should
- * define how to handle general unrecoverable error and
- * follow it here.
- */
- do {
- } while (1);
- }
+ /**
+ * Timeout never occurs here:
+ * If target cpu received a NMI and panic, it has called cpu_dead and make_pcpu_offline success.
+ * If target cpu is running, an IPI will be delivered to it and then call cpu_dead.
+ */
+ wait_pcpus_offline(mask);
}

void cpu_do_idle(void)
--
2.20.0
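
The new control flow amounts to "collect a mask of every other cpu, ask each to go offline, then poll the active bitmap until the mask drains"; the bounded polling itself is wait_pcpus_offline() from patch 4/5. A tiny standalone sketch of that mask/poll shape, with the offline transition simulated (illustration only, not the hypervisor code):

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_CPUS 4U

static uint64_t active_bitmap = 0xfUL;   /* all four cpus start online */

/* Simulated offline request: in the hypervisor this is make_pcpu_offline()
 * plus the target cpu running cpu_dead(); here we just clear the bit. */
static void request_offline(uint16_t cpu)
{
	active_bitmap &= ~(1UL << cpu);
}

int main(void)
{
	uint16_t self = 0U, cpu;
	uint64_t mask = 0UL;

	for (cpu = 0U; cpu < NUM_CPUS; cpu++) {
		if (cpu == self)
			continue;                 /* never offline the caller */
		mask |= 1UL << cpu;
		request_offline(cpu);
	}

	while ((active_bitmap & mask) != 0UL) {
		/* real code: udelay(10) with a bounded timeout */
	}

	printf("remaining online cpus: 0x%llx\n", (unsigned long long)active_bitmap);  /* only cpu0 */
	return 0;
}
```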


[PATCH v8 4/5] HV: Reset physical core of lapic_pt vm when shutdown

Kaige Fu
 

The physical cores of a lapic_pt vm should be reset for security and
correctness when the vm is shut down.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 14 +++++++++++++-
hypervisor/arch/x86/guest/vm.c | 15 ++++++++++++++-
hypervisor/include/arch/x86/cpu.h | 1 +
hypervisor/include/lib/errno.h | 2 ++
4 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 4453e1b1..d341f16f 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -332,6 +332,17 @@ bool start_cpus(uint64_t mask)
return ((pcpu_active_bitmap & mask) == mask);
}

+void wait_pcpus_offline(uint64_t mask)
+{
+ uint32_t timeout;
+
+ timeout = CPU_DOWN_TIMEOUT * 1000U;
+ while (((pcpu_active_bitmap & mask) != 0UL) && (timeout != 0U)) {
+ udelay(10U);
+ timeout -= 10U;
+ }
+}
+
void stop_cpus(void)
{
uint16_t pcpu_id, expected_up;
@@ -390,13 +401,14 @@ void cpu_dead(void)
int32_t halt = 1;
uint16_t pcpu_id = get_cpu_id();

- if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap)) {
+ if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
/* clean up native stuff */
vmx_off();
cache_flush_invalidate_all();

/* Set state to show CPU is dead */
cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
+ bitmap_clear_nolock(pcpu_id, &pcpu_active_bitmap);

/* Halt the CPU */
do {
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index cebbc9cc..43ab72b0 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -446,9 +446,10 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
int32_t shutdown_vm(struct acrn_vm *vm)
{
uint16_t i;
+ uint64_t mask = 0UL;
struct acrn_vcpu *vcpu = NULL;
struct acrn_vm_config *vm_config = NULL;
- int32_t ret;
+ int32_t ret = 0;

pause_vm(vm);

@@ -459,6 +460,18 @@ int32_t shutdown_vm(struct acrn_vm *vm)
foreach_vcpu(i, vm, vcpu) {
reset_vcpu(vcpu);
offline_vcpu(vcpu);
+
+ if (is_lapic_pt(vm)) {
+ bitmap_set_nolock(vcpu->pcpu_id, &mask);
+ make_pcpu_offline(vcpu->pcpu_id);
+ }
+ }
+
+ wait_pcpus_offline(mask);
+
+ if (is_lapic_pt(vm) && !start_cpus(mask)) {
+ pr_fatal("Failed to start all cpus in mask(0x%llx)", mask);
+ ret = -ETIMEDOUT;
}

vm_config = get_vm_config(vm->vm_id);
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 323b0d17..276a4edc 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -260,6 +260,7 @@ void load_cpu_state_data(void);
void init_cpu_pre(uint16_t pcpu_id_args);
void init_cpu_post(uint16_t pcpu_id);
bool start_cpus(uint64_t mask);
+void wait_pcpus_offline(uint64_t mask);
void stop_cpus(void);
void wait_sync_change(uint64_t *sync, uint64_t wake_sync);

diff --git a/hypervisor/include/lib/errno.h b/hypervisor/include/lib/errno.h
index b97112f5..bc8c0db7 100644
--- a/hypervisor/include/lib/errno.h
+++ b/hypervisor/include/lib/errno.h
@@ -23,5 +23,7 @@
#define ENODEV 19
/** Indicates that argument is not valid. */
#define EINVAL 22
+/** Indicates that timeout occurs. */
+#define ETIMEDOUT 110

#endif /* ERRNO_H */
--
2.20.0


[PATCH v8 3/5] HV: Reshuffle start_cpus and start_cpu

Kaige Fu
 

This patch makes the following changes:
- Add one parameter 'mask' to start_cpus for later use.
- Set the cpu state to DEAD instead of dead-looping when a cpu fails to start.
- Panic on any failure when starting cpus in init_cpu_post and host_enter_s3.

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/arch/x86/cpu.c | 39 ++++++++++++++++++++++---------
hypervisor/arch/x86/pm.c | 6 ++++-
hypervisor/include/arch/x86/cpu.h | 2 +-
3 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index e3bdd0e2..4453e1b1 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -30,6 +30,8 @@
#define CPU_UP_TIMEOUT 100U /* millisecond */
#define CPU_DOWN_TIMEOUT 100U /* millisecond */

+#define AP_MASK (((1UL << phys_cpu_num) - 1UL) & ~(1UL << 0U))
+
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
static uint16_t phys_cpu_num = 0U;
static uint64_t pcpu_sync = 0UL;
@@ -229,7 +231,9 @@ void init_cpu_post(uint16_t pcpu_id)

/* Start all secondary cores */
startup_paddr = prepare_trampoline();
- start_cpus();
+ if (!start_cpus(AP_MASK)) {
+ panic("Failed to start all secondary cores!");
+ }

ASSERT(get_cpu_id() == BOOT_CPU_ID, "");
} else {
@@ -287,32 +291,45 @@ static void start_cpu(uint16_t pcpu_id)

/* Check to see if expected CPU is actually up */
if (!is_pcpu_active(pcpu_id)) {
- /* Print error */
- pr_fatal("Secondary CPUs failed to come up");
-
- /* Error condition - loop endlessly for now */
- do {
- } while (1);
+ pr_fatal("Secondary CPU%hu failed to come up", pcpu_id);
+ cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
}
}

-void start_cpus(void)
+
+/**
+ * @brief Start all cpus if the bit is set in mask except itself
+ *
+ * @param[in] mask bits mask of cpus which should be started
+ *
+ * @return true if all cpus set in mask are started
+ * @return false if there are any cpus set in mask aren't started
+ */
+bool start_cpus(uint64_t mask)
{
uint16_t i;
+ uint16_t pcpu_id = get_cpu_id();
+ uint64_t expected_start_mask = mask;

/* secondary cpu start up will wait for pcpu_sync -> 0UL */
atomic_store64(&pcpu_sync, 1UL);

- for (i = 0U; i < phys_cpu_num; i++) {
- if (get_cpu_id() == i) {
- continue;
+ i = ffs64(expected_start_mask);
+ while (i != INVALID_BIT_INDEX) {
+ bitmap_clear_nolock(i, &expected_start_mask);
+
+ if (pcpu_id == i) {
+ continue; /* Avoid start itself */
}

start_cpu(i);
+ i = ffs64(expected_start_mask);
}

/* Trigger event to allow secondary CPUs to continue */
atomic_store64(&pcpu_sync, 0UL);
+
+ return ((pcpu_active_bitmap & mask) == mask);
}

void stop_cpus(void)
diff --git a/hypervisor/arch/x86/pm.c b/hypervisor/arch/x86/pm.c
index 3a0d1b2c..019d8cdd 100644
--- a/hypervisor/arch/x86/pm.c
+++ b/hypervisor/arch/x86/pm.c
@@ -20,6 +20,8 @@
#include <lapic.h>
#include <vcpu.h>

+#define AP_MASK (((1UL << get_pcpu_nums()) - 1UL) & ~(1UL << 0U))
+
struct cpu_context cpu_ctx;

/* The values in this structure should come from host ACPI table */
@@ -186,5 +188,7 @@ void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, u
clac();

/* online all APs again */
- start_cpus();
+ if (!start_cpus(AP_MASK)) {
+ panic("Failed to start all APs!");
+ }
}
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 13c2fd9e..323b0d17 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -259,7 +259,7 @@ void trampoline_start16(void);
void load_cpu_state_data(void);
void init_cpu_pre(uint16_t pcpu_id_args);
void init_cpu_post(uint16_t pcpu_id);
-void start_cpus(void);
+bool start_cpus(uint64_t mask);
void stop_cpus(void);
void wait_sync_change(uint64_t *sync, uint64_t wake_sync);

--
2.20.0
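
The AP_MASK macro introduced here is just "all present pcpus except the BSP (cpu 0)". Worked out for a 4-core system: (1 << 4) - 1 = 0b1111, and clearing bit 0 leaves 0b1110, i.e. cpus 1-3. A one-line check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
	unsigned long phys_cpu_num = 4UL;
	unsigned long ap_mask = ((1UL << phys_cpu_num) - 1UL) & ~(1UL << 0U);

	printf("AP_MASK for %lu cpus = 0x%lx\n", phys_cpu_num, ap_mask);  /* 0xe */
	return 0;
}
```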


[PATCH v8 2/5] HV: Kconfig: Remove CPU_UP_TIMEOUT

Kaige Fu
 

This patch removes the over-designed Kconfig option CONFIG_CPU_UP_TIMEOUT and
uses hard-coded CPU_UP_TIMEOUT and CPU_DOWN_TIMEOUT macros instead.

Signed-off-by: Kaige Fu <kaige.fu@...>
Acked-by: Eddie Dong <eddie.dong@...>
---
hypervisor/arch/x86/Kconfig | 8 --------
hypervisor/arch/x86/cpu.c | 7 +++++--
2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/hypervisor/arch/x86/Kconfig b/hypervisor/arch/x86/Kconfig
index fb259167..744e01d4 100644
--- a/hypervisor/arch/x86/Kconfig
+++ b/hypervisor/arch/x86/Kconfig
@@ -107,14 +107,6 @@ config LOG_DESTINATION
are 3 destinations available. Bit 0 represents the serial console, bit
1 the SOS ACRN log and bit 2 NPK log. Effective only in debug builds.

-config CPU_UP_TIMEOUT
- int "Timeout in ms when bringing up secondary CPUs"
- range 100 200
- default 100
- help
- A 32-bit integer specifying the timeout in millisecond when waiting
- for secondary CPUs to start up.
-
choice
prompt "Serial IO type"
depends on !RELEASE
diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index d6546419..e3bdd0e2 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -27,6 +27,9 @@
#include <cat.h>
#include <firmware.h>

+#define CPU_UP_TIMEOUT 100U /* millisecond */
+#define CPU_DOWN_TIMEOUT 100U /* millisecond */
+
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
static uint16_t phys_cpu_num = 0U;
static uint64_t pcpu_sync = 0UL;
@@ -273,7 +276,7 @@ static void start_cpu(uint16_t pcpu_id)
/* Wait until the pcpu with pcpu_id is running and set the active bitmap or
* configured time-out has expired
*/
- timeout = (uint32_t)CONFIG_CPU_UP_TIMEOUT * 1000U;
+ timeout = CPU_UP_TIMEOUT * 1000U;
while (!is_pcpu_active(pcpu_id) && (timeout != 0U)) {
/* Delay 10us */
udelay(10U);
@@ -317,7 +320,6 @@ void stop_cpus(void)
uint16_t pcpu_id, expected_up;
uint32_t timeout;

- timeout = (uint32_t)CONFIG_CPU_UP_TIMEOUT * 1000U;
for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
if (get_cpu_id() == pcpu_id) { /* avoid offline itself */
continue;
@@ -327,6 +329,7 @@ void stop_cpus(void)
}

expected_up = 1U;
+ timeout = CPU_DOWN_TIMEOUT * 1000U;
while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
/* Delay 10us */
udelay(10U);
--
2.20.0


[PATCH v8 1/5] HV: Clear DM set guest_flags when shutdown vm

Kaige Fu
 

Currently, the guest_flags previously set by the DM are not cleared when the
vm is shut down. This might cause issues for the next DM-launched vm.

For example, if we create one vm with the LAPIC_PASSTHROUGH flag and shut it down,
the next DM-launched vm will have the LAPIC_PASSTHROUGH flag set no matter
whether we set it in the DM.

This patch clears all the DM-set flags when shutting down the vm.

Signed-off-by: Kaige Fu <kaige.fu@...>
Acked-by: Eddie Dong <eddie.dong@...>
---
hypervisor/arch/x86/guest/vm.c | 4 ++++
hypervisor/common/hypercall.c | 4 +++-
hypervisor/scenarios/sdc/vm_configurations.h | 5 +++++
3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 28f31228..cebbc9cc 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -447,6 +447,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
{
uint16_t i;
struct acrn_vcpu *vcpu = NULL;
+ struct acrn_vm_config *vm_config = NULL;
int32_t ret;

pause_vm(vm);
@@ -460,6 +461,9 @@ int32_t shutdown_vm(struct acrn_vm *vm)
offline_vcpu(vcpu);
}

+ vm_config = get_vm_config(vm->vm_id);
+ vm_config->guest_flags &= ~DM_OWNED_GUEST_FLAG_MASK;
+
ptdev_release_all_entries(vm);

vpci_cleanup(vm);
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index d26ace3a..e8dbf63a 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -156,7 +156,9 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
vm_id = get_vmid_by_uuid(&cv.uuid[0]);
if (vm_id < CONFIG_MAX_VM_NUM) {
vm_config = get_vm_config(vm_id);
- vm_config->guest_flags |= cv.vm_flag;
+
+ /* Filter out the bits should not set by DM and then assign it to guest_flags */
+ vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK);

/* GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH set in guest_flags */
if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0U)
diff --git a/hypervisor/scenarios/sdc/vm_configurations.h b/hypervisor/scenarios/sdc/vm_configurations.h
index 6c49865f..bba9515e 100644
--- a/hypervisor/scenarios/sdc/vm_configurations.h
+++ b/hypervisor/scenarios/sdc/vm_configurations.h
@@ -9,4 +9,9 @@

#define CONFIG_MAX_VM_NUM 2U

+/* Bits mask of guest flags that can be programmed by device model. Other bits are set by hypervisor only */
+#define DM_OWNED_GUEST_FLAG_MASK (GUEST_FLAG_SECURE_WORLD_ENABLED | GUEST_FLAG_LAPIC_PASSTHROUGH | \
+ GUEST_FLAG_RT | GUEST_FLAG_IO_COMPLETION_POLLING)
+
+
#endif /* VM_CONFIGURATIONS_H */
--
2.20.0
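
The mask works in both directions: on create, only DM-owned bits from cv.vm_flag are ORed into guest_flags; on shutdown, exactly those bits are cleared, so hypervisor-owned flags survive while stale DM settings do not. A small sketch of that filter/clear pair; the flag bit values below are made up for illustration, the real definitions live in the hypervisor headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; the real flags are defined by the hypervisor. */
#define GUEST_FLAG_LAPIC_PASSTHROUGH  (1UL << 1U)
#define GUEST_FLAG_RT                 (1UL << 2U)
#define GUEST_FLAG_HV_OWNED_EXAMPLE   (1UL << 7U)   /* stands in for a hypervisor-only flag */

#define DM_OWNED_GUEST_FLAG_MASK (GUEST_FLAG_LAPIC_PASSTHROUGH | GUEST_FLAG_RT)

int main(void)
{
	uint64_t guest_flags = GUEST_FLAG_HV_OWNED_EXAMPLE;   /* set by hypervisor config */
	uint64_t vm_flag = GUEST_FLAG_LAPIC_PASSTHROUGH | GUEST_FLAG_HV_OWNED_EXAMPLE;

	/* hcall_create_vm(): only DM-owned bits are accepted from the DM. */
	guest_flags |= (vm_flag & DM_OWNED_GUEST_FLAG_MASK);
	printf("after create:   0x%llx\n", (unsigned long long)guest_flags);

	/* shutdown_vm(): DM-owned bits are cleared, hypervisor bits remain. */
	guest_flags &= ~DM_OWNED_GUEST_FLAG_MASK;
	printf("after shutdown: 0x%llx\n", (unsigned long long)guest_flags);
	return 0;
}
```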


[PATCH v8 0/5] Minor fixes about vm and vm_config and lapic_pt vm

Kaige Fu
 

This patchset aims at fixing the following issues:
- Some DM-set flags remain after the vm is shut down. This patchset fixes it by clearing
all DM-set flags when shutting down the vm.

- Since the lapic is passed through to the guest, it may be left in a wrong state after a
lapic_pt vm shuts down. For example, the lapic is disabled by a linux guest on shutdown, and
we can't boot a normal vm successfully in this situation. This patchset fixes it by resetting
the vm's physical cores when shutting down a lapic_pt vm.

---
v7 -> v8:
- Reuse PCPU_STATE_DEAD instead of introducing a new one 'PCPU_STATE_INVALID'.
- Call make_pcpu_offline in shutdown_vm directly.

v6 -> v7:
- Add one patch to remove the CPU_UP_TIMEOUT
- Extend stop_cpus/start_cpus in order to use them to reset cpu instead of introducing reset_cpus.

v5 -> v6:
- Move panic code from start_cpus to init_cpu_post and host_enter_s3.
- Return errno to caller if timeout occurs when reset.

v4 -> v5:
- Use DM owned bit mask to clear DM set bit instead of HV owned.
- Panic when start_cpus fail
- Use pcpu_active_bitmap to check wheter pcpu is offlined.

v3 -> v4:
- Set cpu state as INVALID instead of dead loop when fail to start/stop the cpu.
- Clear all the DM set flags instead of the whole flags.

v2 -> v3:
- Just reset vm_config->guest_flags instead of whole structure.
- Wait the target cpu really enter into dead state before start_cpu and wrap it as reset_cpu.

v1 -> v2:
- Reset physical core of lapic_pt vm instead of save/restore lapic msrs.
- Change the cover letter title "Save/restore LAPIC MSRs for LAPIC_PT vm".

Kaige Fu (5):
HV: Clear DM set guest_flags when shutdown vm
HV: Kconfig: Remove CPU_UP_TIMEOUT
HV: Reshuffle start_cpus and start_cpu
HV: Reset physical core of lapic_pt vm when shutdown
HV: Remove dead loop in stop_cpus

hypervisor/arch/x86/Kconfig | 8 --
hypervisor/arch/x86/cpu.c | 96 +++++++++++---------
hypervisor/arch/x86/guest/vm.c | 19 +++-
hypervisor/arch/x86/pm.c | 6 +-
hypervisor/common/hypercall.c | 4 +-
hypervisor/include/arch/x86/cpu.h | 3 +-
hypervisor/include/lib/errno.h | 2 +
hypervisor/scenarios/sdc/vm_configurations.h | 5 +
8 files changed, 89 insertions(+), 54 deletions(-)

--
2.20.0


Re: [PATCH 3/3] dm: safely access MMIO hint in MMIO emulation

Shuo A Liu
 

On Wed 17.Apr'19 at 0:13:45 -0700, Peter Fang wrote:
mmio_hint in mem.c can potentially be accessed concurrently in
emulate_mem() because it only holds a read lock. Use a local variable to
Good catch.

make sure the same entry address is used throughout the function. Since
it only serves as a hint, it's okay if the function does not use the
most up-to-date version of mmio_hint, as long as mmio_hint is accessed
atomically.

Explicitly enforce natural alignment on mmio_hint to guarantee atomic
accesses on x86 and increase code portability, even though compilers
most likely always do it.

Entries in the RB tree are only removed in unregister_mem_int() while
holding a write lock, so accessing mmio_hint while holding a read lock
is safe.

Tracked-On: #2902
Signed-off-by: Peter Fang <peter.fang@...>
Thanks for the patch. A good fix:)
Reviewed-by: Shuo A Liu <shuo.a.liu@...>

---
devicemodel/core/mem.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/devicemodel/core/mem.c b/devicemodel/core/mem.c
index fffaeda9..83a2d58a 100644
--- a/devicemodel/core/mem.c
+++ b/devicemodel/core/mem.c
@@ -60,7 +60,7 @@ RB_PROTOTYPE_STATIC(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare)
* consecutive addresses in a range, it makes sense to cache the
* result of a lookup.
*/
-static struct mmio_rb_range *mmio_hint;
+static struct mmio_rb_range *mmio_hint __aligned(sizeof(struct mmio_rb_range *));

static pthread_rwlock_t mmio_rwlock;

@@ -156,16 +156,18 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
{
uint64_t paddr = mmio_req->address;
int size = mmio_req->size;
- struct mmio_rb_range *entry = NULL;
+ struct mmio_rb_range *hint, *entry = NULL;
int err;

pthread_rwlock_rdlock(&mmio_rwlock);
+
/*
* First check the per-VM cache
*/
- if (mmio_hint && paddr >= mmio_hint->mr_base &&
- paddr <= mmio_hint->mr_end)
- entry = mmio_hint;
+ hint = mmio_hint;
+
+ if (hint && paddr >= hint->mr_base && paddr <= hint->mr_end)
+ entry = hint;
else if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
/* Update the per-VMU cache */
mmio_hint = entry;
--
2.18.0

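The essential trick in this patch is reading the shared hint pointer exactly once into a local, so every later use inside the function sees the same entry even if another thread updates the hint concurrently. A reduced, self-contained sketch of that pattern; the structure names are shortened and the RB-tree lookup is replaced by a linear scan, but the rwlock usage mirrors the patch:

```c
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t base;
	uint64_t end;
};

static struct range table[2] = { {0x1000, 0x1fff}, {0x2000, 0x2fff} };

/* Shared, naturally aligned pointer: aligned loads/stores are atomic on x86. */
static struct range *hint_cache __attribute__((aligned(sizeof(struct range *))));
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for the RB-tree lookup in mem.c. */
static struct range *lookup_slow(uint64_t paddr)
{
	for (size_t i = 0; i < 2; i++) {
		if (paddr >= table[i].base && paddr <= table[i].end)
			return &table[i];
	}
	return NULL;
}

static struct range *find_range(uint64_t paddr)
{
	struct range *hint, *entry = NULL;

	pthread_rwlock_rdlock(&lock);

	hint = hint_cache;                    /* read the shared hint exactly once */
	if (hint != NULL && paddr >= hint->base && paddr <= hint->end)
		entry = hint;                 /* every use below sees this same snapshot */
	else if ((entry = lookup_slow(paddr)) != NULL)
		hint_cache = entry;           /* benign update: the hint is only a cache */

	pthread_rwlock_unlock(&lock);
	return entry;
}

int main(void)
{
	printf("0x1800 -> %p\n", (void *)find_range(0x1800));  /* slow lookup, hint updated */
	printf("0x1900 -> %p\n", (void *)find_range(0x1900));  /* served from the hint */
	return 0;
}
```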



Re: [PATCH 2/3] dm: completely remove enable_bar()/disable_bar() functions

Shuo A Liu
 

On Wed 17.Apr'19 at 0:13:44 -0700, Peter Fang wrote:
Following up on d648df766c263512c93356a55ff97cb3e23fe3e4, surgically
remove all the functions related to enable_bar()/disable_bar() that got
introduced in 8787b65fdee622639925ad75fe920913e0e7f102.

Tracked-On: #2902
Signed-off-by: Peter Fang <peter.fang@...>
Minor comment inline.

Reviewed-by: Shuo A Liu <shuo.a.liu@...>

---
devicemodel/core/inout.c | 44 -------------------
devicemodel/core/mem.c | 88 ++++---------------------------------
devicemodel/include/inout.h | 2 -
devicemodel/include/mem.h | 5 +--
4 files changed, 9 insertions(+), 130 deletions(-)

diff --git a/devicemodel/core/inout.c b/devicemodel/core/inout.c
index daa8add9..a16f3db5 100644
--- a/devicemodel/core/inout.c
+++ b/devicemodel/core/inout.c
@@ -26,7 +26,6 @@
* $FreeBSD$
*/

-#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
@@ -45,7 +44,6 @@ static struct {
int flags;
inout_func_t handler;
void *arg;
- bool enabled;
} inout_handlers[MAX_IOPORTS];

static int
@@ -115,11 +113,6 @@ emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
if (!(flags & IOPORT_F_OUT))
return -1;
}
-
- if (inout_handlers[port].enabled == false) {
- return -1;
- }
-
retval = handler(ctx, *pvcpu, in, port, bytes,
(uint32_t *)&(pio_request->value), arg);
return retval;
@@ -148,42 +141,6 @@ init_inout(void)
}
}

-int
-disable_inout(struct inout_port *iop)
-{
- int i;
-
- if (!VERIFY_IOPORT(iop->port, iop->size)) {
- printf("invalid input: port:0x%x, size:%d",
- iop->port, iop->size);
- return -1;
- }
-
- for (i = iop->port; i < iop->port + iop->size; i++) {
- inout_handlers[i].enabled = false;
- }
-
- return 0;
-}
-
-int
-enable_inout(struct inout_port *iop)
-{
- int i;
-
- if (!VERIFY_IOPORT(iop->port, iop->size)) {
- printf("invalid input: port:0x%x, size:%d",
- iop->port, iop->size);
- return -1;
- }
-
- for (i = iop->port; i < iop->port + iop->size; i++) {
- inout_handlers[i].enabled = true;
- }
-
- return 0;
-}
-
int
register_inout(struct inout_port *iop)
{
@@ -211,7 +168,6 @@ register_inout(struct inout_port *iop)
inout_handlers[i].flags = iop->flags;
inout_handlers[i].handler = iop->handler;
inout_handlers[i].arg = iop->arg;
- inout_handlers[i].enabled = true;
}

return 0;
diff --git a/devicemodel/core/mem.c b/devicemodel/core/mem.c
index 4994eb22..fffaeda9 100644
--- a/devicemodel/core/mem.c
+++ b/devicemodel/core/mem.c
@@ -33,7 +33,6 @@
*/

#include <errno.h>
-#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
@@ -51,7 +50,6 @@ struct mmio_rb_range {
struct mem_range mr_param;
uint64_t mr_base;
uint64_t mr_end;
- bool enabled;
};

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
@@ -168,25 +166,18 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
if (mmio_hint && paddr >= mmio_hint->mr_base &&
paddr <= mmio_hint->mr_end)
entry = mmio_hint;
-
- if (entry == NULL) {
- if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
- /* Update the per-VMU cache */
- mmio_hint = entry;
- else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
- pthread_rwlock_unlock(&mmio_rwlock);
- return -ESRCH;
- }
- }
-
- assert(entry != NULL);
-
- if (entry->enabled == false) {
+ else if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
+ /* Update the per-VMU cache */
per-VM?

+ mmio_hint = entry;
+ else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
pthread_rwlock_unlock(&mmio_rwlock);
- return -1;
+ return -ESRCH;
}
+
pthread_rwlock_unlock(&mmio_rwlock);

+ assert(entry != NULL);
+
if (mmio_req->direction == REQUEST_READ)
err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
size, &entry->mr_param);
