
Re: [PATCH 4/6] DM USB: xHCI: refine port assignment logic

Wu, Xiaoguang
 

On 18-08-13 21:40:58, Yu Wang wrote:
On 18-08-13 16:48:33, Wu, Xiaoguang wrote:
The variable native_assign_ports in struct pci_xhci_vdev is used
to record whether a certain root hub port in the SOS is assigned
to the UOS. The logic is: zero means reserved and nonzero means
assigned.

In this patch, the meaning of native_assign_ports is expanded to the
following logic:
native_assign_ports[x][y] = 0: native port x-y is not assigned.
native_assign_ports[x][y] < 0: native port x-y is assigned but
currently no virtual device attached.
native_assign_ports[x][y] > 0: native port x-y is assigned and
virtual device attached to virtual port whose number
is stored in native_assign_ports[x][y].

Signed-off-by: Xiaoguang Wu <xiaoguang.wu@...>
Reviewed-by: Liang Yang <liang3.yang@...>
---
devicemodel/hw/pci/xhci.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/devicemodel/hw/pci/xhci.c b/devicemodel/hw/pci/xhci.c
index f12126b..2df7ce3 100644
--- a/devicemodel/hw/pci/xhci.c
+++ b/devicemodel/hw/pci/xhci.c
@@ -373,7 +373,16 @@ struct pci_xhci_vdev {
int (*excap_write)(struct pci_xhci_vdev *, uint64_t, uint64_t);
int usb2_port_start;
int usb3_port_start;
- uint8_t *native_assign_ports[USB_NATIVE_NUM_BUS];
+
+ /* native_assign_ports is used to record the assignment information:
+ * native_assign_ports[x][y] = 0: native port x-y is not assigned.
+ * native_assign_ports[x][y] < 0: native port x-y is assigned but
+ * currently no virtual device attached.
+ * native_assign_ports[x][y] > 0: native port x-y is assigned and
For "= 0" and "< 0", please define some macros...

xgwu:

will use macros to express this, like the following (a fuller sketch
follows after the patch):

#define NATIVE_PORT_ASSIGNED(bus, port) \
    (native_assign_ports[(bus)][(port)] != 0)

ok, will change. thanks



+ * virtual device attached to virtual port whose number
+ * is stored in native_assign_ports[x][y].
+ */
+ int8_t *native_assign_ports[USB_NATIVE_NUM_BUS];
struct timespec mf_prev_time; /* previous time of accessing MFINDEX */
};

@@ -3365,14 +3374,14 @@ pci_xhci_parse_bus_port(struct pci_xhci_vdev *xdev, char *opts)

if (!xdev->native_assign_ports[bus]) {
xdev->native_assign_ports[bus] = calloc(USB_NATIVE_NUM_PORT,
- sizeof(uint8_t));
+ sizeof(int8_t));
if (!xdev->native_assign_ports[bus]) {
rc = -3;
goto errout;
}
}
+ xdev->native_assign_ports[bus][port] = -1;

- xdev->native_assign_ports[bus][port] = 1;
errout:
if (rc)
UPRINTF(LWRN, "%s fails, rc=%d\r\n", __func__, rc);
--
2.7.4
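
For illustration, a minimal sketch of the tri-state helper macros discussed
above; only NATIVE_PORT_ASSIGNED was proposed in the thread, the other
names are hypothetical:

/* hypothetical state-checking macros over the tri-state table */
#define NATIVE_PORT_FREE(xdev, bus, port) \
    ((xdev)->native_assign_ports[(bus)][(port)] == 0)
#define NATIVE_PORT_ASSIGNED(xdev, bus, port) \
    ((xdev)->native_assign_ports[(bus)][(port)] != 0)
#define NATIVE_PORT_CONNECTED(xdev, bus, port) \
    ((xdev)->native_assign_ports[(bus)][(port)] > 0)
/* for a connected port, the stored positive value is the virtual port
 * number the device is attached to */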





Re: [PATCH 3/6] DM USB: introduce struct usb_native_devinfo

Wu, Xiaoguang
 

On 18-08-13 21:37:26, Yu Wang wrote:
On 18-08-13 16:48:32, Wu, Xiaoguang wrote:
Current design cannot get physical USB device information without
the creation of pci_xhci_dev_emu. This brings some dificulties in
dificulties?
xgwu:

will change, thanks.



certain situations, hence struct usb_native_devinfo is introduced
to describe the necessary information to solve this problem.

Signed-off-by: Xiaoguang Wu <xiaoguang.wu@...>
Reviewed-by: Liang Yang <liang3.yang@...>
---
devicemodel/hw/pci/xhci.c | 31 ++++++++++++-------------------
devicemodel/hw/platform/usb_pmapper.c | 14 +++++++++++++-
devicemodel/include/usb_core.h | 10 ++++++++++
3 files changed, 35 insertions(+), 20 deletions(-)

diff --git a/devicemodel/hw/pci/xhci.c b/devicemodel/hw/pci/xhci.c
index 490219a..f12126b 100644
--- a/devicemodel/hw/pci/xhci.c
+++ b/devicemodel/hw/pci/xhci.c
@@ -469,23 +469,22 @@ pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data)
struct pci_xhci_dev_emu *de;
struct pci_xhci_vdev *xdev;
struct usb_devemu *ue;
+ struct usb_native_devinfo *di;
int port_start, port_end;
int slot_start, slot_end;
int port, slot;
void *ud;
- uint8_t native_bus, native_pid, native_port;
- uint16_t native_vid;
- int native_speed;
int intr = 1;

xdev = hci_data;
+ di = dev_data;

assert(xdev);
assert(dev_data);
assert(xdev->devices);
assert(xdev->slots);

- de = pci_xhci_dev_create(xdev, dev_data);
+ de = pci_xhci_dev_create(xdev, di->priv_data);
if (!de) {
UPRINTF(LFTL, "fail to create device\r\n");
return -1;
@@ -499,26 +498,20 @@ pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data)
assert(ue);

/* print physical information about new device */
- ue->ue_info(ud, USB_INFO_BUS, &native_bus, sizeof(native_bus));
- ue->ue_info(ud, USB_INFO_PORT, &native_port, sizeof(native_port));
- ue->ue_info(ud, USB_INFO_VID, &native_vid, sizeof(native_vid));
- ue->ue_info(ud, USB_INFO_PID, &native_pid, sizeof(native_pid));
- ue->ue_info(ud, USB_INFO_SPEED, &native_speed, sizeof(native_speed));
Then usb_dev_info is not useful now? Should we clear it?
xgwu:

try to change, thanks.



UPRINTF(LDBG, "%04x:%04x %d-%d connecting.\r\n",
- native_vid, native_pid, native_bus, native_port);
+ di->vid, di->pid, di->bus, di->port);

- if (!xdev->native_assign_ports[native_bus] ||
- !xdev->native_assign_ports[native_bus][native_port]) {
+ if (!xdev->native_assign_ports[di->bus] ||
+ !xdev->native_assign_ports[di->bus][di->port]) {
UPRINTF(LDBG, "%04x:%04x %d-%d doesn't belong to this vm, bye."
- "\r\n", native_vid, native_pid, native_bus,
- native_port);
+ "\r\n", di->vid, di->pid, di->bus, di->port);
goto errout;
}

- UPRINTF(LDBG, "%04x:%04x %d-%d belong to this vm.\r\n", native_vid,
- native_pid, native_bus, native_port);
+ UPRINTF(LDBG, "%04x:%04x %d-%d belong to this vm.\r\n", di->vid,
+ di->pid, di->bus, di->port);

- if (ue->ue_usbver == 2)
+ if (di->bcd < 0x300)
port_start = xdev->usb2_port_start;
else
port_start = xdev->usb3_port_start;
@@ -550,11 +543,11 @@ pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data)

pci_xhci_reset_slot(xdev, slot);
UPRINTF(LDBG, "%X:%X %d-%d locates in slot %d port %d.\r\n",
- native_vid, native_pid, native_bus, native_port,
+ di->vid, di->pid, di->bus, di->port,
slot, port);

/* Trigger port change event for the arriving device */
- if (pci_xhci_port_connect(xdev, port, native_speed, intr))
+ if (pci_xhci_port_connect(xdev, port, di->speed, intr))
UPRINTF(LFTL, "fail to report port event\n");

return 0;
diff --git a/devicemodel/hw/platform/usb_pmapper.c b/devicemodel/hw/platform/usb_pmapper.c
index 9f57f1c..a9c8608 100644
--- a/devicemodel/hw/platform/usb_pmapper.c
+++ b/devicemodel/hw/platform/usb_pmapper.c
@@ -1020,6 +1020,9 @@ static int
usb_dev_native_sys_conn_cb(struct libusb_context *ctx, struct libusb_device
*ldev, libusb_hotplug_event event, void *pdata)
{
+ struct libusb_device_descriptor d;
+ struct usb_native_devinfo di;
+
UPRINTF(LDBG, "connect event\r\n");

if (!ctx || !ldev) {
@@ -1027,8 +1030,17 @@ usb_dev_native_sys_conn_cb(struct libusb_context *ctx, struct libusb_device
return -1;
}

+ libusb_get_device_descriptor(ldev, &d);
+ di.bus = libusb_get_bus_number(ldev);
+ di.speed = libusb_get_device_speed(ldev);
+ di.port = libusb_get_port_number(ldev);
+ di.pid = d.idProduct;
+ di.vid = d.idVendor;
+ di.bcd = d.bcdUSB;
+ di.priv_data = ldev;
+
if (g_ctx.conn_cb)
- g_ctx.conn_cb(g_ctx.hci_data, ldev);
+ g_ctx.conn_cb(g_ctx.hci_data, &di);

return 0;
}
diff --git a/devicemodel/include/usb_core.h b/devicemodel/include/usb_core.h
index 3a9648a..8727b8b 100644
--- a/devicemodel/include/usb_core.h
+++ b/devicemodel/include/usb_core.h
@@ -163,6 +163,16 @@ struct usb_data_xfer {
pthread_mutex_t mtx;
};

+struct usb_native_devinfo {
+ int speed;
+ uint8_t bus;
+ uint8_t port;
+ uint16_t bcd;
+ uint16_t pid;
+ uint16_t vid;
+ void *priv_data;
+};
This new structure is confusing to me. There is already a structure,
struct usb_dev, which includes native USB device information. Why define
a new one? Should we refine struct usb_dev to replace these native
description members?

xgwu:

will try to merge them into one. Thanks.



+
enum USB_ERRCODE {
USB_ACK,
USB_NAK,
--
2.7.4
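
A note on the di->bcd < 0x300 check above: bcdUSB in the device descriptor
is binary-coded decimal, so the comparison selects the root-port range by
USB version without needing ue_usbver:

/* bcdUSB examples: 0x0110 = USB 1.1, 0x0200 = USB 2.0, 0x0300 = USB 3.0;
 * anything below 0x300 therefore lands on a USB 2 root port */
if (di->bcd < 0x300)
    port_start = xdev->usb2_port_start;
else
    port_start = xdev->usb3_port_start;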





Re: [PATCH 2/6] DM USB: xHCI: refine xHCI PORTSC Register related functions

Wu, Xiaoguang
 

On 18-08-13 21:19:37, Yu Wang wrote:
On 18-08-13 16:48:31, Wu, Xiaoguang wrote:
The PORTSC (Port Status and Control) register plays a very
important role in the USB subsystem. This patch refines the
related manipulation functions.

Signed-off-by: Xiaoguang Wu <xiaoguang.wu@...>
Reviewed-by: Liang Yang <liang3.yang@...>
---
devicemodel/hw/pci/xhci.c | 76 ++++++++++++++++++++++-------------------------
1 file changed, 35 insertions(+), 41 deletions(-)

diff --git a/devicemodel/hw/pci/xhci.c b/devicemodel/hw/pci/xhci.c
index 792d930..490219a 100644
--- a/devicemodel/hw/pci/xhci.c
+++ b/devicemodel/hw/pci/xhci.c
@@ -439,11 +439,14 @@ static void pci_xhci_update_ep_ring(struct pci_xhci_vdev *xdev,
struct xhci_endp_ctx *ep_ctx,
uint32_t streamid, uint64_t ringaddr,
int ccs);
-static void pci_xhci_init_port(struct pci_xhci_vdev *xdev, int portn);
+static void pci_xhci_port_init(struct pci_xhci_vdev *xdev, int portn);
+static int pci_xhci_port_connect(struct pci_xhci_vdev *xdev, int port,
+ int usb_speed, int intr);
+static int pci_xhci_port_disconnect(struct pci_xhci_vdev *xdev, int port,
+ int intr);
static struct pci_xhci_dev_emu *pci_xhci_dev_create(struct pci_xhci_vdev *
xdev, void *dev_data);
static void pci_xhci_dev_destroy(struct pci_xhci_dev_emu *de);
-static int pci_xhci_port_chg(struct pci_xhci_vdev *xdev, int port, int conn);
static void pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port,
uint32_t errcode, uint32_t evtype);
static int pci_xhci_xfer_complete(struct pci_xhci_vdev *xdev,
@@ -472,6 +475,8 @@ pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data)
void *ud;
uint8_t native_bus, native_pid, native_port;
uint16_t native_vid;
+ int native_speed;
+ int intr = 1;
Can we remove the intr variable? It isn't changed in this function and
could be passed to pci_xhci_port_disconnect directly.

xgwu:

I will remove intr and add it back in a later patch as 'need_intr',
including in the function parameters. Thanks.




xdev = hci_data;

@@ -498,6 +503,7 @@ pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data)
ue->ue_info(ud, USB_INFO_PORT, &native_port, sizeof(native_port));
ue->ue_info(ud, USB_INFO_VID, &native_vid, sizeof(native_vid));
ue->ue_info(ud, USB_INFO_PID, &native_pid, sizeof(native_pid));
+ ue->ue_info(ud, USB_INFO_SPEED, &native_speed, sizeof(native_speed));
UPRINTF(LDBG, "%04x:%04x %d-%d connecting.\r\n",
native_vid, native_pid, native_bus, native_port);

@@ -543,14 +549,12 @@ pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data)
xdev->ndevices++;

pci_xhci_reset_slot(xdev, slot);
- pci_xhci_init_port(xdev, port);
-
UPRINTF(LDBG, "%X:%X %d-%d locates in slot %d port %d.\r\n",
native_vid, native_pid, native_bus, native_port,
slot, port);

/* Trigger port change event for the arriving device */
- if (pci_xhci_port_chg(xdev, port, 1))
+ if (pci_xhci_port_connect(xdev, port, native_speed, intr))
UPRINTF(LFTL, "fail to report port event\n");

return 0;
@@ -566,6 +570,7 @@ pci_xhci_native_usb_dev_disconn_cb(void *hci_data, void *dev_data)
struct pci_xhci_dev_emu *edev;
struct usb_dev *udev;
uint8_t port, native_port;
+ int intr = 1;
Ditto.


assert(hci_data);
assert(dev_data);
@@ -595,7 +600,7 @@ pci_xhci_native_usb_dev_disconn_cb(void *hci_data, void *dev_data)
}

UPRINTF(LDBG, "report virtual port %d status\r\n", port);
- if (pci_xhci_port_chg(xdev, port, 0)) {
+ if (pci_xhci_port_disconnect(xdev, port, intr)) {
UPRINTF(LFTL, "fail to report event\r\n");
return -1;
}
@@ -784,32 +789,29 @@ pci_xhci_convert_speed(int lspeed)
}

static int
-pci_xhci_port_chg(struct pci_xhci_vdev *xdev, int port, int conn)
+pci_xhci_port_change(struct pci_xhci_vdev *xdev, int port, int usb_speed,
+ int conn, int intr)
{
int speed, error;
struct xhci_trb evtrb;
struct pci_xhci_portregs *reg;
- struct pci_xhci_dev_emu *dev;

assert(xdev != NULL);

reg = XHCI_PORTREG_PTR(xdev, port);
- dev = XHCI_DEVINST_PTR(xdev, port);
- if (!dev || !dev->dev_ue || !reg) {
- UPRINTF(LWRN, "find nullptr with port %d\r\n", port);
- return -1;
- }
-
if (conn == 0) {
reg->portsc &= ~XHCI_PS_CCS;
reg->portsc |= (XHCI_PS_CSC |
XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET));
} else {
- speed = pci_xhci_convert_speed(dev->dev_ue->ue_usbspeed);
+ speed = pci_xhci_convert_speed(usb_speed);
reg->portsc = XHCI_PS_CCS | XHCI_PS_PP | XHCI_PS_CSC;
reg->portsc |= XHCI_PS_SPEED_SET(speed);
}

+ if (!intr)
+ return 0;
What is the meaning of intr? From this patch, it looks almost useless.
xgwu:

will change to need_intr, thanks



+
/* make an event for the guest OS */
pci_xhci_set_evtrb(&evtrb,
port,
@@ -825,6 +827,20 @@ pci_xhci_port_chg(struct pci_xhci_vdev *xdev, int port, int conn)
return (error == XHCI_TRB_ERROR_SUCCESS) ? 0 : -1;
}

+static int
+pci_xhci_port_connect(struct pci_xhci_vdev *xdev, int port, int usb_speed,
+ int intr)
+{
+ return pci_xhci_port_change(xdev, port, usb_speed, 1, intr);
+}
+
+static int
+pci_xhci_port_disconnect(struct pci_xhci_vdev *xdev, int port, int intr)
+{
+ /* for disconnect, the speed is useless */
+ return pci_xhci_port_change(xdev, port, 0, 0, intr);
+}
+
static void
pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode,
uint32_t evtype)
@@ -3206,32 +3222,10 @@ pci_xhci_reset_port(struct pci_xhci_vdev *xdev, int portn, int warm)
}

static void
-pci_xhci_init_port(struct pci_xhci_vdev *xdev, int portn)
+pci_xhci_port_init(struct pci_xhci_vdev *xdev, int portn)
Let's keep pci_xhci_init_port..
xgwu:

ok, will change. Thx



{
- struct pci_xhci_portregs *port;
- struct pci_xhci_dev_emu *dev;
-
- port = XHCI_PORTREG_PTR(xdev, portn);
- dev = XHCI_DEVINST_PTR(xdev, portn);
- if (dev) {
- port->portsc = XHCI_PS_CCS | /* connected */
- XHCI_PS_PP; /* port power */
-
- if (dev->dev_ue->ue_usbver == 2) {
- port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) |
- XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
- } else {
- port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) |
- XHCI_PS_PED | /* enabled */
- XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
- }
-
- UPRINTF(LDBG, "Init port %d 0x%x\n", portn, port->portsc);
- } else {
- port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP;
- UPRINTF(LDBG, "Init empty port %d 0x%x\n",
- portn, port->portsc);
- }
+ XHCI_PORTREG_PTR(xdev, portn)->portsc =
+ XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP;
}

static int
@@ -3567,7 +3561,7 @@ pci_xhci_parse_opts(struct pci_xhci_vdev *xdev, char *opts)

/* do not use the zero index element */
for (i = 1; i <= XHCI_MAX_DEVS; i++)
- pci_xhci_init_port(xdev, i);
+ pci_xhci_port_init(xdev, i);

errout:
if (rc) {
--
2.7.4
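
For context, a hypothetical call sequence using the new helpers (the port
number is made up; native_speed is whatever the backend reports):

/* report a device arriving on virtual port 5; the last argument (intr)
 * controls whether a port-status-change event TRB is queued for the guest */
if (pci_xhci_port_connect(xdev, 5, native_speed, 1))
    UPRINTF(LFTL, "fail to report port event\n");

/* ... later, report it leaving; the speed is irrelevant on disconnect */
if (pci_xhci_port_disconnect(xdev, 5, 1))
    UPRINTF(LFTL, "fail to report event\r\n");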





Re: [PATCH] hv: treewide: fix 'Shifting value too far'

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Shiqing Gao
Sent: Tuesday, August 14, 2018 10:55 AM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH] hv: treewide: fix 'Shifting value too far'




Re: [PATCH v2] HV: Enclose debug specific code with #ifdef HV_DEBUG

Eddie Dong
 

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Kaige Fu
Sent: Tuesday, August 14, 2018 10:58 AM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v2] HV: Enclose debug specific code with #ifdef
HV_DEBUG

There is some debug-specific code which doesn't run in the release
version, such as vmexit_time, vmexit_cnt, sbuf related code, etc.

This patch encloses this code with #ifdef HV_DEBUG.

---
v1 -> v2:
- Enclose sbuf related code with #ifdef HV_DEBUG

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/common/hv_main.c | 12 +++++++++---
hypervisor/common/hypercall.c | 7 +++++++
hypervisor/include/arch/x86/per_cpu.h | 4 +++-
3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index 15f1d9d..e3657f9 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -59,11 +59,13 @@ void vcpu_thread(struct vcpu *vcpu)
continue;
}

+#ifdef HV_DEBUG
vmexit_end = rdtsc();
if (vmexit_begin != 0UL) {
per_cpu(vmexit_time, vcpu->pcpu_id)[basic_exit_reason]
+= (vmexit_end - vmexit_begin);
}
+#endif
TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);

/* Restore guest TSC_AUX */
@@ -79,7 +81,9 @@ void vcpu_thread(struct vcpu *vcpu)
continue;
}

+#ifdef HV_DEBUG
vmexit_begin = rdtsc();
+#endif

vcpu->arch_vcpu.nrexits++;
/* Save guest TSC_AUX */
@@ -90,16 +94,18 @@ void vcpu_thread(struct vcpu *vcpu)
CPU_IRQ_ENABLE();
/* Dispatch handler */
ret = vmexit_handler(vcpu);
+ basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
if (ret < 0) {
pr_fatal("dispatch VM exit handler failed for reason"
- " %d, ret = %d!",
- vcpu->arch_vcpu.exit_reason & 0xFFFFU, ret);
+ " %d, ret = %d!", basic_exit_reason, ret);
vcpu_inject_gp(vcpu, 0U);
continue;
}

- basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
+#ifdef HV_DEBUG
per_cpu(vmexit_cnt, vcpu->pcpu_id)[basic_exit_reason]++;
+#endif
+
TRACE_2L(TRACE_VM_EXIT, basic_exit_reason, vcpu_get_rip(vcpu));
} while (1);
}
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index f8efb35..4d9a23b 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -788,6 +788,7 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
return ret;
}

+#ifdef HV_DEBUG
int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
{
struct sbuf_setup_param ssp;
@@ -808,6 +809,12 @@ int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)

return sbuf_share_setup(ssp.pcpu_id, ssp.sbuf_id, hva);
}
+#else
+int32_t hcall_setup_sbuf(__unused struct vm *vm, __unused uint64_t param)
+{
+ return -ENODEV;
+}
+#endif
We also need to apply this compile-option protection on the vmcall.c side for HC_SETUP_SBUF.

The rest is fine. Fixed it and PR with my ack.

int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
{
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index 4076e27..758f9c2 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -19,10 +19,12 @@
#include "arch/x86/guest/instr_emul.h"

struct per_cpu_region {
+#ifdef HV_DEBUG
uint64_t *sbuf[ACRN_SBUF_ID_MAX];
- uint64_t irq_count[NR_IRQS];
uint64_t vmexit_cnt[64];
uint64_t vmexit_time[64];
+#endif
+ uint64_t irq_count[NR_IRQS];
uint64_t softirq_pending;
uint64_t spurious;
uint64_t vmxon_region_pa;
--
2.7.4



Re: [PATCH] HV: io: drop REQ_STATE_FAILED

Eddie Dong
 

Acked-by: Eddie Dong <eddie.dong@...>

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Junjie Mao
Sent: Tuesday, August 14, 2018 7:23 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH] HV: io: drop REQ_STATE_FAILED




[PATCH] HV: io: drop REQ_STATE_FAILED

Junjie Mao
 

Now the DM has adopted the new VHM request state transitions and
REQ_STATE_FAILED is obsolete since neither VHM nor kernel mediators will set the
state to FAILED.

This patch drops the definition of REQ_STATE_FAILED in the hypervisor, makes
'processed' unsigned to make the compiler happy about typing, and simplifies
error handling in the following ways.

* (dm_)emulate_(pio|mmio)_post no longer returns an error code, by introducing a
constraint that these functions must be called after an I/O request
completes (which is the case in the current design) and assuming
handlers/VHM/DM will always give a value for reads (typically all 1's if the
requested address is invalid).

* emulate_io() now returns a positive value IOREQ_PENDING to indicate that the
request is sent to VHM. This mitigates a potential race between
dm_emulate_pio() and pio_instr_vmexit_handler() which can cause
emulate_pio_post() being called twice for the same request.

* Remove the 'processed' member in io_request. Previously this mirrored the
state of the VHM request, which terminates at either COMPLETE or FAILED. After
the FAILED state is removed, the terminal state will always be COMPLETE.
Thus the mirrored 'processed' member is no longer useful.

Note that emulate_instruction() will always succeed after a reshuffle, and this
patch takes that assumption in advance. This does not hurt as that returned
value is not currently handled.

This patch makes it explicit that I/O emulation is not expected to fail. One
issue remains, though, which occurs when a non-aligned cross-boundary access
happens. Currently the hypervisor, VHM and DM adopt different policies:

* Hypervisor: inject #GP if it detects that the access crossed boundary

* VHM: deliver to DM if the access does not completely fall in the range of a
client

* DM: a handler covering part of the to-be-accessed region is picked and
assertion failure can be triggered.

A high-level design covering all these components (in addition to instruction
emulation) is needed for this. Thus this patch does not yet cover the issue.

Signed-off-by: Junjie Mao <junjie.mao@...>
---
hypervisor/arch/x86/ept.c | 12 ++---
hypervisor/arch/x86/guest/vlapic.c | 1 -
hypervisor/arch/x86/io.c | 84 ++++++++++++++-------------------
hypervisor/common/io_request.c | 6 +--
hypervisor/dm/vioapic.c | 1 -
hypervisor/include/arch/x86/ioreq.h | 12 ++---
hypervisor/include/public/acrn_common.h | 11 +----
7 files changed, 49 insertions(+), 78 deletions(-)

diff --git a/hypervisor/arch/x86/ept.c b/hypervisor/arch/x86/ept.c
index ed1e245..01cf6f2 100644
--- a/hypervisor/arch/x86/ept.c
+++ b/hypervisor/arch/x86/ept.c
@@ -156,7 +156,6 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
exit_qual = vcpu->arch_vcpu.exit_qualification;

io_req->type = REQ_MMIO;
- io_req->processed = REQ_STATE_PENDING;

/* Specify if read or write operation */
if ((exit_qual & 0x2UL) != 0UL) {
@@ -214,14 +213,11 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)

status = emulate_io(vcpu, io_req);

- /* io_req is hypervisor-private. For requests sent to VHM,
- * io_req->processed will be PENDING till dm_emulate_mmio_post() is
- * called on vcpu resume. */
if (status == 0) {
- if (io_req->processed != REQ_STATE_PENDING) {
- status = emulate_mmio_post(vcpu, io_req);
- }
- }
+ emulate_mmio_post(vcpu, io_req);
+ } else if (status == IOREQ_PENDING) {
+ status = 0;
+ }

return status;

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 35be79c..5229443 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -2094,7 +2094,6 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
/* Can never happen due to the range of mmio_req->direction. */
}

- io_req->processed = REQ_STATE_COMPLETE;
return ret;
}

diff --git a/hypervisor/arch/x86/io.c b/hypervisor/arch/x86/io.c
index f61b778..e968d2b 100644
--- a/hypervisor/arch/x86/io.c
+++ b/hypervisor/arch/x86/io.c
@@ -16,35 +16,33 @@ static void complete_ioreq(struct vhm_request *vhm_req)

/**
* @pre io_req->type == REQ_PORTIO
+ *
+ * @remark This function must be called when \p io_req is completed, after
+ * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * request having transferred to the COMPLETE state.
*/
-static int32_t
+static void
emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
{
- int32_t status;
struct pio_request *pio_req = &io_req->reqs.pio;
uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);

- if (io_req->processed == REQ_STATE_COMPLETE) {
- if (pio_req->direction == REQUEST_READ) {
- uint64_t value = (uint64_t)pio_req->value;
- int32_t context_idx = vcpu->arch_vcpu.cur_context;
- uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
+ if (pio_req->direction == REQUEST_READ) {
+ uint64_t value = (uint64_t)pio_req->value;
+ uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);

- rax = ((rax) & ~mask) | (value & mask);
- vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
- }
- status = 0;
- } else {
- status = -1;
+ rax = ((rax) & ~mask) | (value & mask);
+ vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
}
-
- return status;
}

/**
* @pre vcpu->req.type == REQ_PORTIO
+ *
+ * @remark This function must be called after the VHM request corresponding to
+ * \p vcpu being transferred to the COMPLETE state.
*/
-int32_t dm_emulate_pio_post(struct vcpu *vcpu)
+void dm_emulate_pio_post(struct vcpu *vcpu)
{
uint16_t cur = vcpu->vcpu_id;
union vhm_request_buffer *req_buf = NULL;
@@ -60,35 +58,33 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
/* VHM emulation data already copy to req, mark to free slot now */
complete_ioreq(vhm_req);

- return emulate_pio_post(vcpu, io_req);
+ emulate_pio_post(vcpu, io_req);
}

/**
* @pre vcpu->req.type == REQ_MMIO
+ *
+ * @remark This function must be called when \p io_req is completed, after
+ * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * request having transferred to the COMPLETE state.
*/
-int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
+void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
{
- int32_t ret;
struct mmio_request *mmio_req = &io_req->reqs.mmio;

- if (io_req->processed == REQ_STATE_COMPLETE) {
- if (mmio_req->direction == REQUEST_READ) {
- /* Emulate instruction and update vcpu register set */
- ret = emulate_instruction(vcpu);
- } else {
- ret = 0;
- }
- } else {
- ret = 0;
+ if (mmio_req->direction == REQUEST_READ) {
+ /* Emulate instruction and update vcpu register set */
+ emulate_instruction(vcpu);
}
-
- return ret;
}

/**
* @pre vcpu->req.type == REQ_MMIO
+ *
+ * @remark This function must be called when \p io_req is completed, after the
+ * corresponding VHM request having transferred to the COMPLETE state.
*/
-int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
+void dm_emulate_mmio_post(struct vcpu *vcpu)
{
uint16_t cur = vcpu->vcpu_id;
struct io_request *io_req = &vcpu->req;
@@ -104,7 +100,7 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
/* VHM emulation data already copy to req, mark to free slot now */
complete_ioreq(vhm_req);

- return emulate_mmio_post(vcpu, io_req);
+ emulate_mmio_post(vcpu, io_req);
}

#ifdef CONFIG_PARTITION_MODE
@@ -123,15 +119,12 @@ void emulate_io_post(struct vcpu *vcpu)
{
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
- struct io_request *io_req = &vcpu->req;

req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
- io_req->processed = atomic_load32(&vhm_req->processed);

if ((vhm_req->valid == 0) ||
- ((io_req->processed != REQ_STATE_COMPLETE) &&
- (io_req->processed != REQ_STATE_FAILED))) {
+ (atomic_load32(&vhm_req->processed) != REQ_STATE_COMPLETE)) {
return;
}

@@ -205,7 +198,6 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
pr_fatal("Err:IO, port 0x%04x, size=%hu spans devices",
port, size);
status = -EIO;
- io_req->processed = REQ_STATE_FAILED;
break;
} else {
if (pio_req->direction == REQUEST_WRITE) {
@@ -221,9 +213,6 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
pr_dbg("IO read on port %04x, data %08x",
port, pio_req->value);
}
- /* TODO: failures in the handlers should be reflected
- * here. */
- io_req->processed = REQ_STATE_COMPLETE;
status = 0;
break;
}
@@ -266,7 +255,6 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
} else if (!((address >= base) && (address + size <= end))) {
pr_fatal("Err MMIO, address:0x%llx, size:%x",
address, size);
- io_req->processed = REQ_STATE_FAILED;
return -EIO;
} else {
/* Handle this MMIO operation */
@@ -284,6 +272,7 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
* deliver to VHM.
*
* @return 0 - Successfully emulated by registered handlers.
+ * @return IOREQ_PENDING - The I/O request is delivered to VHM.
* @return -EIO - The request spans multiple devices and cannot be emulated.
* @return Negative on other errors during emulation.
*/
@@ -303,7 +292,6 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
default:
/* Unknown I/O request type */
status = -EINVAL;
- io_req->processed = REQ_STATE_FAILED;
break;
}

@@ -329,6 +317,8 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
pr_fatal("Err:IO %s access to port 0x%04lx, size=%lu",
(pio_req->direction != REQUEST_READ) ? "read" : "write",
pio_req->address, pio_req->size);
+ } else {
+ status = IOREQ_PENDING;
}
#endif
}
@@ -347,7 +337,6 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
exit_qual = vcpu->arch_vcpu.exit_qualification;

io_req->type = REQ_PORTIO;
- io_req->processed = REQ_STATE_PENDING;
pio_req->size = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1UL;
pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
@@ -365,13 +354,10 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)

status = emulate_io(vcpu, io_req);

- /* io_req is hypervisor-private. For requests sent to VHM,
- * io_req->processed will be PENDING till dm_emulate_pio_post() is
- * called on vcpu resume. */
if (status == 0) {
- if (io_req->processed != REQ_STATE_PENDING) {
- status = emulate_pio_post(vcpu, io_req);
- }
+ emulate_pio_post(vcpu, io_req);
+ } else if (status == IOREQ_PENDING) {
+ status = 0;
}

return status;
diff --git a/hypervisor/common/io_request.c b/hypervisor/common/io_request.c
index 7897d6c..a4ca77e 100644
--- a/hypervisor/common/io_request.c
+++ b/hypervisor/common/io_request.c
@@ -143,7 +143,7 @@ static void local_get_req_info_(struct vhm_request *req, int *id, char *type,

switch (req->processed) {
case REQ_STATE_COMPLETE:
- (void)strcpy_s(state, 16U, "SUCCESS");
+ (void)strcpy_s(state, 16U, "COMPLETE");
break;
case REQ_STATE_PENDING:
(void)strcpy_s(state, 16U, "PENDING");
@@ -151,8 +151,8 @@ static void local_get_req_info_(struct vhm_request *req, int *id, char *type,
case REQ_STATE_PROCESSING:
(void)strcpy_s(state, 16U, "PROCESS");
break;
- case REQ_STATE_FAILED:
- (void)strcpy_s(state, 16U, "FAILED");
+ case REQ_STATE_FREE:
+ (void)strcpy_s(state, 16U, "FREE");
break;
default:
(void)strcpy_s(state, 16U, "UNKNOWN");
diff --git a/hypervisor/dm/vioapic.c b/hypervisor/dm/vioapic.c
index 4692094..d1658ae 100644
--- a/hypervisor/dm/vioapic.c
+++ b/hypervisor/dm/vioapic.c
@@ -597,7 +597,6 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
ret = -EINVAL;
}

- io_req->processed = REQ_STATE_COMPLETE;
return ret;
}

diff --git a/hypervisor/include/arch/x86/ioreq.h b/hypervisor/include/arch/x86/ioreq.h
index b0b5891..23f5cc2 100644
--- a/hypervisor/include/arch/x86/ioreq.h
+++ b/hypervisor/include/arch/x86/ioreq.h
@@ -10,15 +10,15 @@
#include <types.h>
#include <acrn_common.h>

+/* The return value of emulate_io() indicating the I/O request is delivered to
+ * VHM but not finished yet. */
+#define IOREQ_PENDING 1
+
/* Internal representation of a I/O request. */
struct io_request {
/** Type of the request (PIO, MMIO, etc). Refer to vhm_request. */
uint32_t type;

- /** Status of request handling. Written by request handlers and read by
- * the I/O emulation framework. Refer to vhm_request. */
- int32_t processed;
-
/** Details of this request in the same format as vhm_request. */
union vhm_io_request reqs;
};
@@ -122,8 +122,8 @@ int register_mmio_emulation_handler(struct vm *vm,
uint64_t end, void *handler_private_data);
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
uint64_t end);
-int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
-int32_t dm_emulate_mmio_post(struct vcpu *vcpu);
+void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
+void dm_emulate_mmio_post(struct vcpu *vcpu);

int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
void emulate_io_post(struct vcpu *vcpu);
diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index df00de8..8e24602 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -30,7 +30,6 @@
#define REQ_STATE_PENDING 0
#define REQ_STATE_COMPLETE 1
#define REQ_STATE_PROCESSING 2
-#define REQ_STATE_FAILED -1

#define REQ_PORTIO 0U
#define REQ_MMIO 1U
@@ -97,8 +96,6 @@ union vhm_io_request {
* The state transitions of a VHM request are:
*
* FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
- * \ /
- * +--> FAILED -+
*
* When a request is in COMPLETE or FREE state, the request is owned by the
* hypervisor. SOS (VHM or DM) shall not read or write the internals of the
@@ -154,12 +151,6 @@ union vhm_io_request {
*
* 2. Due to similar reasons, setting state to COMPLETE is the last operation
* of request handling in VHM or clients in SOS.
- *
- * The state FAILED is an obsolete state to indicate that the I/O request cannot
- * be handled. In such cases the mediators and DM should switch the state to
- * COMPLETE with the value set to all 1s for read, and skip the request for
- * writes. This state WILL BE REMOVED after the mediators and DM are updated to
- * follow this rule.
*/
struct vhm_request {
/**
@@ -208,7 +199,7 @@ struct vhm_request {
*
* Byte offset: 136.
*/
- int32_t processed;
+ uint32_t processed;
} __aligned(256);

union vhm_request_buffer {
--
2.7.4
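
To summarize the new caller-side contract, condensed from the PIO vmexit
handler hunk above (the MMIO path is analogous):

status = emulate_io(vcpu, io_req);
if (status == 0) {
    /* handled inside the hypervisor; do the post-work right away */
    emulate_pio_post(vcpu, io_req);
} else if (status == IOREQ_PENDING) {
    /* delivered to VHM; post-work happens on vcpu resume via
     * emulate_io_post(), so this is not an error */
    status = 0;
}
/* any remaining negative status is a genuine emulation error */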


[PATCH v4 1/1] vbs: fix virtio_vq_index_get func handling of multi VQ concurrent request.

Ong Hock Yu <ong.hock.yu@...>
 

Under a multiple-VQ use case, it is possible to have concurrent requests.
Added support to return multiple vq indexes from all vcpus.

Change-Id: I558941fe9e1f524b91358fb9bec4c96277fd5681
Signed-off-by: Ong Hock Yu <ong.hock.yu@...>
---
drivers/vbs/vbs.c | 20 ++++++++++++++------
drivers/vbs/vbs_rng.c | 2 +-
include/linux/vbs/vbs.h | 8 ++++++--
3 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c
index 65b8a0bbe3d3..d3dcbe77ebee 100644
--- a/drivers/vbs/vbs.c
+++ b/drivers/vbs/vbs.c
@@ -148,9 +148,11 @@ long virtio_dev_deregister(struct virtio_dev_info *dev)
return 0;
}

-int virtio_vq_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map)
+int virtio_vqs_index_get(struct virtio_dev_info *dev,
+ unsigned long *ioreqs_map,
+ int *vqs_index, int max_vqs_index)
{
- int val = -1;
+ int idx = 0;
struct vhm_request *req;
int vcpu;

@@ -159,7 +161,13 @@ int virtio_vq_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map)
return -EINVAL;
}

- while (1) {
+ if (max_vqs_index < dev->_ctx.max_vcpu)
+ pr_info("%s: The allocated vqs size (%d) is smaller than the"
+ " number of vcpu (%d)! This might cause the processing of"
+ " some requests to be delayed.", __func__, max_vqs_index,
+ dev->_ctx.max_vcpu);
+
+ while (idx < max_vqs_index) {
vcpu = find_first_bit(ioreqs_map, dev->_ctx.max_vcpu);
if (vcpu == dev->_ctx.max_vcpu)
break;
@@ -179,9 +187,9 @@ int virtio_vq_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map)
pr_debug("%s: write request! type %d\n",
__func__, req->type);
if (dev->io_range_type == PIO_RANGE)
- val = req->reqs.pio_request.value;
+ vqs_index[idx++] = req->reqs.pio_request.value;
else
- val = req->reqs.mmio_request.value;
+ vqs_index[idx++] = req->reqs.mmio_request.value;
}
smp_mb();
atomic_set(&req->processed, REQ_STATE_COMPLETE);
@@ -189,7 +197,7 @@ int virtio_vq_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map)
}
}

- return val;
+ return idx;
}

static long virtio_vqs_info_set(struct virtio_dev_info *dev,
diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c
index fd2bb27af66e..c5e28cc12c55 100644
--- a/drivers/vbs/vbs_rng.c
+++ b/drivers/vbs/vbs_rng.c
@@ -268,7 +268,7 @@ static int handle_kick(int client_id, unsigned long *ioreqs_map)
return -EINVAL;
}

- val = virtio_vq_index_get(&rng->dev, ioreqs_map);
+ virtio_vqs_index_get(&rng->dev, ioreqs_map, &val, 1);

if (val >= 0)
handle_vq_kick(rng, val);
diff --git a/include/linux/vbs/vbs.h b/include/linux/vbs/vbs.h
index 30df8ebf68a0..964bacac865c 100644
--- a/include/linux/vbs/vbs.h
+++ b/include/linux/vbs/vbs.h
@@ -262,7 +262,7 @@ long virtio_dev_register(struct virtio_dev_info *dev);
long virtio_dev_deregister(struct virtio_dev_info *dev);

/**
- * virtio_vq_index_get - get virtqueue index that frontend kicks
+ * virtio_vqs_index_get - get virtqueue indexes that frontend kicks
*
* This API is normally called in the VBS-K device's callback
* function, to get value write to the "kick" register from
@@ -270,10 +270,14 @@ long virtio_dev_deregister(struct virtio_dev_info *dev);
*
* @dev: Pointer to VBS-K device data struct
* @ioreqs_map: requests bitmap need to handle, provided by VHM
+ * @vqs_index: array to store the vq indexes
+ * @max_vqs_index: size of vqs_index array
*
* Return: >=0 on virtqueue index, <0 on error
*/
-int virtio_vq_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map);
+int virtio_vqs_index_get(struct virtio_dev_info *dev,
+ unsigned long *ioreqs_map,
+ int *vqs_index, int max_vqs_index);

/**
* virtio_dev_reset - reset a VBS-K device
--
2.17.0
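
A sketch of how a multi-VQ device's kick handler might consume the new API;
MAX_VQS, priv and handle_vq_kick() are placeholders (the rng driver above
only ever asks for one index):

int i, n, vqs[MAX_VQS];

/* collect every pending vq index across all vcpus in one pass */
n = virtio_vqs_index_get(&priv->dev, ioreqs_map, vqs, MAX_VQS);
for (i = 0; i < n; i++)
    handle_vq_kick(priv, vqs[i]);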


[PATCH v2] HV: Enclose debug specific code with #ifdef HV_DEBUG

Kaige Fu
 

There is some debug-specific code which doesn't run in the release version,
such as vmexit_time, vmexit_cnt, sbuf related code, etc.

This patch encloses this code with #ifdef HV_DEBUG.

---
v1 -> v2:
- Enclose sbuf related code with #ifdef HV_DEBUG

Signed-off-by: Kaige Fu <kaige.fu@...>
---
hypervisor/common/hv_main.c | 12 +++++++++---
hypervisor/common/hypercall.c | 7 +++++++
hypervisor/include/arch/x86/per_cpu.h | 4 +++-
3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index 15f1d9d..e3657f9 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -59,11 +59,13 @@ void vcpu_thread(struct vcpu *vcpu)
continue;
}

+#ifdef HV_DEBUG
vmexit_end = rdtsc();
if (vmexit_begin != 0UL) {
per_cpu(vmexit_time, vcpu->pcpu_id)[basic_exit_reason]
+= (vmexit_end - vmexit_begin);
}
+#endif
TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);

/* Restore guest TSC_AUX */
@@ -79,7 +81,9 @@ void vcpu_thread(struct vcpu *vcpu)
continue;
}

+#ifdef HV_DEBUG
vmexit_begin = rdtsc();
+#endif

vcpu->arch_vcpu.nrexits++;
/* Save guest TSC_AUX */
@@ -90,16 +94,18 @@ void vcpu_thread(struct vcpu *vcpu)
CPU_IRQ_ENABLE();
/* Dispatch handler */
ret = vmexit_handler(vcpu);
+ basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
if (ret < 0) {
pr_fatal("dispatch VM exit handler failed for reason"
- " %d, ret = %d!",
- vcpu->arch_vcpu.exit_reason & 0xFFFFU, ret);
+ " %d, ret = %d!", basic_exit_reason, ret);
vcpu_inject_gp(vcpu, 0U);
continue;
}

- basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
+#ifdef HV_DEBUG
per_cpu(vmexit_cnt, vcpu->pcpu_id)[basic_exit_reason]++;
+#endif
+
TRACE_2L(TRACE_VM_EXIT, basic_exit_reason, vcpu_get_rip(vcpu));
} while (1);
}
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index f8efb35..4d9a23b 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -788,6 +788,7 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
return ret;
}

+#ifdef HV_DEBUG
int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
{
struct sbuf_setup_param ssp;
@@ -808,6 +809,12 @@ int32_t hcall_setup_sbuf(struct vm *vm, uint64_t param)

return sbuf_share_setup(ssp.pcpu_id, ssp.sbuf_id, hva);
}
+#else
+int32_t hcall_setup_sbuf(__unused struct vm *vm, __unused uint64_t param)
+{
+ return -ENODEV;
+}
+#endif

int32_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
{
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index 4076e27..758f9c2 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -19,10 +19,12 @@
#include "arch/x86/guest/instr_emul.h"

struct per_cpu_region {
+#ifdef HV_DEBUG
uint64_t *sbuf[ACRN_SBUF_ID_MAX];
- uint64_t irq_count[NR_IRQS];
uint64_t vmexit_cnt[64];
uint64_t vmexit_time[64];
+#endif
+ uint64_t irq_count[NR_IRQS];
uint64_t softirq_pending;
uint64_t spurious;
uint64_t vmxon_region_pa;
--
2.7.4


[PATCH] hv: treewide: fix 'Shifting value too far'

Shiqing Gao
 

MISRA-C requires that a shift operation not exceed the word length.

What this patch does:
- Add the precondition for 'init_lapic' regarding 'pcpu_id'
Currently, a maximum of 8 physical cpus is supported.
A re-design will be required if we would like to support more physical
cpus.
So, add the precondition here to avoid unintentional shift
operation mistakes.

- Replace the id type with uint8_t in 'vlapic_build_id'
- For VM0, it uses 'lapic_id' as its id, which is uint8_t.
- For non VM0, it uses 'vcpu_id' as its id, which is uint16_t.
Cast this id to uint8_t to make sure there is no loss of data after
left shifting by 24U.

Signed-off-by: Shiqing Gao <shiqing.gao@...>
---
hypervisor/arch/x86/guest/vlapic.c | 14 ++++++++------
hypervisor/arch/x86/lapic.c | 3 +++
2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 567270f..6cc1617 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -169,19 +169,21 @@ static inline uint32_t
vlapic_build_id(struct acrn_vlapic *vlapic)
{
struct vcpu *vcpu = vlapic->vcpu;
- uint16_t id;
+ uint8_t vlapic_id;
+ uint32_t lapic_regs_id;

if (is_vm0(vcpu->vm)) {
/* Get APIC ID sequence format from cpu_storage */
- id = per_cpu(lapic_id, vcpu->vcpu_id);
+ vlapic_id = per_cpu(lapic_id, vcpu->vcpu_id);
} else {
- id = vcpu->vcpu_id;
+ vlapic_id = (uint8_t)vcpu->vcpu_id;
}

- dev_dbg(ACRN_DBG_LAPIC, "vlapic APIC PAGE ID : 0x%08x",
- (id << APIC_ID_SHIFT));
+ lapic_regs_id = vlapic_id << APIC_ID_SHIFT;

- return (id << APIC_ID_SHIFT);
+ dev_dbg(ACRN_DBG_LAPIC, "vlapic APIC PAGE ID : 0x%08x", lapic_regs_id);
+
+ return lapic_regs_id;
}

static void
diff --git a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c
index aa35e0a..6c7752c 100644
--- a/hypervisor/arch/x86/lapic.c
+++ b/hypervisor/arch/x86/lapic.c
@@ -209,6 +209,9 @@ void early_init_lapic(void)
}
}

+/**
+ * @pre pcpu_id < 8U
+ */
void init_lapic(uint16_t pcpu_id)
{
/* Set the Logical Destination Register */
--
1.9.1


[PATCH] drm/i915: fix a kernel panic issue of plane restriction

He, Min <min.he@...>
 

When the plane restriction feature is enabled in the service OS, there
could be CRTCs without a primary plane. If we don't assign any plane of
pipe A to the SOS, e.g. i915.avail_planes_per_pipe=0x000F00, the kernel
panics during boot because intel_find_initial_plane_obj() assumes that a
primary plane exists.

Add a check for the primary plane of each CRTC to avoid this kind of
issue.

Signed-off-by: Min He <min.he@...>
---
drivers/gpu/drm/i915/intel_display.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 01b432438bac..6aecd4c150d1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15058,7 +15058,7 @@ int intel_modeset_init(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_initial_plane_config plane_config = {};

- if (!crtc->active)
+ if (!crtc->active || !crtc->base.primary)
continue;

/*
--
2.17.0


[RFC PATCH] hv: vioapic: simple vioapic set rte

Li, Fei1
 

1. Do nothing if nothing changed.
2. Update the ptdev (pass-through device) native ioapic RTE when (a)
something changed and (b) either the old or the new RTE interrupt mask
is unmasked.

Signed-off-by: Li, Fei1 <fei1.li@...>
---
hypervisor/dm/vioapic.c | 28 ++++++++++------------------
1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/hypervisor/dm/vioapic.c b/hypervisor/dm/vioapic.c
index 119eabe..7c1d95d 100644
--- a/hypervisor/dm/vioapic.c
+++ b/hypervisor/dm/vioapic.c
@@ -313,12 +313,12 @@ vioapic_indirect_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
if ((regnum >= IOAPIC_REDTBL) &&
(regnum < (IOAPIC_REDTBL + (pincount * 2U)))) {
uint32_t addr_offset = regnum - IOAPIC_REDTBL;
- uint32_t rte_offset = addr_offset / 2U;
+ uint32_t rte_offset = addr_offset >> 1U;
pin = rte_offset;

last = vioapic->rtbl[pin];
new = last;
- if ((addr_offset % 2U) != 0U) {
+ if ((addr_offset & 1U) != 0U) {
new.u.hi_32 = data;
} else {
new.u.lo_32 &= RTBL_RO_BITS;
@@ -336,11 +336,13 @@ vioapic_indirect_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
}

changed = last.full ^ new.full;
+ if (changed == 0UL) {
+ return;
+ }
/* pin0 from vpic mask/unmask */
if ((pin == 0U) && ((changed & IOAPIC_RTE_INTMASK) != 0UL)) {
/* mask -> umask */
- if (((last.full & IOAPIC_RTE_INTMASK) != 0UL) &&
- ((new.full & IOAPIC_RTE_INTMASK) == 0UL)) {
+ if ((last.full & IOAPIC_RTE_INTMASK) != 0UL) {
if ((vioapic->vm->wire_mode ==
VPIC_WIRE_NULL) ||
(vioapic->vm->wire_mode ==
@@ -354,8 +356,7 @@ vioapic_indirect_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
return;
}
/* unmask -> mask */
- } else if (((last.full & IOAPIC_RTE_INTMASK) == 0UL) &&
- ((new.full & IOAPIC_RTE_INTMASK) != 0UL)) {
+ } else {
if (vioapic->vm->wire_mode ==
VPIC_WIRE_IOAPIC) {
vioapic->vm->wire_mode =
@@ -363,9 +364,6 @@ vioapic_indirect_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
dev_dbg(ACRN_DBG_IOAPIC,
"vpic wire mode -> INTR");
}
- } else {
- /* Can never happen since IOAPIC_RTE_INTMASK
- * is changed. */
}
}
vioapic->rtbl[pin] = new;
@@ -405,15 +403,9 @@ vioapic_indirect_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
vioapic_send_intr(vioapic, pin);
}

- /* remap for active: interrupt mask -> unmask
- * remap for deactive: interrupt mask & vector set to 0
- * remap for trigger mode change
- * remap for polarity change
- */
- if ( ((changed & IOAPIC_RTE_INTMASK) != 0UL) ||
- ((changed & IOAPIC_RTE_TRGRMOD) != 0UL) ||
- ((changed & IOAPIC_RTE_INTPOL ) != 0UL) ) {
-
+ /* remap for ptdev */
+ if (((new.full & IOAPIC_RTE_INTMASK) == 0UL) ||
+ ((last.full & IOAPIC_RTE_INTMASK) == 0UL)) {
/* VM enable intr */
struct ptdev_intx_info intx;

--
2.7.4


[RFC PATCH] Simple vioapic set rte

Li, Fei1
 

Now the guest may change the "Destination Field", "Trigger Mode",
"Interrupt Input Pin Polarity" or even the "Interrupt Vector" while
"Interrupt Mask" is not masked. In this situation we should update the
pass-through device's interrupt pin RTE. The old logic updated it only
when "Interrupt Mask", "Trigger Mode" or "Interrupt Input Pin Polarity"
changed.

And a minor change: return early if nothing changed.

Li, Fei1 (1):
hv: vioapic: simple vioapic set rte

hypervisor/dm/vioapic.c | 28 ++++++++++------------------
1 file changed, 10 insertions(+), 18 deletions(-)

--
2.7.4


Re: [PATCH v2] tools: acrntrace: Add ring buffer mode

Kaige Fu
 

Hi Eddie,
On 08-13 Mon 15:49, Eddie Dong wrote:
We don't want to complicate the debug tools too much. I remember we can already set the size of the log file. That is enough.
Per the current implementation, we have removed the size limitation on the
log file and store the trace data in the current directory instead of /temp/.
It is the user's responsibility to avoid running out of disk space.

Thx Eddie

-----Original Message-----
From: acrn-dev@...
[mailto:acrn-dev@...] On Behalf Of Zhipeng Gong
Sent: Monday, August 13, 2018 9:31 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v2] tools: acrntrace: Add ring buffer mode

When running longevity tests and capturing acrntrace, the generated
acrntrace files are too big.
Often we don't care about very old traces. This patch adds a ring buffer
mode: it fixes the acrntrace file size and overwrites the oldest trace
with new trace data when the buffer is full.
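
A standalone sketch of the wrap-around bookkeeping this introduces
(element copy reduced to memcpy; names are illustrative):

#include <stdint.h>
#include <string.h>

/* copy one trace element into the ring and return the next write
 * position; wrap to 0 before the next element would run past the end,
 * so new data overwrites the oldest */
static int ring_put(uint8_t *ring, int ring_size, int pos,
                    const void *ele, int ele_size)
{
        memcpy(ring + pos, ele, ele_size);
        pos += ele_size;
        if (pos + ele_size > ring_size)
                pos = 0;
        return pos;
}

Note that after a wrap the dumped buffer is no longer in chronological
order: the oldest records sit just after the final write position.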

v2:
- update README.rst

Signed-off-by: Zhipeng Gong <zhipeng.gong@...>
---
tools/acrntrace/README.rst | 4 ++--
tools/acrntrace/acrntrace.c | 44 +++++++++++++++++++++++++++++++++++++++++---
tools/acrntrace/acrntrace.h | 1 +
3 files changed, 44 insertions(+), 5 deletions(-)

diff --git a/tools/acrntrace/README.rst b/tools/acrntrace/README.rst
index 433f161..f25c5b0 100644
--- a/tools/acrntrace/README.rst
+++ b/tools/acrntrace/README.rst
@@ -21,8 +21,8 @@ Options:
-i period specify polling interval in milliseconds [1-999]
-t max_time max time to capture trace data (in second)
-c clear the buffered old data
--r free_space amount of free space (in MB) remaining on the disk
- before acrntrace stops
+-r ring_buffer_size ring buffer size (in MB), which enables ring buffer
+ mode and overwrites old trace with new trace when full

The ``acrntrace_format.py`` is an offline tool for parsing trace data (as
output by acrntrace) to human-readable formats based on a given format.
diff --git a/tools/acrntrace/acrntrace.c b/tools/acrntrace/acrntrace.c
index 39fda35..e4b3899 100644
--- a/tools/acrntrace/acrntrace.c
+++ b/tools/acrntrace/acrntrace.c
@@ -37,16 +37,19 @@ static char trace_file_dir[TRACE_FILE_DIR_LEN];

static reader_struct *reader;
static int pcpu_num = 0;
+static int ring_buffer_size = 0;

static void display_usage(void)
{
printf("acrntrace - tool to collect ACRN trace data\n"
- "[Usage] acrntrace [-i] [period in msec] [-ch]\n\n"
+ "[Usage] acrntrace [-i] [period in msec] [-r] [ring buffer size]
[-ch]\n\n"
"[Options]\n"
"\t-h: print this message\n"
"\t-i: period_in_ms: specify polling interval [1-999]\n"
"\t-t: max time to capture trace data (in second)\n"
- "\t-c: clear the buffered old data\n");
+ "\t-c: clear the buffered old data\n"
+ "\t-r: ring_buffer_size (in MB), which enables ring buffer mode"
+ " and overwrites old trace with new trace when full\n");
}

static void timer_handler(union sigval sv)
@@ -113,6 +116,15 @@ static int parse_opt(int argc, char *argv[])
timeout = ret;
pr_dbg("Capture trace data for at most %ds\n", ret);
break;
+ case 'r':
+ ret = atoi(optarg);
+ if (ret <= 0) {
+ pr_err("'-r' requires an integer greater than 0\n");
+ return -EINVAL;
+ }
+ ring_buffer_size = ret * 1024 * 1024;
+ pr_dbg("Ring buffer size is %dM\n", ret);
+ break;
case 'c':
flags |= FLAG_CLEAR_BUF;
break;
@@ -206,6 +218,15 @@ static void reader_fn(param_t * param)
trace_ev_t e;
struct statvfs stat;
uint64_t freespace;
+ int pos = 0;
+
+ if (ring_buffer_size != 0) {
+ param->buffer = malloc(ring_buffer_size);
+ if (!param->buffer) {
+ perror("Failed to allocate ring buffer\n");
+ return;
+ }
+ }

pr_dbg("reader thread[%lu] created for FILE*[0x%p]\n",
pthread_self(), fp);
@@ -219,7 +240,13 @@ static void reader_fn(param_t * param)

while (1) {
do {
- ret = sbuf_write(fd, sbuf);
+ if (ring_buffer_size != 0) {
+ ret = sbuf_get(sbuf, param->buffer + pos);
+ /* advance the position only if an element was actually read */
+ if (ret > 0) {
+ pos += sbuf->ele_size;
+ if (pos + sbuf->ele_size > ring_buffer_size)
+ pos = 0;
+ }
+ } else
+ ret = sbuf_write(fd, sbuf);
} while (ret > 0);

usleep(period);
@@ -284,6 +311,17 @@ static void destory_reader(reader_struct * reader)
reader->thrd = 0;
}

+ if (ring_buffer_size != 0 && reader->param.buffer) {
+ int ret;
+
+ ret = write(reader->param.trace_fd, reader->param.buffer,
+ ring_buffer_size);
+ if (ret != ring_buffer_size) {
+ perror("Failed to write ring buffer\n");
+ }
+ free(reader->param.buffer);
+ }
+
if (reader->param.sbuf) {
munmap(reader->param.sbuf, MMAP_SIZE);
reader->param.sbuf = NULL;
diff --git a/tools/acrntrace/acrntrace.h b/tools/acrntrace/acrntrace.h
index ea54f4d..6615157 100644
--- a/tools/acrntrace/acrntrace.h
+++ b/tools/acrntrace/acrntrace.h
@@ -76,6 +76,7 @@ typedef struct {
int trace_fd;
shared_buf_t *sbuf;
pthread_mutex_t *sbuf_lock;
+ uint8_t *buffer;
} param_t;

typedef struct {
--
2.7.4





Re: [PATCH v2] tools: acrntrace: Add ring buffer mode

Kaige Fu
 

On 08-13 Mon 14:12, Geoffroy Van Cutsem wrote:


-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Zhipeng Gong
Sent: Monday, August 13, 2018 3:31 PM
To: acrn-dev@...
Subject: [acrn-dev] [PATCH v2] tools: acrntrace: Add ring buffer mode

When running longevity tests and capturing acrntrace, the generated
acrntrace files are too big.
Often we don't care about very old traces. This patch adds a ring buffer
mode: it fixes the acrntrace file size and overwrites the oldest trace
with new trace data when the buffer is full.

v2:
- update README.rst

Signed-off-by: Zhipeng Gong <zhipeng.gong@...>
---
tools/acrntrace/README.rst | 4 ++--
tools/acrntrace/acrntrace.c | 44 +++++++++++++++++++++++++++++++++++++++++---
tools/acrntrace/acrntrace.h | 1 +
3 files changed, 44 insertions(+), 5 deletions(-)

diff --git a/tools/acrntrace/README.rst b/tools/acrntrace/README.rst
index 433f161..f25c5b0 100644
--- a/tools/acrntrace/README.rst
+++ b/tools/acrntrace/README.rst
@@ -21,8 +21,8 @@ Options:
-i period specify polling interval in milliseconds [1-999]
-t max_time max time to capture trace data (in second)
-c clear the buffered old data
--r free_space amount of free space (in MB) remaining on the disk
- before acrntrace stops
+-r ring_buffer_size ring buffer size (in MB), which enables ring buffer
+ mode and overwrites old trace with new trace when full
Suggestion: s/when full/when reaching the end of the buffer

There is another paragraph later in the README.rst file that says:
512MB storage space will be reserved by default. This ensures that acrntrace
will exit automatically when the free storage space on the disk is less than
reserved space. Reserved space on the disk is configurable through '-r'.
This should be updated too, shouldn't it?
This paragraph has been removed.

In the current implementation, we store the trace data in the current
directory with no size limitation. Now, it's the user's responsibility to
avoid running out of disk space.

Thanks,
Geoffroy







Re: [PATCH 1/2] vhm: add ioeventfd support for ACRN hypervisor service module

Shuo A Liu
 

On Mon 13.Aug'18 at 17:03:44 +0800, Jie Deng wrote:

On 2018/8/13 14:40, Shuo Liu wrote:
+
+static struct acrn_vhm_ioeventfd *vhm_ioeventfd_match(
+ struct vhm_ioeventfd_info *info,
+ u64 addr, u64 data, int length, int type)
+{
+ struct acrn_vhm_ioeventfd *p = NULL;
+
+ list_for_each_entry(p, &info->ioeventfds, list) {
+ if (p->type == type &&
+ p->addr == addr &&
+ p->length == length &&
+ (p->data == data || p->wildcard))
+ return p;
Is it better to be "(p->wildcard || p->data == data)" ?
Sure. I will change to
(p->wildcard || p->data == data)

Thanks




[PATCH v2] HV: Add const qualifiers where required

Arindam Roy <arindam.roy@...>
 

V1:
In order to better comply with MISRA C,
add const qualifiers wherever required.
In this patch, they are added to pointers
which are normally used in "get" functions.

V2: Corrected the issues in the patch
pointed out by Junjie in his review comments.
Moved the const qualifiers to the correct
places. Removed some changes which were not
needed.
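
As a standalone sketch of the idea (hypothetical peek8() helper, not from
this patch): const on the pointer parameter of a read-only accessor makes
the compiler reject an accidental store through it.

#include <stdint.h>

static inline uint8_t peek8(const volatile uint8_t *addr)
{
        return *addr; /* reads are fine */
        /* *addr = 0U; would be a constraint violation at compile time */
}

int main(void)
{
        uint8_t reg = 0x5AU;

        /* non-const pointers convert to const-qualified ones implicitly */
        return (int)peek8(&reg);
}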

Signed-off-by: Arindam Roy <arindam.roy@...>
---
hypervisor/arch/x86/cpu_state_tbl.c | 2 +-
hypervisor/arch/x86/cpuid.c | 6 +++---
hypervisor/arch/x86/ept.c | 14 +++++++-------
hypervisor/arch/x86/guest/guest.c | 10 +++++-----
hypervisor/include/arch/x86/cpuid.h | 2 +-
hypervisor/include/arch/x86/guest/guest.h | 6 +++---
hypervisor/include/arch/x86/io.h | 18 +++++++++---------
hypervisor/include/arch/x86/mmu.h | 22 +++++++++++-----------
hypervisor/include/arch/x86/pgtable.h | 8 ++++----
hypervisor/include/arch/x86/vmx.h | 2 +-
10 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/cpu_state_tbl.c b/hypervisor/arch/x86/cpu_state_tbl.c
index 6f192dad..80093c96 100644
--- a/hypervisor/arch/x86/cpu_state_tbl.c
+++ b/hypervisor/arch/x86/cpu_state_tbl.c
@@ -89,7 +89,7 @@ static const struct cpu_state_table {
}
};

-static int get_state_tbl_idx(char *cpuname)
+static int get_state_tbl_idx(const char *cpuname)
{
int i;
int count = ARRAY_SIZE(cpu_state_tbl);
diff --git a/hypervisor/arch/x86/cpuid.c b/hypervisor/arch/x86/cpuid.c
index 18ebfcc7..215bdcce 100644
--- a/hypervisor/arch/x86/cpuid.c
+++ b/hypervisor/arch/x86/cpuid.c
@@ -8,7 +8,7 @@

extern bool x2apic_enabled;

-static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
+static inline struct vcpuid_entry *find_vcpuid_entry(const struct vcpu *vcpu,
uint32_t leaf_arg, uint32_t subleaf)
{
uint32_t i = 0U, nr, half;
@@ -66,7 +66,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
}

static inline int set_vcpuid_entry(struct vm *vm,
- struct vcpuid_entry *entry)
+ const struct vcpuid_entry *entry)
{
struct vcpuid_entry *tmp;
size_t entry_size = sizeof(struct vcpuid_entry);
@@ -273,7 +273,7 @@ int set_vcpuid_entries(struct vm *vm)
return 0;
}

-void guest_cpuid(struct vcpu *vcpu,
+void guest_cpuid(const struct vcpu *vcpu,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
diff --git a/hypervisor/arch/x86/ept.c b/hypervisor/arch/x86/ept.c
index a7bcb43e..6a327574 100644
--- a/hypervisor/arch/x86/ept.c
+++ b/hypervisor/arch/x86/ept.c
@@ -10,7 +10,7 @@

#define ACRN_DBG_EPT 6U

-static uint64_t find_next_table(uint32_t table_offset, void *table_base)
+static uint64_t find_next_table(uint32_t table_offset, const void *table_base)
{
uint64_t table_entry;
uint64_t table_present;
@@ -100,7 +100,7 @@ void destroy_ept(struct vm *vm)
free_ept_mem(vm->arch_vm.m2p);
}

-uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
+uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size)
{
uint64_t hpa = 0UL;
uint64_t *pgentry, pg_size = 0UL;
@@ -124,12 +124,12 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
}

/* using return value 0 as failure, make sure guest will not use hpa 0 */
-uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
+uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa)
{
return local_gpa2hpa(vm, gpa, NULL);
}

-uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)
+uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa)
{
uint64_t *pgentry, pg_size = 0UL;

@@ -284,7 +284,7 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
return status;
}

-int ept_mr_add(struct vm *vm, uint64_t hpa_arg,
+int ept_mr_add(const struct vm *vm, uint64_t hpa_arg,
uint64_t gpa_arg, uint64_t size, uint32_t prot_arg)
{
struct mem_map_params map_params;
@@ -323,7 +323,7 @@ int ept_mr_add(struct vm *vm, uint64_t hpa_arg,
return 0;
}

-int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size,
uint64_t prot_set, uint64_t prot_clr)
{
@@ -341,7 +341,7 @@ int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
return ret;
}

-int ept_mr_del(struct vm *vm, uint64_t *pml4_page,
+int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size)
{
struct vcpu *vcpu;
diff --git a/hypervisor/arch/x86/guest/guest.c b/hypervisor/arch/x86/guest/guest.c
index dfb33bca..47692e30 100644
--- a/hypervisor/arch/x86/guest/guest.c
+++ b/hypervisor/arch/x86/guest/guest.c
@@ -46,7 +46,7 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
return dmask;
}

-bool vm_lapic_disabled(struct vm *vm)
+bool vm_lapic_disabled(const struct vm *vm)
{
uint16_t i;
struct vcpu *vcpu;
@@ -276,7 +276,7 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
return ret;
}

-static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
+static inline uint32_t local_copy_gpa(const struct vm *vm, void *h_ptr, uint64_t gpa,
uint32_t size, uint32_t fix_pg_size, bool cp_from_vm)
{
uint64_t hpa;
@@ -308,7 +308,7 @@ static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
return len;
}

-static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
+static inline int copy_gpa(const struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
uint32_t size_arg, bool cp_from_vm)
{
void *h_ptr = h_ptr_arg;
@@ -385,12 +385,12 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
* - some other gpa from hypercall parameters, VHM should make sure it's
* continuous
*/
-int copy_from_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
+int copy_from_gpa(const struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
{
return copy_gpa(vm, h_ptr, gpa, size, 1);
}

-int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
+int copy_to_gpa(const struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
{
return copy_gpa(vm, h_ptr, gpa, size, 0);
}
diff --git a/hypervisor/include/arch/x86/cpuid.h b/hypervisor/include/arch/x86/cpuid.h
index 8fef50fc..35003928 100644
--- a/hypervisor/include/arch/x86/cpuid.h
+++ b/hypervisor/include/arch/x86/cpuid.h
@@ -132,7 +132,7 @@ static inline void cpuid_subleaf(uint32_t leaf, uint32_t subleaf,
}

int set_vcpuid_entries(struct vm *vm);
-void guest_cpuid(struct vcpu *vcpu,
+void guest_cpuid(const struct vcpu *vcpu,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);

diff --git a/hypervisor/include/arch/x86/guest/guest.h b/hypervisor/include/arch/x86/guest/guest.h
index 0e291a75..46ef2143 100644
--- a/hypervisor/include/arch/x86/guest/guest.h
+++ b/hypervisor/include/arch/x86/guest/guest.h
@@ -93,7 +93,7 @@ enum vm_paging_mode {
/*
* VM related APIs
*/
-bool vm_lapic_disabled(struct vm *vm);
+bool vm_lapic_disabled(const struct vm *vm);
uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask);

int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
@@ -121,8 +121,8 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu);
typedef int (*vm_sw_loader_t)(struct vm *vm, struct vcpu *vcpu);
extern vm_sw_loader_t vm_sw_loader;

-int copy_from_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
-int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
+int copy_from_gpa(const struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
+int copy_to_gpa(const struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
int copy_from_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
diff --git a/hypervisor/include/arch/x86/io.h b/hypervisor/include/arch/x86/io.h
index e2f4a8ec..e13dcb34 100644
--- a/hypervisor/include/arch/x86/io.h
+++ b/hypervisor/include/arch/x86/io.h
@@ -81,7 +81,7 @@ static inline uint32_t pio_read(uint16_t addr, size_t sz)
* @param value The 32 bit value to write.
* @param addr The memory address to write to.
*/
-static inline void mmio_write32(uint32_t value, void *addr)
+static inline void mmio_write32(uint32_t value, const void *addr)
{
volatile uint32_t *addr32 = (volatile uint32_t *)addr;
*addr32 = value;
@@ -92,7 +92,7 @@ static inline void mmio_write32(uint32_t value, void *addr)
* @param value The 16 bit value to write.
* @param addr The memory address to write to.
*/
-static inline void mmio_write16(uint16_t value, void *addr)
+static inline void mmio_write16(uint16_t value, const void *addr)
{
volatile uint16_t *addr16 = (volatile uint16_t *)addr;
*addr16 = value;
@@ -103,7 +103,7 @@ static inline void mmio_write16(uint16_t value, void *addr)
* @param value The 8 bit value to write.
* @param addr The memory address to write to.
*/
-static inline void mmio_write8(uint8_t value, void *addr)
+static inline void mmio_write8(uint8_t value, const void *addr)
{
volatile uint8_t *addr8 = (volatile uint8_t *)addr;
*addr8 = value;
@@ -115,7 +115,7 @@ static inline void mmio_write8(uint8_t value, void *addr)
*
* @return The 32 bit value read from the given address.
*/
-static inline uint32_t mmio_read32(void *addr)
+static inline uint32_t mmio_read32(const void *addr)
{
return *((volatile uint32_t *)addr);
}
@@ -126,7 +126,7 @@ static inline uint32_t mmio_read32(void *addr)
*
* @return The 16 bit value read from the given address.
*/
-static inline uint16_t mmio_read16(void *addr)
+static inline uint16_t mmio_read16(const void *addr)
{
return *((volatile uint16_t *)addr);
}
@@ -137,7 +137,7 @@ static inline uint16_t mmio_read16(void *addr)
*
* @return The 8 bit value read from the given address.
*/
-static inline uint8_t mmio_read8(void *addr)
+static inline uint8_t mmio_read8(const void *addr)
{
return *((volatile uint8_t *)addr);
}
@@ -150,7 +150,7 @@ static inline uint8_t mmio_read8(void *addr)
* @param mask The mask to apply to the value read.
* @param value The 32 bit value to write.
*/
-static inline void set32(void *addr, uint32_t mask, uint32_t value)
+static inline void set32(const void *addr, uint32_t mask, uint32_t value)
{
uint32_t temp_val;

@@ -165,7 +165,7 @@ static inline void set32(void *addr, uint32_t mask, uint32_t value)
* @param mask The mask to apply to the value read.
* @param value The 16 bit value to write.
*/
-static inline void set16(void *addr, uint16_t mask, uint16_t value)
+static inline void set16(const void *addr, uint16_t mask, uint16_t value)
{
uint16_t temp_val;

@@ -180,7 +180,7 @@ static inline void set16(void *addr, uint16_t mask, uint16_t value)
* @param mask The mask to apply to the value read.
* @param value The 8 bit value to write.
*/
-static inline void set8(void *addr, uint8_t mask, uint8_t value)
+static inline void set8(const void *addr, uint8_t mask, uint8_t value)
{
uint8_t temp_val;

diff --git a/hypervisor/include/arch/x86/mmu.h b/hypervisor/include/arch/x86/mmu.h
index 27699035..9918efe0 100644
--- a/hypervisor/include/arch/x86/mmu.h
+++ b/hypervisor/include/arch/x86/mmu.h
@@ -259,27 +259,27 @@ enum _page_table_present {
#define PAGE_SIZE_1G MEM_1G

/* Inline functions for reading/writing memory */
-static inline uint8_t mem_read8(void *addr)
+static inline uint8_t mem_read8(const void *addr)
{
return *(volatile uint8_t *)(addr);
}

-static inline uint16_t mem_read16(void *addr)
+static inline uint16_t mem_read16(const void *addr)
{
return *(volatile uint16_t *)(addr);
}

-static inline uint32_t mem_read32(void *addr)
+static inline uint32_t mem_read32(const void *addr)
{
return *(volatile uint32_t *)(addr);
}

-static inline uint64_t mem_read64(void *addr)
+static inline uint64_t mem_read64(const void *addr)
{
return *(volatile uint64_t *)(addr);
}

-static inline void mem_write8(void *addr, uint8_t data)
+static inline void mem_write8(const void *addr, uint8_t data)
{
volatile uint8_t *addr8 = (volatile uint8_t *)addr;
*addr8 = data;
@@ -379,15 +379,15 @@ static inline void clflush(volatile void *p)
bool is_ept_supported(void);
uint64_t create_guest_initial_paging(struct vm *vm);
void destroy_ept(struct vm *vm);
-uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
-uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
-uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
-int ept_mr_add(struct vm *vm, uint64_t hpa_arg,
+uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
+uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
+uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
+int ept_mr_add(const struct vm *vm, uint64_t hpa_arg,
uint64_t gpa_arg, uint64_t size, uint32_t prot_arg);
-int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size,
uint64_t prot_set, uint64_t prot_clr);
-int ept_mr_del(struct vm *vm, uint64_t *pml4_page,
+int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size);
void free_ept_mem(void *pml4_addr);
int ept_violation_vmexit_handler(struct vcpu *vcpu);
diff --git a/hypervisor/include/arch/x86/pgtable.h b/hypervisor/include/arch/x86/pgtable.h
index 51733611..ffa3b179 100644
--- a/hypervisor/include/arch/x86/pgtable.h
+++ b/hypervisor/include/arch/x86/pgtable.h
@@ -53,17 +53,17 @@ static inline uint64_t *pml4e_offset(uint64_t *pml4_page, uint64_t addr)
return pml4_page + pml4e_index(addr);
}

-static inline uint64_t *pdpte_offset(uint64_t *pml4e, uint64_t addr)
+static inline uint64_t *pdpte_offset(const uint64_t *pml4e, uint64_t addr)
{
return pml4e_page_vaddr(*pml4e) + pdpte_index(addr);
}

-static inline uint64_t *pde_offset(uint64_t *pdpte, uint64_t addr)
+static inline uint64_t *pde_offset(const uint64_t *pdpte, uint64_t addr)
{
return pdpte_page_vaddr(*pdpte) + pde_index(addr);
}

-static inline uint64_t *pte_offset(uint64_t *pde, uint64_t addr)
+static inline uint64_t *pte_offset(const uint64_t *pde, uint64_t addr)
{
return pde_page_vaddr(*pde) + pte_index(addr);
}
@@ -71,7 +71,7 @@ static inline uint64_t *pte_offset(uint64_t *pde, uint64_t addr)
/*
* pgentry may means pml4e/pdpte/pde/pte
*/
-static inline uint64_t get_pgentry(uint64_t *pte)
+static inline uint64_t get_pgentry(const uint64_t *pte)
{
return *pte;
}
diff --git a/hypervisor/include/arch/x86/vmx.h b/hypervisor/include/arch/x86/vmx.h
index 938a2017..ede9cf7b 100644
--- a/hypervisor/include/arch/x86/vmx.h
+++ b/hypervisor/include/arch/x86/vmx.h
@@ -449,7 +449,7 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value);
int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0);
int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4);

-static inline enum vm_cpu_mode get_vcpu_mode(struct vcpu *vcpu)
+static inline enum vm_cpu_mode get_vcpu_mode(const struct vcpu *vcpu)
{
return vcpu->arch_vcpu.cpu_mode;
}
--
2.17.1


Re: [PATCH 0/3] add hkdf key derivation support based on mbedtls

Zhu, Bing
 

The actual LOC count is 1731 (most of the ~5900 added lines are comments).

-------------------------------------------------------------------------------
Language                     files          blank        comment           code
-------------------------------------------------------------------------------
C                                5            323            155           1359
C/C++ Header                     5            288           3428            372
-------------------------------------------------------------------------------
SUM:                            10            611           3583           1731
-------------------------------------------------------------------------------

-----Original Message-----
From: Xu, Anthony
Sent: Tuesday, August 14, 2018 2:31 AM
To: Zhu, Bing <bing.zhu@...>; Dong, Eddie <eddie.dong@...>; acrn-dev@...
Cc: Wang, Kai Z <kai.z.wang@...>; Chen, Gang G <gang.g.chen@...>
Subject: RE: [acrn-dev] [PATCH 0/3] add hkdf key derivation support based on
mbedtls

15 files changed, 5975 insertions(+), 70 deletions(-)

This patch adds ~5900 LOC. That is way too big; it increases the ACRN
hypervisor code size by more than 20%. That's my concern.

Any other solutions?
SOS is a special VM. If we let SOS know the physical platform secret,
what's the real impact?


Anthony

-----Original Message-----
From: Zhu, Bing
Sent: Saturday, August 11, 2018 3:09 AM
To: Dong, Eddie <eddie.dong@...>; Xu, Anthony
<anthony.xu@...>; acrn-dev@...
Cc: Wang, Kai Z <kai.z.wang@...>; Chen, Gang G
<gang.g.chen@...>
Subject: RE: [acrn-dev] [PATCH 0/3] add hkdf key derivation support
based on mbedtls

Right, and the key derivation is pretty fast. This operation is called
only when a new UOS starts, and only once.

SOS, like any other guest VM, must not have knowledge of that physical
platform secret (this secret behaves as a root key from the VM's
perspective).

Bing

-----Original Message-----
From: Dong, Eddie
Sent: Saturday, August 11, 2018 6:05 PM
To: Xu, Anthony <anthony.xu@...>; acrn-dev@...
Cc: Zhu, Bing <bing.zhu@...>; Wang, Kai Z <kai.z.wang@...>; Chen, Gang G <gang.g.chen@...>
Subject: RE: [acrn-dev] [PATCH 0/3] add hkdf key derivation support based on mbedtls


Is this operation performance critical?
Can we let SOS do the en/decryption through a new request type?
It is not performance critical, but we cannot let SOS know the secret of
the physical platform.
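
As a heavily hedged sketch of the flow discussed here — deriving a
per-UOS seed from the platform secret once at UOS start — the shape
might look like the following. mbedtls_hkdf() is the stock mbedtls HKDF
entry point (RFC 5869); whether the subset imported by this patch
exposes exactly that API is an assumption, as are all names below.

#include <stddef.h>
#include <stdint.h>
#include <mbedtls/hkdf.h>
#include <mbedtls/md.h>

/* hypothetical helper: derive a 32-byte per-UOS seed so the UOS never
 * sees the platform secret itself */
static int derive_uos_seed(const uint8_t *platform_secret, size_t secret_len,
                           const uint8_t *uos_salt, size_t salt_len,
                           uint8_t seed_out[32])
{
        const mbedtls_md_info_t *md =
                mbedtls_md_info_from_type(MBEDTLS_MD_SHA256);
        const uint8_t info[] = "ACRN-UOS-seed"; /* context label, assumed */

        if (md == NULL)
                return -1;

        /* HKDF-Extract followed by HKDF-Expand in one call */
        return mbedtls_hkdf(md, uos_salt, salt_len,
                            platform_secret, secret_len,
                            info, sizeof(info) - 1U,
                            seed_out, 32U);
}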


Re: [RFC PATCH 2/2] hv: add a dummy file to do compile time assert

Xu, Anthony
 

Can we generate the OFFSET macros at build time?

Anthony

-----Original Message-----
From: acrn-dev@... [mailto:acrn-dev@...]
On Behalf Of Junjie Mao
Sent: Monday, August 13, 2018 3:24 AM
To: Wu, Binbin <binbin.wu@...>
Cc: acrn-dev@...
Subject: Re: [acrn-dev] [RFC PATCH 2/2] hv: add a dummy file to do compile
time assert

"Wu, Binbin" <binbin.wu@...> writes:

Add a dummy file to do compile-time asserts on member offsets within a
struct.
If an assertion does not hold, there will be an error at compile time.
The file will not increase the size of the HV binary.

Signed-off-by: Binbin Wu <binbin.wu@...>
Reviewed-by: Junjie Mao <junjie.mao@...>

This looks good to me as it is explicitly required that constant array
sizes shall be positive in clause 2, section 6.7.5.2, C99.
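
Concretely, a standalone sketch of the trick (not part of the patch; the
extra CAT__ indirection here additionally expands __LINE__ before
pasting, so each assertion gets a unique type name):

#include <stddef.h>

#define CAT__(A, B) A ## B
#define CAT_(A, B) CAT__(A, B)
#define CTASSERT(expr) \
typedef int CAT_(CTA_DummyType, __LINE__)[(expr) ? 1 : -1]

struct demo {
        int a;
        int b;
};

/* holds: expands to a typedef of int[1], which emits no code */
CTASSERT(offsetof(struct demo, b) == sizeof(int));

/* a false assertion, e.g. CTASSERT(offsetof(struct demo, b) == 0),
 * would declare an array of size -1 and be rejected at compile time */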

---
hypervisor/Makefile | 1 +
hypervisor/arch/x86/dummy.c | 24 ++++++++++++++++++++++++
2 files changed, 25 insertions(+)
create mode 100644 hypervisor/arch/x86/dummy.c

diff --git a/hypervisor/Makefile b/hypervisor/Makefile
index b0588b7..81d23bd 100644
--- a/hypervisor/Makefile
+++ b/hypervisor/Makefile
@@ -140,6 +140,7 @@ C_SRCS += arch/x86/cpu_state_tbl.c
C_SRCS += arch/x86/mtrr.c
C_SRCS += arch/x86/pm.c
S_SRCS += arch/x86/wakeup.S
+C_SRCS += arch/x86/dummy.c
C_SRCS += arch/x86/guest/vcpu.c
C_SRCS += arch/x86/guest/vm.c
C_SRCS += arch/x86/guest/vlapic.c
diff --git a/hypervisor/arch/x86/dummy.c b/hypervisor/arch/x86/dummy.c
new file mode 100644
index 0000000..d1b5ca3
--- /dev/null
+++ b/hypervisor/arch/x86/dummy.c
@@ -0,0 +1,24 @@
+#include <lib/types.h>
+#include <lib/util.h>
+#include <vm0_boot.h>
+
+#define CAT_(A,B) A ## B
+#define CTASSERT(expr) \
+typedef int CAT_(CTA_DummyType,__LINE__)[(expr) ? 1 : -1]
+
+CTASSERT(BOOT_CTX_CR0_OFFSET == offsetof(struct boot_ctx, cr0));
+CTASSERT(BOOT_CTX_CR3_OFFSET == offsetof(struct boot_ctx, cr3));
+CTASSERT(BOOT_CTX_CR4_OFFSET == offsetof(struct boot_ctx, cr4));
+CTASSERT(BOOT_CTX_IDT_OFFSET == offsetof(struct boot_ctx, idt));
+CTASSERT(BOOT_CTX_GDT_OFFSET == offsetof(struct boot_ctx, gdt));
+CTASSERT(BOOT_CTX_LDT_SEL_OFFSET == offsetof(struct boot_ctx, ldt_sel));
+CTASSERT(BOOT_CTX_TR_SEL_OFFSET == offsetof(struct boot_ctx, tr_sel));
+CTASSERT(BOOT_CTX_CS_SEL_OFFSET == offsetof(struct boot_ctx, cs_sel));
+CTASSERT(BOOT_CTX_SS_SEL_OFFSET == offsetof(struct boot_ctx, ss_sel));
+CTASSERT(BOOT_CTX_DS_SEL_OFFSET == offsetof(struct boot_ctx, ds_sel));
+CTASSERT(BOOT_CTX_ES_SEL_OFFSET == offsetof(struct boot_ctx, es_sel));
+CTASSERT(BOOT_CTX_FS_SEL_OFFSET == offsetof(struct boot_ctx, fs_sel));
+CTASSERT(BOOT_CTX_GS_SEL_OFFSET == offsetof(struct boot_ctx, gs_sel));
+CTASSERT(BOOT_CTX_CS_AR_OFFSET == offsetof(struct boot_ctx, cs_ar));
+CTASSERT(BOOT_CTX_EFER_LOW_OFFSET == offsetof(struct boot_ctx, ia32_efer));
+CTASSERT(BOOT_CTX_EFER_HIGH_OFFSET == offsetof(struct boot_ctx, ia32_efer) + 4);
--
2.7.4




Re: [PATCH 0/3] add hkdf key derivation support based on mbedtls

Xu, Anthony
 

15 files changed, 5975 insertions(+), 70 deletions(-)

This patch adds ~5900 LOC. That is way too big; it increases the ACRN hypervisor code size by more than 20%. That's my concern.

Any other solutions?
SOS is a special VM. If we let SOS know the physical platform secret, what's the real impact?


Anthony

-----Original Message-----
From: Zhu, Bing
Sent: Saturday, August 11, 2018 3:09 AM
To: Dong, Eddie <eddie.dong@...>; Xu, Anthony
<anthony.xu@...>; acrn-dev@...
Cc: Wang, Kai Z <kai.z.wang@...>; Chen, Gang G
<gang.g.chen@...>
Subject: RE: [acrn-dev] [PATCH 0/3] add hkdf key derivation support based
on mbedtls

Right, and the key derivation is pretty fast. This operation is called
only when a new UOS starts, and only once.

SOS, like any other guest VM, must not have knowledge of that physical
platform secret (this secret behaves as a root key from the VM's perspective).

Bing

-----Original Message-----
From: Dong, Eddie
Sent: Saturday, August 11, 2018 6:05 PM
To: Xu, Anthony <anthony.xu@...>; acrn-dev@...
Cc: Zhu, Bing <bing.zhu@...>; Wang, Kai Z <kai.z.wang@...>; Chen, Gang G <gang.g.chen@...>
Subject: RE: [acrn-dev] [PATCH 0/3] add hkdf key derivation support based on mbedtls


Is this operation performance critical?
Can we let SOS do the en/decryption through a new request type?
It is not performance critical, but we cannot let SOS know the secret of
the physical platform.