[PATCH v3 1/3] hv: support asyncio request


Conghui Chen
 

In the original I/O request flow, the User VM has to wait for the
completion of the I/O request before returning. But some registers,
such as the NOTIFY register in virtio devices, are only used by the FE
driver to notify the BE driver, and accesses to them can be processed
asynchronously on the BE side. For such registers, the ACRN hypervisor
can easily emulate the access by sending a notification to a vCPU on
the Service VM side. This way, the FE side can resume work without
waiting for the BE side to fully complete its response.

Signed-off-by: Conghui <conghui.chen@...>
---
hypervisor/arch/x86/guest/vmcall.c | 2 ++
hypervisor/common/hypercall.c | 32 ++++++++++++++++++++++
hypervisor/dm/io_req.c | 12 ++++++++
hypervisor/include/arch/x86/asm/guest/vm.h | 2 ++
hypervisor/include/common/hypercall.h | 1 +
hypervisor/include/dm/io_req.h | 2 +-
hypervisor/include/public/acrn_common.h | 23 ++++++++++++++++
hypervisor/include/public/acrn_hv_defs.h | 1 +
8 files changed, 74 insertions(+), 1 deletion(-)

diff --git a/hypervisor/arch/x86/guest/vmcall.c b/hypervisor/arch/x86/guest/vmcall.c
index a691149ba..911c7fb46 100644
--- a/hypervisor/arch/x86/guest/vmcall.c
+++ b/hypervisor/arch/x86/guest/vmcall.c
@@ -58,6 +58,8 @@ static const struct hc_dispatch hc_dispatch_table[] = {
.handler = hcall_inject_msi},
[HC_IDX(HC_SET_IOREQ_BUFFER)] = {
.handler = hcall_set_ioreq_buffer},
+ [HC_IDX(HC_SET_ASYNCIO_IOREQ_BUFFER)] = {
+ .handler = hcall_set_asyncio_ioreq_buffer},
[HC_IDX(HC_NOTIFY_REQUEST_FINISH)] = {
.handler = hcall_notify_ioreq_finish},
[HC_IDX(HC_VM_SET_MEMORY_REGIONS)] = {
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index e27eaf7e5..c934201bd 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -518,6 +518,38 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
return ret;
}

+int32_t hcall_set_asyncio_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
+ __unused uint64_t param1, uint64_t param2)
+{
+ struct acrn_vm *vm = vcpu->vm;
+ uint64_t hpa;
+ int32_t ret = -1;
+
+ if (is_created_vm(target_vm)) {
+ uint64_t iobuf;
+
+ if (copy_from_gpa(vm, &iobuf, param2, sizeof(iobuf)) == 0) {
+ dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p",
+ target_vm->vm_id, iobuf);
+
+ hpa = gpa2hpa(vm, iobuf);
+ if (hpa == INVALID_HPA) {
+ pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
+ __func__, vm->vm_id, iobuf);
+ target_vm->sw.asyncio_shared_page = NULL;
+ } else {
+ target_vm->sw.asyncio_shared_page = hpa2hva(hpa);
+ pr_err("init asyncio page base=%lx \n", target_vm->sw.asyncio_shared_page);
+ init_asyncio(target_vm);
+ ret = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
+
/**
* @brief notify request done
*
diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c
index afb695179..6afb09509 100644
--- a/hypervisor/dm/io_req.c
+++ b/hypervisor/dm/io_req.c
@@ -177,6 +177,18 @@ void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state)
}
}

+void init_asyncio(struct acrn_vm *vm)
+{
+ struct acrn_asyncio_req_buffer *req_buf = NULL;
+
+ req_buf = (struct acrn_asyncio_req_buffer *)vm->sw.asyncio_shared_page;
+ if (req_buf != NULL) {
+ stac();
+ (void)memset(req_buf, 0U, sizeof(struct acrn_asyncio_req_buffer));
+ clac();
+ }
+}
+
void set_hsm_notification_vector(uint32_t vector)
{
acrn_hsm_notification_vector = vector;
diff --git a/hypervisor/include/arch/x86/asm/guest/vm.h b/hypervisor/include/arch/x86/asm/guest/vm.h
index 62267af27..978c188a0 100644
--- a/hypervisor/include/arch/x86/asm/guest/vm.h
+++ b/hypervisor/include/arch/x86/asm/guest/vm.h
@@ -69,6 +69,7 @@ struct vm_sw_info {
struct sw_module_info acpi_info;
/* HVA to IO shared page */
void *io_shared_page;
+ void *asyncio_shared_page;
/* If enable IO completion polling mode */
bool is_polling_ioreq;
};
@@ -143,6 +144,7 @@ struct acrn_vm {
uint16_t vm_id; /* Virtual machine identifier */
enum vm_state state; /* VM state */
struct acrn_vuart vuart[MAX_VUART_NUM_PER_VM]; /* Virtual UART */
+
enum vpic_wire_mode wire_mode;
struct iommu_domain *iommu; /* iommu domain of this VM */
/* vm_state_lock used to protect vm/vcpu state transition,
diff --git a/hypervisor/include/common/hypercall.h b/hypervisor/include/common/hypercall.h
index 3810f3b6d..232239009 100644
--- a/hypervisor/include/common/hypercall.h
+++ b/hypervisor/include/common/hypercall.h
@@ -202,6 +202,7 @@ int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint
*/
int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);

+int32_t hcall_set_asyncio_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
/**
* @brief notify request done
*
diff --git a/hypervisor/include/dm/io_req.h b/hypervisor/include/dm/io_req.h
index f64abf22c..f12194351 100644
--- a/hypervisor/include/dm/io_req.h
+++ b/hypervisor/include/dm/io_req.h
@@ -209,7 +209,7 @@ uint32_t get_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id);
* @return None
*/
void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state);
-
+void init_asyncio(struct acrn_vm *vm);
/**
* @brief Set the vector for HV callback HSM
*
diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index fee71a655..0d768c6f0 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -26,6 +26,7 @@
*/

#define ACRN_IO_REQUEST_MAX 16U
+#define ACRN_ASYNCIO_REQUEST_MAX 508U

#define ACRN_IOREQ_STATE_PENDING 0U
#define ACRN_IOREQ_STATE_COMPLETE 1U
@@ -342,6 +343,28 @@ struct acrn_io_request_buffer {
};
};

+/**
+ * struct acrn_asyncio_request - total size of the structure is 4096 bytes
+ * @processed_idx: The index the kernel has processed.
+ * @avail_idx: The available index which has an IO request to be processed.
+ * @reserved: Reserved fields.
+ * @fds: The eventfd list.
+ */
+
+struct acrn_asyncio_request {
+ uint64_t processed_idx;
+ uint64_t avail_idx;
+ uint64_t reserved[2];
+ uint64_t fds[ACRN_ASYNCIO_REQUEST_MAX];
+} __attribute__((aligned(8)));
+
+struct acrn_asyncio_req_buffer {
+ union {
+ struct acrn_asyncio_request reqs;
+ uint8_t reserved[4096];
+ };
+};
+
/**
* @brief Info to create a VM, the parameter for HC_CREATE_VM hypercall
*/
diff --git a/hypervisor/include/public/acrn_hv_defs.h b/hypervisor/include/public/acrn_hv_defs.h
index 728948186..70a318f55 100644
--- a/hypervisor/include/public/acrn_hv_defs.h
+++ b/hypervisor/include/public/acrn_hv_defs.h
@@ -50,6 +50,7 @@
#define HC_ID_IOREQ_BASE 0x30UL
#define HC_SET_IOREQ_BUFFER BASE_HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00UL)
#define HC_NOTIFY_REQUEST_FINISH BASE_HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01UL)
+#define HC_SET_ASYNCIO_IOREQ_BUFFER BASE_HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x02UL)

/* Guest memory management */
#define HC_ID_MEM_BASE 0x40UL
--
2.25.1
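
For context, here is a sketch of how the Service VM side could drain the ring this patch defines. This is not from the patch series: asyncio_signal_eventfd is a placeholder for whatever the HSM driver actually uses to kick the registered eventfd, and real code would also need memory barriers between reading avail_idx and reading the fds entries.

    #include <stdint.h>

    #define ACRN_ASYNCIO_REQUEST_MAX 508U

    /* Mirrors the layout added by this patch. */
    struct acrn_asyncio_request {
        uint64_t processed_idx;  /* consumer index, advanced by the kernel */
        uint64_t avail_idx;      /* producer index, advanced by the hypervisor */
        uint64_t reserved[2];
        uint64_t fds[ACRN_ASYNCIO_REQUEST_MAX];
    } __attribute__((aligned(8)));

    /* Placeholder: kick the eventfd that was registered for this value. */
    extern void asyncio_signal_eventfd(uint64_t fd);

    static void asyncio_drain(struct acrn_asyncio_request *req)
    {
        /* Entries between processed_idx and avail_idx are pending requests. */
        while (req->processed_idx != req->avail_idx) {
            uint64_t slot = req->processed_idx % ACRN_ASYNCIO_REQUEST_MAX;

            asyncio_signal_eventfd(req->fds[slot]);
            req->processed_idx++;
        }
    }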


Eddie Dong
 

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On
Behalf Of Conghui Chen
Sent: Friday, August 26, 2022 1:02 AM
To: acrn-dev@...
Cc: Chen, Conghui <conghui.chen@...>
Subject: [acrn-dev] [PATCH v3 1/3] hv: support asyncio request

+/**
+ * struct acrn_asyncio_request - total size of the structure is 4096 bytes
+ * @processed_idx: The index the kernel has processed.
+ * @avail_idx: The available index which has an IO request to be processed.
+ * @reserved: Reserved fields.
+ * @fds: The eventfd list.
+ */
+
+struct acrn_asyncio_request {
+ uint64_t processed_idx;
+ uint64_t avail_idx;
We should have the size of the queue here, rather than the hardcoded ACRN_ASYNCIO_REQUEST_MAX.
The Service VM can set the size, and the hypervisor just takes it (as long as it fits inside the one-page async-io buffer).

+ uint64_t reserved[2];
+ uint64_t fds[ACRN_ASYNCIO_REQUEST_MAX];
+} __attribute__((aligned(8)));
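
In code, the suggestion could look something like the sketch below; queue_size is an illustrative name, not something from the patch:

    struct acrn_asyncio_request {
        uint64_t processed_idx;
        uint64_t avail_idx;
        uint64_t queue_size;  /* set by the Service VM instead of a constant */
        uint64_t reserved;
        uint64_t fds[];       /* queue_size entries, still within one 4KiB page */
    } __attribute__((aligned(8)));

The hypervisor would then validate queue_size against the space left in the page instead of comparing indices against ACRN_ASYNCIO_REQUEST_MAX.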

Eddie Dong
 

BTW, the rest is fine.
Please get a +1 from the internal kernel driver maintainer.


Conghui Chen
 

Hi Eddie,

+struct acrn_asyncio_request {
+ uint64_t processed_idx;
+ uint64_t avail_idx;
We should have the size of the queue here, rather than the hardcoded ACRN_ASYNCIO_REQUEST_MAX.
The Service VM can set the size, and the hypervisor just takes it (as long as it fits inside the one-page async-io buffer).
Sure, I will set up the size of the queue through a hypercall, thanks.

Regards,
Conghui.







Eddie Dong
 

-----Original Message-----
From: Chen, Conghui <conghui.chen@...>
Sent: Sunday, August 28, 2022 11:41 PM
To: Dong, Eddie <eddie.dong@...>; acrn-dev@...
Subject: RE: [acrn-dev] [PATCH v3 1/3] hv: support asyncio request

Hi Eddie,

+struct acrn_asyncio_request {
+ uint64_t processed_idx;
+ uint64_t avail_idx;
We should have the size of the queue here, rather than the hardcoded ACRN_ASYNCIO_REQUEST_MAX.
The Service VM can set the size, and the hypervisor just takes it (as long as it fits inside the one-page async-io buffer).
Sure, I will set up the size of the queue through a hypercall, thanks.
No need for a new hypercall. This can be an "RO" field in the page. Setting up the initial page can convey this information.

BTW, do we need to carry a magic such as "ACRN_ASYNCIO PAGE" in the page, together with the size?
In the page, we also need to set the reset values of the head/tail idx, like a HW device does.
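
A possible header layout for that proposal (the field names and the magic value are placeholders, not part of the patch):

    #define ACRN_ASYNCIO_MAGIC 0x4E524341UL  /* "ACRN" in ASCII; placeholder value */

    /* Written once by the DM before registration; the hypervisor would treat
     * the magic and queue_size fields as read-only afterwards. */
    struct acrn_asyncio_page_hdr {
        uint64_t magic;          /* identifies an initialized asyncio page */
        uint64_t queue_size;     /* the "RO" size field */
        uint64_t processed_idx;  /* reset value: 0 */
        uint64_t avail_idx;      /* reset value: 0 */
    };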








Conghui Chen
 

Hi Eddie,

+struct acrn_asyncio_request {
+ uint64_t processed_idx;
+ uint64_t avail_idx;
We should have the size of the queue here, rather than the hardcoded ACRN_ASYNCIO_REQUEST_MAX.
The Service VM can set the size, and the hypervisor just takes it (as long as it fits inside the one-page async-io buffer).
Sure, I will set up the size of the queue through a hypercall, thanks.
No need for a new hypercall. This can be an "RO" field in the page. Setting up the initial page can convey this information.

BTW, do we need to carry a magic such as "ACRN_ASYNCIO PAGE" in the page, together with the size?

Is the magic used to show that the page has been initialized as an asyncio page?
If yes, then we can add the magic number in the DM and check it in the kernel and the HV.

In the page, we also need to set the reset values of the head/tail idx, like a HW device does.
The current flow is:

1. DM mallocs a page and passes the address through an ioctl.
2. Kernel pins the page and passes the address through a hypercall.
3. HV records the address and initializes the page (head/tail/fds).

So, I suppose the flow needs to change to (a sketch of step 1 follows below):
1. DM mallocs a page, initializes it (set up the magic number, size, head/tail, and init the fds to 0), and passes the address through an ioctl.
2. Kernel checks the magic, pins the page, and passes the address through a hypercall.
3. HV checks the magic and records the address.
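
A sketch of step 1 on the DM side under this flow; the ioctl request code, the struct layout, and the field names are assumptions for illustration, not the real DM or HSM API:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>

    #define ASYNCIO_PAGE_SIZE   4096U
    #define ASYNCIO_MAGIC       0x4E524341UL  /* placeholder value */
    #define ASYNCIO_QUEUE_SIZE  508U          /* keeps the struct in one page */

    struct asyncio_page {                     /* hypothetical revised layout */
        uint64_t magic;
        uint64_t queue_size;
        uint64_t processed_idx;
        uint64_t avail_idx;
        uint64_t fds[ASYNCIO_QUEUE_SIZE];
    };

    /* Placeholder; the real request code would come from the HSM UAPI header. */
    #define ACRN_IOCTL_SET_ASYNCIO_BUFFER 0UL

    static int asyncio_page_setup(int hsm_fd)
    {
        struct asyncio_page *page;

        /* One zeroed, page-aligned page so the kernel can pin it later. */
        if (posix_memalign((void **)&page, ASYNCIO_PAGE_SIZE, ASYNCIO_PAGE_SIZE) != 0)
            return -1;
        memset(page, 0, ASYNCIO_PAGE_SIZE);

        page->magic = ASYNCIO_MAGIC;           /* stamp the magic */
        page->queue_size = ASYNCIO_QUEUE_SIZE; /* carry the size in the page */
        page->processed_idx = 0U;              /* reset head */
        page->avail_idx = 0U;                  /* reset tail */

        return ioctl(hsm_fd, ACRN_IOCTL_SET_ASYNCIO_BUFFER, page);
    }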


Regards,
Conghui.








Eddie Dong
 

-----Original Message-----
From: Chen, Conghui <conghui.chen@...>
Sent: Tuesday, August 30, 2022 7:50 PM
To: Dong, Eddie <eddie.dong@...>; acrn-dev@...
Subject: RE: [acrn-dev] [PATCH v3 1/3] hv: support asyncio request

Hi Eddie,

+struct acrn_asyncio_request {
+ uint64_t processed_idx;
+ uint64_t avail_idx;
We should have the size of the queue here, rather than the hardcoded ACRN_ASYNCIO_REQUEST_MAX.
The Service VM can set the size, and the hypervisor just takes it (as long as it fits inside the one-page async-io buffer).
Sure, I will set up the size of the queue through a hypercall, thanks.
No need for a new hypercall. This can be an "RO" field in the page. Setting up the initial page can convey this information.

BTW, do we need to carry a magic such as "ACRN_ASYNCIO PAGE" in the page, together with the size?

Is the magic used to show that the page has been initialized as an asyncio page?
YES

If yes, then we can add the magic number in the DM and check it in the kernel and the HV.

In the page, we also need to set the reset values of the head/tail idx, like a HW device does.
The current flow is:

1. DM mallocs a page and passes the address through an ioctl.
2. Kernel pins the page and passes the address through a hypercall.
3. HV records the address and initializes the page (head/tail/fds).

So, I suppose the flow needs to change to:
1. DM mallocs a page, initializes it (set up the magic number, size, head/tail, and init the fds to 0), and passes the address through an ioctl.
2. Kernel checks the magic, pins the page, and passes the address through a hypercall.
3. HV checks the magic and records the address.
YES
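
With that confirmed, step 3 on the hypervisor side might reduce to something like the sketch below; the magic field is an assumption, while stac()/clac() and hpa2hva() are used as in the patch:

    /* Accept the page only if the DM already stamped it with the magic. */
    static int32_t asyncio_page_accept(struct acrn_vm *target_vm, uint64_t hpa)
    {
        struct acrn_asyncio_req_buffer *buf = hpa2hva(hpa);
        int32_t ret = -1;

        stac();                                      /* open access to the page */
        if (buf->reqs.magic == ACRN_ASYNCIO_MAGIC) { /* hypothetical field */
            target_vm->sw.asyncio_shared_page = buf;
            ret = 0;
        } else {
            target_vm->sw.asyncio_shared_page = NULL;
        }
        clac();                                      /* close access again */

        return ret;
    }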


