[PATCH v4 5/7] hv: dispatch asyncio request


Conghui Chen
 

Add a dispatch function for asyncio requests: if the I/O base address is
found in the asyncio list, queue the corresponding fd into the asyncio
shared buffer.
'avail_idx' is updated by the hypervisor to indicate the next available
slot that holds a request to be processed.
'processed_idx' is updated by the kernel after the request has been
processed.

Signed-off-by: Conghui <conghui.chen@...>
---
hypervisor/common/sbuf.c | 2 +
hypervisor/dm/io_req.c | 75 +++++++++++++++++++++----
hypervisor/include/public/acrn_common.h | 3 +
3 files changed, 68 insertions(+), 12 deletions(-)

diff --git a/hypervisor/common/sbuf.c b/hypervisor/common/sbuf.c
index 73131fd53..7ff3be1dd 100644
--- a/hypervisor/common/sbuf.c
+++ b/hypervisor/common/sbuf.c
@@ -66,6 +66,8 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
to = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->tail;

(void)memcpy_s(to, sbuf->ele_size, data, sbuf->ele_size);
+ /* make sure the data is written before updating the head */
+ cpu_write_memory_barrier();

if (trigger_overwrite) {
sbuf->head = sbuf_next_ptr(sbuf->head,
diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c
index 3d8609807..a17817fae 100644
--- a/hypervisor/dm/io_req.c
+++ b/hypervisor/dm/io_req.c
@@ -7,6 +7,7 @@
#include <asm/irq.h>
#include <errno.h>
#include <logmsg.h>
+#include <sbuf.h>

#define DBG_LEVEL_IOREQ 6U

@@ -126,6 +127,53 @@ static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
return (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE);
}

+static int acrn_insert_asyncio(struct acrn_vcpu *vcpu, const struct io_request *io_req)
+{
+ uint64_t addr = 0;
+ uint32_t type = 0;
+ bool need_inject = false;
+ struct asyncio_req *iter_req;
+ struct list_head *pos;
+ struct acrn_vm *vm = vcpu->vm;
+ struct shared_buf *sbuf =
+ (struct shared_buf *)vm->sw.asyncio_sbuf;
+ int ret = -ENODEV;
+
+ if (sbuf != NULL) {
+ switch (io_req->io_type) {
+ case ACRN_IOREQ_TYPE_PORTIO:
+ addr = io_req->reqs.pio_request.address;
+ type = ACRN_ASYNCIO_PIO;
+ break;
+
+ case ACRN_IOREQ_TYPE_MMIO:
+ addr = io_req->reqs.mmio_request.address;
+ type = ACRN_ASYNCIO_MMIO;
+ break;
+ default:
+ break;
+ }
+ if (addr) {
+ spinlock_obtain(&vm->asyncio_lock);
+ list_for_each(pos, &vm->asyncio_queue) {
+ iter_req = container_of(pos, struct asyncio_req, list);
+ if (iter_req->addr == addr && iter_req->type == type) {
+ if (sbuf_put(sbuf, (uint8_t *)&iter_req->fd)
+ == sizeof(iter_req->fd)) {
+ need_inject = true;
+ break;
+ }
+ }
+ }
+ spinlock_release(&vm->asyncio_lock);
+ if (need_inject) {
+ arch_fire_hsm_interrupt();
+ ret = 0;
+ }
+ }
+ }
+ return ret;
+}
/**
* @brief Deliver \p io_req to Service VM and suspend \p vcpu till its completion
*
@@ -644,18 +692,21 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
*
* ACRN insert request to HSM and inject upcall.
*/
- status = acrn_insert_request(vcpu, io_req);
- if (status == 0) {
- dm_emulate_io_complete(vcpu);
- } else {
- /* here for both IO & MMIO, the direction, address,
- * size definition is same
- */
- struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
-
- pr_fatal("%s Err: access dir %d, io_type %d, addr = 0x%lx, size=%lu", __func__,
- pio_req->direction, io_req->io_type,
- pio_req->address, pio_req->size);
+ status = acrn_insert_asyncio(vcpu, io_req);
+ if (status != 0) {
+ status = acrn_insert_request(vcpu, io_req);
+ if (status == 0) {
+ dm_emulate_io_complete(vcpu);
+ } else {
+ /* here for both IO & MMIO, the direction, address,
+ * size definition is same
+ */
+ struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
+
+ pr_fatal("%s Err: access dir %d, io_type %d, addr = 0x%lx, size=%lu", __func__,
+ pio_req->direction, io_req->io_type,
+ pio_req->address, pio_req->size);
+ }
}
}

diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index 6fbab30a6..cb8ae9775 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -726,6 +726,9 @@ struct acrn_vdev {
uint8_t args[128];
};

+#define ACRN_ASYNCIO_PIO (0x01U)
+#define ACRN_ASYNCIO_MMIO (0x02U)
+
#define SBUF_MAGIC 0x5aa57aa71aa13aa3UL
#define SBUF_MAX_SIZE (1UL << 22U)
#define SBUF_HEAD_SIZE 64U
--
2.25.1


Eddie Dong
 

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On
Behalf Of Conghui Chen
Sent: Monday, September 12, 2022 8:28 PM
To: acrn-dev@...
Cc: Chen, Conghui <conghui.chen@...>
Subject: [acrn-dev] [PATCH v4 5/7] hv: dispatch asyncio request

[...]
+ status = acrn_insert_asyncio(vcpu, io_req);
+ if (status != 0) {
+ status = acrn_insert_request(vcpu, io_req);
This is a double attempt to insert the request. It works, but it is not explicit enough. How about this way:

if (async_io(...)) {
	acrn_insert_asyncio(vcpu, io_req);
} else {
	acrn_insert_request(vcpu, io_req);
}
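
In emulate_io() that would look roughly like this (only a sketch; is_asyncio_request() is a hypothetical helper that checks whether the address is registered in vm->asyncio_queue, it is not part of this patch):

	if (is_asyncio_request(vcpu->vm, io_req)) {
		/* registered for asyncio: queue the fd and notify the Service VM */
		status = acrn_insert_asyncio(vcpu, io_req);
	} else {
		/* normal path: deliver the ioreq to the Service VM and wait for completion */
		status = acrn_insert_request(vcpu, io_req);
		if (status == 0) {
			dm_emulate_io_complete(vcpu);
		}
	}

Then each request takes exactly one path, instead of the second insert silently serving as the fallback for the first.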







Conghui Chen
 

Hi Eddie,

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On
Behalf Of Conghui Chen
Sent: Monday, September 12, 2022 8:28 PM
To: acrn-dev@...
Cc: Chen, Conghui <conghui.chen@...>
Subject: [acrn-dev] [PATCH v4 5/7] hv: dispatch asyncio request

[...]
+ status = acrn_insert_asyncio(vcpu, io_req);
+ if (status != 0) {
+ status = acrn_insert_request(vcpu, io_req);
This is a double attempt to insert the request. It works, but it is not explicit enough. How about this way:

if (async_io(...)) {
	status = acrn_insert_asyncio(vcpu, io_req);
}
if (status != 0)
	acrn_insert_request(vcpu, io_req);
Yes, it is a kind of double attempt.
But it covers this case: when an asyncio request fails to be injected (e.g. because the sbuf is full), it falls back to synchronous I/O injection.

So, can I change it to:
if (async_io(...)) {
	status = acrn_insert_asyncio(vcpu, io_req);
}
if (status != 0) {
	status = acrn_insert_request(vcpu, io_req);
}
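
Spelled out a bit more (still only a sketch; is_asyncio_request() is a hypothetical helper meaning "the address is registered in vm->asyncio_queue", it does not exist in this patch), the intent is:

	status = -ENODEV;
	if (is_asyncio_request(vcpu->vm, io_req)) {
		status = acrn_insert_asyncio(vcpu, io_req);
	}
	if (status != 0) {
		/* not registered for asyncio, or sbuf_put() failed (e.g. sbuf full):
		 * fall back to the normal synchronous ioreq path
		 */
		status = acrn_insert_request(vcpu, io_req);
		if (status == 0) {
			dm_emulate_io_complete(vcpu);
		}
	}

status has to be pre-set to a non-zero value, so a request that is not registered for asyncio still takes the synchronous path.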

Regards,
Conghui.







Eddie Dong
 

+ if (status != 0) {
+ status = acrn_insert_request(vcpu, io_req);
This is a double attempt to insert the request. It works, but it is not explicit enough. How about this way:

if (async_io(...)) {
	status = acrn_insert_asyncio(vcpu, io_req);
}
if (status != 0)
	acrn_insert_request(vcpu, io_req);

Yes, it is a kind of double attempt.
But it covers this case: when an asyncio request fails to be injected (e.g. because the sbuf is full), it falls back to synchronous I/O injection.
If the sbuf is full, you may yield the CPU and retry later on.
Turning it into a sync I/O doesn't help, and it may introduce a new potential issue: the later I/O (an asyncio handled as a sync I/O) may be processed earlier than the previously queued asyncio requests still pending in the sbuf.

We should not introduce this kind of issue.
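
To illustrate, inside acrn_insert_asyncio() it could be something like this (sketch only; asyncio_yield() is a hypothetical "give up the pCPU and retry later" primitive, not an existing API, and a real implementation must not spin while holding asyncio_lock):

	/* sbuf full: do not degrade to sync I/O, give the Service VM a chance
	 * to drain the buffer, then try again
	 */
	while (sbuf_put(sbuf, (uint8_t *)&iter_req->fd) != sizeof(iter_req->fd)) {
		asyncio_yield(vcpu);
	}
	need_inject = true;

That way an asyncio request is never turned into a sync I/O, so it cannot overtake the requests already queued in the sbuf.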




Conghui Chen
 

Hi Eddie,

-----Original Message-----
From: Dong, Eddie <eddie.dong@...>
Sent: Tuesday, September 20, 2022 4:30 AM
To: Chen, Conghui <conghui.chen@...>; acrn-dev@...
Subject: RE: [acrn-dev] [PATCH v4 5/7] hv: dispatch asyncio request

[...]
Yes, it is a kind of double attempt.
But it covers this case: when an asyncio request fails to be injected (e.g. because the sbuf is full), it falls back to synchronous I/O injection.
If the sbuf is full, you may yield the CPU and retry later on.
Turning it into a sync I/O doesn't help, and it may introduce a new potential issue: the later I/O (an asyncio handled as a sync I/O) may be processed earlier than the previously queued asyncio requests still pending in the sbuf.

We should not introduce this kind of issue.
I got your point; I will add logic to handle the sbuf-full case, thanks.


Regards,
Conghui.