Date   

[PATCH v2 0/1]

Zhao, Yuanyuan
 

V2
1. Change hugetlb fs from '/path/vmname/hugetlb' to '/path/vmname'.

2. Change the length limit of vmname from 16 to 15.

3. Improve the commit message.


V1:

Replace it with vmname as the identifier for VM
and remove it from acrn-dm parameter list.

*** BLURB HERE ***

Yuanyuan Zhao (1):
dm: replace UUID with vmname.

devicemodel/core/hugetlb.c | 27 ++++++---------------------
devicemodel/core/main.c | 15 ++++++---------
devicemodel/core/vmmapi.c | 17 ++---------------
devicemodel/include/dm.h | 3 +--
devicemodel/include/vmmapi.h | 1 -
5 files changed, 15 insertions(+), 48 deletions(-)

--
2.17.1


Re: [PATCH] dm: replace UUID with vmname.

Zhao, Yuanyuan
 

-----Original Message-----
From: Wang, Yu1 <yu1.wang@...>
Sent: Wednesday, October 20, 2021 9:33 AM
To: Zhao, Yuanyuan <yuanyuan.zhao@...>
Cc: acrn-dev@...
Subject: Re: [PATCH] dm: replace UUID with vmname.

On Tue, Oct 19, 2021 at 04:45:26PM +0800, Zhao, Yuanyuan wrote:


-----Original Message-----
From: Wang, Yu1 <yu1.wang@...>
Sent: Tuesday, October 19, 2021 11:01 AM
To: Zhao, Yuanyuan <yuanyuan.zhao@...>
Cc: acrn-dev@...
Subject: Re: [PATCH] dm: replace UUID with vmname.

There is a warning when applying the patch:

ywan170@ywan170-OptiPlex-7050:~/work2/acrn-
hypervisor/devicemodel$ git am ~/incoming/uuid.patch
Applying: dm: replace UUID with vmname.
.git/rebase-apply/patch:111: trailing whitespace.

warning: 1 line adds whitespace errors.

Another thing, please follow the guide to configure your git environment.
Somehow, the Signed-off has lost.

https://projectacrn.github.io/2.5/developer-
guides/contribute_guidelines.html

On Tue, Oct 19, 2021 at 09:36:05AM +0800, Yuanyuan Zhao wrote:
There is overlap in the usage of UUID and vmname.
Since UUID is not explicit for users, replace it with vmname as the
identifier for VM and remove it from the acrn-dm parameter list.
The commit message is not enough. You need to explain the background.

The UUID has several usages before:
1, For HV to identify the static VM configuration of post-launched VM.
2, Seed virtualization.
3, Slightly prevent launching malicious VM from SOS as lack of secure boot.

The UUID is confusing to users: they don't understand what it is, and
they don't know where to get/apply the UUID. The worst experience is that
users can't launch any VMs without re-compiling the hv. Everything needs to
be statically decided in the building phase.

Now we decide to remove UUID and split each usage. For the 1st usage,
use vmname as the identifier of static VM configuration. For the 2nd
one, we will use --vseed as the new parameter. The 3rd one will be protected
by SOS's dm-verity.

This patch will remove the UUID parameter and support 1st&3rd usages
from DM part. For 2nd usage, another patch will be submitted later.


---
devicemodel/core/hugetlb.c | 19 ++-----------------
devicemodel/core/main.c | 15 ++++++---------
devicemodel/core/vmmapi.c | 17 ++---------------
devicemodel/include/dm.h | 3 +--
devicemodel/include/vmmapi.h | 1 -
5 files changed, 11 insertions(+), 44 deletions(-)

diff --git a/devicemodel/core/hugetlb.c
b/devicemodel/core/hugetlb.c index e14faf46e..9b02c4b5a 100644
--- a/devicemodel/core/hugetlb.c
+++ b/devicemodel/core/hugetlb.c
@@ -168,8 +168,6 @@ static int unlock_acrn_hugetlb(void)

static int open_hugetlbfs(struct vmctx *ctx, int level) {
- char uuid_str[48];
- uint8_t UUID[16];
char *path;
size_t len;
struct statfs fs;
@@ -181,27 +179,14 @@ static int open_hugetlbfs(struct vmctx *ctx,
int
level)

path = hugetlb_priv[level].node_path;
memset(path, '\0', MAX_PATH_LEN);
- snprintf(path, MAX_PATH_LEN, "%s%s/",
hugetlb_priv[level].mount_path, ctx->name);
+ snprintf(path, MAX_PATH_LEN, "%s%s/hugetlb",
+hugetlb_priv[level].mount_path, ctx->name);
Why we need to add "hugetlb" as the suffix?
[Yuanyuan:] "hugetlb" replace UUID as file name.
We already have the vmname as the file name, right?
[Yuanyuan:] OK, I will remove the hugetlb str.


len = strnlen(path, MAX_PATH_LEN);
- /* UUID will use 32 bytes */
- if (len + 32 > MAX_PATH_LEN) {
+ if (len > MAX_PATH_LEN) {
pr_err("PATH overflow");
return -ENOMEM;
}

- uuid_copy(UUID, ctx->vm_uuid);
- snprintf(uuid_str, sizeof(uuid_str),
- "%02X%02X%02X%02X%02X%02X%02X%02X"
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- UUID[0], UUID[1], UUID[2], UUID[3],
- UUID[4], UUID[5], UUID[6], UUID[7],
- UUID[8], UUID[9], UUID[10], UUID[11],
- UUID[12], UUID[13], UUID[14], UUID[15]);
-
- *(path + len) = '\0';
- strncat(path, uuid_str, strnlen(uuid_str, sizeof(uuid_str)));
-
pr_info("open hugetlbfs file %s\n", path);

hugetlb_priv[level].fd = open(path, O_CREAT | O_RDWR, 0644);
diff --git a/devicemodel/core/main.c b/devicemodel/core/main.c
index
957c4d93f..2447b5f20 100644
--- a/devicemodel/core/main.c
+++ b/devicemodel/core/main.c
@@ -81,7 +81,6 @@ typedef void (*vmexit_handler_t)(struct vmctx *,

char *vmname;

-char *guest_uuid_str;
char *vsbl_file_name;
char *ovmf_file_name;
char *ovmf_code_file_name;
@@ -145,7 +144,7 @@ usage(int code)
"Usage: %s [-hAWYv] [-B bootargs] [-E elf_image_path]\n"
" %*s [-G GVT_args] [-i ioc_mediator_parameters] [-k
kernel_image_path]\n"
" %*s [-l lpc] [-m mem] [-r ramdisk_image_path]\n"
- " %*s [-s pci] [-U uuid] [--vsbl vsbl_file_name] [--ovmf
ovmf_file_path]\n"
+ " %*s [-s pci] [--vsbl vsbl_file_name] [--ovmf
ovmf_file_path]\n"
" %*s [--part_info part_info_name] [--enable_trusty] [--
intr_monitor param_setting]\n"
" %*s [--acpidev_pt HID] [--mmiodev_pt
MMIO_Regions]\n"
" %*s [--vtpm2 sock_path] [--virtio_poll interval] [--
mac_seed seed_string]\n"
@@ -164,7 +163,6 @@ usage(int code)
" -m: memory size in MB\n"
" -r: ramdisk image path\n"
" -s: <slot,driver,configinfo> PCI slot config\n"
- " -U: uuid\n"
" -v: version\n"
" -W: force virtio to use single-vector MSI\n"
" -Y: disable MPtable generation\n"
@@ -777,7 +775,6 @@ static struct option long_options[] = {
{"lpc", required_argument, 0, 'l' },
{"pci_slot", required_argument, 0, 's' },
{"memsize", required_argument, 0, 'm' },
- {"uuid", required_argument, 0, 'U' },
{"virtio_msix", no_argument, 0, 'W' },
{"mptgen", no_argument, 0, 'Y' },
{"kernel", required_argument, 0, 'k' },
@@ -872,9 +869,6 @@ main(int argc, char *argv[])
if (vm_parse_memsize(optarg, &memsize) != 0)
errx(EX_USAGE, "invalid memsize '%s'",
optarg);
break;
- case 'U':
- guest_uuid_str = optarg;
- break;
case 'W':
virtio_msix = 0;
break;
@@ -1006,11 +1000,14 @@ main(int argc, char *argv[])
argc -= optind;
argv += optind;

- if (argc != 1)
+ if (argc != 1) {
+ pr_err("The vmname(<vm>) is necessary!\n");
usage(1);
+ }

vmname = argv[0];
- if (strnlen(vmname, MAX_VMNAME_LEN) >= MAX_VMNAME_LEN)
{
+
+ if (strnlen(vmname, PATH_MAX) > MAX_VMNAME_LEN) {
Why?
[Yuanyuan:] I think 'strnlen' can't find out if the name is longer than n.
If the name length is MAX_VMNAME_LEN, a bigger string
length should be set.

The strnlen has excluded the '\0', so if the returned length is equal to
MAX_VMNAME_LEN, then it means exceed, right?
[Yuanyuan:] Yes. These code assume the max length of vmname str is 16.
Now the length limit is 15, I will set the n as MAX_VMNAME_LEN.

pr_err("vmname size exceed %u\n", MAX_VMNAME_LEN);
exit(1);
}
diff --git a/devicemodel/core/vmmapi.c b/devicemodel/core/vmmapi.c
index 9c9e54d02..6ffa2f507 100644
--- a/devicemodel/core/vmmapi.c
+++ b/devicemodel/core/vmmapi.c
@@ -167,7 +167,6 @@ vm_create(const char *name, uint64_t req_buf,
int
*vcpu_num)
struct vmctx *ctx;
struct acrn_vm_creation create_vm;
int error, retry = 10;
- uuid_t vm_uuid;
struct stat tmp_st;

memset(&create_vm, 0, sizeof(struct acrn_vm_creation)); @@ -
187,19
+186,6 @@ vm_create(const char *name, uint64_t req_buf, int
*vcpu_num)
goto err;
}

- if (guest_uuid_str == NULL)
- guest_uuid_str = "d2795438-25d6-11e8-864e-cb7a18b34643";
-
- error = uuid_parse(guest_uuid_str, vm_uuid);
- if (error != 0)
- goto err;
-
- /* save vm uuid to ctx */
- uuid_copy(ctx->vm_uuid, vm_uuid);
-
- /* Pass uuid as parameter of create vm*/
- uuid_copy(create_vm.uuid, vm_uuid);
-
ctx->gvt_enabled = false;
ctx->fd = devfd;
ctx->lowmem_limit = PCI_EMUL_MEMBASE32; @@ -224,6 +210,7
@@
vm_create(const char *name, uint64_t req_buf, int *vcpu_num)

/* command line arguments specified CPU affinity could
overwrite
HV's static configuration */
create_vm.cpu_affinity = cpu_affinity_bitmap;
+ strncpy((char *)create_vm.name, name, strnlen(name,
+MAX_VMNAME_LEN));
Which patch will add the name field in acrn_vm_creation? BTW, does
HSM driver also needs to be changed? Or just rename the uuid field to
name?

[Yuanyuan:] HV patch is https://lists.projectacrn.org/g/acrn-
dev/message/33570
And HSM driver has nothing to do with uuid.
Get it. I will review together.



if (is_rtvm) {
create_vm.vm_flag |= GUEST_FLAG_RT; @@ -711,7 +698,7
@@
vm_get_config(struct vmctx *ctx, struct acrn_vm_config_header
*vm_cfg, struct ac

for (i = 0; i < platform_info.sw.max_vms; i++) {
pcfg = (struct acrn_vm_config_header *)(configs_buff +
(i *
platform_info.sw.vm_config_size));
- if (!uuid_compare(ctx->vm_uuid, pcfg->uuid))
+ if (!strncmp(ctx->name, pcfg->name, strnlen(ctx->name,
+MAX_VMNAME_LEN)))
break;
}

diff --git a/devicemodel/include/dm.h b/devicemodel/include/dm.h
index 1d28e1b90..ea3499b9b 100644
--- a/devicemodel/include/dm.h
+++ b/devicemodel/include/dm.h
@@ -33,10 +33,9 @@
#include "types.h"
#include "dm_string.h"

-#define MAX_VMNAME_LEN 128U
+#define MAX_VMNAME_LEN 16U

struct vmctx;
-extern char *guest_uuid_str;
extern uint8_t trusty_enabled;
extern char *vsbl_file_name;
extern char *ovmf_file_name;
diff --git a/devicemodel/include/vmmapi.h
b/devicemodel/include/vmmapi.h index c8dab4f52..523c2ef10 100644
--- a/devicemodel/include/vmmapi.h
+++ b/devicemodel/include/vmmapi.h
@@ -57,7 +57,6 @@ struct vmctx {
size_t highmem;
char *baseaddr;
char *name;
- uuid_t vm_uuid;

/* fields to track virtual devices */
void *atkbdc_base;
--
2.17.1


2021 ACRN Project Technical Community Meeting (2021/1~2021/12): @ Monthly 3rd Wednesday 4PM (China-Shanghai), Wednesday 10AM (Europe-Munich), Tuesday 1AM (US-West Coast)

Zou, Terry
 

Special Notes: If you have Zoom connection issue by using web browser, please install & launch Zoom application, manually input the meeting ID (320664063) to join the Zoom meeting.
 
Agenda & Archives:
WW Topic Presenter Status
WW04 ACRN PCI based vUART introduction Tao Yuhong 1/20/2021
Chinese New Year Break
WW13 ACRN Real-Time Enhancement Introduction Huang Yonghua 3/24/2021
WW17 Enable ACRN on TGL NUC11 Liu Fuzhong 4/21/2021
WW21 ACRN Memory Layout Related Boot Issue Diagnosis Sun Victor 5/19/2021
WW30 ACRN Config Tool 2.0 Introduction Xie Nanlin 7/21/2021
WW34 ACRN RTVM  Performance of Sharing Storage Cao Minggui 8/18/2021
WW39 ACRN Software SRAM Introduction Huang Yonghua 9/15/2021
WW47 ACRN Nested Virtualization Introduction Shen Fangfang 11/17/2021
 
Project ACRN: A flexible, light-weight, open source reference hypervisor for IoT devices
We invite you to attend a monthly "Technical Community" meeting where we'll meet community members and talk about the ACRN project and plans.
As we explore community interest and involvement opportunities, we'll (re)schedule these meetings at a time convenient to most attendees:
  • Meets every 3rd Wednesday, Starting Jan 20, 2021: 4-5:00 PM (China-Shanghai), Wednesday 10-11:00 AM (Europe-Munich), Tuesday 1-2:00 AM (US-West Coast)
  • Chairperson: Terry ZOU, terry.zou@... (Intel)
  • Online conference link: https://zoom.com.cn/j/320664063
  • Zoom Meeting ID: 320 664 063
  • Special Notes: If you have Zoom connection issue by using web browser, please launch Zoom application, manually input the meeting ID (320664063) to join the Zoom meeting.
  • Online conference phone:
  • China: +86 010 87833177  or 400 669 9381 (Toll Free)
  • Germany: +49 (0) 30 3080 6188  or +49 800 724 3138 (Toll Free)
  • US: +1 669 900 6833  or +1 646 558 8656   or +1 877 369 0926 (Toll Free) or +1 855 880 1246 (Toll Free)
  • Additional international phone numbers
  • Meeting Notes:
 
 


Canceled: 2021 ACRN Project Technical Community Meeting (2021/1~2021/12): @ Monthly 3rd Wednesday 4PM (China-Shanghai), Wednesday 10AM (Europe-Munich), Tuesday 1AM (US-West Coast)

Zou, Terry
 

Reschedule ‘ACRN Nested Virtualization Introduction’ to Nov, thanks.
 
WW47 ACRN Nested Virtualization Introduction Shen Fangfang 11/17/2021
 
Special Notes: If you have Zoom connection issue by using web browser, please install & launch Zoom application, manually input the meeting ID (320664063) to join the Zoom meeting.
 
Agenda & Archives:
WW Topic Presenter Status
WW04 ACRN PCI based vUART introduction Tao Yuhong 1/20/2021
Chinese New Year Break
WW13 ACRN Real-Time Enhancement Introduction Huang Yonghua 3/24/2021
WW17 Enable ACRN on TGL NUC11 Liu Fuzhong 4/21/2021
WW21 ACRN Memory Layout Related Boot Issue Diagnosis Sun Victor 5/19/2021
WW30 ACRN Config Tool 2.0 Introduction Xie Nanlin 7/21/2021
WW34 ACRN RTVM  Performance of Sharing Storage Cao Minggui 8/18/2021
WW39 ACRN Software SRAM Introduction Huang Yonghua 9/15/2021
WW47 ACRN Nested Virtualization Introduction Shen Fangfang 11/17/2021
 
Project ACRN: A flexible, light-weight, open source reference hypervisor for IoT devices
We invite you to attend a monthly "Technical Community" meeting where we'll meet community members and talk about the ACRN project and plans.
As we explore community interest and involvement opportunities, we'll (re)schedule these meetings at a time convenient to most attendees:
  • Meets every 3rd Wednesday, Starting Jan 20, 2021: 4-5:00 PM (China-Shanghai), Wednesday 10-11:00 AM (Europe-Munich), Tuesday 1-2:00 AM (US-West Coast)
  • Chairperson: Terry ZOU, terry.zou@... (Intel)
  • Online conference link: https://zoom.com.cn/j/320664063
  • Zoom Meeting ID: 320 664 063
  • Special Notes: If you have Zoom connection issue by using web browser, please launch Zoom application, manually input the meeting ID (320664063) to join the Zoom meeting.
  • Online conference phone:
  • China: +86 010 87833177  or 400 669 9381 (Toll Free)
  • Germany: +49 (0) 30 3080 6188  or +49 800 724 3138 (Toll Free)
  • US: +1 669 900 6833  or +1 646 558 8656   or +1 877 369 0926 (Toll Free) or +1 855 880 1246 (Toll Free)
  • Additional international phone numbers
  • Meeting Notes:
 
 


Re: [PATCH] dm: replace UUID with vmname.

Yu Wang
 

On Tue, Oct 19, 2021 at 04:45:26PM +0800, Zhao, Yuanyuan wrote:


-----Original Message-----
From: Wang, Yu1 <yu1.wang@...>
Sent: Tuesday, October 19, 2021 11:01 AM
To: Zhao, Yuanyuan <yuanyuan.zhao@...>
Cc: acrn-dev@...
Subject: Re: [PATCH] dm: replace UUID with vmname.

There is a warning when applying the patch:

ywan170@ywan170-OptiPlex-7050:~/work2/acrn-
hypervisor/devicemodel$ git am ~/incoming/uuid.patch
Applying: dm: replace UUID with vmname.
.git/rebase-apply/patch:111: trailing whitespace.

warning: 1 line adds whitespace errors.

Another thing, please follow the guide to configure your git environment.
Somehow, the Signed-off has lost.

https://projectacrn.github.io/2.5/developer-
guides/contribute_guidelines.html

On Tue, Oct 19, 2021 at 09:36:05AM +0800, Yuanyuan Zhao wrote:
There is overlap in the usage of UUID and vmname.
Since UUID is not explicit for users,
replace it with vmname as the identifier for VM and remove it from
the acrn-dm parameter list.
The commit message is not enough. You need to explain the background.

The UUID has several usages before:
1, For HV to identify the static VM configuration of post-launched VM.
2, Seed virtualization.
3, Slightly prevent launching malicious VM from SOS as lack of secure boot.

The UUID is confusing to users: they don't understand what it is, and they
don't know where to get/apply the UUID. The worst experience is that users can't
launch any VMs without re-compiling the hv. Everything needs to be statically decided
in the building phase.

Now we decide to remove UUID and split each usage. For the 1st usage, use
vmname as the identifier of static VM configuration. For the 2nd one, we will use
--vseed as the new parameter. The 3rd one will be protected by SOS's dm-verity.

This patch will remove the UUID parameter and support 1st&3rd usages from
DM part. For 2nd usage, another patch will be submitted later.


---
devicemodel/core/hugetlb.c | 19 ++-----------------
devicemodel/core/main.c | 15 ++++++---------
devicemodel/core/vmmapi.c | 17 ++---------------
devicemodel/include/dm.h | 3 +--
devicemodel/include/vmmapi.h | 1 -
5 files changed, 11 insertions(+), 44 deletions(-)

diff --git a/devicemodel/core/hugetlb.c b/devicemodel/core/hugetlb.c
index e14faf46e..9b02c4b5a 100644
--- a/devicemodel/core/hugetlb.c
+++ b/devicemodel/core/hugetlb.c
@@ -168,8 +168,6 @@ static int unlock_acrn_hugetlb(void)

static int open_hugetlbfs(struct vmctx *ctx, int level) {
- char uuid_str[48];
- uint8_t UUID[16];
char *path;
size_t len;
struct statfs fs;
@@ -181,27 +179,14 @@ static int open_hugetlbfs(struct vmctx *ctx, int
level)

path = hugetlb_priv[level].node_path;
memset(path, '\0', MAX_PATH_LEN);
- snprintf(path, MAX_PATH_LEN, "%s%s/",
hugetlb_priv[level].mount_path, ctx->name);
+ snprintf(path, MAX_PATH_LEN, "%s%s/hugetlb",
+hugetlb_priv[level].mount_path, ctx->name);
Why we need to add "hugetlb" as the suffix?
[Yuanyuan:] "hugetlb" replace UUID as file name.
We already have the vmname as the file name, right?



len = strnlen(path, MAX_PATH_LEN);
- /* UUID will use 32 bytes */
- if (len + 32 > MAX_PATH_LEN) {
+ if (len > MAX_PATH_LEN) {
pr_err("PATH overflow");
return -ENOMEM;
}

- uuid_copy(UUID, ctx->vm_uuid);
- snprintf(uuid_str, sizeof(uuid_str),
- "%02X%02X%02X%02X%02X%02X%02X%02X"
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- UUID[0], UUID[1], UUID[2], UUID[3],
- UUID[4], UUID[5], UUID[6], UUID[7],
- UUID[8], UUID[9], UUID[10], UUID[11],
- UUID[12], UUID[13], UUID[14], UUID[15]);
-
- *(path + len) = '\0';
- strncat(path, uuid_str, strnlen(uuid_str, sizeof(uuid_str)));
-
pr_info("open hugetlbfs file %s\n", path);

hugetlb_priv[level].fd = open(path, O_CREAT | O_RDWR, 0644); diff
--git a/devicemodel/core/main.c b/devicemodel/core/main.c index
957c4d93f..2447b5f20 100644
--- a/devicemodel/core/main.c
+++ b/devicemodel/core/main.c
@@ -81,7 +81,6 @@ typedef void (*vmexit_handler_t)(struct vmctx *,

char *vmname;

-char *guest_uuid_str;
char *vsbl_file_name;
char *ovmf_file_name;
char *ovmf_code_file_name;
@@ -145,7 +144,7 @@ usage(int code)
"Usage: %s [-hAWYv] [-B bootargs] [-E elf_image_path]\n"
" %*s [-G GVT_args] [-i ioc_mediator_parameters] [-k
kernel_image_path]\n"
" %*s [-l lpc] [-m mem] [-r ramdisk_image_path]\n"
- " %*s [-s pci] [-U uuid] [--vsbl vsbl_file_name] [--ovmf
ovmf_file_path]\n"
+ " %*s [-s pci] [--vsbl vsbl_file_name] [--ovmf
ovmf_file_path]\n"
" %*s [--part_info part_info_name] [--enable_trusty] [--
intr_monitor param_setting]\n"
" %*s [--acpidev_pt HID] [--mmiodev_pt
MMIO_Regions]\n"
" %*s [--vtpm2 sock_path] [--virtio_poll interval] [--
mac_seed seed_string]\n"
@@ -164,7 +163,6 @@ usage(int code)
" -m: memory size in MB\n"
" -r: ramdisk image path\n"
" -s: <slot,driver,configinfo> PCI slot config\n"
- " -U: uuid\n"
" -v: version\n"
" -W: force virtio to use single-vector MSI\n"
" -Y: disable MPtable generation\n"
@@ -777,7 +775,6 @@ static struct option long_options[] = {
{"lpc", required_argument, 0, 'l' },
{"pci_slot", required_argument, 0, 's' },
{"memsize", required_argument, 0, 'm' },
- {"uuid", required_argument, 0, 'U' },
{"virtio_msix", no_argument, 0, 'W' },
{"mptgen", no_argument, 0, 'Y' },
{"kernel", required_argument, 0, 'k' },
@@ -872,9 +869,6 @@ main(int argc, char *argv[])
if (vm_parse_memsize(optarg, &memsize) != 0)
errx(EX_USAGE, "invalid memsize '%s'",
optarg);
break;
- case 'U':
- guest_uuid_str = optarg;
- break;
case 'W':
virtio_msix = 0;
break;
@@ -1006,11 +1000,14 @@ main(int argc, char *argv[])
argc -= optind;
argv += optind;

- if (argc != 1)
+ if (argc != 1) {
+ pr_err("The vmname(<vm>) is necessary!\n");
usage(1);
+ }

vmname = argv[0];
- if (strnlen(vmname, MAX_VMNAME_LEN) >= MAX_VMNAME_LEN)
{
+
+ if (strnlen(vmname, PATH_MAX) > MAX_VMNAME_LEN) {
Why?
[Yuanyuan:] I think 'strnlen' can't find out if the name is longer than n.
If the name length is MAX_VMNAME_LEN, a bigger string length should be set.
The strnlen has excluded the '\0', so if the returned length is equal to
MAX_VMNAME_LEN, then it means exceed, right?


pr_err("vmname size exceed %u\n", MAX_VMNAME_LEN);
exit(1);
}
diff --git a/devicemodel/core/vmmapi.c b/devicemodel/core/vmmapi.c
index 9c9e54d02..6ffa2f507 100644
--- a/devicemodel/core/vmmapi.c
+++ b/devicemodel/core/vmmapi.c
@@ -167,7 +167,6 @@ vm_create(const char *name, uint64_t req_buf, int
*vcpu_num)
struct vmctx *ctx;
struct acrn_vm_creation create_vm;
int error, retry = 10;
- uuid_t vm_uuid;
struct stat tmp_st;

memset(&create_vm, 0, sizeof(struct acrn_vm_creation)); @@ -
187,19
+186,6 @@ vm_create(const char *name, uint64_t req_buf, int
*vcpu_num)
goto err;
}

- if (guest_uuid_str == NULL)
- guest_uuid_str = "d2795438-25d6-11e8-864e-cb7a18b34643";
-
- error = uuid_parse(guest_uuid_str, vm_uuid);
- if (error != 0)
- goto err;
-
- /* save vm uuid to ctx */
- uuid_copy(ctx->vm_uuid, vm_uuid);
-
- /* Pass uuid as parameter of create vm*/
- uuid_copy(create_vm.uuid, vm_uuid);
-
ctx->gvt_enabled = false;
ctx->fd = devfd;
ctx->lowmem_limit = PCI_EMUL_MEMBASE32; @@ -224,6 +210,7
@@
vm_create(const char *name, uint64_t req_buf, int *vcpu_num)

/* command line arguments specified CPU affinity could overwrite
HV's static configuration */
create_vm.cpu_affinity = cpu_affinity_bitmap;
+ strncpy((char *)create_vm.name, name, strnlen(name,
+MAX_VMNAME_LEN));
Which patch will add the name field in acrn_vm_creation? BTW, does HSM
driver also needs to be changed? Or just rename the uuid field to name?
[Yuanyuan:] HV patch is https://lists.projectacrn.org/g/acrn-dev/message/33570
And HSM driver has nothing to do with uuid.
Get it. I will review together.



if (is_rtvm) {
create_vm.vm_flag |= GUEST_FLAG_RT; @@ -711,7 +698,7
@@
vm_get_config(struct vmctx *ctx, struct acrn_vm_config_header *vm_cfg,
struct ac

for (i = 0; i < platform_info.sw.max_vms; i++) {
pcfg = (struct acrn_vm_config_header *)(configs_buff + (i *
platform_info.sw.vm_config_size));
- if (!uuid_compare(ctx->vm_uuid, pcfg->uuid))
+ if (!strncmp(ctx->name, pcfg->name, strnlen(ctx->name,
+MAX_VMNAME_LEN)))
break;
}

diff --git a/devicemodel/include/dm.h b/devicemodel/include/dm.h index
1d28e1b90..ea3499b9b 100644
--- a/devicemodel/include/dm.h
+++ b/devicemodel/include/dm.h
@@ -33,10 +33,9 @@
#include "types.h"
#include "dm_string.h"

-#define MAX_VMNAME_LEN 128U
+#define MAX_VMNAME_LEN 16U

struct vmctx;
-extern char *guest_uuid_str;
extern uint8_t trusty_enabled;
extern char *vsbl_file_name;
extern char *ovmf_file_name;
diff --git a/devicemodel/include/vmmapi.h
b/devicemodel/include/vmmapi.h index c8dab4f52..523c2ef10 100644
--- a/devicemodel/include/vmmapi.h
+++ b/devicemodel/include/vmmapi.h
@@ -57,7 +57,6 @@ struct vmctx {
size_t highmem;
char *baseaddr;
char *name;
- uuid_t vm_uuid;

/* fields to track virtual devices */
void *atkbdc_base;
--
2.17.1


[PATCH V6 1/8] hv/config-tools: add the support for vCAT

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Add the VCAT_ENABLED element to RDTType so that user can enable/disable vCAT globally

Add the GUEST_FLAG_VCAT_ENABLED guest flag to enable/disable vCAT per-VM.

Currently we have the following per-VM clos element in scenario file for RDT use:
<clos>
<vcpu_clos>0</vcpu_clos>
<vcpu_clos>0</vcpu_clos>
</clos>

When the GUEST_FLAG_VCAT_ENABLED guest flag is not specified, clos is for RDT use,
vcpu_clos is per-CPU and it configures each CPU in VMs to a desired CLOS ID.

When the GUEST_FLAG_VCAT_ENABLED guest flag is specified, vCAT is enabled for this VM,
clos is for vCAT use, vcpu_clos is not per-CPU anymore in this case, just a list of
physical CLOSIDs (minimum 2) that are assigned to VMs for vCAT use. Each vcpu_clos
will be mapped to a virtual CLOSID, the first vcpu_clos is mapped to virtual CLOSID
0 and the second is mapped to virtual CLOSID 1, etc

Add xs:assert to prevent any problems with invalid configuration data for vCAT:

If any GUEST_FLAG_VCAT_ENABLED guest flag is specified, both RDT_ENABLED and VCAT_ENABLED
must be 'y'

If VCAT_ENABLED is 'y', RDT_ENABLED must be 'y' and CDP_ENABLED must be 'n'

For a vCAT VM, vcpu_clos cannot be set to CLOSID 0, CLOSID 0 is reserved to be used by hypervisor

For a vCAT VM, number of clos/vcpu_clos elements must be greater than 1

For a vCAT VM, each clos/vcpu_clos must be less than L2/L3 COS_MAX

For a vCAT VM, its clos/vcpu_clos elements cannot contain duplicate values

There should not be any CLOS IDs overlap between a vCAT VM and any other VMs

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/include/public/acrn_common.h | 1 +
misc/config_tools/library/common.py | 2 +-
misc/config_tools/library/scenario_cfg_lib.py | 5 +-
.../scenario_config/scenario_item.py | 2 +-
misc/config_tools/schema/VMtypes.xsd | 13 +++-
misc/config_tools/schema/config.xsd | 73 +++++++++++++++++++
misc/config_tools/schema/types.xsd | 10 ++-
misc/config_tools/xforms/config_common.xsl | 11 ++-
misc/config_tools/xforms/lib.xsl | 2 +-
9 files changed, 107 insertions(+), 12 deletions(-)

diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index de460853e..f8a8a76db 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -57,6 +57,7 @@
#define GUEST_FLAG_RT (1UL << 4U) /* Whether the vm is RT-VM */
#define GUEST_FLAG_NVMX_ENABLED (1UL << 5U) /* Whether this VM supports nested virtualization */
#define GUEST_FLAG_SECURITY_VM (1UL << 6U) /* Whether this VM needs to do security-vm related fixup (TPM2 and SMBIOS pt) */
+#define GUEST_FLAG_VCAT_ENABLED (1UL << 7U) /* Whether this VM supports vCAT */

/* TODO: We may need to get this addr from guest ACPI instead of hardcode here */
#define VIRTUAL_SLEEP_CTL_ADDR 0x400U /* Pre-launched VM uses ACPI reduced HW mode and sleep control register */
diff --git a/misc/config_tools/library/common.py b/misc/config_tools/library/common.py
index cc5de6115..fc09be4c0 100644
--- a/misc/config_tools/library/common.py
+++ b/misc/config_tools/library/common.py
@@ -23,7 +23,7 @@ DATACHECK_SCHEMA_FILE = SOURCE_ROOT_DIR + 'misc/config_tools/schema/datachecks.x
PY_CACHES = ["__pycache__", "../board_config/__pycache__", "../scenario_config/__pycache__"]
GUEST_FLAG = ["0", "0UL", "GUEST_FLAG_SECURE_WORLD_ENABLED", "GUEST_FLAG_LAPIC_PASSTHROUGH",
"GUEST_FLAG_IO_COMPLETION_POLLING", "GUEST_FLAG_NVMX_ENABLED", "GUEST_FLAG_HIDE_MTRR",
- "GUEST_FLAG_RT", "GUEST_FLAG_SECURITY_VM"]
+ "GUEST_FLAG_RT", "GUEST_FLAG_SECURITY_VM", "GUEST_FLAG_VCAT_ENABLED"]

MULTI_ITEM = ["guest_flag", "pcpu_id", "vcpu_clos", "input", "block", "network", "pci_dev", "shm_region", "communication_vuart"]

diff --git a/misc/config_tools/library/scenario_cfg_lib.py b/misc/config_tools/library/scenario_cfg_lib.py
index 4a2be7cd2..ffb8aa3bc 100644
--- a/misc/config_tools/library/scenario_cfg_lib.py
+++ b/misc/config_tools/library/scenario_cfg_lib.py
@@ -1044,7 +1044,7 @@ def check_target_connection(vm_id, target_vm_id, target_uart_id, vm_visited, leg
raise TargetError("target vm{}'s vuart{} is not present".format(target_vm_id ,target_uart_id))


-def vcpu_clos_check(cpus_per_vm, clos_per_vm, prime_item, item):
+def vcpu_clos_check(cpus_per_vm, clos_per_vm, guest_flags, prime_item, item):

if not board_cfg_lib.is_rdt_enabled():
return
@@ -1052,6 +1052,9 @@ def vcpu_clos_check(cpus_per_vm, clos_per_vm, prime_item, item):
common_clos_max = board_cfg_lib.get_common_clos_max()

for vm_i,vcpus in cpus_per_vm.items():
+ if "GUEST_FLAG_VCAT_ENABLED" in guest_flags[vm_i]:
+ continue
+
clos_per_vm_len = 0
if vm_i in clos_per_vm:
clos_per_vm_len = len(clos_per_vm[vm_i])
diff --git a/misc/config_tools/scenario_config/scenario_item.py b/misc/config_tools/scenario_config/scenario_item.py
index b362ced71..662af1a1b 100644
--- a/misc/config_tools/scenario_config/scenario_item.py
+++ b/misc/config_tools/scenario_config/scenario_item.py
@@ -400,7 +400,7 @@ class VmInfo:
scenario_cfg_lib.load_vm_check(self.load_vm, "load_vm")
scenario_cfg_lib.guest_flag_check(self.guest_flags, "guest_flags", "guest_flag")
err_dic = scenario_cfg_lib.vm_cpu_affinity_check(self.scenario_info, self.cpus_per_vm, "pcpu_id")
- scenario_cfg_lib.vcpu_clos_check(self.cpus_per_vm, self.clos_per_vm, "clos", "vcpu_clos")
+ scenario_cfg_lib.vcpu_clos_check(self.cpus_per_vm, self.clos_per_vm, self.guest_flags, "clos", "vcpu_clos")

self.mem_info.check_item()
self.os_cfg.check_item()
diff --git a/misc/config_tools/schema/VMtypes.xsd b/misc/config_tools/schema/VMtypes.xsd
index f05ef821a..6f398815c 100644
--- a/misc/config_tools/schema/VMtypes.xsd
+++ b/misc/config_tools/schema/VMtypes.xsd
@@ -39,6 +39,7 @@
- ``GUEST_FLAG_RT`` specify that the VM is an RT-VM (real-time)
- ``GUEST_FLAG_NVMX_ENABLED`` specify that the VM supports nested virtualization
- ``GUEST_FLAG_SECURITY_VM`` specify that the VM needs to do security-vm related
+- ``GUEST_FLAG_VCAT_ENABLED`` specify that the VM supports CAT virtualization
fixup (TPM2 passthrough and SMBIOS passthrough)</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
@@ -52,6 +53,7 @@
<xs:enumeration value="GUEST_FLAG_RT" />
<xs:enumeration value="GUEST_FLAG_NVMX_ENABLED" />
<xs:enumeration value="GUEST_FLAG_SECURITY_VM" />
+ <xs:enumeration value="GUEST_FLAG_VCAT_ENABLED" />
</xs:restriction>
</xs:simpleType>

@@ -85,9 +87,14 @@ to.</xs:documentation>
<xs:sequence>
<xs:element name="vcpu_clos" type="xs:integer" default="0" maxOccurs="unbounded">
<xs:annotation>
- <xs:documentation>Configure each CPU in VMs to a desired CLOS ID in the ``VM`` section of the
-scenario file. Follow :ref:`rdt_detection_capabilities`
-to identify the maximum supported CLOS ID that can be used.</xs:documentation>
+ <xs:documentation>By default (``GUEST_FLAG_VCAT_ENABLED`` is not specified):
+vcpu_clos is per-CPU and it configures each CPU in VMs to a desired CLOS ID in the ``VM`` section of the
+scenario file. Follow :ref:`rdt_detection_capabilities` to identify the maximum supported CLOS ID that can be used.
+
+If ``GUEST_FLAG_VCAT_ENABLED`` is specified:
+vcpu_clos is not per-CPU anymore, just a list of physical CLOSIDs (minimum 2) that are assigned to VMs
+for vCAT use. Each vcpu_clos will be mapped to a virtual CLOSID, the first vcpu_clos is mapped to virtual
+CLOSID 0 and the second is mapped to virtual CLOSID 1, etc.</xs:documentation>
</xs:annotation>
</xs:element>
</xs:sequence>
diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd
index 36b44fb28..5dd8687f4 100644
--- a/misc/config_tools/schema/config.xsd
+++ b/misc/config_tools/schema/config.xsd
@@ -492,6 +492,79 @@ to launch post-launched User VMs.</xs:documentation>
<xs:documentation>Per VM GUEST_FLAG_NVMX_ENABLED can be set only if CONFIG_NVMX_ENABLED is set.</xs:documentation>
</xs:annotation>
</xs:assert>
+
+ <xs:assert test="if (//RDT_ENABLED = 'y')
+ then not (//CDP_ENABLED = 'y' and //VCAT_ENABLED = 'y')
+ else true()">
+ <xs:annotation>
+ <xs:documentation>vCAT can be enabled only when RDT_ENABLED is 'y' and CDP_ENABLED is 'n'</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
+ <xs:assert test="if (count(//guest_flag[text() = 'GUEST_FLAG_VCAT_ENABLED']) > 0)
+ then //RDT_ENABLED = 'y' and //VCAT_ENABLED = 'y'
+ else true()">
+ <xs:annotation>
+ <xs:documentation>Per VM GUEST_FLAG_VCAT_ENABLED can be set only when RDT_ENABLED is 'y' and VCAT_ENABLED is 'y'.</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
+ <xs:assert test="every $vm in vm satisfies
+ (
+ if (//RDT_ENABLED = 'y' and //VCAT_ENABLED = 'y' and $vm/guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED'])
+ then count($vm/clos/vcpu_clos) > 1
+ else true()
+ )
+ ">
+ <xs:annotation>
+ <xs:documentation>For a vCAT VM, number of clos/vcpu_clos elements must be greater than 1!</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
+ <xs:assert test="if (//RDT_ENABLED = 'y' and //VCAT_ENABLED = 'y')
+ then count(vm[guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED'] and count(clos/vcpu_clos[. = 0])]) = 0
+ else true()">
+ <xs:annotation>
+ <xs:documentation>For a vCAT VM, vcpu_clos cannot be set to CLOSID 0, CLOSID 0 is reserved to be used by hypervisor</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
+ <xs:assert test="every $vm in vm satisfies
+ (
+ if (//RDT_ENABLED = 'y' and //VCAT_ENABLED = 'y' and $vm/guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED'])
+ then count($vm[clos/vcpu_clos[. &gt;= count($vm/..//CLOS_MASK)]]) = 0
+ else true()
+ )
+ ">
+ <xs:annotation>
+ <xs:documentation>For a vCAT VM, each clos/vcpu_clos must be less than L2/L3 COS_MAX!</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
+ <xs:assert test="every $vm in vm satisfies
+ (
+ if (//RDT_ENABLED = 'y' and //VCAT_ENABLED = 'y' and $vm/guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED'])
+ then count($vm/clos/vcpu_clos) = count(distinct-values($vm/clos/vcpu_clos))
+ else true()
+ )
+ ">
+ <xs:annotation>
+ <xs:documentation>For a vCAT VM, its clos/vcpu_clos elements cannot contain duplicate values</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
+ <xs:assert test="every $vm1 in vm, $vm2 in $vm1/following-sibling::vm satisfies
+ (
+ if (//RDT_ENABLED = 'y' and //VCAT_ENABLED = 'y' and ($vm1/guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED'] or $vm2/guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED']))
+ then count($vm1/clos/vcpu_clos[. = $vm2/clos/vcpu_clos]) = 0
+ else true()
+ )
+ ">
+ <xs:annotation>
+ <xs:documentation>if RDT_ENABLED is 'y', there should not be any CLOS IDs overlap between a vCAT VM and any other VMs</xs:documentation>
+ </xs:annotation>
+ </xs:assert>
+
</xs:complexType>

<xs:element name="acrn-config" type="ACRNConfigType" />
diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index f00c7eb25..57342dad3 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -202,8 +202,14 @@ RDT, setting this option to ``y`` is ignored.</xs:documentation>
<xs:annotation>
<xs:documentation>Specify whether to enable Code and Data Prioritization (CDP).
CDP is an extension of CAT. Set to 'y' to enable the feature or 'n' to disable it.
-The 'y' will be ignored when hardware does not support CDP. Default
-value ``n``.</xs:documentation>
+The 'y' will be ignored when hardware does not support CDP.</xs:documentation>
+ </xs:annotation>
+ </xs:element>
+ <xs:element name="VCAT_ENABLED" type="Boolean" default="n">
+ <xs:annotation>
+ <xs:documentation>Specify whether to enable CAT virtualization (vCAT).
+Set to 'y' to enable the feature or 'n' to disable it.
+The 'y' will be ignored when hardware does not support CAT.</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="CLOS_MASK" type="xs:string" minOccurs="0" maxOccurs="unbounded">
diff --git a/misc/config_tools/xforms/config_common.xsl b/misc/config_tools/xforms/config_common.xsl
index e4bb228d4..4464155ff 100644
--- a/misc/config_tools/xforms/config_common.xsl
+++ b/misc/config_tools/xforms/config_common.xsl
@@ -94,10 +94,15 @@
<xsl:with-param name="value" select="RDT/RDT_ENABLED" />
</xsl:call-template>

- <xsl:if test="RDT/RDT_ENABLED = 'y'">
+ <xsl:if test="acrn:is-rdt-enabled()">
<xsl:call-template name="boolean-by-key-value">
- <xsl:with-param name="key" select="'CDP_ENABLED'" />
- <xsl:with-param name="value" select="RDT/CDP_ENABLED" />
+ <xsl:with-param name="key" select="'CDP_ENABLED'" />
+ <xsl:with-param name="value" select="RDT/CDP_ENABLED" />
+ </xsl:call-template>
+
+ <xsl:call-template name="boolean-by-key-value">
+ <xsl:with-param name="key" select="'VCAT_ENABLED'" />
+ <xsl:with-param name="value" select="RDT/VCAT_ENABLED" />
</xsl:call-template>
</xsl:if>

diff --git a/misc/config_tools/xforms/lib.xsl b/misc/config_tools/xforms/lib.xsl
index 1597cc54b..57345d419 100644
--- a/misc/config_tools/xforms/lib.xsl
+++ b/misc/config_tools/xforms/lib.xsl
@@ -356,7 +356,7 @@

<func:function name="acrn:is-rdt-enabled">
<xsl:choose>
- <xsl:when test="//RDT_ENABLED = 'y'">
+ <xsl:when test="acrn:is-rdt-supported() and //RDT_ENABLED = 'y'">
<func:result select="true()" />
</xsl:when>
<xsl:otherwise>
--
2.25.1


[PATCH V6 0/8] Adding L2/L3 vCAT support

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

This patch series attempt to add L2/L3 vCAT support for ACRN.

V6:
- Add the is_contiguous_bit_set() function to check if vcbm is contiguous,
now write_vcbm() will return non-zero to vmexit_handler if vcbm is not contiguous,
which will result in #GP injected to guest

- vcbm set bits should only be in the range of [0, vcbm_len) (max_vcbm),
so mask with max_vcbm to prevent erroneous vCBM value

- Add vCAT logic in prepare_auto_msr_area():
RDT: only load/restore MSR_IA32_PQR_ASSOC when hv and guest have different settings
vCAT: always load/restore MSR_IA32_PQR_ASSOC

- Removed min cbm_len special handling code in vCBM MSR write handler write_vcbm()
to simplify the code. Now in write_vcbm(), the following actions are performed in order:
write vcbm
vcbm to pcbm
write pcbm

- More coding style fix:
Rename/move functions
Add more comments

V5:
- Add the GUEST_FLAG_VCAT_ENABLED guest flag so that it can
be used to enable/disable vCAT per-VM, previously the vcat
attribute of the clos element is used to do this, this is
removed in V5 and replaced with GUEST_FLAG_VCAT_ENABLED
guest flag option

- Initialize both vMSR and pMSR for vCAT MSRs during vmcs init

- More coding style fix:
Add more comments
Rename variables and functions
Move functions


V4:
Address review comments:
Add max_l2_pcbm and max_l3_pcbm to struct acrn_vm_config, which are
bitmasks that select all the physical L2/L3 cache ways assigned to
the VM.

Add two functions: vcat_get_cbm_len() and vclosid_to_pclosid()

Improve the readability of vCAT Code
Add more comments
Rename variables and functions
Rewrite the prepare_auto_msr_area() function


V3:
Address review comments:
Change the config-tools to generate the CONFIG_VCAT_ENABLED define

Build vCAT code only when CONFIG_VCAT_ENABLED is defined

Improve the readability of vCAT Code and perform code refactoring:
Add more comments to explain the whys of the code
Rename variables and functions
Rewrite quite some functions
Avoid defining empty functions


Dropped some commits, focus on vCAT now


V2:
1. Removed the support for passthru from V1, now only support the
following virtualization method:
Virtualize both MSR_IA32_type_MASK_n msrs and MSR_IA32_PQR_ASSOC

2. Some fixes to rdt.c/rdt.h

3. Refine some configtool code

4. Redesign the xml schema to allow each resource type
(L2, L3, MBA) to have a separate list of CLOS register values,
in a unified way in scenario file

5. Add an optional vcat attribute to the per-VM clos element in
scenario file to support vCAT

6. Amend the struct acrn_vm_config to make it compatible with vCAT

7. Removed the support for multiple clos mask lists (per RDT resource and per cache id)
from V1, this greatly simplified the vCAT config. Now always assume that the clos mask
list for each RDT resource is shared system-wide.

V1:
This patch series include the following implementations:
1. Support for three vCAT virtualization methods:
- Pass through the MSR_IA32_type_MASK_n and MSR_IA32_PQR_ASSOC
msrs

- Virtualize both MSR_IA32_type_MASK_n msrs and MSR_IA32_PQR_ASSOC

- Virtualize MSR_IA32_Ln_MASK_BASE mrs, and pass through
MSR_IA32_PQR_ASSOC

2. vmconfig vCAT support and sanity checking to ensure:
The vCAT related configuration options have been specified correctly
in scenario file

3. Fix some vmconfig bugs

dongshen (8):
hv/config-tools: add the support for vCAT
hv/config_tools: amend the struct acrn_vm_config to make it compatible
with vCAT
hv: vCAT: initialize the emulated_guest_msrs array for CAT msrs during
platform initialization
hv: vCAT: initialize vCAT MSRs during vmcs init
hv: vCAT: expose CAT capabilities to vCAT-enabled VM
hv: vCAT: implementing the vCAT MSRs read handler
hv: vCAT: implementing the vCAT MSRs write handler
hv: vCAT: propagate vCBM to other vCPUs that share cache with vcpu

hypervisor/Makefile | 3 +
hypervisor/arch/x86/cpu.c | 4 +
hypervisor/arch/x86/cpu_caps.c | 55 ++
hypervisor/arch/x86/guest/vcat.c | 642 ++++++++++++++++++
hypervisor/arch/x86/guest/vcpuid.c | 167 +++++
hypervisor/arch/x86/guest/vm.c | 10 +
hypervisor/arch/x86/guest/vmsr.c | 155 ++++-
hypervisor/arch/x86/rdt.c | 8 +
hypervisor/common/hypercall.c | 50 +-
hypervisor/include/arch/x86/asm/cpu_caps.h | 4 +
hypervisor/include/arch/x86/asm/guest/vcat.h | 22 +
hypervisor/include/arch/x86/asm/guest/vcpu.h | 19 +-
hypervisor/include/arch/x86/asm/guest/vm.h | 1 +
hypervisor/include/arch/x86/asm/msr.h | 1 +
hypervisor/include/arch/x86/asm/rdt.h | 1 +
hypervisor/include/arch/x86/asm/vm_config.h | 21 +-
hypervisor/include/public/acrn_common.h | 1 +
misc/config_tools/library/common.py | 2 +-
misc/config_tools/library/scenario_cfg_lib.py | 5 +-
.../scenario_config/scenario_item.py | 2 +-
misc/config_tools/schema/VMtypes.xsd | 13 +-
misc/config_tools/schema/config.xsd | 73 ++
misc/config_tools/schema/types.xsd | 10 +-
misc/config_tools/xforms/config_common.xsl | 11 +-
misc/config_tools/xforms/lib.xsl | 13 +-
misc/config_tools/xforms/misc_cfg.h.xsl | 7 -
.../xforms/vm_configurations.c.xsl | 43 +-
misc/hv_prebuild/vm_cfg_checks.c | 9 +-
28 files changed, 1256 insertions(+), 96 deletions(-)
create mode 100644 hypervisor/arch/x86/guest/vcat.c
create mode 100644 hypervisor/include/arch/x86/asm/guest/vcat.h

--
2.25.1


[PATCH V6 8/8] hv: vCAT: propagate vCBM to other vCPUs that share cache with vcpu

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Move the nearest_pow2() and get_cache_shift() functions from hypercall.c to cpu_caps.c
Store L2/L3 cat id shift in struct cpuinfo_x86

Implement the propagate_vcbm() function:
Set vCBM to all the vCPUs that share cache with vcpu
to mimic hardware CAT behavior

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/arch/x86/cpu_caps.c | 55 +++++++++++++++++++
hypervisor/arch/x86/guest/vcat.c | 62 +++++++++++++++++++++-
hypervisor/common/hypercall.c | 50 ++---------------
hypervisor/include/arch/x86/asm/cpu_caps.h | 4 ++
4 files changed, 122 insertions(+), 49 deletions(-)

diff --git a/hypervisor/arch/x86/cpu_caps.c b/hypervisor/arch/x86/cpu_caps.c
index 89fd559b1..33ea3aa36 100644
--- a/hypervisor/arch/x86/cpu_caps.c
+++ b/hypervisor/arch/x86/cpu_caps.c
@@ -16,6 +16,7 @@
#include <errno.h>
#include <logmsg.h>
#include <asm/guest/vmcs.h>
+#include <asm/lib/bits.h>

/* TODO: add more capability per requirement */
/* APICv features */
@@ -322,6 +323,58 @@ static uint64_t get_address_mask(uint8_t limit)
return ((1UL << limit) - 1UL) & PAGE_MASK;
}

+/*
+ * nearest_pow2(n) is the nearest power of 2 integer that is not less than n
+ * The last (most significant) bit set of (n*2-1) matches the above definition
+ */
+static uint32_t nearest_pow2(uint32_t n)
+{
+ uint32_t p = n;
+
+ if (n >= 2U) {
+ p = fls32(2U*n - 1U);
+ }
+
+ return p;
+}
+
+static void get_cat_id_shift(uint32_t *l2_cat_id_shift, uint32_t *l3_cat_id_shift)
+{
+ uint32_t subleaf;
+
+ *l2_cat_id_shift = 0U;
+ *l3_cat_id_shift = 0U;
+
+ for (subleaf = 0U;; subleaf++) {
+ uint32_t eax, ebx, ecx, edx;
+ uint32_t cache_type, cache_level, id, shift;
+
+ cpuid_subleaf(0x4U, subleaf, &eax, &ebx, &ecx, &edx);
+
+ cache_type = eax & 0x1fU;
+ cache_level = (eax >> 5U) & 0x7U;
+
+ /* Intel SDM Vol 2, CPUID 04H:
+ * EAX: bits 25 - 14: Maximum number of addressable IDs for logical processors sharing this cache.
+ * The nearest power-of-2 integer that is not smaller than (1 + EAX[25:14]) is the number of unique
+ * initial APIC IDs reserved for addressing different logical processors sharing this cache
+ */
+ id = (eax >> 14U) & 0xfffU;
+ shift = nearest_pow2(id + 1U);
+
+ /* No more caches */
+ if ((cache_type == 0U) || (cache_type >= 4U)) {
+ break;
+ }
+
+ if (cache_level == 2U) {
+ *l2_cat_id_shift = shift;
+ } else if (cache_level == 3U) {
+ *l3_cat_id_shift = shift;
+ }
+ }
+}
+
void init_pcpu_capabilities(void)
{
uint32_t eax, unused;
@@ -385,6 +438,8 @@ void init_pcpu_capabilities(void)
get_address_mask(boot_cpu_data.phys_bits);
}

+ get_cat_id_shift(&boot_cpu_data.l2_cat_id_shift, &boot_cpu_data.l3_cat_id_shift);
+
detect_pcpu_cap();
}

diff --git a/hypervisor/arch/x86/guest/vcat.c b/hypervisor/arch/x86/guest/vcat.c
index 1dda6b61f..9e302b25e 100644
--- a/hypervisor/arch/x86/guest/vcat.c
+++ b/hypervisor/arch/x86/guest/vcat.c
@@ -410,6 +410,55 @@ static uint32_t vmsr_to_pmsr(const struct acrn_vm *vm, uint32_t vmsr, int res)
return pmsr;
}

+static void get_vcat_id_shift(uint32_t *l2_shift, uint32_t *l3_shift)
+{
+ struct cpuinfo_x86 *cpu_info = get_pcpu_info();
+
+ /* Assume that virtual cat id shift is equal to physical cat id shift for now */
+ *l2_shift = cpu_info->l2_cat_id_shift;
+ *l3_shift = cpu_info->l3_cat_id_shift;
+}
+
+/**
+ * @brief Propagate vCBM to other vCPUs that share cache with vcpu
+ * @pre vcpu != NULL && vcpu->vm != NULL
+ */
+static void propagate_vcbm(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val)
+{
+ uint16_t i;
+ struct acrn_vcpu *tmp_vcpu;
+ uint32_t l2_shift, l3_shift, l2_id, l3_id;
+ struct acrn_vm *vm = vcpu->vm;
+ uint32_t apicid = vlapic_get_apicid(vcpu_vlapic(vcpu));
+
+ get_vcat_id_shift(&l2_shift, &l3_shift);
+ /* Intel SDM Vol 2, CPUID 04H:
+ * EAX: bits 25 - 14: Maximum number of addressable IDs for logical processors sharing this cache.
+ *
+ * l2_shift/l3_shift: the nearest power-of-2 integer that is not smaller than (1 + EAX[25:14])
+ * is the number of unique initial APIC IDs reserved for addressing different logical processors
+ * sharing this cache
+ */
+ l2_id = apicid >> l2_shift;
+ l3_id = apicid >> l3_shift;
+
+ /*
+ * Determine which logical processors share an MSR (for instance local
+ * to a core, or shared across multiple cores) by checking if they have the same
+ * L2/L3 cache id
+ */
+ foreach_vcpu(i, vm, tmp_vcpu) {
+ uint32_t tmp_apicid = vlapic_get_apicid(vcpu_vlapic(tmp_vcpu));
+ uint32_t tmp_l2_id = tmp_apicid >> l2_shift;
+ uint32_t tmp_l3_id = tmp_apicid >> l3_shift;
+
+ if ((is_l2_vcbm_msr(vm, vmsr) && (l2_id == tmp_l2_id))
+ || (is_l3_vcbm_msr(vm, vmsr) && (l3_id == tmp_l3_id))) {
+ vcpu_set_guest_msr(tmp_vcpu, vmsr, val);
+ }
+ }
+}
+
static void write_pcbm(uint32_t pmsr, uint64_t pcbm)
{
/* Preserve reserved bits, and only set the pCBM bits */
@@ -461,8 +510,17 @@ static int32_t write_vcbm(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val, i
uint32_t pmsr;
uint64_t pcbm;

- /* Write vCBM first */
- vcpu_set_guest_msr(vcpu, vmsr, masked_vcbm | (val & 0xFFFFFFFF00000000UL));
+ /*
+ * Write vCBM first:
+ * The L2 mask MSRs are scoped at the same level as the L2 cache (similarly,
+ * the L3 mask MSRs are scoped at the same level as the L3 cache).
+ *
+ * For example, the MSR_IA32_L3_MASK_n MSRs are scoped at socket level, which means if
+ * we program MSR_IA32_L3_MASK_n on one cpu and the same MSR_IA32_L3_MASK_n on all other cpus
+ * of the same socket will also get the change!
+ * Set vcbm to all the vCPUs that share cache with vcpu to mimic this hardware behavior.
+ */
+ propagate_vcbm(vcpu, vmsr, masked_vcbm | (val & 0xFFFFFFFF00000000UL));

/* Write pCBM: */
pmsr = vmsr_to_pmsr(vcpu->vm, vmsr, res);
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index fb19fe4a0..3f5134d45 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -133,52 +133,6 @@ int32_t hcall_get_api_version(struct acrn_vcpu *vcpu, __unused struct acrn_vm *t
return copy_to_gpa(vcpu->vm, &version, param1, sizeof(version));
}

-/*
- * nearest_pow2(n) is the nearest power of 2 integer that is not less than n
- * The last (most significant) bit set of (n*2-1) matches the above definition
- */
-static uint32_t nearest_pow2(uint32_t n)
-{
- uint32_t p = n;
-
- if (n >= 2U) {
- p = fls32(2U*n - 1U);
- }
-
- return p;
-}
-
-static void get_cache_shift(uint32_t *l2_shift, uint32_t *l3_shift)
-{
- uint32_t subleaf;
-
- *l2_shift = 0U;
- *l3_shift = 0U;
-
- for (subleaf = 0U;; subleaf++) {
- uint32_t eax, ebx, ecx, edx;
- uint32_t cache_type, cache_level, id, shift;
-
- cpuid_subleaf(0x4U, subleaf, &eax, &ebx, &ecx, &edx);
-
- cache_type = eax & 0x1fU;
- cache_level = (eax >> 5U) & 0x7U;
- id = (eax >> 14U) & 0xfffU;
- shift = nearest_pow2(id + 1U);
-
- /* No more caches */
- if ((cache_type == 0U) || (cache_type >= 4U)) {
- break;
- }
-
- if (cache_level == 2U) {
- *l2_shift = shift;
- } else if (cache_level == 3U) {
- *l3_shift = shift;
- }
- }
-}
-
/**
* @brief Get basic platform information.
*
@@ -204,8 +158,10 @@ int32_t hcall_get_platform_info(struct acrn_vcpu *vcpu, __unused struct acrn_vm
if (ret == 0) {
uint16_t i;
uint16_t pcpu_nums = get_pcpu_nums();
+ struct cpuinfo_x86 *cpu_info = get_pcpu_info();

- get_cache_shift(&pi.hw.l2_cat_shift, &pi.hw.l3_cat_shift);
+ pi.hw.l2_cat_shift = cpu_info->l2_cat_id_shift;
+ pi.hw.l3_cat_shift = cpu_info->l3_cat_id_shift;

for (i = 0U; i < min(pcpu_nums, ACRN_PLATFORM_LAPIC_IDS_MAX); i++) {
pi.hw.lapic_ids[i] = per_cpu(lapic_id, i);
diff --git a/hypervisor/include/arch/x86/asm/cpu_caps.h b/hypervisor/include/arch/x86/asm/cpu_caps.h
index 4f679f209..c22e306a0 100644
--- a/hypervisor/include/arch/x86/asm/cpu_caps.h
+++ b/hypervisor/include/arch/x86/asm/cpu_caps.h
@@ -43,6 +43,10 @@ struct cpuinfo_x86 {
uint64_t physical_address_mask;
uint32_t cpuid_leaves[FEATURE_WORDS];
char model_name[64];
+ /* Right-shift count that will allow software to extract part of APIC ID to distinguish L2 CAT ID */
+ uint32_t l2_cat_id_shift;
+ /* Right-shift count that will allow software to extract part of APIC ID to distinguish L3 CAT ID */
+ uint32_t l3_cat_id_shift;
};

bool has_monitor_cap(void);
--
2.25.1


[PATCH V6 4/8] hv: vCAT: initialize vCAT MSRs during vmcs init

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Initialize vCBM MSR

Initialize vCLOSID MSR

Add some vCAT functions:
Retrieve max_vcbm
Check if vCAT is configured or not for the VM
Map vclosid to pclosid
write_vclosid: vCLOSID MSR write handler
write_vcbm: vCBM MSR write handler

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/Makefile | 3 +
hypervisor/arch/x86/guest/vcat.c | 408 +++++++++++++++++++
hypervisor/arch/x86/guest/vm.c | 10 +
hypervisor/arch/x86/guest/vmsr.c | 15 +-
hypervisor/arch/x86/rdt.c | 8 +
hypervisor/include/arch/x86/asm/guest/vcat.h | 18 +
hypervisor/include/arch/x86/asm/guest/vm.h | 1 +
hypervisor/include/arch/x86/asm/rdt.h | 1 +
8 files changed, 462 insertions(+), 2 deletions(-)
create mode 100644 hypervisor/arch/x86/guest/vcat.c
create mode 100644 hypervisor/include/arch/x86/asm/guest/vcat.h

diff --git a/hypervisor/Makefile b/hypervisor/Makefile
index 50bb2891c..a5c02d115 100644
--- a/hypervisor/Makefile
+++ b/hypervisor/Makefile
@@ -330,6 +330,9 @@ VP_DM_C_SRCS += arch/x86/guest/vmx_io.c
VP_DM_C_SRCS += arch/x86/guest/instr_emul.c
VP_DM_C_SRCS += arch/x86/guest/lock_instr_emul.c
VP_DM_C_SRCS += arch/x86/guest/vm_reset.c
+ifeq ($(CONFIG_VCAT_ENABLED),y)
+VP_DM_C_SRCS += arch/x86/guest/vcat.c
+endif
VP_DM_C_SRCS += common/ptdev.c

# virtual platform trusty
diff --git a/hypervisor/arch/x86/guest/vcat.c b/hypervisor/arch/x86/guest/vcat.c
new file mode 100644
index 000000000..49968b8d9
--- /dev/null
+++ b/hypervisor/arch/x86/guest/vcat.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <types.h>
+#include <errno.h>
+#include <logmsg.h>
+#include <asm/cpufeatures.h>
+#include <asm/cpuid.h>
+#include <asm/rdt.h>
+#include <asm/lib/bits.h>
+#include <asm/board.h>
+#include <asm/vm_config.h>
+#include <asm/msr.h>
+#include <asm/guest/vcpu.h>
+#include <asm/guest/vm.h>
+#include <asm/guest/vcat.h>
+#include <asm/per_cpu.h>
+
+/*
+ * List of acronyms used here:
+ *
+ * - CAT:
+ * Cache Allocation Technology
+ *
+ *- vCAT:
+ * Virtual CAT
+ *
+ *- MSRs:
+ * Machine Specific Registers, each MSR is identified by a 32-bit integer.
+ *
+ *- pMSR:
+ * physical MSR
+ *
+ *- vMSR:
+ * virtual MSR
+ *
+ *- COS/CLOS:
+ * Class of Service. Also mean COS MSRs
+ *
+ *- CLOSID:
+ * Each CLOS has a number ID, ranges from 0 to COS_MAX
+ *
+ *- CLOSIDn:
+ * Each CLOS has a number ID denoted by n
+ *
+ *- COS_MAX:
+ * Max number of COS MSRs. ACRN uses the smallest number of
+ * CLOSIDs of all supported resources as COS_MAX to have consistent
+ * allocation
+ *
+ *- pCLOSID:
+ * Physical CLOSID
+ *
+ *- vCLOSID:
+ * Virtual CLOSID
+ *
+ *- MSR_IA32_type_MASK_n
+ * type: L2 or L3
+ * One CAT (CBM) MSR, where n corresponds to a number (CLOSIDn)
+ *
+ *- CBM:
+ * Capacity bitmask (cache bit mask), specifies which region of cache
+ * can be filled into, all (and only) contiguous '1' combinations are allowed
+ *
+ *- pCBM:
+ * Physical CBM
+ *
+ *- pCBM length (pcbm_len):
+ * pcbm_len is calculated by `bitmap_weight(max_pcbm)`
+ * indicates number of bits set in max_pcbm
+ *
+ *- max_pcbm (maximum physical cache space assigned to VM):
+ * max_pcbm is a contiguous capacity bitmask (CBM) starting at bit position low
+ * (the lowest assigned physical cache way) and ending at position high
+ * (the highest assigned physical cache way, inclusive).
+ * As CBM only allows contiguous '1' combinations, so max_pcbm essentially
+ * is a bitmask that selects/covers all the physical cache ways assigned to the VM.
+ *
+ * Example:
+ * pcbm_len=20
+ * max_pcbm=0xfffff
+ *
+ *- CLOS_MASK/max_pcbm: (maximum assigned/reserved physical cache space)
+ * vCAT is built on top of RDT, vCAT on ACRN is enabled by configuring the FEATURES/RDT
+ * and vm sub-sections of the scenario XML file as in the below example:
+ * <RDT>
+ * <RDT_ENABLED>y</RDT_ENABLED>
+ * <CDP_ENABLED>n</CDP_ENABLED>
+ * <VCAT_ENABLED>y</VCAT_ENABLED>
+ * <CLOS_MASK>0x7ff</CLOS_MASK>
+ * <CLOS_MASK>0x7ff</CLOS_MASK>
+ * <CLOS_MASK>0x7ff</CLOS_MASK>
+ * <CLOS_MASK>0xff800</CLOS_MASK>
+ * <CLOS_MASK>0xff800</CLOS_MASK>
+ * <CLOS_MASK>0xff800</CLOS_MASK>
+ * <CLOS_MASK>0xff800</CLOS_MASK>
+ * <CLOS_MASK>0xff800</CLOS_MASK>
+ * </RDT>
+ *
+ * <vm id="0">
+ * <guest_flags>
+ <guest_flag>GUEST_FLAG_VCAT_ENABLED</guest_flag>
+ </guest_flags>
+ * <clos>
+ * <vcpu_clos>3</vcpu_clos>
+ * <vcpu_clos>4</vcpu_clos>
+ * <vcpu_clos>5</vcpu_clos>
+ * <vcpu_clos>6</vcpu_clos>
+ * <vcpu_clos>7</vcpu_clos>
+ * </clos>
+ * </vm>
+ *
+ * <vm id="1">
+ * <clos>
+ * <vcpu_clos>1</vcpu_clos>
+ * <vcpu_clos>2</vcpu_clos>
+ * </clos>
+ * </vm>
+ *
+ * vm_configurations.c (generated by config-tools) with the above vCAT config:
+ *
+ * static uint16_t vm0_vcpu_clos[5U] = {3U, 4U, 5U, 6U, 7U};
+ * static uint16_t vm1_vcpu_clos[2U] = {1U, 2U};
+ *
+ * struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
+ * {
+ * .guest_flags = (GUEST_FLAG_VCAT_ENABLED),
+ * .pclosids = vm0_vcpu_clos,
+ * .num_pclosids = 5U,
+ * .max_l3_pcbm = 0xff800U,
+ * },
+ * {
+ * .pclosids = vm1_vcpu_clos,
+ * .num_pclosids = 2U,
+ * },
+ * };
+ *
+ * Config CLOS_MASK/max_pcbm per pCLOSID:
+ * vCAT is enabled by setting both RDT_ENABLED and VCAT_ENABLED to 'y',
+ * then specify the GUEST_FLAG_VCAT_ENABLED guest flag for the desired VMs.
+ * Each CLOS_MASK (a.k.a. max_pcbm) setting corresponds to a pCLOSID and
+ * specifies the allocated portion (ways) of cache.
+ * For example, if COS_MAX is 7, then 8 CLOS_MASK settings need to be in place
+ * where each setting corresponds to a pCLOSID starting from 0.
+ * Each CLOS_MASK may or may not overlap with the CLOS_MASK of another pCLOSID depending
+ * on whether overlapped or isolated bitmask is desired for particular performance
+ * consideration.
+ *
+ * Assign pCLOSIDs per VM
+ * Assign the desired pCLOSIDs to each VM in the vm/clos section of the scenario file
+ * by defining the vcpu_clos settings.
+ * All pCLOSIDs should be configured with the same pCBM (max_pcbm) to simplify vCAT
+ * config and ensure vCAT capability symmetry across cpus of the VM. In the above example,
+ * pCLOSIDs 3 to 7 are all configured with the same pCBM value 0xff800, which
+ * means a total of 9 physical cache ways have been reserved for all the cpus
+ * belonging to VM0
+ *
+ *- vCBM:
+ * Virtual CBM
+ *
+ *- vCBM length (vcbm_len):
+ * max number of bits to set for vCBM.
+ * vcbm_len is set equal to pcbm_len
+ * vCBM length is reported to guest VMs by using vCPUID (EAX=10H)
+ *
+ *- max_vcbm (maximum virtual cache space):
+ * Fully open vCBM (all ones bitmask), max vCBM is calculated
+ * by `(1 << vcbm_len) - 1`
+ *
+ * Usually, vCLOSID0 is associated with the fully open vCBM to access all assigned virtual caches
+ */
+
+/**
+ * @pre vm != NULL
+ */
+bool is_l2_vcat_configured(const struct acrn_vm *vm)
+{
+ return is_vcat_configured(vm) && (get_rdt_res_cap_info(RDT_RESOURCE_L2)->num_closids > 0U);
+}
+
+/**
+ * @pre vm != NULL
+ */
+bool is_l3_vcat_configured(const struct acrn_vm *vm)
+{
+ return is_vcat_configured(vm) && (get_rdt_res_cap_info(RDT_RESOURCE_L3)->num_closids > 0U);
+}
+
+/**
+ * @brief Return number of vCLOSIDs of this VM
+ *
+ * @pre vm != NULL && vm->vm_id < CONFIG_MAX_VM_NUM
+ */
+uint16_t vcat_get_num_vclosids(const struct acrn_vm *vm)
+{
+ uint16_t num_vclosids = 0U;
+
+ if (is_vcat_configured(vm)) {
+ /*
+ * For performance and simplicity, here number of vCLOSIDs (num_vclosids) is set
+ * equal to the number of pCLOSIDs assigned to this VM (get_vm_config(vm->vm_id)->num_pclosids).
+ * But technically, we do not have to make such an assumption. For example,
+ * Hypervisor could implement CLOSID context switch, then number of vCLOSIDs
+ * can be greater than the number of pCLOSIDs assigned. etc.
+ */
+ num_vclosids = get_vm_config(vm->vm_id)->num_pclosids;
+ }
+
+ return num_vclosids;
+}
+
+/**
+ * @brief Map vCLOSID to pCLOSID
+ *
+ * @pre vm != NULL && vm->vm_id < CONFIG_MAX_VM_NUM
+ * @pre (get_vm_config(vm->vm_id)->pclosids != NULL) && (vclosid < get_vm_config(vm->vm_id)->num_pclosids)
+ */
+static uint16_t vclosid_to_pclosid(const struct acrn_vm *vm, uint16_t vclosid)
+{
+ ASSERT(vclosid < vcat_get_num_vclosids(vm), "vclosid is out of range!");
+
+ /*
+ * pclosids points to an array of assigned pCLOSIDs
+ * Use vCLOSID as the index into the pclosids array, returning the corresponding pCLOSID
+ *
+ * Note that write_vcat_msr() calls vclosid_to_pclosid() indirectly, in write_vcat_msr(),
+ * the is_l2_vcbm_msr()/is_l3_vcbm_msr() calls ensure that vclosid is always less than
+ * get_vm_config(vm->vm_id)->num_pclosids, so vclosid is always an array index within bound here
+ */
+ return get_vm_config(vm->vm_id)->pclosids[vclosid];
+}
+
+/**
+ * @brief Return the max_pcbm of this VM.
+ * @pre vm != NULL && vm->vm_id < CONFIG_MAX_VM_NUM
+ * @pre res == RDT_RESOURCE_L2 || res == RDT_RESOURCE_L3
+ */
+static uint64_t get_max_pcbm(const struct acrn_vm *vm, int res)
+{
+ uint64_t max_pcbm = 0UL;
+
+ if (is_l2_vcat_configured(vm) && (res == RDT_RESOURCE_L2)) {
+ max_pcbm = get_vm_config(vm->vm_id)->max_l2_pcbm;
+ } else if (is_l3_vcat_configured(vm) && (res == RDT_RESOURCE_L3)) {
+ max_pcbm = get_vm_config(vm->vm_id)->max_l3_pcbm;
+ }
+
+ return max_pcbm;
+}
+
+/**
+ * @brief Retrieve vcbm_len of vm
+ * @pre vm != NULL
+ */
+uint16_t vcat_get_vcbm_len(const struct acrn_vm *vm, int res)
+{
+ /* vcbm_len = pcbm_len */
+ return bitmap_weight(get_max_pcbm(vm, res));
+}
+
+/**
+ * @brief Retrieve max_vcbm of vm
+ * @pre vm != NULL
+ */
+static uint64_t vcat_get_max_vcbm(const struct acrn_vm *vm, int res)
+{
+ uint16_t vcbm_len = vcat_get_vcbm_len(vm, res);
+ uint64_t max_vcbm = 0UL;
+
+ if (vcbm_len != 0U) {
+ max_vcbm = (1U << vcbm_len) - 1U;
+ }
+
+ return max_vcbm;
+}
+
+/**
+ * @brief Map vMSR address (abbreviated as vmsr) to corresponding pMSR address (abbreviated as pmsr)
+ * Each vMSR or pMSR is identified by a 32-bit integer
+ *
+ * @pre vm != NULL
+ * @pre res == RDT_RESOURCE_L2 || res == RDT_RESOURCE_L3
+ */
+static uint32_t vmsr_to_pmsr(const struct acrn_vm *vm, uint32_t vmsr, int res)
+{
+ uint32_t pmsr = vmsr;
+ uint16_t vclosid;
+
+ switch (res) {
+ case RDT_RESOURCE_L2:
+ vclosid = vmsr - MSR_IA32_L2_MASK_BASE;
+ pmsr = MSR_IA32_L2_MASK_BASE + vclosid_to_pclosid(vm, vclosid);
+ break;
+
+ case RDT_RESOURCE_L3:
+ vclosid = vmsr - MSR_IA32_L3_MASK_BASE;
+ pmsr = MSR_IA32_L3_MASK_BASE + vclosid_to_pclosid(vm, vclosid);
+ break;
+
+ default:
+ break;
+ }
+
+ return pmsr;
+}
+
+/**
+ * @brief vCBM MSR write handler
+ *
+ * @pre vcpu != NULL && vcpu->vm != NULL
+ */
+static int32_t write_vcbm(__unused struct acrn_vcpu *vcpu, __unused uint32_t vmsr, __unused uint64_t val, __unused int res)
+{
+ /* TODO: this is going to be implemented in a subsequent commit, will perform the following actions:
+ * write vCBM
+ * vmsr_to_pmsr and vcbm_to_pcbm
+ * write pCBM
+ */
+ return -EFAULT;
+}
+
+/**
+ * @brief vCLOSID MSR write handler
+ *
+ * @pre vcpu != NULL && vcpu->vm != NULL
+ */
+static int32_t write_vclosid(struct acrn_vcpu *vcpu, uint64_t val)
+{
+ uint16_t vclosid, pclosid;
+
+ /* Write the new vCLOSID value */
+ vcpu_set_guest_msr(vcpu, MSR_IA32_PQR_ASSOC, val);
+
+ vclosid = (uint16_t)((val >> 32U) & 0xFFFFFFFFUL);
+ pclosid = vclosid_to_pclosid(vcpu->vm, vclosid);
+ /*
+ * Write the new pCLOSID value to the guest msr area
+ *
+ * The prepare_auto_msr_area() function has already initialized the vcpu->arch.msr_area
+ * as follows:
+ * vcpu_clos = cfg->pclosids[vcpu->vcpu_id%cfg->num_pclosids]
+ * vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC
+ * vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(vcpu_clos)
+ * vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC
+ * vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(hv_clos)
+ * vcpu->arch.msr_area.count = 1
+ *
+ * So here we only need to update the vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value field,
+ * all other vcpu->arch.msr_area fields remains unchanged at runtime.
+ */
+ vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(pclosid);
+
+ return 0;
+}
+
+/**
+ * @brief Initialize vCBM MSRs
+ *
+ * @pre vcpu != NULL && vcpu->vm != NULL
+ */
+static void init_vcbms(struct acrn_vcpu *vcpu, int res, uint32_t msr_base)
+{
+ uint64_t max_vcbm = vcat_get_max_vcbm(vcpu->vm, res);
+
+ if (max_vcbm != 0UL) {
+ uint32_t vmsr;
+ /* num_vcbm_msrs = num_vclosids */
+ uint16_t num_vcbm_msrs = vcat_get_num_vclosids(vcpu->vm);
+
+ /*
+ * For each vCBM MSR, its initial vCBM is set to max_vcbm,
+ * a bitmask with vcbm_len bits (from 0 to vcbm_len - 1, inclusive)
+ * set to 1 and all other bits set to 0.
+ *
+ * As CBM only allows contiguous '1' combinations, so max_vcbm essentially
+ * is a bitmask that selects all the virtual cache ways assigned to the VM.
+ * It covers all the virtual cache ways the guest VM may access, i.e. the
+ * superset bitmask.
+ */
+ for (vmsr = msr_base; vmsr < (msr_base + num_vcbm_msrs); vmsr++) {
+ uint32_t pmsr = vmsr_to_pmsr(vcpu->vm, vmsr, res);
+ /* Set initial vMSR value: copy reserved bits from corresponding pMSR, and set vCBM to max_vcbm */
+ uint64_t val = (msr_read(pmsr) & 0xFFFFFFFF00000000UL) | max_vcbm;
+
+ /* Write vCBM MSR */
+ (void)write_vcbm(vcpu, vmsr, val, res);
+ }
+ }
+}
+
+/**
+ * @brief Initialize vCAT MSRs
+ *
+ * @pre vcpu != NULL && vcpu->vm != NULL
+ */
+void init_vcat_msrs(struct acrn_vcpu *vcpu)
+{
+ if (is_vcat_configured(vcpu->vm)) {
+ init_vcbms(vcpu, RDT_RESOURCE_L2, MSR_IA32_L2_MASK_BASE);
+
+ init_vcbms(vcpu, RDT_RESOURCE_L3, MSR_IA32_L3_MASK_BASE);
+
+ (void)write_vclosid(vcpu, clos2pqr_msr(0U));
+ }
+}
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 66f60d289..a7848743a 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -151,6 +151,16 @@ bool is_nvmx_configured(const struct acrn_vm *vm)
return ((vm_config->guest_flags & GUEST_FLAG_NVMX_ENABLED) != 0U);
}

+/**
+ * @pre vm != NULL && vm_config != NULL && vm->vmid < CONFIG_MAX_VM_NUM
+ */
+bool is_vcat_configured(const struct acrn_vm *vm)
+{
+ struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
+
+ return ((vm_config->guest_flags & GUEST_FLAG_VCAT_ENABLED) != 0U);
+}
+
/**
* @brief VT-d PI posted mode can possibly be used for PTDEVs assigned
* to this VM if platform supports VT-d PI AND lapic passthru is not configured
diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index 971e0b8fb..a3a1a8705 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -22,6 +22,7 @@
#include <asm/tsc.h>
#include <trace.h>
#include <logmsg.h>
+#include <asm/guest/vcat.h>

#define INTERCEPT_DISABLE (0U)
#define INTERCEPT_READ (1U << 0U)
@@ -338,8 +339,10 @@ static void prepare_auto_msr_area(struct acrn_vcpu *vcpu)

vcpu_clos = cfg->pclosids[vcpu->vcpu_id%cfg->num_pclosids];

- /* RDT: only load/restore MSR IA32_PQR_ASSOC when hv and guest have different settings */
- if (vcpu_clos != hv_clos) {
+ /* RDT: only load/restore MSR_IA32_PQR_ASSOC when hv and guest have different settings
+ * vCAT: always load/restore MSR_IA32_PQR_ASSOC
+ */
+ if (is_vcat_configured(vcpu->vm) || (vcpu_clos != hv_clos)) {
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(vcpu_clos);
vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
@@ -371,6 +374,14 @@ void init_emulated_msrs(struct acrn_vcpu *vcpu)
}

vcpu_set_guest_msr(vcpu, MSR_IA32_FEATURE_CONTROL, val64);
+
+#ifdef CONFIG_VCAT_ENABLED
+ /*
+ * init_vcat_msrs() will overwrite the vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value
+ * set by prepare_auto_msr_area()
+ */
+ init_vcat_msrs(vcpu);
+#endif
}

#ifdef CONFIG_VCAT_ENABLED
diff --git a/hypervisor/arch/x86/rdt.c b/hypervisor/arch/x86/rdt.c
index f6dc960f9..b21fd5209 100644
--- a/hypervisor/arch/x86/rdt.c
+++ b/hypervisor/arch/x86/rdt.c
@@ -60,6 +60,14 @@ static struct rdt_info res_cap_info[RDT_NUM_RESOURCES] = {
},
};

+/*
+ * @pre res == RDT_RESOURCE_L3 || res == RDT_RESOURCE_L2 || res == RDT_RESOURCE_MBA
+ */
+const struct rdt_info *get_rdt_res_cap_info(int res)
+{
+ return &res_cap_info[res];
+}
+
/*
* @pre res == RDT_RESOURCE_L3 || res == RDT_RESOURCE_L2
*/
diff --git a/hypervisor/include/arch/x86/asm/guest/vcat.h b/hypervisor/include/arch/x86/asm/guest/vcat.h
new file mode 100644
index 000000000..6d8e587c4
--- /dev/null
+++ b/hypervisor/include/arch/x86/asm/guest/vcat.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef VCAT_H_
+#define VCAT_H_
+
+#include <asm/guest/vm.h>
+
+bool is_l2_vcat_configured(const struct acrn_vm *vm);
+bool is_l3_vcat_configured(const struct acrn_vm *vm);
+uint16_t vcat_get_vcbm_len(const struct acrn_vm *vm, int res);
+void init_vcat_msrs(struct acrn_vcpu *vcpu);
+
+#endif /* VCAT_H_ */
+
diff --git a/hypervisor/include/arch/x86/asm/guest/vm.h b/hypervisor/include/arch/x86/asm/guest/vm.h
index 9a6171be0..2cb06c027 100644
--- a/hypervisor/include/arch/x86/asm/guest/vm.h
+++ b/hypervisor/include/arch/x86/asm/guest/vm.h
@@ -256,6 +256,7 @@ void vrtc_init(struct acrn_vm *vm);
bool is_lapic_pt_configured(const struct acrn_vm *vm);
bool is_rt_vm(const struct acrn_vm *vm);
bool is_nvmx_configured(const struct acrn_vm *vm);
+bool is_vcat_configured(const struct acrn_vm *vm);
bool is_pi_capable(const struct acrn_vm *vm);
bool has_rt_vm(void);
struct acrn_vm *get_highest_severity_vm(bool runtime);
diff --git a/hypervisor/include/arch/x86/asm/rdt.h b/hypervisor/include/arch/x86/asm/rdt.h
index f6c4448e2..95e149fcb 100644
--- a/hypervisor/include/arch/x86/asm/rdt.h
+++ b/hypervisor/include/arch/x86/asm/rdt.h
@@ -47,5 +47,6 @@ void init_rdt_info(void);
void setup_clos(uint16_t pcpu_id);
uint64_t clos2pqr_msr(uint16_t clos);
bool is_platform_rdt_capable(void);
+const struct rdt_info *get_rdt_res_cap_info(int res);

#endif /* RDT_H */
--
2.25.1


[PATCH V6 5/8] hv: vCAT: expose CAT capabilities to vCAT-enabled VM

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Expose CAT feature to vCAT VM by reporting the number of
cache ways/CLOSIDs via the 04H/10H cpuid instructions, so that the
VM can take advantage of CAT to prioritize and partition cache
resource for its own tasks.

Add the vcat_pcbm_to_vcbm() function to map pcbm to vcbm

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/arch/x86/guest/vcat.c | 43 +++++
hypervisor/arch/x86/guest/vcpuid.c | 167 +++++++++++++++++++
hypervisor/include/arch/x86/asm/guest/vcat.h | 2 +
3 files changed, 212 insertions(+)

diff --git a/hypervisor/arch/x86/guest/vcat.c b/hypervisor/arch/x86/guest/vcat.c
index 49968b8d9..91522a560 100644
--- a/hypervisor/arch/x86/guest/vcat.c
+++ b/hypervisor/arch/x86/guest/vcat.c
@@ -277,6 +277,49 @@ static uint64_t vcat_get_max_vcbm(const struct acrn_vm *vm, int res)
return max_vcbm;
}

+/**
+ * @brief Map pCBM to vCBM
+ *
+ * @pre vm != NULL
+ */
+uint64_t vcat_pcbm_to_vcbm(const struct acrn_vm *vm, uint64_t pcbm, int res)
+{
+ /*
+ * max_pcbm/CLOS_MASK is defined in scenario file and is a contiguous bitmask starting
+ * at bit position low (the lowest assigned physical cache way) and ending at position
+ * high (the highest assigned physical cache way, inclusive). As CBM only allows
+ * contiguous '1' combinations, so max_pcbm essentially is a bitmask that selects/covers
+ * all the physical cache ways assigned to the VM.
+ *
+ * For illustrative purpose, here we assume that we have the two functions
+ * GENMASK() and BIT() defined as follows:
+ * GENMASK(high, low): create a contiguous bitmask starting at bit position low and
+ * ending at position high, inclusive.
+ * BIT(n): create a bitmask with bit n set.
+ *
+ * max_pcbm, min_pcbm, max_vcbm, min_vcbm and the relationship between them
+ * can be expressed as:
+ * max_pcbm = GENMASK(high, low)
+ * min_pcbm = BIT(low)
+ *
+ * max_vcbm = GENMASK(high - low, 0)
+ * min_vcbm = BIT(0)
+ *
+ * pcbm to vcbm conversion (mask off the unwanted bits to prevent erroneous mask values):
+ * vcbm = (pcbm & max_pcbm) >> low
+ *
+ * max_pcbm will be mapped to max_vcbm
+ * min_pcbm will be mapped to min_vcbm
+ */
+ uint64_t max_pcbm = get_max_pcbm(vm, res);
+
+ /* Find the position low (the first bit set) in max_pcbm */
+ uint16_t low = ffs64(max_pcbm);
+
+ /* pcbm set bits should only be in the range of [low, high] */
+ return (pcbm & max_pcbm) >> low;
+}
+
/**
* @brief Map vMSR address (abbreviated as vmsr) to corresponding pMSR address (abbreviated as pmsr)
* Each vMSR or pMSR is identified by a 32-bit integer
diff --git a/hypervisor/arch/x86/guest/vcpuid.c b/hypervisor/arch/x86/guest/vcpuid.c
index bc19d0481..af5ee941a 100644
--- a/hypervisor/arch/x86/guest/vcpuid.c
+++ b/hypervisor/arch/x86/guest/vcpuid.c
@@ -15,6 +15,8 @@
#include <asm/sgx.h>
#include <asm/tsc.h>
#include <logmsg.h>
+#include <asm/rdt.h>
+#include <asm/guest/vcat.h>

static inline const struct vcpuid_entry *local_find_vcpuid_entry(const struct acrn_vcpu *vcpu,
uint32_t leaf, uint32_t subleaf)
@@ -286,6 +288,149 @@ static int32_t set_vcpuid_sgx(struct acrn_vm *vm)
return result;
}

+#ifdef CONFIG_VCAT_ENABLED
+/**
+ * @brief Number of ways (CBM length) is detected with CPUID.0x4
+ *
+ * @pre vm != NULL
+ */
+static int32_t set_vcpuid_vcat_04h(const struct acrn_vm *vm, struct vcpuid_entry *entry)
+{
+ uint32_t cache_type = entry->eax & 0x1FU; /* EAX bits 04:00 */
+ uint32_t cache_level = (entry->eax >> 5U) & 0x7U; /* EAX bits 07:05 */
+ uint16_t vcbm_len = 0U;
+
+ if (cache_level == 2U) {
+ vcbm_len = vcat_get_vcbm_len(vm, RDT_RESOURCE_L2);
+ } else if (cache_level == 3U) {
+ vcbm_len = vcat_get_vcbm_len(vm, RDT_RESOURCE_L3);
+ }
+
+ /*
+ * cache_type:
+ * 0 = Null - No more caches.
+ * 1 = Data Cache.
+ * 2 = Instruction Cache.
+ * 3 = Unified Cache.
+ * 4-31 = Reserved
+ *
+ * cache_level (starts at 1):
+ * 2 = L2
+ * 3 = L3
+ */
+ if (((cache_type == 0x1U) || (cache_type == 0x3U)) && (vcbm_len != 0U)) {
+ /*
+ * EBX Bits 11 - 00: L = System Coherency Line Size**.
+ * Bits 21 - 12: P = Physical Line partitions**.
+ * Bits 31 - 22: W = Ways of associativity**.
+ */
+ entry->ebx &= ~0xFFC00000U;
+ /* Report # of cache ways (CBM length) to guest VM */
+ entry->ebx |= (vcbm_len - 1U) << 22U;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief RDT allocation enumeration sub-leaf (EAX = 10H, ECX = 0)
+ * Expose CAT capabilities to guest VM
+ *
+ * @pre vm != NULL
+ */
+static int32_t set_vcpuid_vcat_10h_subleaf_0(struct acrn_vm *vm, bool l2, bool l3)
+{
+ struct vcpuid_entry entry;
+
+ init_vcpuid_entry(CPUID_RDT_ALLOCATION, 0U, CPUID_CHECK_SUBLEAF, &entry);
+
+ entry.ebx &= ~0xeU; /* Set the L3/L2/MBA bits (bits 1, 2, and 3) all to 0 (not supported) */
+
+ if (l2) {
+ /* Bit 02: Supports L2 Cache Allocation Technology if 1 */
+ entry.ebx |= 0x4U;
+ }
+
+ if (l3) {
+ /* Bit 01: Supports L3 Cache Allocation Technology if 1 */
+ entry.ebx |= 0x2U;
+ }
+
+ return set_vcpuid_entry(vm, &entry);
+}
+
+/**
+ * @brief L2/L3 enumeration sub-leaf
+ *
+ * @pre vm != NULL
+ */
+static int32_t set_vcpuid_vcat_10h_subleaf_res(struct acrn_vm *vm, uint32_t subleaf, uint16_t num_vclosids)
+{
+ struct vcpuid_entry entry;
+ uint16_t vcbm_len;
+ int res;
+
+ if (subleaf == 1U) {
+ res = RDT_RESOURCE_L3;
+ } else {
+ res = RDT_RESOURCE_L2;
+ }
+ vcbm_len = vcat_get_vcbm_len(vm, res);
+
+ /* Set cache cbm_len */
+ init_vcpuid_entry(CPUID_RDT_ALLOCATION, subleaf, CPUID_CHECK_SUBLEAF, &entry);
+
+ if ((entry.eax != 0U) && (vcbm_len != 0U)) {
+ /* Bits 4 - 00: Length of the capacity bit mask for the corresponding ResID using minus-one notation */
+ entry.eax = (entry.eax & ~0x1F) | (vcbm_len - 1U);
+
+ /* Bits 31 - 00: Bit-granular map of isolation/contention of allocation units
+ * Each set bit within the length of the CBM indicates the corresponding unit of the L2/L3 allocation
+ * may be used by other entities in the platform. Each cleared bit within the length of the CBM
+ * indicates the corresponding allocation unit can be configured to implement a priority-based
+ * allocation scheme chosen by an OS/VMM without interference with other hardware agents in the system.
+ */
+ entry.ebx = (uint32_t)vcat_pcbm_to_vcbm(vm, entry.ebx, res);
+
+ /* Do not support CDP for now */
+ entry.ecx &= ~0x4U;
+
+ /* Report max CLOS to guest VM
+ * Bits 15 - 00: Highest COS number supported for this ResID using minus-one notation
+ */
+ entry.edx = (entry.edx & 0xFFFF0000U) | (num_vclosids - 1U);
+ }
+
+ return set_vcpuid_entry(vm, &entry);
+}
+
+/**
+ * @pre vm != NULL
+ */
+static int32_t set_vcpuid_vcat_10h(struct acrn_vm *vm)
+{
+ int32_t result;
+ uint16_t num_vclosids = vcat_get_num_vclosids(vm);
+ bool l2 = is_l2_vcat_configured(vm);
+ bool l3 = is_l3_vcat_configured(vm);
+
+ /* RDT allocation enumeration sub-leaf (EAX=10H, ECX=0) */
+ result = set_vcpuid_vcat_10h_subleaf_0(vm, l2, l3);
+
+ if ((result == 0) && l2) {
+ /* L2 enumeration sub-leaf (EAX=10H, ECX=2) */
+ result = set_vcpuid_vcat_10h_subleaf_res(vm, 2U, num_vclosids);
+ }
+
+ if ((result == 0) && l3) {
+ /* L3 enumeration sub-leaf (EAX=10H, ECX=1) */
+ result = set_vcpuid_vcat_10h_subleaf_res(vm, 1U, num_vclosids);
+ }
+
+ return result;
+}
+#endif
+
static int32_t set_vcpuid_extended_function(struct acrn_vm *vm)
{
uint32_t i, limit;
@@ -376,6 +521,12 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
if (entry.eax == 0U) {
break;
}
+
+#ifdef CONFIG_VCAT_ENABLED
+ if (is_vcat_configured(vm)) {
+ result = set_vcpuid_vcat_04h(vm, &entry);
+ }
+#endif
result = set_vcpuid_entry(vm, &entry);
if (result != 0) {
/* wants to break out of switch */
@@ -392,6 +543,13 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
if (is_vsgx_supported(vm->vm_id)) {
entry.ebx |= CPUID_EBX_SGX;
}
+
+#ifdef CONFIG_VCAT_ENABLED
+ if (is_vcat_configured(vm)) {
+ /* Bit 15: Supports Intel Resource Director Technology (Intel RDT) Allocation capability if 1 */
+ entry.ebx |= CPUID_EBX_PQE;
+ }
+#endif
result = set_vcpuid_entry(vm, &entry);
break;
case 0x12U:
@@ -408,7 +566,16 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)

/* Intel RDT */
case 0x0fU:
+ break;
+ /* Intel RDT */
case 0x10U:
+#ifdef CONFIG_VCAT_ENABLED
+ if (is_vcat_configured(vm)) {
+ result = set_vcpuid_vcat_10h(vm);
+ }
+#endif
+ break;
+
/* Intel Processor Trace */
case 0x14U:
/* PCONFIG */
diff --git a/hypervisor/include/arch/x86/asm/guest/vcat.h b/hypervisor/include/arch/x86/asm/guest/vcat.h
index 6d8e587c4..a9518bded 100644
--- a/hypervisor/include/arch/x86/asm/guest/vcat.h
+++ b/hypervisor/include/arch/x86/asm/guest/vcat.h
@@ -13,6 +13,8 @@ bool is_l2_vcat_configured(const struct acrn_vm *vm);
bool is_l3_vcat_configured(const struct acrn_vm *vm);
uint16_t vcat_get_vcbm_len(const struct acrn_vm *vm, int res);
void init_vcat_msrs(struct acrn_vcpu *vcpu);
+uint16_t vcat_get_num_vclosids(const struct acrn_vm *vm);
+uint64_t vcat_pcbm_to_vcbm(const struct acrn_vm *vm, uint64_t pcbm, int res);

#endif /* VCAT_H_ */

--
2.25.1


[PATCH V6 2/8] hv/config_tools: amend the struct acrn_vm_config to make it compatible with vCAT

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

For vCAT, it may need to store more than MAX_VCPUS_PER_VM of closids,
change clos in vm_config.h to a pointer to accommodate this situation

Rename clos to pclosids

pclosids now is a pointer to an array of physical CLOSIDs that is defined
in vm_configurations.c by vmconfig. The number of elements in the array
must be equal to the value given by num_pclosids

Add max_type_pcbm (type: l2 or l3) to struct acrn_vm_config, which stores a bitmask
that selects/covers all the physical cache ways assigned to the VM

Change vmsr.c to accommodate this amended data structure

Change the config-tools to generate vm_configurations.c, and fill in the num_pclosids
and pclosids pointers based on the information from the scenario file.

Now vm_configurations.c.xsl generates all the clos related code so remove the same
code from misc_cfg.h.xsl.

Examples:

Scenario file:

<RDT>
<RDT_ENABLED>y</RDT_ENABLED>
<CDP_ENABLED>n</CDP_ENABLED>
<VCAT_ENABLED>y</VCAT_ENABLED>
<CLOS_MASK>0x7ff</CLOS_MASK>
<CLOS_MASK>0x7ff</CLOS_MASK>
<CLOS_MASK>0x7ff</CLOS_MASK>
<CLOS_MASK>0xff800</CLOS_MASK>
<CLOS_MASK>0xff800</CLOS_MASK>
<CLOS_MASK>0xff800</CLOS_MASK>
<CLOS_MASK>0xff800</CLOS_MASK>
<CLOS_MASK>0xff800</CLOS_MASK>
</RDT>

<vm id="0">
<guest_flags>
<guest_flag>GUEST_FLAG_VCAT_ENABLED</guest_flag>
</guest_flags>
<clos>
<vcpu_clos>3</vcpu_clos>
<vcpu_clos>4</vcpu_clos>
<vcpu_clos>5</vcpu_clos>
<vcpu_clos>6</vcpu_clos>
<vcpu_clos>7</vcpu_clos>
</clos>
</vm>

<vm id="1">
<clos>
<vcpu_clos>1</vcpu_clos>
<vcpu_clos>2</vcpu_clos>
</clos>
</vm>

vm_configurations.c (generated by config-tools) with the above vCAT config:

static uint16_t vm0_vcpu_clos[5U] = {3U, 4U, 5U, 6U, 7U};
static uint16_t vm1_vcpu_clos[2U] = {1U, 2U};

struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
{
.guest_flags = (GUEST_FLAG_VCAT_ENABLED),
.pclosids = vm0_vcpu_clos,
.num_pclosids = 5U,
.max_l3_pcbm = 0xff800U,
},
{
.pclosids = vm1_vcpu_clos,
.num_pclosids = 2U,
},
};

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/arch/x86/guest/vmsr.c | 38 +++++++++-------
hypervisor/include/arch/x86/asm/vm_config.h | 21 +++++++--
misc/config_tools/xforms/lib.xsl | 11 +++++
misc/config_tools/xforms/misc_cfg.h.xsl | 7 ---
.../xforms/vm_configurations.c.xsl | 43 +++++++++++++++++--
misc/hv_prebuild/vm_cfg_checks.c | 9 ++--
6 files changed, 95 insertions(+), 34 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index 96ce15151..e83c3069c 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -305,24 +305,32 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, uint32_t mode)
}

/**
- * @pre vcpu != NULL
+ * @pre vcpu != NULL && vcpu->vm != NULL && vcpu->vm->vm_id < CONFIG_MAX_VM_NUM
+ * @pre (is_platform_rdt_capable() == false()) || (is_platform_rdt_capable() && get_vm_config(vcpu->vm->vm_id)->pclosids != NULL)
*/
-static void prepare_auto_msr_area (struct acrn_vcpu *vcpu)
+static void prepare_auto_msr_area(struct acrn_vcpu *vcpu)
{
- struct acrn_vm_config *cfg = get_vm_config(vcpu->vm->vm_id);
- uint16_t vcpu_clos = cfg->clos[vcpu->vcpu_id];
-
vcpu->arch.msr_area.count = 0U;

- /* only load/restore MSR IA32_PQR_ASSOC when hv and guest have differnt settings */
- if (is_platform_rdt_capable() && (vcpu_clos != hv_clos)) {
- vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
- vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(vcpu_clos);
- vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
- vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(hv_clos);
- vcpu->arch.msr_area.count++;
- pr_acrnlog("switch clos for VM %u vcpu_id %u, host 0x%x, guest 0x%x",
- vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, vcpu_clos);
+ if (is_platform_rdt_capable()) {
+ struct acrn_vm_config *cfg = get_vm_config(vcpu->vm->vm_id);
+ uint16_t vcpu_clos;
+
+ ASSERT(cfg->pclosids != NULL, "error, cfg->pclosids is NULL");
+
+ vcpu_clos = cfg->pclosids[vcpu->vcpu_id%cfg->num_pclosids];
+
+ /* RDT: only load/restore MSR IA32_PQR_ASSOC when hv and guest have different settings */
+ if (vcpu_clos != hv_clos) {
+ vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
+ vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(vcpu_clos);
+ vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
+ vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(hv_clos);
+ vcpu->arch.msr_area.count++;
+
+ pr_acrnlog("switch clos for VM %u vcpu_id %u, host 0x%x, guest 0x%x",
+ vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, vcpu_clos);
+ }
}
}

@@ -392,7 +400,7 @@ void init_msr_emulation(struct acrn_vcpu *vcpu)
pr_dbg("VMX_MSR_BITMAP: 0x%016lx ", value64);

/* Initialize the MSR save/store area */
- prepare_auto_msr_area (vcpu);
+ prepare_auto_msr_area(vcpu);

/* Setup initial value for emulated MSRs */
init_emulated_msrs(vcpu);
diff --git a/hypervisor/include/arch/x86/asm/vm_config.h b/hypervisor/include/arch/x86/asm/vm_config.h
index 528210375..051c07148 100644
--- a/hypervisor/include/arch/x86/asm/vm_config.h
+++ b/hypervisor/include/arch/x86/asm/vm_config.h
@@ -182,9 +182,24 @@ struct acrn_vm_config {
* SOS can get the vm_configs[] array through hypercall, but SOS may not
* need to parse these members.
*/
- uint16_t clos[MAX_VCPUS_PER_VM]; /* Class of Service, effective only if CONFIG_RDT_ENABLED
- * is defined on CAT capable platforms
- */
+
+ uint16_t num_pclosids; /* This defines the number of elements in the array pointed to by pclosids */
+ /* pclosids: a pointer to an array of physical CLOSIDs (pCLOSIDs)) that is defined in vm_configurations.c
+ * by vmconfig,
+ * applicable only if CONFIG_RDT_ENABLED is defined on CAT capable platforms.
+ * The number of elements in the array must be equal to the value given by num_pclosids
+ */
+ uint16_t *pclosids;
+
+ /* max_type_pcbm (type: l2 or l3) specifies the allocated portion of physical cache
+ * for the VM and is a contiguous capacity bitmask (CBM) starting at bit position low
+ * (the lowest assigned physical cache way) and ending at position high
+ * (the highest assigned physical cache way, inclusive).
+ * As CBM only allows contiguous '1' combinations, so max_type_pcbm essentially
+ * is a bitmask that selects/covers all the physical cache ways assigned to the VM.
+ */
+ uint32_t max_l2_pcbm;
+ uint32_t max_l3_pcbm;

struct vuart_config vuart[MAX_VUART_NUM_PER_VM];/* vuart configuration for VM */

diff --git a/misc/config_tools/xforms/lib.xsl b/misc/config_tools/xforms/lib.xsl
index 57345d419..cc26ebddb 100644
--- a/misc/config_tools/xforms/lib.xsl
+++ b/misc/config_tools/xforms/lib.xsl
@@ -376,6 +376,17 @@
</xsl:choose>
</func:function>

+ <func:function name="acrn:is-vcat-enabled">
+ <xsl:choose>
+ <xsl:when test="acrn:is-rdt-enabled() and //VCAT_ENABLED = 'y'">
+ <func:result select="true()" />
+ </xsl:when>
+ <xsl:otherwise>
+ <func:result select="false()" />
+ </xsl:otherwise>
+ </xsl:choose>
+ </func:function>
+
<func:function name="acrn:is-rdt-supported">
<xsl:variable name="rdt_resource" select="acrn:get-normalized-closinfo-rdt-res-str()" />
<xsl:variable name="rdt_res_clos_max" select="acrn:get-normalized-closinfo-rdt-clos-max-str()" />
diff --git a/misc/config_tools/xforms/misc_cfg.h.xsl b/misc/config_tools/xforms/misc_cfg.h.xsl
index 35fa10740..b409acf0a 100644
--- a/misc/config_tools/xforms/misc_cfg.h.xsl
+++ b/misc/config_tools/xforms/misc_cfg.h.xsl
@@ -122,12 +122,6 @@
</xsl:for-each>
</xsl:template>

-<xsl:template name="vcpu_clos">
- <xsl:for-each select="vm">
- <xsl:value-of select="acrn:define(concat('VM', @id, '_VCPU_CLOS'), concat('{', acrn:string-join(clos/vcpu_clos, ',', '', 'U'),'}'), '')" />
- </xsl:for-each>
-</xsl:template>
-
<!-- HV_SUPPORTED_MAX_CLOS:
The maximum CLOS that is allowed by ACRN hypervisor.
Its value is set to be least common Max CLOS (CPUID.(EAX=0x10,ECX=ResID):EDX[15:0])
@@ -172,7 +166,6 @@
<xsl:for-each select="hv/FEATURES/RDT/CLOS_MASK">
<xsl:value-of select="acrn:define(concat('CLOS_MASK_', position() - 1), current(), 'U')" />
</xsl:for-each>
- <xsl:call-template name="vcpu_clos" />
<xsl:value-of select="$endif" />
</xsl:if>
</xsl:template>
diff --git a/misc/config_tools/xforms/vm_configurations.c.xsl b/misc/config_tools/xforms/vm_configurations.c.xsl
index b23b28a3a..4de8962fc 100644
--- a/misc/config_tools/xforms/vm_configurations.c.xsl
+++ b/misc/config_tools/xforms/vm_configurations.c.xsl
@@ -50,6 +50,21 @@
</xsl:if>
</xsl:for-each>

+ <xsl:if test="acrn:is-rdt-enabled()">
+ <xsl:value-of select="$newline" />
+ <xsl:value-of select="acrn:ifdef('CONFIG_RDT_ENABLED')" />
+
+ <xsl:for-each select="vm">
+ <xsl:value-of select="concat('static uint16_t ', concat('vm', @id, '_vcpu_clos'), '[', count(clos/vcpu_clos), 'U] = {')" />
+ <xsl:value-of select="acrn:string-join(clos/vcpu_clos, ', ', '', 'U')" />
+ <xsl:text>};</xsl:text>
+ <xsl:value-of select="$newline" />
+ </xsl:for-each>
+
+ <xsl:value-of select="$endif" />
+ <xsl:value-of select="$newline" />
+ </xsl:if>
+
<!-- Definition of vm_configs -->
<xsl:value-of select="acrn:array-initializer('struct acrn_vm_config', 'vm_configs', 'CONFIG_MAX_VM_NUM')" />
<xsl:apply-templates select="vm"/>
@@ -70,7 +85,11 @@
</xsl:if>
<xsl:value-of select="acrn:initializer('vm_prio', priority)" />
<xsl:apply-templates select="guest_flags" />
- <xsl:apply-templates select="clos" />
+
+ <xsl:if test="acrn:is-rdt-enabled()">
+ <xsl:apply-templates select="clos" />
+ </xsl:if>
+
<xsl:call-template name="cpu_affinity" />
<xsl:apply-templates select="epc_section" />
<xsl:apply-templates select="memory" />
@@ -133,7 +152,23 @@

<xsl:template match="clos">
<xsl:value-of select="acrn:ifdef('CONFIG_RDT_ENABLED')" />
- <xsl:value-of select="acrn:initializer('clos', concat('VM', ../@id, '_VCPU_CLOS'))" />
+ <xsl:value-of select="acrn:initializer('pclosids', concat('vm', ../@id, '_vcpu_clos'))" />
+
+ <xsl:value-of select="acrn:initializer('num_pclosids', concat(count(vcpu_clos), 'U'))" />
+
+ <xsl:if test="acrn:is-vcat-enabled() and ../guest_flags[guest_flag = 'GUEST_FLAG_VCAT_ENABLED']">
+ <xsl:variable name="rdt_res_str" select="acrn:get-normalized-closinfo-rdt-res-str()" />
+ <xsl:variable name="closid" select="vcpu_clos[1]" />
+
+ <xsl:if test="contains($rdt_res_str, 'L2')">
+ <xsl:value-of select="acrn:initializer('max_l2_pcbm', concat(../../hv/FEATURES/RDT/CLOS_MASK[$closid + 1], 'U'))" />
+ </xsl:if>
+
+ <xsl:if test="contains($rdt_res_str, 'L3')">
+ <xsl:value-of select="acrn:initializer('max_l3_pcbm', concat(../../hv/FEATURES/RDT/CLOS_MASK[$closid + 1], 'U'))" />
+ </xsl:if>
+ </xsl:if>
+
<xsl:value-of select="$endif" />
</xsl:template>

@@ -148,13 +183,13 @@
<xsl:value-of select="acrn:initializer('size', concat('VM', ../@id, '_CONFIG_MEM_SIZE'))" />
<xsl:value-of select="acrn:initializer('start_hpa2', concat('VM', ../@id, '_CONFIG_MEM_START_HPA2'))" />
<xsl:value-of select="acrn:initializer('size_hpa2', concat('VM', ../@id, '_CONFIG_MEM_SIZE_HPA2'))" />
- </xsl:otherwise>
+ </xsl:otherwise>
</xsl:choose>
<xsl:text>},</xsl:text>
<xsl:value-of select="$newline" />
</xsl:template>

- <xsl:template match="epc_section">
+ <xsl:template match="epc_section">
<xsl:if test="base != '0' and size != '0'">
<xsl:value-of select="acrn:initializer('epc', '{', true())" />
<xsl:value-of select="acrn:initializer('base', base)" />
diff --git a/misc/hv_prebuild/vm_cfg_checks.c b/misc/hv_prebuild/vm_cfg_checks.c
index 8620c1580..a966883ae 100644
--- a/misc/hv_prebuild/vm_cfg_checks.c
+++ b/misc/hv_prebuild/vm_cfg_checks.c
@@ -84,13 +84,12 @@ static bool check_vm_clos_config(uint16_t vm_id)
uint16_t platform_clos_num = HV_SUPPORTED_MAX_CLOS;
bool ret = true;
struct acrn_vm_config *vm_config = get_vm_config(vm_id);
- uint16_t vcpu_num = bitmap_weight(vm_config->cpu_affinity);

- for (i = 0U; i < vcpu_num; i++) {
- if (((platform_clos_num != 0U) && (vm_config->clos[i] == platform_clos_num))
- || (vm_config->clos[i] > platform_clos_num)) {
+ for (i = 0U; i < vm_config->num_pclosids; i++) {
+ if (((platform_clos_num != 0U) && (vm_config->pclosids[i] == platform_clos_num))
+ || (vm_config->pclosids[i] > platform_clos_num)) {
printf("vm%u: vcpu%u clos(%u) exceed the max clos(%u).",
- vm_id, i, vm_config->clos[i], platform_clos_num);
+ vm_id, i, vm_config->pclosids[i], platform_clos_num);
ret = false;
break;
}
--
2.25.1


[PATCH V6 7/8] hv: vCAT: implementing the vCAT MSRs write handler

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Implement the write_vcat_msr() function to handle the MSR_IA32_PQR_ASSOC
and MSR_IA32_type_MASK_n vCAT MSRs write request.

Several vCAT P2V (physical to virtual) and V2P (virtual to physical)
mappings exist:

struct acrn_vm_config *vm_config = get_vm_config(vm_id)

max_pcbm = vm_config->max_type_pcbm (type: l2 or l3)
mask_shift = ffs64(max_pcbm)

vclosid = vmsr - MSR_IA32_type_MASK_0
pclosid = vm_config->pclosids[vclosid]

pmsr = MSR_IA32_type_MASK_0 + pclosid
pcbm = vcbm << mask_shift
vcbm = pcbm >> mask_shift

Where
MSR_IA32_type_MASK_n: L2 or L3 mask msr address for CLOSIDn, from
0C90H through 0D8FH (inclusive).

max_pcbm: a bitmask that selects all the physical cache ways assigned to the VM

vclosid: virtual CLOSID, always starts from 0

pclosid: corresponding physical CLOSID for a given vclosid

vmsr: virtual msr address, passed to vCAT handlers by the
caller functions rdmsr_vmexit_handler()/wrmsr_vmexit_handler()

pmsr: physical msr address

vcbm: virtual CBM, passed to vCAT handlers by the
caller functions rdmsr_vmexit_handler()/wrmsr_vmexit_handler()

pcbm: physical CBM

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/arch/x86/guest/vcat.c | 100 +++++++++++++++++--
hypervisor/arch/x86/guest/vmsr.c | 9 ++
hypervisor/include/arch/x86/asm/guest/vcat.h | 1 +
3 files changed, 104 insertions(+), 6 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vcat.c b/hypervisor/arch/x86/guest/vcat.c
index 03e9a3cc9..1dda6b61f 100644
--- a/hypervisor/arch/x86/guest/vcat.c
+++ b/hypervisor/arch/x86/guest/vcat.c
@@ -365,6 +365,21 @@ int32_t read_vcat_msr(const struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t *rva
return ret;
}

+/**
+ * @brief Map vCBM to pCBM
+ *
+ * @pre vm != NULL
+ */
+static uint64_t vcbm_to_pcbm(const struct acrn_vm *vm, uint64_t vcbm, int res)
+{
+ uint64_t max_pcbm = get_max_pcbm(vm, res);
+
+ /* Find the position low (the first bit set) in max_pcbm */
+ uint16_t low = ffs64(max_pcbm);
+
+ return vcbm << low;
+}
+
/**
* @brief Map vMSR address (abbreviated as vmsr) to corresponding pMSR address (abbreviated as pmsr)
* Each vMSR or pMSR is identified by a 32-bit integer
@@ -395,19 +410,70 @@ static uint32_t vmsr_to_pmsr(const struct acrn_vm *vm, uint32_t vmsr, int res)
return pmsr;
}

+static void write_pcbm(uint32_t pmsr, uint64_t pcbm)
+{
+ /* Preserve reserved bits, and only set the pCBM bits */
+ uint64_t pmsr_value = (msr_read(pmsr) & 0xFFFFFFFF00000000UL) | pcbm;
+
+ msr_write(pmsr, pmsr_value);
+}
+
+/* Check if bitmask is contiguous:
+ * All (and only) contiguous '1' combinations are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.)
+ */
+static bool is_contiguous_bit_set(uint64_t bitmask)
+{
+ bool ret = false;
+ uint64_t tmp64 = bitmask;
+
+ if (tmp64 != 0UL) {
+ while ((tmp64 & 1UL) == 0UL) {
+ tmp64 >>= 1U;
+ }
+
+ while ((tmp64 & 1UL) != 0UL) {
+ tmp64 >>= 1U;
+ }
+
+ if (tmp64 == 0UL) {
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
/**
* @brief vCBM MSR write handler
*
* @pre vcpu != NULL && vcpu->vm != NULL
*/
-static int32_t write_vcbm(__unused struct acrn_vcpu *vcpu, __unused uint32_t vmsr, __unused uint64_t val, __unused int res)
+static int32_t write_vcbm(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val, int res)
{
- /* TODO: this is going to be implemented in a subsequent commit, will perform the following actions:
- * write vCBM
- * vmsr_to_pmsr and vcbm_to_pcbm
- * write pCBM
+ int32_t ret = -EINVAL;
+ /*
+ * vcbm set bits should only be in the range of [0, vcbm_len) (vcat_get_max_vcbm),
+ * so mask with vcat_get_max_vcbm to prevent erroneous vCBM value
*/
- return -EFAULT;
+ uint64_t masked_vcbm = val & vcat_get_max_vcbm(vcpu->vm, res);
+
+ if (is_contiguous_bit_set(masked_vcbm)) {
+ uint32_t pmsr;
+ uint64_t pcbm;
+
+ /* Write vCBM first */
+ vcpu_set_guest_msr(vcpu, vmsr, masked_vcbm | (val & 0xFFFFFFFF00000000UL));
+
+ /* Write pCBM: */
+ pmsr = vmsr_to_pmsr(vcpu->vm, vmsr, res);
+ pcbm = vcbm_to_pcbm(vcpu->vm, masked_vcbm, res);
+ write_pcbm(pmsr, pcbm);
+
+ ret = 0;
+ }
+
+ /* Return non-zero to vmexit_handler if vcbm is not contiguous, which will result in #GP injected to guest */
+ return ret;
}

/**
@@ -444,6 +510,28 @@ static int32_t write_vclosid(struct acrn_vcpu *vcpu, uint64_t val)
return 0;
}

+/**
+ * @brief vCAT MSRs write handler
+ *
+ * @pre vcpu != NULL && vcpu->vm != NULL
+ */
+int32_t write_vcat_msr(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val)
+{
+ int32_t ret = -EACCES;
+
+ if (is_vcat_configured(vcpu->vm)) {
+ if (vmsr == MSR_IA32_PQR_ASSOC) {
+ ret = write_vclosid(vcpu, val);
+ } else if (is_l2_vcbm_msr(vcpu->vm, vmsr)) {
+ ret = write_vcbm(vcpu, vmsr, val, RDT_RESOURCE_L2);
+ } else if (is_l3_vcbm_msr(vcpu->vm, vmsr)) {
+ ret = write_vcbm(vcpu, vmsr, val, RDT_RESOURCE_L3);
+ }
+ }
+
+ return ret;
+}
+
/**
* @brief Initialize vCBM MSRs
*
diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index 06825d211..412388106 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -1092,6 +1092,15 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
}
break;
}
+#ifdef CONFIG_VCAT_ENABLED
+ case MSR_IA32_L2_MASK_BASE ... (MSR_IA32_L2_MASK_BASE + NUM_VCAT_L2_MSRS - 1U):
+ case MSR_IA32_L3_MASK_BASE ... (MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS - 1U):
+ case MSR_IA32_PQR_ASSOC:
+ {
+ err = write_vcat_msr(vcpu, msr, v);
+ break;
+ }
+#endif
default:
{
if (is_x2apic_msr(msr)) {
diff --git a/hypervisor/include/arch/x86/asm/guest/vcat.h b/hypervisor/include/arch/x86/asm/guest/vcat.h
index 16ae0802b..8effa7604 100644
--- a/hypervisor/include/arch/x86/asm/guest/vcat.h
+++ b/hypervisor/include/arch/x86/asm/guest/vcat.h
@@ -16,6 +16,7 @@ void init_vcat_msrs(struct acrn_vcpu *vcpu);
uint16_t vcat_get_num_vclosids(const struct acrn_vm *vm);
uint64_t vcat_pcbm_to_vcbm(const struct acrn_vm *vm, uint64_t pcbm, int res);
int32_t read_vcat_msr(const struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t *rval);
+int32_t write_vcat_msr(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val);

#endif /* VCAT_H_ */

--
2.25.1


[PATCH V6 3/8] hv: vCAT: initialize the emulated_guest_msrs array for CAT msrs during platform initialization

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Initialize the emulated_guest_msrs[] array at runtime for the
MSR_IA32_type_MASK_n and MSR_IA32_PQR_ASSOC MSRs; there is no good
way to do this initialization statically at build time.

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/arch/x86/cpu.c | 4 +
hypervisor/arch/x86/guest/vmsr.c | 88 +++++++++++++++++++-
hypervisor/include/arch/x86/asm/guest/vcpu.h | 19 ++++-
hypervisor/include/arch/x86/asm/msr.h | 1 +
4 files changed, 109 insertions(+), 3 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index e1ee4770e..18c33c7d1 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -186,6 +186,10 @@ void init_pcpu_pre(bool is_bsp)
panic("System IOAPIC info is incorrect!");
}

+#ifdef CONFIG_VCAT_ENABLED
+ init_intercepted_cat_msr_list();
+#endif
+
#ifdef CONFIG_RDT_ENABLED
init_rdt_info();
#endif
diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index e83c3069c..971e0b8fb 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -28,7 +28,7 @@
#define INTERCEPT_WRITE (1U << 1U)
#define INTERCEPT_READ_WRITE (INTERCEPT_READ | INTERCEPT_WRITE)

-static const uint32_t emulated_guest_msrs[NUM_GUEST_MSRS] = {
+static uint32_t emulated_guest_msrs[NUM_GUEST_MSRS] = {
/*
* MSRs that trusty may touch and need isolation between secure and normal world
* This may include MSR_IA32_STAR, MSR_IA32_LSTAR, MSR_IA32_FMASK,
@@ -79,6 +79,24 @@ static const uint32_t emulated_guest_msrs[NUM_GUEST_MSRS] = {
#ifdef CONFIG_NVMX_ENABLED
LIST_OF_VMX_MSRS,
#endif
+
+ /* The following range of elements are reserved for vCAT usage and are
+ * initialized dynamically by init_intercepted_cat_msr_list() during platform initialization:
+ * [(NUM_GUEST_MSRS - NUM_VCAT_MSRS) ... (NUM_GUEST_MSRS - 1)] = {
+ * The following layout of each CAT MSR entry is determined by catmsr_to_index_of_emulated_msr():
+ * MSR_IA32_L3_MASK_BASE,
+ * MSR_IA32_L3_MASK_BASE + 1,
+ * ...
+ * MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS - 1,
+ *
+ * MSR_IA32_L2_MASK_BASE + NUM_VCAT_L3_MSRS,
+ * MSR_IA32_L2_MASK_BASE + NUM_VCAT_L3_MSRS + 1,
+ * ...
+ * MSR_IA32_L2_MASK_BASE + NUM_VCAT_L3_MSRS + NUM_VCAT_L2_MSRS - 1,
+ *
+ * MSR_IA32_PQR_ASSOC + NUM_VCAT_L3_MSRS + NUM_VCAT_L2_MSRS
+ * }
+ */
};

static const uint32_t mtrr_msrs[] = {
@@ -355,6 +373,74 @@ void init_emulated_msrs(struct acrn_vcpu *vcpu)
vcpu_set_guest_msr(vcpu, MSR_IA32_FEATURE_CONTROL, val64);
}

+#ifdef CONFIG_VCAT_ENABLED
+/**
+ * @brief Map CAT MSR address to zero based index
+ *
+ * @pre ((msr >= MSR_IA32_L3_MASK_BASE) && msr < (MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS))
+ * || ((msr >= MSR_IA32_L2_MASK_BASE) && msr < (MSR_IA32_L2_MASK_BASE + NUM_VCAT_L2_MSRS))
+ * || (msr == MSR_IA32_PQR_ASSOC)
+ */
+static uint32_t cat_msr_to_index_of_emulated_msr(uint32_t msr)
+{
+ uint32_t index = 0U;
+
+ /* L3 MSRs indices assignment for MSR_IA32_L3_MASK_BASE ~ (MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS):
+ * 0
+ * 1
+ * ...
+ * (NUM_VCAT_L3_MSRS - 1)
+ *
+ * L2 MSRs indices assignment:
+ * NUM_VCAT_L3_MSRS
+ * ...
+ * NUM_VCAT_L3_MSRS + NUM_VCAT_L2_MSRS - 1
+
+ * PQR index assignment for MSR_IA32_PQR_ASSOC:
+ * NUM_VCAT_L3_MSRS
+ */
+
+ if ((msr >= MSR_IA32_L3_MASK_BASE) && (msr < (MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS))) {
+ index = msr - MSR_IA32_L3_MASK_BASE;
+ } else if ((msr >= MSR_IA32_L2_MASK_BASE) && (msr < (MSR_IA32_L2_MASK_BASE + NUM_VCAT_L2_MSRS))) {
+ index = msr - MSR_IA32_L2_MASK_BASE + NUM_VCAT_L3_MSRS;
+ } else if (msr == MSR_IA32_PQR_ASSOC) {
+ index = NUM_VCAT_L3_MSRS + NUM_VCAT_L2_MSRS;
+ } else {
+ ASSERT(false, "invalid CAT msr address");
+ }
+
+ return index;
+}
+
+static void init_cat_msr_entry(uint32_t msr)
+{
+ /* Get index into the emulated_guest_msrs[] table for a given CAT MSR */
+ uint32_t index = cat_msr_to_index_of_emulated_msr(msr) + NUM_GUEST_MSRS - NUM_VCAT_MSRS;
+
+ emulated_guest_msrs[index] = msr;
+}
+
+/* Init emulated_guest_msrs[] dynamically for CAT MSRs */
+void init_intercepted_cat_msr_list(void)
+{
+ uint32_t msr;
+
+ /* MSR_IA32_L2_MASK_n MSRs */
+ for (msr = MSR_IA32_L2_MASK_BASE; msr < (MSR_IA32_L2_MASK_BASE + NUM_VCAT_L2_MSRS); msr++) {
+ init_cat_msr_entry(msr);
+ }
+
+ /* MSR_IA32_L3_MASK_n MSRs */
+ for (msr = MSR_IA32_L3_MASK_BASE; msr < (MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS); msr++) {
+ init_cat_msr_entry(msr);
+ }
+
+ /* MSR_IA32_PQR_ASSOC */
+ init_cat_msr_entry(MSR_IA32_PQR_ASSOC);
+}
+#endif
+
/**
* @pre vcpu != NULL
*/
diff --git a/hypervisor/include/arch/x86/asm/guest/vcpu.h b/hypervisor/include/arch/x86/asm/guest/vcpu.h
index 65ec52203..e251fb4cf 100644
--- a/hypervisor/include/arch/x86/asm/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/asm/guest/vcpu.h
@@ -29,6 +29,7 @@
#include <asm/guest/instr_emul.h>
#include <asm/guest/nested.h>
#include <asm/vmx.h>
+#include <asm/vm_config.h>

/**
* @brief vcpu
@@ -173,12 +174,26 @@ enum reset_mode;

#define NUM_WORLD_MSRS 2U
#define NUM_COMMON_MSRS 23U
+
+#ifdef CONFIG_VCAT_ENABLED
+#define NUM_VCAT_L2_MSRS MAX_CACHE_CLOS_NUM_ENTRIES
+#define NUM_VCAT_L3_MSRS MAX_CACHE_CLOS_NUM_ENTRIES
+/* L2/L3 mask MSRs plus MSR_IA32_PQR_ASSOC */
+#define NUM_VCAT_MSRS (NUM_VCAT_L2_MSRS + NUM_VCAT_L3_MSRS + 1U)
+#else
+#define NUM_VCAT_L2_MSRS 0U
+#define NUM_VCAT_L3_MSRS 0U
+#define NUM_VCAT_MSRS 0U
+#endif
+
+/* For detailed layout of the emulated guest MSRs, see emulated_guest_msrs[NUM_GUEST_MSRS] in vmsr.c */
#ifdef CONFIG_NVMX_ENABLED
-#define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS + NUM_VMX_MSRS)
+#define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS + NUM_VMX_MSRS + NUM_VCAT_MSRS)
#else
-#define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS)
+#define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS + NUM_VCAT_MSRS)
#endif

+
#define EOI_EXIT_BITMAP_SIZE 256U

struct guest_cpu_context {
diff --git a/hypervisor/include/arch/x86/asm/msr.h b/hypervisor/include/arch/x86/asm/msr.h
index 9c9b56bf7..6556267f5 100644
--- a/hypervisor/include/arch/x86/asm/msr.h
+++ b/hypervisor/include/arch/x86/asm/msr.h
@@ -617,6 +617,7 @@ static inline bool is_x2apic_msr(uint32_t msr)
struct acrn_vcpu;

void init_msr_emulation(struct acrn_vcpu *vcpu);
+void init_intercepted_cat_msr_list(void);
uint32_t vmsr_get_guest_msr_index(uint32_t msr);
void update_msr_bitmap_x2apic_apicv(struct acrn_vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu);
--
2.25.1


[PATCH V6 6/8] hv: vCAT: implementing the vCAT MSRs read handler

Dongsheng Zhang
 

From: dongshen <dongsheng.x.zhang@...>

Implement the read_vcat_msr() function to handle read requests for the
MSR_IA32_PQR_ASSOC and MSR_IA32_type_MASK_n vCAT MSRs.

Tracked-On: #5917
Signed-off-by: dongshen <dongsheng.x.zhang@...>
---
hypervisor/arch/x86/guest/vcat.c | 45 ++++++++++++++++++++
hypervisor/arch/x86/guest/vmsr.c | 9 ++++
hypervisor/include/arch/x86/asm/guest/vcat.h | 1 +
3 files changed, 55 insertions(+)

diff --git a/hypervisor/arch/x86/guest/vcat.c b/hypervisor/arch/x86/guest/vcat.c
index 91522a560..03e9a3cc9 100644
--- a/hypervisor/arch/x86/guest/vcat.c
+++ b/hypervisor/arch/x86/guest/vcat.c
@@ -320,6 +320,51 @@ uint64_t vcat_pcbm_to_vcbm(const struct acrn_vm *vm, uint64_t pcbm, int res)
return (pcbm & max_pcbm) >> low;
}

+/**
+ * @pre vm != NULL
+ */
+static bool is_l2_vcbm_msr(const struct acrn_vm *vm, uint32_t vmsr)
+{
+ /* num_vcbm_msrs = num_vclosids */
+ uint16_t num_vcbm_msrs = vcat_get_num_vclosids(vm);
+
+ return ((get_rdt_res_cap_info(RDT_RESOURCE_L2)->num_closids > 0U)
+ && (vmsr >= MSR_IA32_L2_MASK_BASE) && (vmsr < (MSR_IA32_L2_MASK_BASE + num_vcbm_msrs)));
+}
+
+/**
+ * @pre vm != NULL
+ */
+static bool is_l3_vcbm_msr(const struct acrn_vm *vm, uint32_t vmsr)
+{
+ /* num_vcbm_msrs = num_vclosids */
+ uint16_t num_vcbm_msrs = vcat_get_num_vclosids(vm);
+
+ return ((get_rdt_res_cap_info(RDT_RESOURCE_L3)->num_closids > 0U)
+ && (vmsr >= MSR_IA32_L3_MASK_BASE) && (vmsr < (MSR_IA32_L3_MASK_BASE + num_vcbm_msrs)));
+}
+
+/**
+ * @brief vCAT MSRs read handler
+ *
+ * @pre vcpu != NULL && vcpu->vm != NULL && rval != NULL
+ */
+int32_t read_vcat_msr(const struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t *rval)
+{
+ int ret = -EACCES;
+ struct acrn_vm *vm = vcpu->vm;
+
+ if (is_vcat_configured(vm) && ((vmsr == MSR_IA32_PQR_ASSOC)
+ || is_l2_vcbm_msr(vm, vmsr) || is_l3_vcbm_msr(vm, vmsr))) {
+ *rval = vcpu_get_guest_msr(vcpu, vmsr);
+ ret = 0;
+ } else {
+ *rval = 0UL;
+ }
+
+ return ret;
+}
+
/**
* @brief Map vMSR address (abbreviated as vmsr) to corresponding pMSR address (abbreviated as pmsr)
* Each vMSR or pMSR is identified by a 32-bit integer
diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index a3a1a8705..06825d211 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -703,6 +703,15 @@ int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
}
break;
}
+#ifdef CONFIG_VCAT_ENABLED
+ case MSR_IA32_L2_MASK_BASE ... (MSR_IA32_L2_MASK_BASE + NUM_VCAT_L2_MSRS - 1U):
+ case MSR_IA32_L3_MASK_BASE ... (MSR_IA32_L3_MASK_BASE + NUM_VCAT_L3_MSRS - 1U):
+ case MSR_IA32_PQR_ASSOC:
+ {
+ err = read_vcat_msr(vcpu, msr, &v);
+ break;
+ }
+#endif
default:
{
if (is_x2apic_msr(msr)) {
diff --git a/hypervisor/include/arch/x86/asm/guest/vcat.h b/hypervisor/include/arch/x86/asm/guest/vcat.h
index a9518bded..16ae0802b 100644
--- a/hypervisor/include/arch/x86/asm/guest/vcat.h
+++ b/hypervisor/include/arch/x86/asm/guest/vcat.h
@@ -15,6 +15,7 @@ uint16_t vcat_get_vcbm_len(const struct acrn_vm *vm, int res);
void init_vcat_msrs(struct acrn_vcpu *vcpu);
uint16_t vcat_get_num_vclosids(const struct acrn_vm *vm);
uint64_t vcat_pcbm_to_vcbm(const struct acrn_vm *vm, uint64_t pcbm, int res);
+int32_t read_vcat_msr(const struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t *rval);

#endif /* VCAT_H_ */

--
2.25.1


Re: [PATCH v3 02/11] misc: life_mngr: add uart channel module

Li, Fei1
 

On Tue, Oct 19, 2021 at 11:29:25AM +0800, Wu, Xiangyang wrote:
In the uart module, the following functions are implemented:
- init_uart_channel
Initialize a lock and configuration of uart channel.
- open_uart_channel
Allocate one channel device instance to store information about
one uart channel device which will be opened.
For the master channel, create two threads: one thread
listens for and waits on sync messages from the slave channel; another
thread polls messages from the slave channel.
For the slave channel, create one thread to send a sync message
to the master channel every 5 seconds until an acked sync
message is received from the master channel, and then poll messages from
the master channel.
- set_uart_data_handler
Set handler to handle message received
- wait_uart_threads
Wait uart thread to exit
- close_uart_channel
Close uart channel and release channel device instance

Tracked-On: #6652

Signed-off-by: Xiangyang Wu <xiangyang.wu@...>
---
misc/services/life_mngr/uart_channel.c | 254 +++++++++++++++++++++++++
misc/services/life_mngr/uart_channel.h | 102 ++++++++++
2 files changed, 356 insertions(+)
create mode 100644 misc/services/life_mngr/uart_channel.c
create mode 100644 misc/services/life_mngr/uart_channel.h

diff --git a/misc/services/life_mngr/uart_channel.c b/misc/services/life_mngr/uart_channel.c
new file mode 100644
index 000000000..8f1a0cf53
--- /dev/null
+++ b/misc/services/life_mngr/uart_channel.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C)2021 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <pthread.h>
+#include <limits.h>
+#include <stdint.h>
+#include "uart_channel.h"
+#include "log.h"
+#include "config.h"
+
+#define SYNC_FMT "sync:%s"
+#define list_foreach_safe(var, head, field, tvar) \
+for ((var) = LIST_FIRST((head)); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1);\
+ (var) = (tvar))
+
+static void get_channel_dev_id(struct channel_dev *c_dev)
This function is very confusing. Its name implies that it gets the dev_id from channel_dev and returns that id,
but the function doesn't actually do that.
+{
+ int count = 0, pos = 0;
+ bool found = false;
+
+ while (c_dev->buf[count] != '\0') {
+ if (c_dev->buf[count] == ':') {
+ c_dev->buf[count] = '\0';
+ found = true;
+ }
+ if (found)
+ c_dev->name[pos++] = c_dev->buf[++count];
+ else
+ count++;
+ }
+ if (found)
+ LOG_PRINTF("Device fd:%d, VM name:%s\n", get_uart_dev_fd(c_dev->uart_device), c_dev->name);
+}
+/**
+ * @brief Wait to connect device in uart channel
+ */
+static void *listen_uart_channel_dev(void *arg)
What does this function do ?
+{
+ int num;
+ struct channel_dev *c_dev = (struct channel_dev *)arg;
+ struct uart_channel *c = c_dev->channel;
+
+ LOG_PRINTF("Lifecycle manager in service VM fd=%d tty node=%s\n",
+ get_uart_dev_fd(c_dev->uart_device), get_uart_dev_path(c_dev->uart_device));
+ memset(c_dev->buf, 0, sizeof(c_dev->buf));
+ while (c_dev->listening) {
+ do {
+ if (!c_dev->listening)
+ break;
+ num = receive_message(c_dev->uart_device, (uint8_t *)c_dev->buf,
+ sizeof(c_dev->buf));
+ if (num == 0) {
+ LOG_PRINTF("Wait user VM on dev (%s), try to connect it again\n",
+ get_uart_dev_path(c_dev->uart_device));
+ usleep(5 * SCECOND_TO_US);
+ continue;
+ }
+ get_channel_dev_id(c_dev);
+ if (strncmp(SYNC_CMD, c_dev->buf, sizeof(SYNC_CMD)) == 0)
+ break;
+ } while (1);
+ /** Add channel device instance into UART connection list */
+ if (strncmp(SYNC_CMD, c_dev->buf, sizeof(SYNC_CMD)) == 0) {
+ pthread_mutex_lock(&c->tty_conn_list_lock);
+ LIST_INSERT_HEAD(&c->tty_conn_head, c_dev, list);
+ pthread_mutex_unlock(&c->tty_conn_list_lock);
+ }
+ if (strlen(c_dev->buf) > 0)
+ c->data_handler((const char *)c_dev->buf, get_uart_dev_fd(c_dev->uart_device));
+ }
+ LOG_PRINTF("Lifecycle manager stops to listen device:%s\n",
+ get_uart_dev_path(c_dev->uart_device));
+ return NULL;
+}
+/**
+ * @brief Wait to connect device in the uart channel
+ * and poll message
+ *
+ * Send sync message every 5 second and wait acked sync message from master
+ * channel, add uart channel device instance into uart connection list, poll
+ * message.
+ */
+static void *connect_uart_channel_server(void *arg)
+{
+ int ret, num;
+ struct channel_dev *c_dev = (struct channel_dev *)arg;
+ struct uart_channel *c = c_dev->channel;
+ char buf[CHANNEL_DEV_NAME_MAX + SYNC_LEN];
+
+ snprintf(buf, sizeof(buf), SYNC_FMT, c->conf.identifier);
+
+ do {
+ LOG_PRINTF("Send sync command:%s identifier=%s\n", buf, c->conf.identifier);
+ ret = send_message(c_dev->uart_device, buf, sizeof(buf));
+ if (ret != 0)
+ LOG_WRITE("Send sync command to service VM fail\n");
+ memset(c_dev->buf, 0, sizeof(c_dev->buf));
+ num = receive_message(c_dev->uart_device, (uint8_t *)c_dev->buf, sizeof(c_dev->buf));
+ if (strncmp(ACKED_SYNC, c_dev->buf, sizeof(ACKED_SYNC)) == 0)
+ break;
+ usleep(5 * SCECOND_TO_US);
+ } while (1);
+ /** Add channel device instance into UART connection list */
+ pthread_mutex_lock(&c->tty_conn_list_lock);
+ LIST_INSERT_HEAD(&c->tty_conn_head, c_dev, list);
+ pthread_mutex_unlock(&c->tty_conn_list_lock);
+ c->data_handler((const char *)c_dev->buf, get_uart_dev_fd(c_dev->uart_device));
+
+ while (c_dev->polling) {
+ memset(c_dev->buf, 0, sizeof(c_dev->buf));
+ do {
+ num = receive_message(c_dev->uart_device, (uint8_t *)c_dev->buf,
+ sizeof(c_dev->buf));
+ } while (num == 0);
+ c->data_handler((const char *)c_dev->buf, get_uart_dev_fd(c_dev->uart_device));
+ }
+ LOG_WRITE("Lifecycle manager: disconnect\n");
+ return NULL;
Why is this return needed?
+}
+
+static void *poll_uart_channel_events(void *arg)
+{
+ struct channel_dev *c_dev = (struct channel_dev *)arg;
+ int num = 0;
+ int ret;
+ struct uart_channel *c;
+
+ c = c_dev->channel;
+ sem_wait(&c_dev->dev_sem);
+ LOG_PRINTF("UART polling fd=%d...\n", get_uart_dev_fd(c_dev->uart_device));
+ while (c_dev->polling) {
+ memset(c_dev->buf, 0, sizeof(c_dev->buf));
+ do {
+ num = receive_message(c_dev->uart_device, (uint8_t *)c_dev->buf,
+ sizeof(c_dev->buf));
+ /**
+ * Retry to send poweroff command to slave channel device when it
+ * misses this command
+ */
+ while ((c_dev->retry_poweroff > 0) && (num == 0)) {
+ c_dev->retry_poweroff--;
+ usleep(VM_SHUTDOWN_INTERVAL * SCECOND_TO_US);
+ LOG_PRINTF("Retry to send poweroff to user VM (%s)\n",
+ c_dev->name);
+ ret = send_message(c_dev->uart_device, POWEROFF_CMD,
+ sizeof(POWEROFF_CMD));
+ if (ret != 0)
+ LOG_WRITE("Send poweroff message to user VM fail\n");
+
+ num = receive_message(c_dev->uart_device, (uint8_t *)c_dev->buf,
+ sizeof(c_dev->buf));
+ /**
+ * The slave channel device may be inactive, poweroff comamnd
+ * sending will timeout
+ */
+ if ((c_dev->retry_poweroff == 0) && (num == 0)) {
+ LOG_PRINTF("Send poweroff to user VM (%s) TIMEOUT\n",
+ c_dev->name);
+ num = 1;
+ memcpy(c_dev->buf, VM_POWEROFF_TIMEOUT,
+ strlen(VM_POWEROFF_TIMEOUT));
+ break;
+ }
+ }
+ } while (num == 0);
+ if (strlen(c_dev->buf) > 0) {
+ get_channel_dev_id(c_dev);
+ c->data_handler((const char *)c_dev->buf, get_uart_dev_fd(c_dev->uart_device));
+ }
You not only poll the channel events, but also handle the events here.
+ }
+ LOG_PRINTF("Lifecycle manager stops to poll device:%s\n",
+ get_uart_dev_path(c_dev->uart_device));
+ return NULL;
??? Why is this return needed?
+}
+
+void set_uart_data_handler(struct uart_channel *c, data_handler_f *fn)
Could we just pass data_handler_f as an input in open_uart_channel ?

+{
+ c->data_handler = fn;
+}


+int open_uart_channel(struct uart_channel *c, char *path, bool master)
+{
+ struct uart_dev *dev;
+ struct channel_dev *c_dev;
+
+ dev = init_uart_dev(path);
+ if (dev == NULL)
+ return -1;
+
+ c_dev = calloc(1, sizeof(*c_dev));
+ if (!c_dev) {
+ LOG_PRINTF("%s: Failed to alloc mem for UART channel device\n", __func__);
Do we need to deinit the uart device here ?
+ return -1;
+ }
+ c_dev->uart_device = dev;
+ c_dev->channel = c;
+ c_dev->listening = true;
+ c_dev->polling = true;
+ sem_init(&c_dev->dev_sem, 0, 0);
+ /** Add channel device instance into open list */
+ pthread_mutex_lock(&c->tty_conn_list_lock);
+ LIST_INSERT_HEAD(&c->tty_open_head, c_dev, open_list);
+ pthread_mutex_unlock(&c->tty_conn_list_lock);
+ if (master) {
+ pthread_create(&c_dev->listen_thread, NULL, listen_uart_channel_dev, c_dev);
+ pthread_create(&c_dev->connect_thread, NULL, poll_uart_channel_events, c_dev);
+ } else {
+ pthread_create(&c_dev->listen_thread, NULL, connect_uart_channel_server, c_dev);
+ }
I think you should call init_uart_channel here.
+ return 0;
+}
+void close_uart_channel(struct uart_channel *c, bool master)
+{
+ struct channel_dev *c_dev;
+
+ LIST_FOREACH(c_dev, &c->tty_open_head, open_list) {
+ pthread_mutex_lock(&c->tty_conn_list_lock);
+ LIST_REMOVE(c_dev, open_list);
+ pthread_mutex_unlock(&c->tty_conn_list_lock);
+
+ deinit_uart_dev(c_dev->uart_device);
+ free(c_dev);
+ }
+}
+void wait_uart_threads(struct uart_channel *c, bool master)
Could we add a field in struct uart_channel to indicate whether this channel is the master or not?
+{
+ struct channel_dev *c_dev;
+
+ LIST_FOREACH(c_dev, &c->tty_open_head, open_list) {
+ pthread_join(c_dev->listen_thread, NULL);
+ if (master)
+ pthread_join(c_dev->connect_thread, NULL);
+ }
+}
+void init_uart_channel(struct uart_channel *c, char *id, char *s5_dev_name)
Do we need to check the input variables? This function should set up uart_channel_conf.
+{
+ pthread_mutex_init(&c->tty_conn_list_lock, NULL);
+ memcpy(c->conf.identifier, id, strlen(id));
+ memcpy(c->conf.allowed_s5_dev_name, s5_dev_name,
+ strlen(s5_dev_name));
I think you need to initialize each element in struct uart_channel, no?
+}
+
diff --git a/misc/services/life_mngr/uart_channel.h b/misc/services/life_mngr/uart_channel.h
new file mode 100644
index 000000000..e0e3018f5
--- /dev/null
+++ b/misc/services/life_mngr/uart_channel.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C)2021 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _UART_CHANNEL_H_
+#define _UART_CHANNEL_H_
+#include <sys/queue.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sys/un.h>
+#include "uart.h"
+
+#define WAIT_USER_VM_POWEROFF (10*SCECOND_TO_US)
+
+#define SYNC_CMD "sync"
+#define ACKED_SYNC "acked_sync"
+#define POWEROFF_CMD "poweroff_cmd"
+#define VM_POWEROFF_TIMEOUT "vm_poweroff_timeout"
+#define SYNC_LEN (sizeof(SYNC_CMD))
+
+#define CHANNEL_DEV_NAME_MAX 128U
+#define CHANNEL_DEV_BUF_LEN 256U
+
+typedef void data_handler_f(const char *cmd_name, int fd);
+
+struct channel_dev {
+ struct uart_dev *uart_device;
+ char name[CHANNEL_DEV_NAME_MAX]; /**< user VM name */
+ bool listening; /**< listen thread loop flag */
+ bool polling; /**< message polling thread loop flag */
+ pthread_t listen_thread;
+ pthread_t connect_thread;
+
+ char buf[CHANNEL_DEV_BUF_LEN]; /**< store received message */
+ int len; /**< buf len */
You mean the len of message in buf ?
+
+ LIST_ENTRY(channel_dev) list; /**< list node used in UART connection list */
+ LIST_ENTRY(channel_dev) open_list; /**< list node used UART opening list */
+
+ struct uart_channel *channel; /**< point to UART server */
+ sem_t dev_sem; /**< semaphore used to start polling message */
+ int retry_poweroff; /**< the number of sending poweroff command */
+};
+struct channel_config {
+ /**
+ * the name of UART device for which
+ * system shutdown request is valid
+ */
+ char allowed_s5_dev_name[TTY_PATH_MAX];
+ char identifier[CHANNEL_DEV_NAME_MAX]; /**< the user VM name which is configured by user */
+};
+struct uart_channel {
+ data_handler_f *data_handler;
+ LIST_HEAD(tty_head, channel_dev) tty_conn_head; /* UART connection list */
+ LIST_HEAD(tty_open_head, channel_dev) tty_open_head; /* UART opening list */
+ pthread_mutex_t tty_conn_list_lock;
+
+ struct channel_config conf;
+
+ /**
+ * the flag indicating the system shutdown request
+ * is allowed to be sent through the UART
+ */
+ bool is_allowed_s5;
+};
+
+/**
+ * @brief Initialize a lock and configuration of uart channel
+ */
+void init_uart_channel(struct uart_channel *c, char *id, char *s5_dev_name);
+/**
+ * @brief Open one uart channel according to device name
+ *
+ * Allocate one channel device instance to store information about
+ * one uart channel device which will be opened.
+ * For master channel, create two threads, one thread
+ * is to listen and wait sync messaage from slave channel, another thread
+ * is to poll message from slave channel.
+ * For slave channel, create one thread to send sync message
+ * to master channel every 5 second until acked sync
+ * message is received from master channel and poll meessage from master channel.
+ *
+ * @param uart Point to uart server
+ * @param path Start address of the name of the device which will
+ * be opened
+ * @param master the uart channel is a master channel or slave channel
+ */
+int open_uart_channel(struct uart_channel *c, char *path, bool master);
+/**
+ * @brief Set handler to handle message received
+ */
+void set_uart_data_handler(struct uart_channel *c, data_handler_f *fn);
+/**
+ * @brief Wait uart thread to exit
+ */
+void wait_uart_threads(struct uart_channel *c, bool master);
+/**
+ * @brief Close uart channel and release channel device instance
+ */
+void close_uart_channel(struct uart_channel *c, bool master);
+#endif
+
--
2.25.1






Re: [PATCH 00/34] ACRN terminology for SOS

Geoffroy Van Cutsem
 

Fix the few remaining comments throughout the patch series but after
that LGTM.

Acked-by: Geoffroy Van Cutsem <geoffroy.vancutsem@...>

On Tue, 2021-10-19 at 15:19 +0800, Liu Long wrote:
From: Liu Long <longliu@...>
Your email address is incorrect, it should be long.liu@...

(this is the case for the entire list of patches)



Service VM: a special Virtual Machine (VM), directly launched by
the hypervisor. The Service VM can access hardware resources directly
by
running native drivers and provides device sharing services to
post-launched User VMs through the ACRN Device Model (DM). Hardware
resources include CPUs, memory, graphics memory, USB devices, disk,
and
network mediation. For that we rename the SOS/sos to
SERVICE_VM/service_vm
For the string that point to specific OS we use the service_vm_os

V2-->V1
Split the patch to depend on the parameter or function modify
Change the service_os to service_vm_os


Liu Long (34):
ACRN: hv: Rename sos_vm_ptr to service_vm_ptr
ACRN: hv: Rename is_sos_vm to is_service_vm
ACRN: hv: Rename get_sos_vm to get_service_vm
ACRN: hv: Rename prepare_sos_vm_memmap to prepare_service_vm_memmap
ACRN: hv: Rename parameter named sos to service_vm in vm.c
ACRN: hv: Rename create_sos_vm_e820 to create_service_vm_e820
ACRN: hv: Rename filter_mem_from_sos_e820 to
filter_mem_from_service_vm_e820
ACRN: hv: Renmae sos_vm_e820 to service_vm_e820
ACRN: hv Rename sos_vm_config to service_vm_config
ACRN: hv Rename sos to service_vm in hcall
ACRN: hv: Rename sos_vm to service_vm
ACRN: hv: Rename sos_vm to service_vm in hsm interrupt
ACRN: hv: Rename sos_vm_hpa2gpa to service_vm_hpa2gpa
ACRN: hv: Rename SOS to Service_VM in emu type
ACRN: hv: Rename SOS_VM_NUM to SERVICE_VM_NUM
ACRN: DM: Rename SOS_SOCKET_PORT to SERVICE_VM_SOCKET_PORT
ACRN: Misc: Rename SOS_CONSOLE to SERVICE_VM_CONSOLE
ACRN: hv: Rename SOS_BOOTARGS_DIF to SERVICE_VM_BOOTARGS_DIF
ACRN: misc: Rename SOS_VM_CONFIG_CPU_AFFINITY
ACRN: Misc: Rename is_sos_vm to is_service_vm
ACRN: misc: Rename sos in sos_extend_all_cpus to service_vm
ACRN: misc: Rename sos to service_vm in scenario py
ACRN: misc: Rename sos to service_vm in launch_cfg_lib
ACRN: misc: Rename sos to service_vm in cpu_affinity
ACRN: misc: Rename sos to service_vm in config_tools
ACRN: hv: Rename sos to service_vm in boot
ACRN: misc: Rename vmtype from SOS_VM to SERVICE_VM
ACRN: config_app: Rename sos to service_vm in config_app
ACRN: board_config: Rename sos to service_vm
ACRN: launch_config: Rename sos to service_vm
ACRN: life_mngr: Rename sos to service_vm
ACRN: config_tool: Renam sos to service_vm
ACRN: misc: Rename SOS_COM to SERVICE_VM_CON
ACRN: misc: Renam SOS to SERVICE_VM

devicemodel/core/pm_vuart.c | 4 +-
devicemodel/hw/platform/acpi/acpi.c | 8 +-
hypervisor/arch/x86/configs/pci_dev.c | 6 +-
hypervisor/arch/x86/e820.c | 2 +-
hypervisor/arch/x86/guest/assign.c | 14 +--
hypervisor/arch/x86/guest/ept.c | 8 +-
hypervisor/arch/x86/guest/pm.c | 8 +-
hypervisor/arch/x86/guest/vcpuid.c | 2 +-
hypervisor/arch/x86/guest/ve820.c | 28 +++---
hypervisor/arch/x86/guest/vlapic.c | 2 +-
hypervisor/arch/x86/guest/vm.c | 92 +++++++++------
----
hypervisor/arch/x86/guest/vm_reset.c | 6 +-
hypervisor/arch/x86/guest/vmcall.c | 33 +++----
hypervisor/arch/x86/guest/vmsr.c | 2 +-
hypervisor/arch/x86/guest/vmtrr.c | 4 +-
hypervisor/arch/x86/guest/vmx_io.c | 8 +-
hypervisor/arch/x86/mmu.c | 2 +-
hypervisor/arch/x86/seed/seed.c | 6 +-
hypervisor/boot/guest/bzimage_loader.c | 44 ++++-----
hypervisor/boot/guest/vboot_info.c | 6 +-
hypervisor/boot/include/boot.h | 4 +-
hypervisor/common/hypercall.c | 72 +++++++--------
hypervisor/common/ptdev.c | 6 +-
hypervisor/debug/hypercall.c | 8 +-
hypervisor/debug/shell.c | 2 +-
hypervisor/dm/io_req.c | 6 +-
hypervisor/dm/mmio_dev.c | 4 +-
hypervisor/dm/vioapic.c | 4 +-
hypervisor/dm/vpci/pci_pt.c | 8 +-
hypervisor/dm/vpci/vpci.c | 44 ++++-----
hypervisor/dm/vpic.c | 4 +-
.../include/arch/x86/asm/guest/assign.h | 2 +-
hypervisor/include/arch/x86/asm/guest/ept.h | 6 +-
hypervisor/include/arch/x86/asm/guest/vm.h | 14 +--
hypervisor/include/arch/x86/asm/pci_dev.h | 2 +-
hypervisor/include/arch/x86/asm/vm_config.h | 16 ++--
hypervisor/include/common/hypercall.h | 54 +++++------
hypervisor/include/common/vm_uuids.h | 2 +-
hypervisor/include/public/acrn_common.h | 6 +-
hypervisor/include/public/acrn_hv_defs.h | 6 +-
misc/config_tools/board_config/vbar_base_h.py | 40 ++++----
.../config_app/templates/scenario.html | 2 +-
misc/config_tools/config_app/views.py | 18 ++--
.../data/cfl-k700-i7/hybrid_rt.xml | 2 +-
misc/config_tools/data/cfl-k700-i7/shared.xml | 2 +-
.../data/generic_board/hybrid.xml | 2 +-
.../data/generic_board/shared.xml | 2 +-
misc/config_tools/data/nuc11tnbi5/hybrid.xml | 12 +--
misc/config_tools/data/nuc11tnbi5/shared.xml | 10 +-
misc/config_tools/data/qemu/sdc.xml | 2 +-
misc/config_tools/data/whl-ipc-i5/hybrid.xml | 2 +-
.../data/whl-ipc-i5/hybrid_rt.xml | 2 +-
misc/config_tools/data/whl-ipc-i5/sdc.xml | 2 +-
misc/config_tools/data/whl-ipc-i5/shared.xml | 2 +-
misc/config_tools/hv_config/hv_item.py | 2 +-
misc/config_tools/launch_config/com.py | 22 ++---
misc/config_tools/library/launch_cfg_lib.py | 34 +++----
misc/config_tools/library/scenario_cfg_lib.py | 76 +++++++--------
.../config_tools/scenario_config/pci_dev_c.py | 32 +++----
.../scenario_config/scenario_item.py | 4 +-
.../scenario_config/vm_configurations_c.py | 44 ++++-----
.../scenario_config/vm_configurations_h.py | 26 +++---
misc/config_tools/schema/VMtypes.xsd | 30 +++---
misc/config_tools/schema/config.xsd | 4 +-
misc/config_tools/static_allocators/bdf.py | 2 +-
.../static_allocators/cpu_affinity.py | 34 +++----
misc/config_tools/static_allocators/gpa.py | 4 +-
misc/config_tools/static_allocators/intx.py | 14 +--
.../config_tools/static_allocators/lib/lib.py | 4 +-
misc/config_tools/static_allocators/pio.py | 10 +-
misc/config_tools/xforms/lib.xsl | 6 +-
misc/config_tools/xforms/misc_cfg.h.xsl | 22 ++---
misc/config_tools/xforms/pci_dev.c.xsl | 6 +-
.../xforms/vm_configurations.c.xsl | 24 ++---
.../xforms/vm_configurations.h.xsl | 12 +--
misc/hv_prebuild/vm_cfg_checks.c | 10 +-
misc/services/acrn_manager/acrnd.c | 10 +-
misc/services/life_mngr/life_mngr.c | 54 +++++------
misc/services/life_mngr/life_mngr_win.c | 4 +-
79 files changed, 573 insertions(+), 572 deletions(-)


Re: [PATCH v2 34/34] ACRN: misc: Renam SOS to SERVICE_VM

Geoffroy Van Cutsem
 

Fix subject line from "misc: Renam SOS to SERVICE_VM" to "misc: Rename
SOS to SERVICE_VM"

On Tue, 2021-10-19 at 15:20 +0800, Liu Long wrote:
From: Liu Long <longliu@...>

Rename SOS_LCS_SOCK to SERVICE_VM_LCS_SOCK
rename SOS_REQ to SERVICE_VM_REQ

Signed-off-by: Liu Long <longliu@...>
---
.../scenario_config/vm_configurations_c.py | 4 ++--
misc/config_tools/static_allocators/intx.py | 8 ++++----
misc/services/acrn_manager/acrnd.c | 10 +++++---
--
misc/services/life_mngr/life_mngr_win.c | 4 ++--
4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/misc/config_tools/scenario_config/vm_configurations_c.py
b/misc/config_tools/scenario_config/vm_configurations_c.py
index 0adcb3a4a..4e79433c6 100644
--- a/misc/config_tools/scenario_config/vm_configurations_c.py
+++ b/misc/config_tools/scenario_config/vm_configurations_c.py
@@ -58,7 +58,7 @@ def vuart0_output(i, vm_type, vm_info, config):
print("\t\t\t.type =
{0},".format(vm_info.vuart.v0_vuart[i]['type']), file=config)
if vm_info.vuart.v0_vuart[i]['base'] == "INVALID_COM_BASE":
print("\t\t\t.addr.port_base = INVALID_COM_BASE,",
file=config)
- if "SOS_" in vm_type:
+ if "SERVICE_" in vm_type:
print("\t\t\t.irq = SERVICE_VM_COM1_IRQ,", file=config)
elif "PRE_LAUNCHED_VM" ==
scenario_cfg_lib.VM_DB[vm_type]['load_type']:
print("\t\t\t.irq = COM1_IRQ,", file=config)
@@ -66,7 +66,7 @@ def vuart0_output(i, vm_type, vm_info, config):
print("\t\t\t.irq = {0},".format(
vm_info.vuart.v0_vuart[i]['irq']), file=config)
else:
- if "SOS_" in vm_type:
+ if "SERVICE_" in vm_type:
print("\t\t\t.addr.port_base = SERVICE_VM_COM1_BASE,",
file=config)
print("\t\t\t.irq = SERVICE_VM_COM1_IRQ,", file=config)
elif "PRE_LAUNCHED_VM" ==
scenario_cfg_lib.VM_DB[vm_type]['load_type']:
diff --git a/misc/config_tools/static_allocators/intx.py
b/misc/config_tools/static_allocators/intx.py
index 8dfa70a34..20778b584 100644
--- a/misc/config_tools/static_allocators/intx.py
+++ b/misc/config_tools/static_allocators/intx.py
@@ -73,13 +73,13 @@ def alloc_legacy_vuart_irqs(board_etree,
scenario_etree, allocation_etree):
raise
lib.error.ResourceError(f"{hv_debug_console} is not in the native
environment! The ttyS available are: {native_ttys.keys()}")
else:
legacy_vuart_node_irq_text =
common.get_node(f"legacy_vuart[@id =
'{legacy_vuart_id}']/irq/text()", vm_node)
- if legacy_vuart_node_irq_text == 'COM1_IRQ' or
legacy_vuart_node_irq_text == 'SOS_COM1_IRQ' \
- or legacy_vuart_node_irq_text == 'COM3_IRQ' or
legacy_vuart_node_irq_text == 'SOS_COM3_IRQ':
+ if legacy_vuart_node_irq_text == 'COM1_IRQ' or
legacy_vuart_node_irq_text == 'SERVICE_VM_OS_COM1_IRQ' \
+ or legacy_vuart_node_irq_text == 'COM3_IRQ' or
legacy_vuart_node_irq_text == 'SERVICE_VM_OS_COM3_IRQ':
legacy_vuart_irq = '4'
if legacy_vuart_irq in irq_list:
remove_irq(irq_list, legacy_vuart_irq)
- elif legacy_vuart_node_irq_text == 'COM2_IRQ' or
legacy_vuart_node_irq_text == 'SOS_COM2_IRQ' \
- or legacy_vuart_node_irq_text == 'COM4_IRQ' or
legacy_vuart_node_irq_text == 'SOS_COM4_IRQ':
+ elif legacy_vuart_node_irq_text == 'COM2_IRQ' or
legacy_vuart_node_irq_text == 'SERVICE_VM_OS_COM2_IRQ' \
+ or legacy_vuart_node_irq_text == 'COM4_IRQ' or
legacy_vuart_node_irq_text == 'SERVICE_VM_OS_COM4_IRQ':
legacy_vuart_irq = '3'
if legacy_vuart_irq in irq_list:
remove_irq(irq_list, legacy_vuart_irq)
diff --git a/misc/services/acrn_manager/acrnd.c
b/misc/services/acrn_manager/acrnd.c
index c579fe38f..e02db4eac 100644
--- a/misc/services/acrn_manager/acrnd.c
+++ b/misc/services/acrn_manager/acrnd.c
@@ -22,7 +22,7 @@
#include "ioc.h"

#define ACRND_NAME "acrnd"
-#define SOS_LCS_SOCK "sos-lcs"
+#define SERVICE_VM_LCS_SOCK "sos-lcs"
I don't think I saw this sos-lcs being renamed to service-vm-lcs in the
patch series, or did I miss it?

#define HW_IOC_PATH "/dev/cbc-early-signals"
#define VMS_STOP_TIMEOUT 20U /* Time to wait VMs to stop */
#define SOCK_TIMEOUT 2U
@@ -321,10 +321,10 @@ unsigned get_sos_wakeup_reason(void)
struct mngr_msg req;
struct mngr_msg ack;

- client_fd = mngr_open_un(SOS_LCS_SOCK, MNGR_CLIENT);
+ client_fd = mngr_open_un(SERVICE_VM_LCS_SOCK, MNGR_CLIENT);
if (client_fd <= 0) {
fprintf(stderr, "Failed to open the socket(%s) to query
the "
- "reason for the wake-up",
SOS_LCS_SOCK);
+ "reason for the wake-up",
SERVICE_VM_LCS_SOCK);
goto EXIT;
}

@@ -383,7 +383,7 @@ static int set_sos_timer(time_t due_time)
struct mngr_msg req;
struct mngr_msg ack;

- client_fd = mngr_open_un(SOS_LCS_SOCK, MNGR_CLIENT);
+ client_fd = mngr_open_un(SERVICE_VM_LCS_SOCK, MNGR_CLIENT);
if (client_fd <= 0) {
perror("Failed to open sock for to req wkup_reason");
ret = client_fd;
@@ -513,7 +513,7 @@ static void* notify_stop_state(void *arg)

store_timer_list();

- lcs_fd = mngr_open_un(SOS_LCS_SOCK, MNGR_CLIENT);
+ lcs_fd = mngr_open_un(SERVICE_VM_LCS_SOCK, MNGR_CLIENT);
if (lcs_fd < 0) {
fprintf(stderr, "cannot open sos-lcs.socket\n");
goto exit;
diff --git a/misc/services/life_mngr/life_mngr_win.c
b/misc/services/life_mngr/life_mngr_win.c
index f140293b5..9ebda6508 100644
--- a/misc/services/life_mngr/life_mngr_win.c
+++ b/misc/services/life_mngr/life_mngr_win.c
@@ -7,7 +7,7 @@
#include <stdio.h>


-#define SOS_REQ "shutdown"
+#define SERVICE_VM_REQ "shutdown"
#define UOS_ACK "acked"
#define BUFF_SIZE 16U
#define MSG_SIZE 8U
@@ -69,7 +69,7 @@ int main()
continue;
}

- if (strncmp(recvbuf, SOS_REQ, MSG_SIZE) == 0)
+ if (strncmp(recvbuf, SERVICE_VM_REQ, MSG_SIZE) == 0)
{
WriteFile(hCom, UOS_ACK, sizeof(UOS_ACK), NULL,
NULL);
system("shutdown -s -t 0");


Re: [PATCH v2 33/34] ACRN: misc: Rename SOS_COM to SERVICE_VM_CON

Geoffroy Van Cutsem
 

Fix typo in subject line:

misc: Rename SOS_COM to SERVICE_VM_CON -> misc: Rename SOS_COM to
SERVICE_VM_COM

On Tue, 2021-10-19 at 15:20 +0800, Liu Long wrote:
From: Liu Long <longliu@...>

Rename SOS_COMx_BASE to SERVICE_VM_COMx_BASE
rename SOS_COM1_IRQ to SERVICE_VM_COMx_IRQ
rename SEVERITY_SOS to SEVERITY_SERVICE_VM
rename SOS_VM_UUID to SERVICE_VM_UUID
rename RTCT_NATIVE_FILE_PATH_IN_SOS to
RTCT_NATIVE_FILE_PATH_IN_SERVICE_VM_OS
How about RTCT_NATIVE_FILEPATH_SERVICE_VM_OS instead, to make it
slightly shorter? (same question for the _V2_ variant below)

rename RTCT_V2_NATIVE_FILE_PATH_IN_SOS to
RTCT_V2_NATIVE_FILE_PATH_IN_SERVICE_VM_OS

Signed-off-by: Liu Long <longliu@...>
---
devicemodel/hw/platform/acpi/acpi.c | 8 +++---
hypervisor/common/hypercall.c | 2 +-
hypervisor/include/arch/x86/asm/vm_config.h | 6 ++---
hypervisor/include/common/vm_uuids.h | 2 +-
misc/config_tools/data/nuc11tnbi5/hybrid.xml | 8 +++---
misc/config_tools/data/nuc11tnbi5/shared.xml | 8 +++---
misc/config_tools/launch_config/com.py | 2 +-
misc/config_tools/library/scenario_cfg_lib.py | 10 +++----
.../scenario_config/vm_configurations_c.py | 8 +++---
misc/config_tools/schema/VMtypes.xsd | 26 +++++++++------
----
misc/config_tools/static_allocators/pio.py | 8 +++---
misc/hv_prebuild/vm_cfg_checks.c | 4 +--
12 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/devicemodel/hw/platform/acpi/acpi.c
b/devicemodel/hw/platform/acpi/acpi.c
index 12214a850..39ff62ae9 100644
--- a/devicemodel/hw/platform/acpi/acpi.c
+++ b/devicemodel/hw/platform/acpi/acpi.c
@@ -1160,8 +1160,8 @@ static struct {
*/
int create_and_inject_vrtct(struct vmctx *ctx)
{
-#define RTCT_NATIVE_FILE_PATH_IN_SOS
"/sys/firmware/acpi/tables/PTCT"
-#define RTCT_V2_NATIVE_FILE_PATH_IN_SOS
"/sys/firmware/acpi/tables/RTCT"
+#define RTCT_NATIVE_FILE_PATH_IN_SERVICE_VM_OS
"/sys/firmware/acpi/tables/PTCT"
+#define RTCT_V2_NATIVE_FILE_PATH_IN_SERVICE_VM_OS
"/sys/firmware/acpi/tables/RTCT"


#define RTCT_BUF_LEN 0x200 /* Otherwise, need to modify
DSDT_OFFSET corresponding */
@@ -1180,9 +1180,9 @@ int create_and_inject_vrtct(struct vmctx *ctx)
};

/* Name of native RTCT table is "PTCT"(v1) or "RTCT"(v2) */
- native_rtct_fd = open(RTCT_NATIVE_FILE_PATH_IN_SOS, O_RDONLY);
+ native_rtct_fd = open(RTCT_NATIVE_FILE_PATH_IN_SERVICE_VM_OS,
O_RDONLY);
if (native_rtct_fd < 0) {
- native_rtct_fd = open(RTCT_V2_NATIVE_FILE_PATH_IN_SOS,
O_RDONLY);
+ native_rtct_fd =
open(RTCT_V2_NATIVE_FILE_PATH_IN_SERVICE_VM_OS, O_RDONLY);
if (native_rtct_fd < 0) {
pr_err("RTCT file is NOT detected.\n");
return -1;
diff --git a/hypervisor/common/hypercall.c
b/hypervisor/common/hypercall.c
index cc7ba30af..22e42e447 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -71,7 +71,7 @@ bool is_hypercall_from_ring0(void)

inline static bool is_severity_pass(uint16_t target_vmid)
{
- return SEVERITY_SOS >= get_vm_severity(target_vmid);
+ return SEVERITY_SERVICE_VM >= get_vm_severity(target_vmid);
}

/**
diff --git a/hypervisor/include/arch/x86/asm/vm_config.h
b/hypervisor/include/arch/x86/asm/vm_config.h
index af789d32c..99043944b 100644
--- a/hypervisor/include/arch/x86/asm/vm_config.h
+++ b/hypervisor/include/arch/x86/asm/vm_config.h
@@ -38,8 +38,8 @@
#define MAX_MMIO_DEV_NUM 2U

#define CONFIG_SERVICE_VM .load_order = SERVICE_VM,\
- .uuid = SOS_VM_UUID, \
- .severity = SEVERITY_SOS
+ .uuid = SERVICE_VM_UUID, \
+ .severity = SEVERITY_SERVICE_VM

#define CONFIG_SAFETY_VM(idx) .load_order = PRE_LAUNCHED_VM,
\
.uuid = SAFETY_VM_UUID##idx, \
@@ -69,7 +69,7 @@
enum acrn_vm_severity {
SEVERITY_SAFETY_VM = 0x40U,
SEVERITY_RTVM = 0x30U,
- SEVERITY_SOS = 0x20U,
+ SEVERITY_SERVICE_VM = 0x20U,
SEVERITY_STANDARD_VM = 0x10U,
};

diff --git a/hypervisor/include/common/vm_uuids.h
b/hypervisor/include/common/vm_uuids.h
index e87d071da..0a06e44bb 100644
--- a/hypervisor/include/common/vm_uuids.h
+++ b/hypervisor/include/common/vm_uuids.h
@@ -8,7 +8,7 @@
#define VM_UUIDS_H

/* dbbbd434-7a57-4216-a12c-2201f1ab0240 */
-#define SOS_VM_UUID {0xdbU, 0xbbU, 0xd4U, 0x34U, 0x7aU,
0x57U, 0x42U, 0x16U, \
+#define SERVICE_VM_UUID {0xdbU, 0xbbU, 0xd4U, 0x34U,
0x7aU, 0x57U, 0x42U, 0x16U, \
0xa1U, 0x2cU, 0x22U, 0x01U, 0xf1U,
0xabU, 0x02U, 0x40U}

/* fc836901-8685-4bc0-8b71-6e31dc36fa47 */
diff --git a/misc/config_tools/data/nuc11tnbi5/hybrid.xml
b/misc/config_tools/data/nuc11tnbi5/hybrid.xml
index f3fb11028..8cf073f15 100644
--- a/misc/config_tools/data/nuc11tnbi5/hybrid.xml
+++ b/misc/config_tools/data/nuc11tnbi5/hybrid.xml
@@ -147,13 +147,13 @@
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>
- <base>SOS_COM1_BASE</base>
- <irq>SOS_COM1_IRQ</irq>
+ <base>SERVICE_VM_COM1_BASE</base>
+ <irq>SERVICE_VM_COM1_IRQ</irq>
</legacy_vuart>
<legacy_vuart id="1">
<type>VUART_LEGACY_PIO</type>
- <base>SOS_COM2_BASE</base>
- <irq>SOS_COM2_IRQ</irq>
+ <base>SERVICE_VM_COM2_BASE</base>
+ <irq>SERVICE_VM_COM2_IRQ</irq>
<target_vm_id>0</target_vm_id>
<target_uart_id>1</target_uart_id>
</legacy_vuart>
diff --git a/misc/config_tools/data/nuc11tnbi5/shared.xml
b/misc/config_tools/data/nuc11tnbi5/shared.xml
index 7a431a5d8..c581f1c86 100644
--- a/misc/config_tools/data/nuc11tnbi5/shared.xml
+++ b/misc/config_tools/data/nuc11tnbi5/shared.xml
@@ -83,13 +83,13 @@
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>
- <base>SOS_COM1_BASE</base>
- <irq>SOS_COM1_IRQ</irq>
+ <base>SERVICE_VM_COM1_BASE</base>
+ <irq>SERVICE_VM_COM1_IRQ</irq>
</legacy_vuart>
<legacy_vuart id="1">
<type>VUART_LEGACY_PIO</type>
- <base>SOS_COM2_BASE</base>
- <irq>SOS_COM2_IRQ</irq>
+ <base>SERVICE_VM_COM2_BASE</base>
+ <irq>SERVICE_VM_COM2_IRQ</irq>
<target_vm_id>2</target_vm_id>
<target_uart_id>1</target_uart_id>
</legacy_vuart>
diff --git a/misc/config_tools/launch_config/com.py
b/misc/config_tools/launch_config/com.py
index df40db004..b7c434f2c 100644
--- a/misc/config_tools/launch_config/com.py
+++ b/misc/config_tools/launch_config/com.py
@@ -592,7 +592,7 @@ def dm_arg_set(names, sel, virt_io, dm, vmid,
config):
vuart_base =
launch_cfg_lib.get_vuart1_from_scenario(service_vmid + vmid)
if vuart_base == "INVALID_COM_BASE":
err_key = "uos:id={}:poweroff_channel".format(vmid)
- launch_cfg_lib.ERR_LIST[err_key] = "vuart1 of VM{}
in scenario file should select 'SOS_COM2_BASE'".format(service_vmid +
vmid)
+ launch_cfg_lib.ERR_LIST[err_key] = "vuart1 of VM{}
in scenario file should select
'SERVICE_VM_COM2_BASE'".format(service_vmid + vmid)
return
scenario_cfg_lib.get_service_vm_vuart_settings()
print(" {} \\".format(pm_vuart +
launch_cfg_lib.PM_CHANNEL_DIC[pm_key] +
scenario_cfg_lib.SERVICE_VM_UART1_VALID_NUM), file=config)
diff --git a/misc/config_tools/library/scenario_cfg_lib.py
b/misc/config_tools/library/scenario_cfg_lib.py
index edb9ba58d..29becbfd9 100644
--- a/misc/config_tools/library/scenario_cfg_lib.py
+++ b/misc/config_tools/library/scenario_cfg_lib.py
@@ -18,7 +18,7 @@ KERN_BOOT_ADDR_LIST = ['0x100000']

VUART_TYPE = ['VUART_LEGACY_PIO', 'VUART_PCI']
INVALID_COM_BASE = 'INVALID_COM_BASE'
-VUART_BASE = ['SOS_COM1_BASE', 'SOS_COM2_BASE', 'SOS_COM3_BASE',
'SOS_COM4_BASE', 'COM1_BASE',
+VUART_BASE = ['SERVICE_VM_COM1_BASE', 'SERVICE_VM_COM2_BASE',
'SERVICE_VM_COM3_BASE', 'SERVICE_VM_COM4_BASE', 'COM1_BASE',
'COM2_BASE', 'COM3_BASE', 'COM4_BASE',
'CONFIG_COM_BASE', INVALID_COM_BASE]
INVALID_PCI_BASE = 'INVALID_PCI_BASE'
PCI_VUART = 'PCI_VUART'
@@ -27,7 +27,7 @@ PCI_VUART_BASE = [PCI_VUART, INVALID_PCI_BASE]
AVALIBLE_COM1_BASE = [INVALID_COM_BASE, 'COM1_BASE']
AVALIBLE_COM2_BASE = [INVALID_COM_BASE, 'COM2_BASE']

-VUART_IRQ = ['SOS_COM1_IRQ', 'SOS_COM2_IRQ', 'SOS_COM3_IRQ',
'SOS_COM4_IRQ',
+VUART_IRQ = ['SERVICE_VM_COM1_IRQ', 'SERVICE_VM_COM2_IRQ',
'SERVICE_VM_COM3_IRQ', 'SERVICE_VM_COM4_IRQ',
'COM1_IRQ', 'COM2_IRQ', 'COM3_IRQ', 'COM4_IRQ',
'CONFIG_COM_IRQ']

# Support 512M, 1G, 2G
@@ -57,7 +57,7 @@ UUID_DB = {
}

VM_DB = {
- 'SERVICE_VM':{'load_type':'SERVICE_VM',
'severity':'SEVERITY_SOS', 'uuid':UUID_DB['SERVICE_VM']},
+ 'SERVICE_VM':{'load_type':'SERVICE_VM',
'severity':'SEVERITY_SERVICE_VM', 'uuid':UUID_DB['SERVICE_VM']},
'SAFETY_VM':{'load_type':'PRE_LAUNCHED_VM',
'severity':'SEVERITY_SAFETY_VM', 'uuid':UUID_DB['SAFETY_VM']},
'PRE_RT_VM':{'load_type':'PRE_LAUNCHED_VM',
'severity':'SEVERITY_RTVM', 'uuid':UUID_DB['PRE_RT_VM']},
'PRE_STD_VM':{'load_type':'PRE_LAUNCHED_VM',
'severity':'SEVERITY_STANDARD_VM', 'uuid':UUID_DB['PRE_STD_VM']},
@@ -658,9 +658,9 @@ def avl_vuart_ui_select(scenario_info):

if "SERVICE_VM" == VM_DB[vm_type]['load_type']:
key = "vm={},legacy_vuart=0,base".format(vm_i)
- tmp_vuart[key] = ['SOS_COM1_BASE', 'INVALID_COM_BASE']
+ tmp_vuart[key] = ['SERVICE_VM_COM1_BASE',
'INVALID_COM_BASE']
key = "vm={},legacy_vuart=1,base".format(vm_i)
- tmp_vuart[key] = ['SOS_COM2_BASE', 'INVALID_COM_BASE']
+ tmp_vuart[key] = ['SERVICE_VM_COM2_BASE',
'INVALID_COM_BASE']
else:
key = "vm={},legacy_vuart=0,base".format(vm_i)
tmp_vuart[key] = ['INVALID_COM_BASE', 'COM1_BASE']
diff --git a/misc/config_tools/scenario_config/vm_configurations_c.py
b/misc/config_tools/scenario_config/vm_configurations_c.py
index ca7fa0c07..0adcb3a4a 100644
--- a/misc/config_tools/scenario_config/vm_configurations_c.py
+++ b/misc/config_tools/scenario_config/vm_configurations_c.py
@@ -59,7 +59,7 @@ def vuart0_output(i, vm_type, vm_info, config):
if vm_info.vuart.v0_vuart[i]['base'] == "INVALID_COM_BASE":
print("\t\t\t.addr.port_base = INVALID_COM_BASE,",
file=config)
if "SOS_" in vm_type:
- print("\t\t\t.irq = SOS_COM1_IRQ,", file=config)
+ print("\t\t\t.irq = SERVICE_VM_COM1_IRQ,", file=config)
elif "PRE_LAUNCHED_VM" ==
scenario_cfg_lib.VM_DB[vm_type]['load_type']:
print("\t\t\t.irq = COM1_IRQ,", file=config)
elif "POST_LAUNCHED_VM" in
scenario_cfg_lib.VM_DB[vm_type]['load_type']:
@@ -67,8 +67,8 @@ def vuart0_output(i, vm_type, vm_info, config):
vm_info.vuart.v0_vuart[i]['irq']), file=config)
else:
if "SOS_" in vm_type:
- print("\t\t\t.addr.port_base = SOS_COM1_BASE,",
file=config)
- print("\t\t\t.irq = SOS_COM1_IRQ,", file=config)
+ print("\t\t\t.addr.port_base = SERVICE_VM_COM1_BASE,",
file=config)
+ print("\t\t\t.irq = SERVICE_VM_COM1_IRQ,", file=config)
elif "PRE_LAUNCHED_VM" ==
scenario_cfg_lib.VM_DB[vm_type]['load_type']:
print("\t\t\t.addr.port_base = COM1_BASE,", file=config)
print("\t\t\t.irq = COM1_IRQ,", file=config)
@@ -121,7 +121,7 @@ def vuart1_output(i, vm_type, vuart1_vmid_dic,
vm_info, config):
if vuart1_vmid_dic and i in vuart1_vmid_dic.keys():
if "SERVICE_VM" ==
scenario_cfg_lib.VM_DB[vm_type]['load_type']:
if vm_info.vuart.v1_vuart[i]['base'] !=
"INVALID_COM_BASE" and vuart_enable[i]:
- print("\t\t\t.irq = SOS_COM2_IRQ,", file=config)
+ print("\t\t\t.irq = SERVICE_VM_COM2_IRQ,",
file=config)
else:
if vm_info.vuart.v1_vuart[i]['base'] !=
"INVALID_COM_BASE" and vuart_enable[i]:
print("\t\t\t.irq = COM2_IRQ,", file=config)
diff --git a/misc/config_tools/schema/VMtypes.xsd
b/misc/config_tools/schema/VMtypes.xsd
index 2d786b6b1..1c74d70c2 100644
--- a/misc/config_tools/schema/VMtypes.xsd
+++ b/misc/config_tools/schema/VMtypes.xsd
@@ -217,16 +217,16 @@ must exactly match the module tag in the GRUB
multiboot cmdline.</xs:documentati

<xs:simpleType name="LegacyVuartBase">
<xs:annotation>
- <xs:documentation>A string with either ``SOS_COM1_BASE``,
-``SOS_COM2_BASE``, ``SOS_COM3_BASE``, ``SOS_COM4_BASE``,
+ <xs:documentation>A string with either ``SERVICE_VM_COM1_BASE``,
+``SERVICE_VM_COM2_BASE``, ``SERVICE_VM_COM3_BASE``,
``SERVICE_VM_COM4_BASE``,
``COM1_BASE``, ``COM2_BASE``, ``COM3_BASE``, ``COM4_BASE``,
``CONFIG_COM_BASE``, or indicating it's disabled with
``INVALID_COM_BASE``.</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:enumeration value="SOS_COM1_BASE" />
- <xs:enumeration value="SOS_COM2_BASE" />
- <xs:enumeration value="SOS_COM3_BASE" />
- <xs:enumeration value="SOS_COM4_BASE" />
+ <xs:enumeration value="SERVICE_VM_COM1_BASE" />
+ <xs:enumeration value="SERVICE_VM_COM2_BASE" />
+ <xs:enumeration value="SERVICE_VM_COM3_BASE" />
+ <xs:enumeration value="SERVICE_VM_COM4_BASE" />
<xs:enumeration value="COM1_BASE" />
<xs:enumeration value="COM2_BASE" />
<xs:enumeration value="COM3_BASE" />
@@ -238,16 +238,16 @@ must exactly match the module tag in the GRUB
multiboot cmdline.</xs:documentati

<xs:simpleType name="LegacyVuartIrq">
<xs:annotation acrn:configurable="n">
- <xs:documentation>A string with either ``SOS_COM1_IRQ``,
-``SOS_COM2_IRQ``, ``SOS_COM3_IRQ``, ``SOS_COM4_IRQ``,
+ <xs:documentation>A string with either ``SERVICE_VM_COM1_IRQ``,
+``SERVICE_VM_COM2_IRQ``, ``SERVICE_VM_COM3_IRQ``,
``SERVICE_VM_COM4_IRQ``,
``COM1_IRQ``, ``COM2_IRQ``, ``COM3_IRQ``, ``COM4_IRQ``
or ``CONFIG_COM_IRQ``.</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:enumeration value="SOS_COM1_IRQ" />
- <xs:enumeration value="SOS_COM2_IRQ" />
- <xs:enumeration value="SOS_COM3_IRQ" />
- <xs:enumeration value="SOS_COM4_IRQ" />
+ <xs:enumeration value="SERVICE_VM_COM1_IRQ" />
+ <xs:enumeration value="SERVICE_VM_COM2_IRQ" />
+ <xs:enumeration value="SERVICE_VM_COM3_IRQ" />
+ <xs:enumeration value="SERVICE_VM_COM4_IRQ" />
<xs:enumeration value="COM1_IRQ" />
<xs:enumeration value="COM2_IRQ" />
<xs:enumeration value="COM3_IRQ" />
@@ -267,7 +267,7 @@ supported.</xs:documentation>
<xs:element name="base" type="LegacyVuartBase">
<xs:annotation>
<xs:documentation>vUART (COM) enabling switch. Enable by
exposing its COM_BASE
-(e.b., ``SOS_COM1_BASE`` for Service VM); disable by returning
+(e.b., ``SERVICE_VM_COM1_BASE`` for Service VM); disable by
returning
``INVALID_COM_BASE``.</xs:documentation>
</xs:annotation>
</xs:element>
diff --git a/misc/config_tools/static_allocators/pio.py
b/misc/config_tools/static_allocators/pio.py
index 9c28306bc..f94f7e9a9 100644
--- a/misc/config_tools/static_allocators/pio.py
+++ b/misc/config_tools/static_allocators/pio.py
@@ -26,13 +26,13 @@ def remove_pio(pio_list, base):
def assign_legacy_vuart_io_port(vm_node, legacy_vuart_id):
legacy_vuart_base = ""
legacy_vuart_node_base_text =
common.get_node(f"./legacy_vuart[@id =
'{legacy_vuart_id}']/base/text()", vm_node)
- if legacy_vuart_node_base_text == 'COM1_BASE' or
legacy_vuart_node_base_text == 'SOS_COM1_BASE':
+ if legacy_vuart_node_base_text == 'COM1_BASE' or
legacy_vuart_node_base_text == 'SERVICE_VM_COM1_BASE':
legacy_vuart_base = '0x3F8'
- elif legacy_vuart_node_base_text == 'COM2_BASE' or
legacy_vuart_node_base_text == 'SOS_COM2_BASE':
+ elif legacy_vuart_node_base_text == 'COM2_BASE' or
legacy_vuart_node_base_text == 'SERVICE_VM_COM2_BASE':
legacy_vuart_base = '0x2F8'
- elif legacy_vuart_node_base_text == 'COM3_BASE' or
legacy_vuart_node_base_text == 'SOS_COM3_BASE':
+ elif legacy_vuart_node_base_text == 'COM3_BASE' or
legacy_vuart_node_base_text == 'SERVICE_VM_COM3_BASE':
legacy_vuart_base = '0x3E8'
- elif legacy_vuart_node_base_text == 'COM4_BASE' or
legacy_vuart_node_base_text == 'SOS_COM4_BASE':
+ elif legacy_vuart_node_base_text == 'COM4_BASE' or
legacy_vuart_node_base_text == 'SERVICE_VM_COM4_BASE':
legacy_vuart_base = '0x2E8'
return legacy_vuart_base

diff --git a/misc/hv_prebuild/vm_cfg_checks.c
b/misc/hv_prebuild/vm_cfg_checks.c
index 5921b6b79..63b032774 100644
--- a/misc/hv_prebuild/vm_cfg_checks.c
+++ b/misc/hv_prebuild/vm_cfg_checks.c
@@ -131,7 +131,7 @@ bool sanitize_vm_config(void)
ret = false;
} else {
#if (SERVICE_VM_NUM == 1U)
- if (vm_config->severity <=
SEVERITY_SOS) {
+ if (vm_config->severity <=
SEVERITY_SERVICE_VM) {
/* If there are both SOS and Pre-
launched VM, make sure pre-launched VM has higher severity than SOS
*/
printf("%s: pre-launched vm
doesn't has higher severity than SOS \n", __func__);
ret = false;
@@ -142,7 +142,7 @@ bool sanitize_vm_config(void)
case SERVICE_VM:
break;
case POST_LAUNCHED_VM:
- if ((vm_config->severity ==
(uint8_t)SEVERITY_SAFETY_VM) || (vm_config->severity ==
(uint8_t)SEVERITY_SOS)) {
+ if ((vm_config->severity ==
(uint8_t)SEVERITY_SAFETY_VM) || (vm_config->severity ==
(uint8_t)SEVERITY_SERVICE_VM)) {
ret = false;
}
break;


Re: [PATCH v2 32/34] ACRN: config_tool: Renam sos to service_vm

Geoffroy Van Cutsem
 

On Tue, 2021-10-19 at 15:20 +0800, Liu Long wrote:
From: Liu Long <longliu@...>

Rename is-sos-vm to is-service-vm
rename sos_rootfs to service_vm_os_rootfs
rename SOS_ROOTFS to SERVICE_VM_OS_ROOTFS
rename SOS_BOOTARGS to SERVICE_VM_OS_BOOTARGS

Signed-off-by: Liu Long <longliu@...>
---
misc/config_tools/data/nuc11tnbi5/hybrid.xml | 2 +-
misc/config_tools/library/scenario_cfg_lib.py | 4 ++--
.../scenario_config/vm_configurations_h.py | 2 +-
misc/config_tools/xforms/lib.xsl | 4 ++--
misc/config_tools/xforms/misc_cfg.h.xsl | 16 ++++++++--------
misc/config_tools/xforms/pci_dev.c.xsl | 4 ++--
.../xforms/vm_configurations.c.xsl | 18 +++++++++-------
--
.../xforms/vm_configurations.h.xsl | 4 ++--
8 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/misc/config_tools/data/nuc11tnbi5/hybrid.xml
b/misc/config_tools/data/nuc11tnbi5/hybrid.xml
index 6964c3ae3..f3fb11028 100644
--- a/misc/config_tools/data/nuc11tnbi5/hybrid.xml
+++ b/misc/config_tools/data/nuc11tnbi5/hybrid.xml
@@ -143,7 +143,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod></ramdisk_mod>
- <bootargs>SOS_VM_BOOTARGS</bootargs>
+ <bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>
diff --git a/misc/config_tools/library/scenario_cfg_lib.py
b/misc/config_tools/library/scenario_cfg_lib.py
index 6b0f1eb8d..edb9ba58d 100644
--- a/misc/config_tools/library/scenario_cfg_lib.py
+++ b/misc/config_tools/library/scenario_cfg_lib.py
@@ -501,9 +501,9 @@ def os_kern_args_check(id_kern_args_dic,
prime_item, item):
if vm_i not in id_kern_args_dic.keys():
continue
kern_args = id_kern_args_dic[vm_i]
- if "SOS_" in vm_type and kern_args != "SOS_VM_BOOTARGS":
+ if "SERVICE_" in vm_type and kern_args !=
"SERVICE_VM_OS_BOOTARGS":
key = "vm:id={},{},{}".format(vm_i, prime_item, item)
- ERR_LIST[key] = "VM os config kernel service os should
be SOS_VM_BOOTARGS"
+ ERR_LIST[key] = "VM os config kernel service os should
This sentence is hard to parse... and I'm not entirely sure what
triggers this. Can you rephrase and replace the "service os" string
that's left in there.

Maybe something like: "The kernel command-line options for the Service
VM kernel should be stored in SERVICE_VM_OS_BOOTARGS"?

be SERVICE_VM_OS_BOOTARGS"


def os_kern_load_addr_check(kern_type, id_kern_load_addr_dic,
prime_item, item):
diff --git a/misc/config_tools/scenario_config/vm_configurations_h.py
b/misc/config_tools/scenario_config/vm_configurations_h.py
index 8ce6805c5..fc3085ca9 100644
--- a/misc/config_tools/scenario_config/vm_configurations_h.py
+++ b/misc/config_tools/scenario_config/vm_configurations_h.py
@@ -70,7 +70,7 @@ def gen_service_vm_header(scenario_items, config):
if vm_type == 'SERVICE_VM':
print("/* SERVICE_VM == VM{0} */".format(vm_i),
file=config)

- print("#define SOS_VM_BOOTARGS\t\t\tSOS_ROOTFS\t\\",
file=config)
+ print("#define
SERVICE_VM_OS_BOOTARGS\t\t\tSERVICE_VM_OS_ROOTFS\t\\", file=config)
print("\t\t\t\t\tSERVICE_VM_CONSOLE\t\\", file=config)
print("\t\t\t\t\tSERVICE_VM_IDLE\t\\", file=config)
print("\t\t\t\t\tSERVICE_VM_BOOTARGS_DIFF", file=config)
diff --git a/misc/config_tools/xforms/lib.xsl
b/misc/config_tools/xforms/lib.xsl
index da16e75d2..7af6454b0 100644
--- a/misc/config_tools/xforms/lib.xsl
+++ b/misc/config_tools/xforms/lib.xsl
@@ -331,7 +331,7 @@
<xsl:if test="acrn:is-post-launched-vm($vmtype)">
<func:result select="$ivshmem + $console_vuart +
$communication_vuart + $virtual_root_port" />
</xsl:if>
- <xsl:if test="acrn:is-sos-vm($vmtype)">
+ <xsl:if test="acrn:is-service-vm($vmtype)">
<func:result select="$ivshmem + $console_vuart +
$communication_vuart" />
</xsl:if>
</xsl:for-each>
@@ -421,7 +421,7 @@
</xsl:choose>
</func:function>

- <func:function name="acrn:is-sos-vm">
+ <func:function name="acrn:is-service-vm">
<xsl:param name="vm_type" />
<xsl:choose>
<xsl:when test="$vm_type = 'SERVICE_VM'">
diff --git a/misc/config_tools/xforms/misc_cfg.h.xsl
b/misc/config_tools/xforms/misc_cfg.h.xsl
index 4d33c4fb0..1fa045b53 100644
--- a/misc/config_tools/xforms/misc_cfg.h.xsl
+++ b/misc/config_tools/xforms/misc_cfg.h.xsl
@@ -34,8 +34,8 @@
</xsl:template>

<xsl:template match="config-data/acrn-config">
- <xsl:if test="count(vm[acrn:is-sos-vm(vm_type)])">
- <xsl:call-template name="sos_rootfs" />
+ <xsl:if test="count(vm[acrn:is-service-vm(vm_type)])">
+ <xsl:call-template name="service_vm_os_rootfs" />
<xsl:call-template name="sos_serial_console" />
<xsl:call-template name="sos_bootargs_diff" />
</xsl:if>
@@ -65,8 +65,8 @@
</xsl:for-each>
</xsl:template>

-<xsl:template name="sos_rootfs">
- <xsl:value-of select="acrn:define('SOS_ROOTFS', concat($quot,
'root=', vm/board_private/rootfs[text()], ' ', $quot), '')" />
+<xsl:template name="service_vm_os_rootfs">
+ <xsl:value-of select="acrn:define('SERVICE_VM_OS_ROOTFS',
concat($quot, 'root=', vm/board_private/rootfs[text()], ' ', $quot),
'')" />
</xsl:template>

<xsl:template name="sos_serial_console">
@@ -88,8 +88,8 @@
</xsl:template>

<xsl:template name="sos_bootargs_diff">
- <xsl:variable name="bootargs" select="normalize-space(vm[acrn:is-
sos-vm(vm_type)]/board_private/bootargs[text()])" />
- <xsl:variable name="maxcpunum" select="count(//vm[acrn:is-sos-
vm(vm_type)]/cpu_affinity/pcpu_id)" />
+ <xsl:variable name="bootargs" select="normalize-space(vm[acrn:is-
service-vm(vm_type)]/board_private/bootargs[text()])" />
+ <xsl:variable name="maxcpunum" select="count(//vm[acrn:is-service-
vm(vm_type)]/cpu_affinity/pcpu_id)" />
<xsl:variable name="hugepages" select="round(number(substring-
before(//board-data//TOTAL_MEM_INFO, 'kB')) div (1024 * 1024)) - 3"
/>
<xsl:variable name="maxcpus">
<xsl:choose>
@@ -112,8 +112,8 @@
<xsl:template name="cpu_affinity">
<xsl:for-each select="vm">
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(vm_type)">
- <xsl:value-of
select="acrn:define('SERVICE_VM_CONFIG_CPU_AFFINITY', concat('(',
acrn:string-join(//vm[acrn:is-sos-vm(vm_type)]/cpu_affinity/pcpu_id,
'|', 'AFFINITY_CPU(', 'U)'),')'), '')" />
+ <xsl:when test="acrn:is-service-vm(vm_type)">
+ <xsl:value-of
select="acrn:define('SERVICE_VM_CONFIG_CPU_AFFINITY', concat('(',
acrn:string-join(//vm[acrn:is-service-
vm(vm_type)]/cpu_affinity/pcpu_id, '|', 'AFFINITY_CPU(', 'U)'),')'),
'')" />
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="acrn:define(concat('VM', @id,
'_CONFIG_CPU_AFFINITY'), concat('(', acrn:string-
join(cpu_affinity/pcpu_id, '|', 'AFFINITY_CPU(', 'U)'),')'), '')" />
diff --git a/misc/config_tools/xforms/pci_dev.c.xsl
b/misc/config_tools/xforms/pci_dev.c.xsl
index 69453a170..01ff852ba 100644
--- a/misc/config_tools/xforms/pci_dev.c.xsl
+++ b/misc/config_tools/xforms/pci_dev.c.xsl
@@ -33,7 +33,7 @@
<xsl:template match="config-data/acrn-config/vm">
<!-- Initializer of a acrn_vm_pci_dev_config instance -->
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(vm_type)">
+ <xsl:when test="acrn:is-service-vm(vm_type)">
<xsl:value-of select="acrn:array-initializer('struct
acrn_vm_pci_dev_config', 'service_vm_pci_devs',
'CONFIG_MAX_PCI_DEV_NUM')" />
</xsl:when>
<xsl:when test="acrn:pci-dev-num(@id)">
@@ -52,7 +52,7 @@
<xsl:apply-templates select="PTM" />
</xsl:if>

- <xsl:if test="acrn:is-sos-vm(vm_type) or acrn:pci-dev-num(@id)">
+ <xsl:if test="acrn:is-service-vm(vm_type) or acrn:pci-dev-
num(@id)">
<xsl:value-of select="$end_of_array_initializer" />
</xsl:if>
</xsl:template>
diff --git a/misc/config_tools/xforms/vm_configurations.c.xsl
b/misc/config_tools/xforms/vm_configurations.c.xsl
index 4d1c323c2..85a7d5330 100644
--- a/misc/config_tools/xforms/vm_configurations.c.xsl
+++ b/misc/config_tools/xforms/vm_configurations.c.xsl
@@ -27,7 +27,7 @@
<!-- Declaration of pci_devs -->
<xsl:for-each select="vm">
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(vm_type)">
+ <xsl:when test="acrn:is-service-vm(vm_type)">
<xsl:value-of select="acrn:extern('struct
acrn_vm_pci_dev_config', 'service_vm_pci_devs',
'CONFIG_MAX_PCI_DEV_NUM')" />
</xsl:when>
<xsl:when test="acrn:pci-dev-num(@id)">
@@ -64,7 +64,7 @@

<xsl:apply-templates select="vm_type" />
<xsl:apply-templates select="name" />
- <xsl:if test="acrn:is-sos-vm(vm_type)">
+ <xsl:if test="acrn:is-service-vm(vm_type)">
<xsl:value-of select="acrn:comment('Allow Service VM to reboot
the system since it is the highest priority VM.')" />
<xsl:value-of select="$newline" />
</xsl:if>
@@ -90,7 +90,7 @@

<xsl:template match="vm_type">
<xsl:value-of select="concat('CONFIG_', current())" />
- <xsl:if test="not(acrn:is-sos-vm(current()))">
+ <xsl:if test="not(acrn:is-service-vm(current()))">
<xsl:text>(</xsl:text>
<xsl:value-of select="count(../preceding-sibling::vm[vm_type =
current()]) + 1" />
<xsl:text>)</xsl:text>
@@ -105,7 +105,7 @@

<xsl:template name="cpu_affinity">
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(vm_type)">
+ <xsl:when test="acrn:is-service-vm(vm_type)">
<xsl:value-of select="acrn:initializer('cpu_affinity',
'SERVICE_VM_CONFIG_CPU_AFFINITY')" />
</xsl:when>
<xsl:otherwise>
@@ -140,7 +140,7 @@
<xsl:template match="memory">
<xsl:value-of select="acrn:initializer('memory', '{', true())"
/>
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(../vm_type)">
+ <xsl:when test="acrn:is-service-vm(../vm_type)">
<xsl:value-of select="acrn:initializer('start_hpa',
concat(start_hpa, 'UL'))" />
</xsl:when>
<xsl:otherwise>
@@ -178,8 +178,8 @@
</xsl:if>
<xsl:if test="normalize-space(bootargs)">
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(../vm_type)">
- <xsl:value-of select="acrn:initializer('bootargs',
'SOS_VM_BOOTARGS')" />
+ <xsl:when test="acrn:is-service-vm(../vm_type)">
+ <xsl:value-of select="acrn:initializer('bootargs',
'SERVICE_VM_OS_BOOTARGS')" />
</xsl:when>
<xsl:when test="acrn:is-pre-launched-vm(../vm_type)">
<xsl:value-of select="acrn:initializer('bootargs',
concat('VM', ../@id, '_BOOT_ARGS'))" />
@@ -220,7 +220,7 @@

<xsl:template name="pci_dev_num">
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(vm_type)">
+ <xsl:when test="acrn:is-service-vm(vm_type)">
<xsl:value-of select="acrn:initializer('pci_dev_num',
concat(acrn:pci-dev-num(@id), 'U'))" />
</xsl:when>
<xsl:otherwise>
@@ -233,7 +233,7 @@

<xsl:template name="pci_devs">
<xsl:choose>
- <xsl:when test="acrn:is-sos-vm(vm_type)">
+ <xsl:when test="acrn:is-service-vm(vm_type)">
<xsl:value-of select="acrn:initializer('pci_devs',
'service_vm_pci_devs')" />
</xsl:when>
<xsl:when test="acrn:pci-dev-num(@id)">
diff --git a/misc/config_tools/xforms/vm_configurations.h.xsl
b/misc/config_tools/xforms/vm_configurations.h.xsl
index 4e45a6b41..9bd9f5cd8 100644
--- a/misc/config_tools/xforms/vm_configurations.h.xsl
+++ b/misc/config_tools/xforms/vm_configurations.h.xsl
@@ -40,7 +40,7 @@
<xsl:value-of select="acrn:comment('SERVICE_VM_NUM can only be
0U or 1U; When SERVICE_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;
MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM.')" />
<xsl:value-of select="$newline" />
<xsl:value-of select="acrn:define('PRE_VM_NUM',
count(vm[acrn:is-pre-launched-vm(vm_type)]), 'U')" />
- <xsl:value-of select="acrn:define('SERVICE_VM_NUM',
count(vm[acrn:is-sos-vm(vm_type)]), 'U')" />
+ <xsl:value-of select="acrn:define('SERVICE_VM_NUM',
count(vm[acrn:is-service-vm(vm_type)]), 'U')" />
<xsl:value-of select="acrn:define('MAX_POST_VM_NUM',
count(vm[acrn:is-post-launched-vm(vm_type)]), 'U')" />
<xsl:value-of select="acrn:define('CONFIG_MAX_KATA_VM_NUM',
count(vm[acrn:is-kata-vm(vm_type)]), 'U')" />
</xsl:template>
@@ -62,7 +62,7 @@
<xsl:if test="count(vm[vm_type='SERVICE_VM'])">
<xsl:value-of select="acrn:comment(concat('SERVICE_VM == VM',
vm[vm_type='SERVICE_VM']/@id))" />
<xsl:value-of select="$newline" />
- <xsl:value-of select="acrn:define('SOS_VM_BOOTARGS',
'SOS_ROOTFS SERVICE_VM_CONSOLE SERVICE_VM_IDLE
SERVICE_VM_BOOTARGS_DIFF', '')" />
+ <xsl:value-of select="acrn:define('SERVICE_VM_OS_BOOTARGS',
'SERVICE_VM_OS_ROOTFS SERVICE_VM_CONSOLE SERVICE_VM_IDLE
SERVICE_VM_BOOTARGS_DIFF', '')" />
</xsl:if>
</xsl:template>


Re: [PATCH v2 31/34] ACRN: life_mngr: Rename sos to service_vm

Geoffroy Van Cutsem
 

On Tue, 2021-10-19 at 15:20 +0800, Liu Long wrote:
From: Liu Long <longliu@...>

Rename sos_socket_thread to service_vm_socket_thread
rename listener_fn_to_sos to listener_fn_to_service_vm
rename sos_socket_pid to service_vm_socket_pid
rename PROCESS_RUN_IN_SOS to PROCESS_RUN_IN_SERVICE_VM
rename SHUTDOWN_REQ_FROM_SOS to SHUTDOWN_REQ_FROM_SERVICE_VM

Signed-off-by: Liu Long <longliu@...>
---
misc/services/life_mngr/life_mngr.c | 46 ++++++++++++++-------------
--
1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/misc/services/life_mngr/life_mngr.c
b/misc/services/life_mngr/life_mngr.c
index ef174b325..c431911cd 100644
--- a/misc/services/life_mngr/life_mngr.c
+++ b/misc/services/life_mngr/life_mngr.c
@@ -27,18 +27,18 @@
#define SERVICE_VM_SOCKET_PORT (0x2000U)
#define UOS_SOCKET_PORT (SERVICE_VM_SOCKET_PORT + 1U)

-/* life_mngr process run in SOS or UOS */
+/* life_mngr process run in Service VM or UOS */
enum process_env {
PROCESS_UNKNOWN = 0,
- PROCESS_RUN_IN_SOS,
+ PROCESS_RUN_IN_SERVICE_VM,
PROCESS_RUN_IN_UOS,
};

/* Enumerated shutdown state machine only for UOS thread */
enum shutdown_state {
SHUTDOWN_REQ_WAITING = 0, /* Can receive shutdown cmd in
this state */
- SHUTDOWN_ACK_WAITING, /* Wait acked message from
SOS */
- SHUTDOWN_REQ_FROM_SOS, /* Trigger shutdown by SOS
*/
+ SHUTDOWN_ACK_WAITING, /* Wait acked message from
Service VM */
+ SHUTDOWN_REQ_FROM_SERVICE_VM, /* Trigger shutdown
by Service VM */
SHUTDOWN_REQ_FROM_UOS, /* Trigger shutdown by UOS
*/

};
@@ -160,7 +160,7 @@ static int setup_socket_listen(unsigned short
port)
/* this thread runs on Service VM:
* communiate between lifecycle-mngr and acrn-dm process in Service
VM side
*/
-static void *sos_socket_thread(void *arg)
+static void *service_vm_socket_thread(void *arg)
{
int listen_fd, connect_fd, connect_fd2;
struct sockaddr_in client;
@@ -223,7 +223,7 @@ static void *sos_socket_thread(void *arg)
LOG_WRITE("Send acked message
to acrn-dm VM fail\n");
}
LOG_WRITE("Receive shutdown command
from User VM\r\n");
- ret = system("~/s5_trigger.sh sos");
+ ret = system("~/s5_trigger.sh
service_vm");
LOG_PRINTF("call s5_trigger.sh
ret=0x%x\r\n", ret);
break;
}
@@ -251,7 +251,7 @@ out:
/* this thread runs on User VM:
* User VM wait for message from Service VM
*/
-static void *listener_fn_to_sos(void *arg)
+static void *listener_fn_to_service_vm(void *arg)
{

int ret;
@@ -259,7 +259,7 @@ static void *listener_fn_to_sos(void *arg)
bool shutdown_self = false;
unsigned char buf[BUFF_SIZE];

- /* UOS-server wait for message from SOS */
+ /* UOS-server wait for message from Service VM */
do {
memset(buf, 0, sizeof(buf));
ret = receive_message(tty_dev_fd, buf, sizeof(buf));
@@ -268,11 +268,11 @@ static void *listener_fn_to_sos(void *arg)
}

switch (shutdown_state) {
- /* it can receive shutdown command from SOS */
+ /* it can receive shutdown command from Service VM */
case SHUTDOWN_REQ_WAITING:
- case SHUTDOWN_REQ_FROM_SOS:
+ case SHUTDOWN_REQ_FROM_SERVICE_VM:
if ((ret > 0) && (strncmp(SHUTDOWN_CMD, (const
char *)buf, strlen(SHUTDOWN_CMD)) == 0)) {
- shutdown_state = SHUTDOWN_REQ_FROM_SOS;
+ shutdown_state =
SHUTDOWN_REQ_FROM_SERVICE_VM;
ret = send_message(tty_dev_fd, ACK_CMD,
sizeof(ACK_CMD));
if (ret != 0) {
LOG_WRITE("UOS send acked
message failed!\n");
@@ -283,7 +283,7 @@ static void *listener_fn_to_sos(void *arg)
}
break;

- /* it will try to resend shutdown cmd to sos if there
is no acked message */
+ /* it will try to resend shutdown cmd to service_vm if
there is no acked message */
case SHUTDOWN_ACK_WAITING:
if ((ret > 0) && (strncmp(ACK_CMD, (const char
*)buf, strlen(ACK_CMD)) == 0)) {
LOG_WRITE("received acked message from
Service VM\n");
@@ -295,7 +295,7 @@ static void *listener_fn_to_sos(void *arg)
}
retry--;
} else {
- LOG_PRINTF("Cann't not receive
acked message from SOS, have try %d times\r\n",
+ LOG_PRINTF("Cann't not receive
acked message from Service VM, have try %d times\r\n",
Could we also fix the log message to: "Cannot receive ACKED message
from the Service VM, have tried %d times\r\n"?

TRY_SEND_CNT);
shutdown_state =
SHUTDOWN_REQ_WAITING;
retry = TRY_SEND_CNT;
@@ -372,7 +372,7 @@ static void *listener_fn_to_operator(void *arg)
LOG_WRITE("Send acked message fail\n");
}

- LOG_WRITE("send shutdown message to sos\r\n");
+ LOG_WRITE("send shutdown message to
service_vm\r\n");
Change to "Sent shutdown message to Service VM"

/* send shutdown command to the Servcie VM */
Typo: Servcie VM -> Service VM

ret = send_message(tty_dev_fd, SHUTDOWN_CMD,
sizeof(SHUTDOWN_CMD));
if (ret != 0) {
@@ -396,7 +396,7 @@ int main(int argc, char *argv[])
int ret = 0;
char *devname_uos = "";
enum process_env env = PROCESS_UNKNOWN;
- pthread_t sos_socket_pid;
+ pthread_t service_vm_socket_pid;
/* User VM wait for shutdown from Service VM */
pthread_t uos_thread_pid_1;
/* User VM wait for shutdown from other process */
@@ -409,7 +409,7 @@ int main(int argc, char *argv[])
}

if (argc <= 2) {
- LOG_WRITE("Too few options. Example: [./life_mngr uos
/dev/ttyS1] or ./life_mngr sos /dev/ttyS1]\n");
+ LOG_WRITE("Too few options. Example: [./life_mngr uos
/dev/ttyS1] or ./life_mngr service_vm /dev/ttyS1]\n");
fclose(log_fd);
return -EINVAL;
}
@@ -427,20 +427,20 @@ int main(int argc, char *argv[])
return -EINVAL;
}

- ret = pthread_create(&uos_thread_pid_1, NULL,
listener_fn_to_sos, NULL);
+ ret = pthread_create(&uos_thread_pid_1, NULL,
listener_fn_to_service_vm, NULL);
ret = pthread_create(&uos_thread_pid_2, NULL,
listener_fn_to_operator, NULL);

- } else if (strncmp("sos", argv[1], NODE_SIZE) == 0) {
- env = PROCESS_RUN_IN_SOS;
- ret = pthread_create(&sos_socket_pid, NULL,
sos_socket_thread, NULL);
+ } else if (strncmp("service_vm", argv[1], NODE_SIZE) == 0) {
+ env = PROCESS_RUN_IN_SERVICE_VM;
+ ret = pthread_create(&service_vm_socket_pid, NULL,
service_vm_socket_thread, NULL);
} else {
- LOG_WRITE("Invalid param. Example: [./life_mngr uos
/dev/ttyS1] or ./life_mngr sos /dev/ttyS1]\n");
+ LOG_WRITE("Invalid param. Example: [./life_mngr uos
/dev/ttyS1] or ./life_mngr service_vm /dev/ttyS1]\n");
fclose(log_fd);
return -EINVAL;
}

- if (env == PROCESS_RUN_IN_SOS) {
- pthread_join(sos_socket_pid, NULL);
+ if (env == PROCESS_RUN_IN_SERVICE_VM) {
+ pthread_join(service_vm_socket_pid, NULL);
} else if (env == PROCESS_RUN_IN_UOS) {
pthread_join(uos_thread_pid_1, NULL);
pthread_join(uos_thread_pid_2, NULL);

3701 - 3720 of 37344