
Re: [PATCH] misc: modify the logic of generate HV_RAM_START

chenli.wei
 

On 6/2/2022 4:08 PM, Junjie Mao wrote:
Chenli Wei <chenli.wei@...> writes:

From: Chenli Wei <chenli.wei@...>

The current code assumes that there must be an HV_RAM_START element in
the scenario and generates one if the user has not set it; the default
value of HV_RAM_START is 0x00400000, which causes an overlap issue.

This patch removes the requirement for an HV_RAM_START element,
calculates HV_RAM_SIZE, and finds an e820 region to run ACRN in; that
region's start address becomes HV_RAM_START.

It is still valid for the user to set HV_RAM_START in the XMLs.

Signed-off-by: Chenli Wei <chenli.wei@...>
---
.../config_tools/hv_config/board_defconfig.py | 50 +------------------
misc/config_tools/library/common.py | 2 +-
misc/config_tools/schema/config.xsd | 2 +-
misc/config_tools/static_allocators/hv_ram.py | 49 ++++++++++++++++++
misc/config_tools/xforms/config_common.xsl | 5 ++
5 files changed, 57 insertions(+), 51 deletions(-)
create mode 100644 misc/config_tools/static_allocators/hv_ram.py

diff --git a/misc/config_tools/hv_config/board_defconfig.py b/misc/config_tools/hv_config/board_defconfig.py
index 5d47533af..65a198486 100644
--- a/misc/config_tools/hv_config/board_defconfig.py
+++ b/misc/config_tools/hv_config/board_defconfig.py
@@ -13,8 +13,6 @@ import common
DESC = """# Board defconfig generated by acrn-config tool
"""
-HV_RAM_SIZE_MAX = 0x40000000
-
MEM_ALIGN = 2 * common.SIZE_M
@@ -54,60 +52,14 @@ def get_serial_type():
def get_memory(hv_info, config):
- # this dictonary mapped with 'address start':'mem range'
- ram_range = {}
-
- post_launched_vm_num = 0
- for id in common.VM_TYPES:
- if common.VM_TYPES[id] in scenario_cfg_lib.VM_DB and \
- scenario_cfg_lib.VM_DB[common.VM_TYPES[id]]["load_type"] == "POST_LAUNCHED_VM":
- post_launched_vm_num += 1
- hv_ram_size = common.HV_BASE_RAM_SIZE + common.POST_LAUNCHED_VM_RAM_SIZE * post_launched_vm_num
-
- ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
- total_shm_size = 0
- if ivshmem_enabled == 'y':
- raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
- for raw_shm in raw_shmem_regions:
- if raw_shm is None or raw_shm.strip() == '':
- continue
- raw_shm_splited = raw_shm.split(',')
- if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
- and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
- try:
- size = raw_shm_splited[1].strip()
- int_size = int(size) * 0x100000
- total_shm_size += int_size
- except Exception as e:
- print(e)
-
- hv_ram_size += 2 * max(total_shm_size, 0x200000)
- if hv_ram_size > HV_RAM_SIZE_MAX:
- common.print_red("requested RAM size should be smaller then {}".format(HV_RAM_SIZE_MAX), err=True)
- err_dic["board config: total vm number error"] = \
- "requested RAM size should be smaller then {}".format(HV_RAM_SIZE_MAX)
- return err_dic
-
- # reseve 16M memory for hv sbuf, ramoops, etc.
- reserved_ram = 0x1000000
# We recommend to put hv ram start address high than 0x10000000 to
# reduce memory conflict with GRUB/Service VM Kernel.
hv_start_offset = 0x10000000
- total_size = reserved_ram + hv_ram_size
for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
if hv_start_offset <= start_addr < 0x80000000:
del board_cfg_lib.USED_RAM_RANGE[start_addr]
- ram_range = board_cfg_lib.get_ram_range()
- avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
- hv_start_addr = int(avl_start_addr, 16) + int(hex(reserved_ram), 16)
- hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
- board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
-
- if not hv_info.mem.hv_ram_start:
- print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
- else:
- print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
+ print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
print("CONFIG_IVSHMEM_ENABLED={}".format(hv_info.mem.ivshmem_enable), file=config)
diff --git a/misc/config_tools/library/common.py b/misc/config_tools/library/common.py
index 7ea12fa9d..24ece4bf3 100644
--- a/misc/config_tools/library/common.py
+++ b/misc/config_tools/library/common.py
@@ -45,7 +45,7 @@ MAX_VM_NUM = 16
MAX_VUART_NUM = 8
HV_BASE_RAM_SIZE = 0x1400000
-POST_LAUNCHED_VM_RAM_SIZE = 0x1000000
+VM_RAM_SIZE = 0x2800000
class MultiItem():
diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd
index 07777522e..24a15877f 100644
--- a/misc/config_tools/schema/config.xsd
+++ b/misc/config_tools/schema/config.xsd
@@ -129,7 +129,7 @@
<xs:documentation>Specify the size of the memory stack in bytes for each physical CPU. For example, if you specify 8 kilobytes, each CPU will get its own 8-kilobyte stack.</xs:documentation>
</xs:annotation>
</xs:element>
- <xs:element name="HV_RAM_START" type="HexFormat" default="0x00400000">
+ <xs:element name="HV_RAM_START" type="HexFormat" default="0x00400000" minOccurs="0">
The default value seems to be useless here as you have logic in the
static allocator to determine one if it is not given.
Yes, we could remove the default="0x00400000", but we need the minOccurs="0".

There was a schema error when we removed both the "default" and "minOccurs" attributes.
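
As a quick sanity check that the element really is optional now, the updated schema can be exercised with lxml against a scenario that omits HV_RAM_START. This is only an illustrative sketch; the file paths are placeholders, not part of the patch:

from lxml import etree

# Load the patched schema and a scenario XML that has no <HV_RAM_START> element.
schema = etree.XMLSchema(etree.parse("misc/config_tools/schema/config.xsd"))
scenario = etree.parse("scenario.xml")

# With minOccurs="0" the validation is expected to pass; the static allocator
# in hv_ram.py then fills in HV_RAM_START at build time.
print(schema.validate(scenario))
print(schema.error_log)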


Re: [PATCH] misc: modify the logic of generate HV_RAM_START

Junjie Mao
 

Chenli Wei <chenli.wei@...> writes:

From: Chenli Wei <chenli.wei@...>

The current code assumes that there must be an HV_RAM_START element in
the scenario and generates one if the user has not set it; the default
value of HV_RAM_START is 0x00400000, which causes an overlap issue.

This patch removes the requirement for an HV_RAM_START element,
calculates HV_RAM_SIZE, and finds an e820 region to run ACRN in; that
region's start address becomes HV_RAM_START.

It is still valid for the user to set HV_RAM_START in the XMLs.

Signed-off-by: Chenli Wei <chenli.wei@...>
---
.../config_tools/hv_config/board_defconfig.py | 50 +------------------
misc/config_tools/library/common.py | 2 +-
misc/config_tools/schema/config.xsd | 2 +-
misc/config_tools/static_allocators/hv_ram.py | 49 ++++++++++++++++++
misc/config_tools/xforms/config_common.xsl | 5 ++
5 files changed, 57 insertions(+), 51 deletions(-)
create mode 100644 misc/config_tools/static_allocators/hv_ram.py

diff --git a/misc/config_tools/hv_config/board_defconfig.py b/misc/config_tools/hv_config/board_defconfig.py
index 5d47533af..65a198486 100644
--- a/misc/config_tools/hv_config/board_defconfig.py
+++ b/misc/config_tools/hv_config/board_defconfig.py
@@ -13,8 +13,6 @@ import common
DESC = """# Board defconfig generated by acrn-config tool
"""

-HV_RAM_SIZE_MAX = 0x40000000
-
MEM_ALIGN = 2 * common.SIZE_M


@@ -54,60 +52,14 @@ def get_serial_type():

def get_memory(hv_info, config):

- # this dictonary mapped with 'address start':'mem range'
- ram_range = {}
-
- post_launched_vm_num = 0
- for id in common.VM_TYPES:
- if common.VM_TYPES[id] in scenario_cfg_lib.VM_DB and \
- scenario_cfg_lib.VM_DB[common.VM_TYPES[id]]["load_type"] == "POST_LAUNCHED_VM":
- post_launched_vm_num += 1
- hv_ram_size = common.HV_BASE_RAM_SIZE + common.POST_LAUNCHED_VM_RAM_SIZE * post_launched_vm_num
-
- ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
- total_shm_size = 0
- if ivshmem_enabled == 'y':
- raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
- for raw_shm in raw_shmem_regions:
- if raw_shm is None or raw_shm.strip() == '':
- continue
- raw_shm_splited = raw_shm.split(',')
- if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
- and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
- try:
- size = raw_shm_splited[1].strip()
- int_size = int(size) * 0x100000
- total_shm_size += int_size
- except Exception as e:
- print(e)
-
- hv_ram_size += 2 * max(total_shm_size, 0x200000)
- if hv_ram_size > HV_RAM_SIZE_MAX:
- common.print_red("requested RAM size should be smaller then {}".format(HV_RAM_SIZE_MAX), err=True)
- err_dic["board config: total vm number error"] = \
- "requested RAM size should be smaller then {}".format(HV_RAM_SIZE_MAX)
- return err_dic
-
- # reseve 16M memory for hv sbuf, ramoops, etc.
- reserved_ram = 0x1000000
# We recommend to put hv ram start address high than 0x10000000 to
# reduce memory conflict with GRUB/Service VM Kernel.
hv_start_offset = 0x10000000
- total_size = reserved_ram + hv_ram_size
for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
if hv_start_offset <= start_addr < 0x80000000:
del board_cfg_lib.USED_RAM_RANGE[start_addr]
- ram_range = board_cfg_lib.get_ram_range()
- avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
- hv_start_addr = int(avl_start_addr, 16) + int(hex(reserved_ram), 16)
- hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
- board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
-
- if not hv_info.mem.hv_ram_start:
- print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
- else:
- print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)

+ print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
print("CONFIG_IVSHMEM_ENABLED={}".format(hv_info.mem.ivshmem_enable), file=config)

diff --git a/misc/config_tools/library/common.py b/misc/config_tools/library/common.py
index 7ea12fa9d..24ece4bf3 100644
--- a/misc/config_tools/library/common.py
+++ b/misc/config_tools/library/common.py
@@ -45,7 +45,7 @@ MAX_VM_NUM = 16
MAX_VUART_NUM = 8

HV_BASE_RAM_SIZE = 0x1400000
-POST_LAUNCHED_VM_RAM_SIZE = 0x1000000
+VM_RAM_SIZE = 0x2800000

class MultiItem():

diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd
index 07777522e..24a15877f 100644
--- a/misc/config_tools/schema/config.xsd
+++ b/misc/config_tools/schema/config.xsd
@@ -129,7 +129,7 @@
<xs:documentation>Specify the size of the memory stack in bytes for each physical CPU. For example, if you specify 8 kilobytes, each CPU will get its own 8-kilobyte stack.</xs:documentation>
</xs:annotation>
</xs:element>
- <xs:element name="HV_RAM_START" type="HexFormat" default="0x00400000">
+ <xs:element name="HV_RAM_START" type="HexFormat" default="0x00400000" minOccurs="0">
The default value seems to be useless here as you have logic in the
static allocator to determine one if it is not given.

--
Best Regards
Junjie Mao

<xs:annotation acrn:views="">
<xs:documentation>The 2MB-aligned starting physical address of the RAM region used by the hypervisor.</xs:documentation>
</xs:annotation>
diff --git a/misc/config_tools/static_allocators/hv_ram.py b/misc/config_tools/static_allocators/hv_ram.py
new file mode 100644
index 000000000..0e85cc082
--- /dev/null
+++ b/misc/config_tools/static_allocators/hv_ram.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
+import common, board_cfg_lib, scenario_cfg_lib
+
+HV_RAM_SIZE_MAX = 0x40000000
+
+MEM_ALIGN = 2 * common.SIZE_M
+
+def fn(board_etree, scenario_etree, allocation_etree):
+ # this dictonary mapped with 'address start':'mem range'
+ ram_range = {}
+
+ vm_num = 0
+ vm_list = scenario_etree.xpath("//vm")
+ if vm_list is not None:
+ vm_num = len(vm_list)
+ hv_ram_size = common.HV_BASE_RAM_SIZE + common.VM_RAM_SIZE * vm_num
+ ivshmem_list = scenario_etree.xpath("//IVSHMEM_SIZE/text()")
+ total_shm_size = 0
+ for ram_size in ivshmem_list:
+ try:
+ total_shm_size += int(ram_size) * 0x100000
+ except Exception as e:
+ print(e)
+ hv_ram_size += 2 * max(total_shm_size, 0x200000)
+ assert(hv_ram_size <= HV_RAM_SIZE_MAX)
+ # reseve 16M memory for hv sbuf, ramoops, etc.
+ reserved_ram = 0x1000000
+ # We recommend to put hv ram start address high than 0x10000000 to
+ # reduce memory conflict with GRUB/SOS Kernel.
+ hv_start_offset = 0x10000000
+ total_size = reserved_ram + hv_ram_size
+ for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
+ if hv_start_offset <= start_addr < 0x80000000:
+ del board_cfg_lib.USED_RAM_RANGE[start_addr]
+ ram_range = board_cfg_lib.get_ram_range()
+ avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
+ hv_start_addr = int(avl_start_addr, 16) + int(hex(reserved_ram), 16)
+ hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
+ board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
+ common.append_node("/acrn-config/hv/MEMORY/HV_RAM_START", hex(hv_start_addr), allocation_etree)
+ common.append_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE", hex(hv_ram_size), allocation_etree)
diff --git a/misc/config_tools/xforms/config_common.xsl b/misc/config_tools/xforms/config_common.xsl
index 3c951ced3..6af8c2c31 100644
--- a/misc/config_tools/xforms/config_common.xsl
+++ b/misc/config_tools/xforms/config_common.xsl
@@ -155,6 +155,11 @@
<xsl:with-param name="default" select="//allocation-data/acrn-config/hv/MEMORY/HV_RAM_START" />
</xsl:call-template>

+ <xsl:call-template name="integer-by-key">
+ <xsl:with-param name="key" select="'HV_RAM_SIZE'" />
+ <xsl:with-param name="default" select="//allocation-data/acrn-config/hv/MEMORY/HV_RAM_SIZE" />
+ </xsl:call-template>
+
<xsl:call-template name="integer-by-key">
<xsl:with-param name="key" select="'STACK_SIZE'" />
</xsl:call-template>


[PATCH] misc: modify the logic of generate HV_RAM_START

Chenli Wei
 

From: Chenli Wei <chenli.wei@...>

The current code assumes that there must be an HV_RAM_START element in
the scenario and generates one if the user has not set it; the default
value of HV_RAM_START is 0x00400000, which causes an overlap issue.

This patch removes the requirement for an HV_RAM_START element,
calculates HV_RAM_SIZE, and finds an e820 region to run ACRN in; that
region's start address becomes HV_RAM_START.

It is still valid for the user to set HV_RAM_START in the XMLs.

Signed-off-by: Chenli Wei <chenli.wei@...>
---
.../config_tools/hv_config/board_defconfig.py | 50 +------------------
misc/config_tools/library/common.py | 2 +-
misc/config_tools/schema/config.xsd | 2 +-
misc/config_tools/static_allocators/hv_ram.py | 49 ++++++++++++++++++
misc/config_tools/xforms/config_common.xsl | 5 ++
5 files changed, 57 insertions(+), 51 deletions(-)
create mode 100644 misc/config_tools/static_allocators/hv_ram.py

diff --git a/misc/config_tools/hv_config/board_defconfig.py b/misc/config_tools/hv_config/board_defconfig.py
index 5d47533af..65a198486 100644
--- a/misc/config_tools/hv_config/board_defconfig.py
+++ b/misc/config_tools/hv_config/board_defconfig.py
@@ -13,8 +13,6 @@ import common
DESC = """# Board defconfig generated by acrn-config tool
"""

-HV_RAM_SIZE_MAX = 0x40000000
-
MEM_ALIGN = 2 * common.SIZE_M


@@ -54,60 +52,14 @@ def get_serial_type():

def get_memory(hv_info, config):

- # this dictonary mapped with 'address start':'mem range'
- ram_range = {}
-
- post_launched_vm_num = 0
- for id in common.VM_TYPES:
- if common.VM_TYPES[id] in scenario_cfg_lib.VM_DB and \
- scenario_cfg_lib.VM_DB[common.VM_TYPES[id]]["load_type"] == "POST_LAUNCHED_VM":
- post_launched_vm_num += 1
- hv_ram_size = common.HV_BASE_RAM_SIZE + common.POST_LAUNCHED_VM_RAM_SIZE * post_launched_vm_num
-
- ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
- total_shm_size = 0
- if ivshmem_enabled == 'y':
- raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
- for raw_shm in raw_shmem_regions:
- if raw_shm is None or raw_shm.strip() == '':
- continue
- raw_shm_splited = raw_shm.split(',')
- if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
- and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
- try:
- size = raw_shm_splited[1].strip()
- int_size = int(size) * 0x100000
- total_shm_size += int_size
- except Exception as e:
- print(e)
-
- hv_ram_size += 2 * max(total_shm_size, 0x200000)
- if hv_ram_size > HV_RAM_SIZE_MAX:
- common.print_red("requested RAM size should be smaller then {}".format(HV_RAM_SIZE_MAX), err=True)
- err_dic["board config: total vm number error"] = \
- "requested RAM size should be smaller then {}".format(HV_RAM_SIZE_MAX)
- return err_dic
-
- # reseve 16M memory for hv sbuf, ramoops, etc.
- reserved_ram = 0x1000000
# We recommend to put hv ram start address high than 0x10000000 to
# reduce memory conflict with GRUB/Service VM Kernel.
hv_start_offset = 0x10000000
- total_size = reserved_ram + hv_ram_size
for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
if hv_start_offset <= start_addr < 0x80000000:
del board_cfg_lib.USED_RAM_RANGE[start_addr]
- ram_range = board_cfg_lib.get_ram_range()
- avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
- hv_start_addr = int(avl_start_addr, 16) + int(hex(reserved_ram), 16)
- hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
- board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
-
- if not hv_info.mem.hv_ram_start:
- print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
- else:
- print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)

+ print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
print("CONFIG_IVSHMEM_ENABLED={}".format(hv_info.mem.ivshmem_enable), file=config)

diff --git a/misc/config_tools/library/common.py b/misc/config_tools/library/common.py
index 7ea12fa9d..24ece4bf3 100644
--- a/misc/config_tools/library/common.py
+++ b/misc/config_tools/library/common.py
@@ -45,7 +45,7 @@ MAX_VM_NUM = 16
MAX_VUART_NUM = 8

HV_BASE_RAM_SIZE = 0x1400000
-POST_LAUNCHED_VM_RAM_SIZE = 0x1000000
+VM_RAM_SIZE = 0x2800000

class MultiItem():

diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd
index 07777522e..24a15877f 100644
--- a/misc/config_tools/schema/config.xsd
+++ b/misc/config_tools/schema/config.xsd
@@ -129,7 +129,7 @@
<xs:documentation>Specify the size of the memory stack in bytes for each physical CPU. For example, if you specify 8 kilobytes, each CPU will get its own 8-kilobyte stack.</xs:documentation>
</xs:annotation>
</xs:element>
- <xs:element name="HV_RAM_START" type="HexFormat" default="0x00400000">
+ <xs:element name="HV_RAM_START" type="HexFormat" default="0x00400000" minOccurs="0">
<xs:annotation acrn:views="">
<xs:documentation>The 2MB-aligned starting physical address of the RAM region used by the hypervisor.</xs:documentation>
</xs:annotation>
diff --git a/misc/config_tools/static_allocators/hv_ram.py b/misc/config_tools/static_allocators/hv_ram.py
new file mode 100644
index 000000000..0e85cc082
--- /dev/null
+++ b/misc/config_tools/static_allocators/hv_ram.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
+import common, board_cfg_lib, scenario_cfg_lib
+
+HV_RAM_SIZE_MAX = 0x40000000
+
+MEM_ALIGN = 2 * common.SIZE_M
+
+def fn(board_etree, scenario_etree, allocation_etree):
+ # this dictonary mapped with 'address start':'mem range'
+ ram_range = {}
+
+ vm_num = 0
+ vm_list = scenario_etree.xpath("//vm")
+ if vm_list is not None:
+ vm_num = len(vm_list)
+ hv_ram_size = common.HV_BASE_RAM_SIZE + common.VM_RAM_SIZE * vm_num
+ ivshmem_list = scenario_etree.xpath("//IVSHMEM_SIZE/text()")
+ total_shm_size = 0
+ for ram_size in ivshmem_list:
+ try:
+ total_shm_size += int(ram_size) * 0x100000
+ except Exception as e:
+ print(e)
+ hv_ram_size += 2 * max(total_shm_size, 0x200000)
+ assert(hv_ram_size <= HV_RAM_SIZE_MAX)
+ # reseve 16M memory for hv sbuf, ramoops, etc.
+ reserved_ram = 0x1000000
+ # We recommend to put hv ram start address high than 0x10000000 to
+ # reduce memory conflict with GRUB/SOS Kernel.
+ hv_start_offset = 0x10000000
+ total_size = reserved_ram + hv_ram_size
+ for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
+ if hv_start_offset <= start_addr < 0x80000000:
+ del board_cfg_lib.USED_RAM_RANGE[start_addr]
+ ram_range = board_cfg_lib.get_ram_range()
+ avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
+ hv_start_addr = int(avl_start_addr, 16) + int(hex(reserved_ram), 16)
+ hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
+ board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
+ common.append_node("/acrn-config/hv/MEMORY/HV_RAM_START", hex(hv_start_addr), allocation_etree)
+ common.append_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE", hex(hv_ram_size), allocation_etree)
diff --git a/misc/config_tools/xforms/config_common.xsl b/misc/config_tools/xforms/config_common.xsl
index 3c951ced3..6af8c2c31 100644
--- a/misc/config_tools/xforms/config_common.xsl
+++ b/misc/config_tools/xforms/config_common.xsl
@@ -155,6 +155,11 @@
<xsl:with-param name="default" select="//allocation-data/acrn-config/hv/MEMORY/HV_RAM_START" />
</xsl:call-template>

+ <xsl:call-template name="integer-by-key">
+ <xsl:with-param name="key" select="'HV_RAM_SIZE'" />
+ <xsl:with-param name="default" select="//allocation-data/acrn-config/hv/MEMORY/HV_RAM_SIZE" />
+ </xsl:call-template>
+
<xsl:call-template name="integer-by-key">
<xsl:with-param name="key" select="'STACK_SIZE'" />
</xsl:call-template>
--
2.25.1
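
For readers following the numbers, below is a small worked example of the size computation done by the new hv_ram.py above. The constants are the ones defined in common.py by this patch; the VM count and ivshmem size are made up for illustration:

# Illustrative recomputation of the hypervisor RAM size for a scenario with
# 3 VMs and a single 2 MB IVSHMEM region (example numbers, not from the patch).
HV_BASE_RAM_SIZE = 0x1400000             # 20 MB, from common.py
VM_RAM_SIZE      = 0x2800000             # 40 MB per VM, from common.py

vm_num         = 3
total_shm_size = 2 * 0x100000            # one 2 MB IVSHMEM region

hv_ram_size  = HV_BASE_RAM_SIZE + VM_RAM_SIZE * vm_num   # 0x8c00000 (140 MB)
hv_ram_size += 2 * max(total_shm_size, 0x200000)          # +4 MB -> 0x9000000 (144 MB)

reserved_ram = 0x1000000                 # 16 MB reserved for sbuf, ramoops, etc.
total_size   = reserved_ram + hv_ram_size                 # 0xa000000 (160 MB)

# find_avl_memory() then searches the e820 map for a free range of total_size
# bytes above 0x10000000; HV_RAM_START is the 2 MB-aligned address just past
# the 16 MB reserved at the front of that range.
print(hex(hv_ram_size), hex(total_size))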


Re: [PATCH v2] dm: vdisplay: terminate acrn-dm process when SDL init failed

Yu Wang
 

Acked-by: Wang, Yu1 <yu1.wang@...>

On Thu, Jun 02, 2022 at 01:23:56PM +0800, peng.p.sun@... wrote:
From: Sun Peng <peng.p.sun@...>

The virtual display is a component based on the native window system.
This feature depends on a physical monitor being connected and the
graphics driver in the SOS running correctly. If these dependencies
fail, it is a fatal error for the virtual display. We have to terminate
the device model to let the user fix the runtime environment issue for
graphics.

Tracked-On: #7672
Signed-off-by: Sun Peng <peng.p.sun@...>
---
devicemodel/core/main.c | 5 ++++-
devicemodel/hw/vdisplay_sdl.c | 6 +++++-
devicemodel/include/vdisplay.h | 2 +-
3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/devicemodel/core/main.c b/devicemodel/core/main.c
index 299a38dee..0e3d77bfa 100644
--- a/devicemodel/core/main.c
+++ b/devicemodel/core/main.c
@@ -1045,7 +1045,10 @@ main(int argc, char *argv[])
}

if (gfx_ui) {
- gfx_ui_init();
+ if(gfx_ui_init()) {
+ pr_err("gfx ui initialize failed\n");
+ exit(1);
+ }
}

for (;;) {
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c
index dfad4c6a8..1c3eb1efa 100644
--- a/devicemodel/hw/vdisplay_sdl.c
+++ b/devicemodel/hw/vdisplay_sdl.c
@@ -1128,7 +1128,7 @@ vdpy_deinit(int handle)
return 0;
}

-void
+int
gfx_ui_init()
{
SDL_SysWMinfo info;
@@ -1141,6 +1141,7 @@ gfx_ui_init()

if (SDL_Init(SDL_INIT_VIDEO)) {
pr_err("Failed to Init SDL2 system");
+ return -1;
}

SDL_GetDisplayBounds(0, &disp_rect);
@@ -1150,6 +1151,7 @@ gfx_ui_init()
pr_err("Too small resolutions. Please check the "
" graphics system\n");
SDL_Quit();
+ return -1;
}

SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1");
@@ -1170,6 +1172,8 @@ gfx_ui_init()
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);

vdpy.s.is_ui_realized = true;
+
+ return 0;
}

void
diff --git a/devicemodel/include/vdisplay.h b/devicemodel/include/vdisplay.h
index 510e3e967..99506469b 100644
--- a/devicemodel/include/vdisplay.h
+++ b/devicemodel/include/vdisplay.h
@@ -84,7 +84,7 @@ struct cursor {
};

int vdpy_parse_cmd_option(const char *opts);
-void gfx_ui_init();
+int gfx_ui_init();
int vdpy_init();
void vdpy_get_display_info(int handle, struct display_info *info);
void vdpy_surface_set(int handle, struct surface *surf);
--
2.25.1


[PATCH v2] dm: vdisplay: terminate acrn-dm process when SDL init failed

Sun, Peng
 

From: Sun Peng <peng.p.sun@...>

The virtual display is a component based on the native window system.
This feature depends on a physical monitor being connected and the
graphics driver in the SOS running correctly. If these dependencies
fail, it is a fatal error for the virtual display. We have to terminate
the device model to let the user fix the runtime environment issue for
graphics.

Tracked-On: #7672
Signed-off-by: Sun Peng <peng.p.sun@...>
---
devicemodel/core/main.c | 5 ++++-
devicemodel/hw/vdisplay_sdl.c | 6 +++++-
devicemodel/include/vdisplay.h | 2 +-
3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/devicemodel/core/main.c b/devicemodel/core/main.c
index 299a38dee..0e3d77bfa 100644
--- a/devicemodel/core/main.c
+++ b/devicemodel/core/main.c
@@ -1045,7 +1045,10 @@ main(int argc, char *argv[])
}

if (gfx_ui) {
- gfx_ui_init();
+ if(gfx_ui_init()) {
+ pr_err("gfx ui initialize failed\n");
+ exit(1);
+ }
}

for (;;) {
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c
index dfad4c6a8..1c3eb1efa 100644
--- a/devicemodel/hw/vdisplay_sdl.c
+++ b/devicemodel/hw/vdisplay_sdl.c
@@ -1128,7 +1128,7 @@ vdpy_deinit(int handle)
return 0;
}

-void
+int
gfx_ui_init()
{
SDL_SysWMinfo info;
@@ -1141,6 +1141,7 @@ gfx_ui_init()

if (SDL_Init(SDL_INIT_VIDEO)) {
pr_err("Failed to Init SDL2 system");
+ return -1;
}

SDL_GetDisplayBounds(0, &disp_rect);
@@ -1150,6 +1151,7 @@ gfx_ui_init()
pr_err("Too small resolutions. Please check the "
" graphics system\n");
SDL_Quit();
+ return -1;
}

SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1");
@@ -1170,6 +1172,8 @@ gfx_ui_init()
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);

vdpy.s.is_ui_realized = true;
+
+ return 0;
}

void
diff --git a/devicemodel/include/vdisplay.h b/devicemodel/include/vdisplay.h
index 510e3e967..99506469b 100644
--- a/devicemodel/include/vdisplay.h
+++ b/devicemodel/include/vdisplay.h
@@ -84,7 +84,7 @@ struct cursor {
};

int vdpy_parse_cmd_option(const char *opts);
-void gfx_ui_init();
+int gfx_ui_init();
int vdpy_init();
void vdpy_get_display_info(int handle, struct display_info *info);
void vdpy_surface_set(int handle, struct surface *surf);
--
2.25.1


Re: [PATCH v1] dm: vdisplay: terminate acrn-dm process when SDL init failed

Yu Wang
 

On Thu, Jun 02, 2022 at 12:09:36PM +0800, peng.p.sun@... wrote:
From: Sun Peng <peng.p.sun@...>

The virtual display is a component based on the native window system.
This feature depends on a physical monitor being connected and the
graphics driver in the SOS running correctly. If these dependencies
fail, it is a fatal error for the virtual display. We have to terminate
the device model to let the user fix the runtime environment issue for
graphics.

Tracked-On: #7672
Signed-off-by: Sun Peng <peng.p.sun@...>
---
devicemodel/hw/vdisplay_sdl.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c
index dfad4c6a8..e6127f846 100644
--- a/devicemodel/hw/vdisplay_sdl.c
+++ b/devicemodel/hw/vdisplay_sdl.c
@@ -1141,6 +1141,7 @@ gfx_ui_init()

if (SDL_Init(SDL_INIT_VIDEO)) {
pr_err("Failed to Init SDL2 system");
+ exit(1);
Please return -1 and do exit(1) in main() with an error log.

}

SDL_GetDisplayBounds(0, &disp_rect);
--
2.25.1


[PATCH v1] dm: vdisplay: terminate acrn-dm process when SDL init failed

Sun, Peng
 

From: Sun Peng <peng.p.sun@...>

The virtual display is a component based on the native window system.
This feature depends on a physical monitor being connected and the
graphics driver in the SOS running correctly. If these dependencies
fail, it is a fatal error for the virtual display. We have to terminate
the device model to let the user fix the runtime environment issue for
graphics.

Tracked-On: #7672
Signed-off-by: Sun Peng <peng.p.sun@...>
---
devicemodel/hw/vdisplay_sdl.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c
index dfad4c6a8..e6127f846 100644
--- a/devicemodel/hw/vdisplay_sdl.c
+++ b/devicemodel/hw/vdisplay_sdl.c
@@ -1141,6 +1141,7 @@ gfx_ui_init()

if (SDL_Init(SDL_INIT_VIDEO)) {
pr_err("Failed to Init SDL2 system");
+ exit(1);
}

SDL_GetDisplayBounds(0, &disp_rect);
--
2.25.1


Re: [PATCH] misc: Limit IVSHMEM region name to 27 characters

Liu, Yifan1
 

Sure. Will do.

-----Original Message-----
From: Mao, Junjie <junjie.mao@...>
Sent: Wednesday, June 1, 2022 5:00 PM
To: Liu, Yifan1 <yifan1.liu@...>
Cc: acrn-dev@...
Subject: Re: [acrn-dev] [PATCH] misc: Limit IVSHMEM region name to 27 characters

"Liu, Yifan1" <yifan1.liu@...> writes:

From: Yifan Liu <yifan1.liu@...>

The current IVSHMEM region name does not have a size limit. This patch limits
it to 27 characters so that the prefix ("hv:/" or "dm:/") plus the region
name can fit into an array of 32 characters.

Signed-off-by: Yifan Liu <yifan1.liu@...>
---
misc/config_tools/schema/types.xsd | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index e4e7eefc0..9bfc62536 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -249,10 +249,10 @@ Read more about the available scheduling options in :ref:`cpu_sharing`.</xs:docu
</xs:annotation>
<xs:simpleType>
<xs:annotation>
- <xs:documentation>A string with no spaces.</xs:documentation>
+ <xs:documentation>A string with up to 27 characters of digits, letters and ``_``.</xs:documentation>
You may also want to update the placeholder of the corresponding
widget. IVSHMEM regions are configured using customized widgets. Thus,
the placeholder, i.e. the string in grey in the widget when users leave
it empty, is hardcoded in the vue file.

--
Best Regards
Junjie Mao

</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="\w+" />
+ <xs:pattern value="\w{1,27}" />
</xs:restriction>
</xs:simpleType>
</xs:element>


Re: [PATCH] misc: Limit IVSHMEM region name to 27 characters

Junjie Mao
 

"Liu, Yifan1" <yifan1.liu@...> writes:

From: Yifan Liu <yifan1.liu@...>

The current IVSHMEM region name does not have a size limit. This patch limits
it to 27 characters so that the prefix ("hv:/" or "dm:/") plus the region
name can fit into an array of 32 characters.

Signed-off-by: Yifan Liu <yifan1.liu@...>
---
misc/config_tools/schema/types.xsd | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index e4e7eefc0..9bfc62536 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -249,10 +249,10 @@ Read more about the available scheduling options in :ref:`cpu_sharing`.</xs:docu
</xs:annotation>
<xs:simpleType>
<xs:annotation>
- <xs:documentation>A string with no spaces.</xs:documentation>
+ <xs:documentation>A string with up to 27 characters of digits, letters and ``_``.</xs:documentation>
You may also want to update the placeholder of the corresponding
widget. IVSHMEM regions are configured using customized widgets. Thus,
the placeholder, i.e. the string in grey in the widget when users leave
it empty, is hardcoded in the vue file.

--
Best Regards
Junjie Mao

</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="\w+" />
+ <xs:pattern value="\w{1,27}" />
</xs:restriction>
</xs:simpleType>
</xs:element>


Re: [PATCH] misc: Limit IVSHMEM region name to 27 characters

Junjie Mao
 

"Liu, Yifan1" <yifan1.liu@...> writes:

From: Yifan Liu <yifan1.liu@...>

The current IVSHMEM region name does not have a size limit. This patch limits
it to 27 characters so that the prefix ("hv:/" or "dm:/") plus the region
name can fit into an array of 32 characters.

Signed-off-by: Yifan Liu <yifan1.liu@...>
Reviewed-by: Junjie Mao <junjie.mao@...>

---
misc/config_tools/schema/types.xsd | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index e4e7eefc0..9bfc62536 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -249,10 +249,10 @@ Read more about the available scheduling options in :ref:`cpu_sharing`.</xs:docu
</xs:annotation>
<xs:simpleType>
<xs:annotation>
- <xs:documentation>A string with no spaces.</xs:documentation>
+ <xs:documentation>A string with up to 27 characters of digits, letters and ``_``.</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="\w+" />
+ <xs:pattern value="\w{1,27}" />
</xs:restriction>
</xs:simpleType>
</xs:element>


[PATCH] misc: Limit IVSHMEM region name to 27 characters

Liu, Yifan1
 

From: Yifan Liu <yifan1.liu@...>

The current IVSHMEM region name does not have a size limit. This patch limits
it to 27 characters so that the prefix ("hv:/" or "dm:/") plus the region
name can fit into an array of 32 characters.

Signed-off-by: Yifan Liu <yifan1.liu@...>
---
misc/config_tools/schema/types.xsd | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index e4e7eefc0..9bfc62536 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -249,10 +249,10 @@ Read more about the available scheduling options in :ref:`cpu_sharing`.</xs:docu
</xs:annotation>
<xs:simpleType>
<xs:annotation>
- <xs:documentation>A string with no spaces.</xs:documentation>
+ <xs:documentation>A string with up to 27 characters of digits, letters and ``_``.</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="\w+" />
+ <xs:pattern value="\w{1,27}" />
</xs:restriction>
</xs:simpleType>
</xs:element>
--
2.32.0.windows.2
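
A quick way to see where the 27 comes from (illustrative only; the 32-byte buffer size is taken from the commit message and the region name here is made up):

import re

prefix = "hv:/"                  # or "dm:/" -- both are 4 characters
name   = "shm_region_0"          # must match the new \w{1,27} pattern

assert re.fullmatch(r"\w{1,27}", name)
full_name = prefix + name
# 4 (prefix) + 27 (worst-case name) + 1 (terminating NUL) = 32 characters
assert len(full_name) + 1 <= 32
print(full_name, len(full_name))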


Re: [PATCH 3/3] upgrader:refine upgrader.py

Junjie Mao
 

"Li, Ziheng" <ziheng.li@...> writes:

From 5820f56aca3c934fd02e14bd08bbb4843a8324c1 Mon Sep 17 00:00:00 2001
From: Ziheng Li <ziheng.li@...>
Date: Wed, 1 Jun 2022 15:39:25 +0800
Subject: [PATCH 3/3] upgrader:refine upgrader.py

In the process of upgrading the scenario XML, delete the "clos" node.

Tracked-On: #7660
Signed-off-by: Ziheng Li <ziheng.li@...>
---
misc/config_tools/scenario_config/upgrader.py | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/misc/config_tools/scenario_config/upgrader.py b/misc/config_tools/scenario_config/upgrader.py
index 396d1d48b..b1d8d09de 100755
--- a/misc/config_tools/scenario_config/upgrader.py
+++ b/misc/config_tools/scenario_config/upgrader.py
@@ -869,6 +869,11 @@ class ScenarioUpgrader(ScenarioTransformer):
logging.error(f"Cannot infer the loader order of VM {self.old_xml_etree.getelementpath(old_node)}")
continue

+ vcpu_clos_node = old_node.xpath("./clos/vcpu_clos")
+ if vcpu_clos_node:
+ for node in vcpu_clos_node:
+ self.old_data_nodes.discard(node)
+
Silently dropping users' cache allocation policies is not a good
practice. You need to warn the user explicitly when any meaningful data
is discarded during the upgrade.

You can instead add an object of class `DiscardedDataFilter` to the
array `filters` in class `UpgradingScenarioStage`.

--
Best Regards
Junjie Mao

root_node.append(new_node)
for k, v in old_node.items():
new_node.set(k, v)
--
2.25.1


Re: [PATCH 2/3] upgrader:refine upgrader.py

Junjie Mao
 

"Li, Ziheng" <ziheng.li@...> writes:

From c028778e4aac4bc0b3fba988646236c6ff87c579 Mon Sep 17 00:00:00 2001
From: Ziheng Li <ziheng.li@...>
Date: Wed, 1 Jun 2022 15:32:46 +0800
Subject: [PATCH 2/3] upgrader:refine upgrader.py

Adjust the structure of the sub-nodes of the "memory" node in each "vm"
node: put the "start_hpa" and "size_hpa" nodes of each group into an
"hpa_region" node, and rename the "whole" node to "size".

Tracked-On: #7660
Signed-off-by: Ziheng Li <ziheng.li@...>
---
misc/config_tools/scenario_config/upgrader.py | 54 ++++++++++++++++++-
1 file changed, 53 insertions(+), 1 deletion(-)

diff --git a/misc/config_tools/scenario_config/upgrader.py b/misc/config_tools/scenario_config/upgrader.py
index d20c220cc..396d1d48b 100755
--- a/misc/config_tools/scenario_config/upgrader.py
+++ b/misc/config_tools/scenario_config/upgrader.py
@@ -433,6 +433,57 @@ class ScenarioUpgrader(ScenarioTransformer):
new_nodes.append(virtio.format_xml_element())
return False

+ def move_memory(self, xsd_element_node, xml_parent_node, new_nodes):
+ new_node = etree.Element(xsd_element_node.get("name"))
+ memory_node = self.hv_vm_node_map[xml_parent_node].xpath("./memory")
+ old_data_start_hpa = []
+ old_data_size_hpa = []
+ old_data_whole = []
+ if len(memory_node) != 0:
+ for element in memory_node[0]:
+ if "start_hpa" in element.tag:
+ old_data_start_hpa.append(element)
+ elif "size" in element.tag:
+ old_data_size_hpa.append(element)
+ elif "whole" in element.tag:
+ old_data_whole.append(element)
+ elif "hpa_region" in element.tag:
+ for subelement in element:
+ if "start_hpa" in subelement.tag:
+ old_data_start_hpa.append(subelement)
+ elif "size" in subelement.tag:
+ old_data_size_hpa.append(subelement)
+ elif "whole" in subelement.tag:
+ old_data_whole.append(subelement)
+
+ if len(old_data_start_hpa) != 0 and len(old_data_size_hpa) != 0:
+ for i in range(len(old_data_start_hpa)):
+ if int(old_data_start_hpa[i].text, 16) != 0 and int(old_data_size_hpa[i].text, 16) != 0:
+ hpa_region_node = etree.SubElement(new_node, 'hpa_region')
+ old_data_size_hpa[i].tag = "size_hpa"
+ hpa_region_node.append(old_data_start_hpa[i])
+ hpa_region_node.append(old_data_size_hpa[i])
+ elif len(old_data_whole) != 0 or (len(old_data_start_hpa) == 0 and len(old_data_size_hpa) != 0):
+ if len(old_data_whole) != 0:
+ for i in range(len(old_data_whole)):
+ old_data_whole[i].tag = "size"
+ new_node.append(old_data_whole[i])
+ else:
+ for i in range(len(old_data_size_hpa)):
+ old_data_size_hpa[i].tag = "size"
+ new_node.append(old_data_size_hpa[i])
+
+ new_nodes.append(new_node)
+
+ for n in old_data_start_hpa:
+ self.old_data_nodes.discard(n)
+ for n in old_data_size_hpa:
+ self.old_data_nodes.discard(n)
+ for n in old_data_whole:
+ self.old_data_nodes.discard(n)
Have you tried this with scenario/launch XMLs from, e.g., the 2.7 release?

+
+ return False
+
def move_console_vuart(self, xsd_element_node, xml_parent_node, new_nodes):
new_node = etree.Element(xsd_element_node.get("name"))
new_node.text = "None"
@@ -731,7 +782,7 @@ class ScenarioUpgrader(ScenarioTransformer):
"vbootloader": partialmethod(move_enablement, ".//vbootloader", values_as_enabled = ["ovmf"], values_as_disabled = ["no"]),

# Intermediate nodes
- "memory": partialmethod(create_node_if, ".//memory", ".//mem_size"),
+ #"memory": partialmethod(create_node_if, ".//memory", ".//mem_size"),
If this entry is not useful anymore, simply delete it.

"pci_devs": partialmethod(create_node_if, ".//pci_devs", ".//passthrough_devices/*[text() != ''] | .//sriov/*[text() != '']"),

"BUILD_TYPE": move_build_type,
@@ -744,6 +795,7 @@ class ScenarioUpgrader(ScenarioTransformer):
"os_type": move_os_type,
"virtio_devices": move_virtio_devices,
"memory/whole": partialmethod(rename_data, "memory/whole", ".//mem_size"),
If the whole `memory` node is now handled by a dedicated data mover, I
don't think you need a special mover for `memory/whole` anymore.

--
Best Regards
Junjie Mao

+ "memory": move_memory,

"default": move_data_by_same_tag,
}


Re: [PATCH 1/3] upgrader:refine upgrader.py

Junjie Mao
 

"Li, Ziheng" <ziheng.li@...> writes:

From 44c1a2739e8b6b136aeaad2e7a84a820396edb2d Mon Sep 17 00:00:00 2001
From: Ziheng Li <ziheng.li@...>
Date: Wed, 1 Jun 2022 15:24:50 +0800
Subject: [PATCH 1/3] upgrader:refine upgrader.py

In the process of upgrading the scenario XML, rename the "RELOC" and
"MULTIBOOT2" nodes to "RELOC_ENABLED" and "MULTIBOOT2_ENABLED".

Tracked-On: #7660
Signed-off-by: Ziheng Li <ziheng.li@...>
---
misc/config_tools/scenario_config/upgrader.py | 26 +++++++++++++++++++
1 file changed, 26 insertions(+)

diff --git a/misc/config_tools/scenario_config/upgrader.py b/misc/config_tools/scenario_config/upgrader.py
index 50a4422f5..d20c220cc 100755
--- a/misc/config_tools/scenario_config/upgrader.py
+++ b/misc/config_tools/scenario_config/upgrader.py
@@ -395,6 +395,30 @@ class ScenarioUpgrader(ScenarioTransformer):
self.move_data_by_xpath(".//BUILD_TYPE", xsd_element_node, xml_parent_node, new_nodes)
return False

+ def move_reloc(self, xsd_element_node, xml_parent_node, new_nodes):
+ old_reloc_node = self.get_node(self.old_xml_etree, "//FEATURES//RELOC")
+ if old_reloc_node is not None:
+ new_node = etree.Element(xsd_element_node.get("name"))
+ new_node.text = old_reloc_node.text
+ new_nodes.append(new_node)
+ self.old_data_nodes.discard(old_reloc_node)
+ else:
+ self.move_data_by_same_tag(xsd_element_node, xml_parent_node, new_nodes)
+
+ return False
+
+ def move_multiboot2(self, xsd_element_node, xml_parent_node, new_nodes):
+ old_multiboot2_node = self.get_node(self.old_xml_etree, "//FEATURES//MULTIBOOT2")
+ if old_multiboot2_node is not None:
+ new_node = etree.Element(xsd_element_node.get("name"))
+ new_node.text = old_multiboot2_node.text
+ new_nodes.append(new_node)
+ self.old_data_nodes.discard(old_multiboot2_node)
+ else:
+ self.move_data_by_same_tag(xsd_element_node, xml_parent_node, new_nodes)
+
+ return False
+
def move_virtio_devices(self, xsd_element_node, xml_parent_node, new_nodes):
virtio = VirtioDevices(self.old_xml_etree)
try:
@@ -711,6 +735,8 @@ class ScenarioUpgrader(ScenarioTransformer):
"pci_devs": partialmethod(create_node_if, ".//pci_devs", ".//passthrough_devices/*[text() != ''] | .//sriov/*[text() != '']"),

"BUILD_TYPE": move_build_type,
+ "RELOC_ENABLED": move_reloc,
+ "MULTIBOOT2_ENABLED": move_multiboot2,
For renaming, you can use `partialmethod(rename_data, "old/xpath",
"new/xpath")` without defining any data mover of your own.

--
Best Regards
Junjie Mao

"console_vuart": move_console_vuart,
"vuart_connections": move_vuart_connections,
"IVSHMEM": move_ivshmem,
--
2.25.1


[PATCH 3/3] upgrader:refine upgrader.py

Li, Ziheng
 

From 5820f56aca3c934fd02e14bd08bbb4843a8324c1 Mon Sep 17 00:00:00 2001
From: Ziheng Li <ziheng.li@...>
Date: Wed, 1 Jun 2022 15:39:25 +0800
Subject: [PATCH 3/3] upgrader:refine upgrader.py

In the process of upgrading the scenario XML, delete the "clos" node.

Tracked-On: #7660
Signed-off-by: Ziheng Li <ziheng.li@...>
---
misc/config_tools/scenario_config/upgrader.py | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/misc/config_tools/scenario_config/upgrader.py b/misc/config_tools/scenario_config/upgrader.py
index 396d1d48b..b1d8d09de 100755
--- a/misc/config_tools/scenario_config/upgrader.py
+++ b/misc/config_tools/scenario_config/upgrader.py
@@ -869,6 +869,11 @@ class ScenarioUpgrader(ScenarioTransformer):
logging.error(f"Cannot infer the loader order of VM {self.old_xml_etree.getelementpath(old_node)}")
continue

+ vcpu_clos_node = old_node.xpath("./clos/vcpu_clos")
+ if vcpu_clos_node:
+ for node in vcpu_clos_node:
+ self.old_data_nodes.discard(node)
+
root_node.append(new_node)
for k, v in old_node.items():
new_node.set(k, v)
--
2.25.1


[PATCH 2/3] upgrader:refine upgrader.py

Li, Ziheng
 

From c028778e4aac4bc0b3fba988646236c6ff87c579 Mon Sep 17 00:00:00 2001
From: Ziheng Li <ziheng.li@...>
Date: Wed, 1 Jun 2022 15:32:46 +0800
Subject: [PATCH 2/3] upgrader:refine upgrader.py

Adjust the structure of the sub-nodes of the "memory" node in each "vm"
node: put the "start_hpa" and "size_hpa" nodes of each group into an
"hpa_region" node, and rename the "whole" node to "size".

Tracked-On: #7660
Signed-off-by: Ziheng Li <ziheng.li@...>
---
misc/config_tools/scenario_config/upgrader.py | 54 ++++++++++++++++++-
1 file changed, 53 insertions(+), 1 deletion(-)

diff --git a/misc/config_tools/scenario_config/upgrader.py b/misc/config_tools/scenario_config/upgrader.py
index d20c220cc..396d1d48b 100755
--- a/misc/config_tools/scenario_config/upgrader.py
+++ b/misc/config_tools/scenario_config/upgrader.py
@@ -433,6 +433,57 @@ class ScenarioUpgrader(ScenarioTransformer):
new_nodes.append(virtio.format_xml_element())
return False

+ def move_memory(self, xsd_element_node, xml_parent_node, new_nodes):
+ new_node = etree.Element(xsd_element_node.get("name"))
+ memory_node = self.hv_vm_node_map[xml_parent_node].xpath("./memory")
+ old_data_start_hpa = []
+ old_data_size_hpa = []
+ old_data_whole = []
+ if len(memory_node) != 0:
+ for element in memory_node[0]:
+ if "start_hpa" in element.tag:
+ old_data_start_hpa.append(element)
+ elif "size" in element.tag:
+ old_data_size_hpa.append(element)
+ elif "whole" in element.tag:
+ old_data_whole.append(element)
+ elif "hpa_region" in element.tag:
+ for subelement in element:
+ if "start_hpa" in subelement.tag:
+ old_data_start_hpa.append(subelement)
+ elif "size" in subelement.tag:
+ old_data_size_hpa.append(subelement)
+ elif "whole" in subelement.tag:
+ old_data_whole.append(subelement)
+
+ if len(old_data_start_hpa) != 0 and len(old_data_size_hpa) != 0:
+ for i in range(len(old_data_start_hpa)):
+ if int(old_data_start_hpa[i].text, 16) != 0 and int(old_data_size_hpa[i].text, 16) != 0:
+ hpa_region_node = etree.SubElement(new_node, 'hpa_region')
+ old_data_size_hpa[i].tag = "size_hpa"
+ hpa_region_node.append(old_data_start_hpa[i])
+ hpa_region_node.append(old_data_size_hpa[i])
+ elif len(old_data_whole) != 0 or (len(old_data_start_hpa) == 0 and len(old_data_size_hpa) != 0):
+ if len(old_data_whole) != 0:
+ for i in range(len(old_data_whole)):
+ old_data_whole[i].tag = "size"
+ new_node.append(old_data_whole[i])
+ else:
+ for i in range(len(old_data_size_hpa)):
+ old_data_size_hpa[i].tag = "size"
+ new_node.append(old_data_size_hpa[i])
+
+ new_nodes.append(new_node)
+
+ for n in old_data_start_hpa:
+ self.old_data_nodes.discard(n)
+ for n in old_data_size_hpa:
+ self.old_data_nodes.discard(n)
+ for n in old_data_whole:
+ self.old_data_nodes.discard(n)
+
+ return False
+
def move_console_vuart(self, xsd_element_node, xml_parent_node, new_nodes):
new_node = etree.Element(xsd_element_node.get("name"))
new_node.text = "None"
@@ -731,7 +782,7 @@ class ScenarioUpgrader(ScenarioTransformer):
"vbootloader": partialmethod(move_enablement, ".//vbootloader", values_as_enabled = ["ovmf"], values_as_disabled = ["no"]),

# Intermediate nodes
- "memory": partialmethod(create_node_if, ".//memory", ".//mem_size"),
+ #"memory": partialmethod(create_node_if, ".//memory", ".//mem_size"),
"pci_devs": partialmethod(create_node_if, ".//pci_devs", ".//passthrough_devices/*[text() != ''] | .//sriov/*[text() != '']"),

"BUILD_TYPE": move_build_type,
@@ -744,6 +795,7 @@ class ScenarioUpgrader(ScenarioTransformer):
"os_type": move_os_type,
"virtio_devices": move_virtio_devices,
"memory/whole": partialmethod(rename_data, "memory/whole", ".//mem_size"),
+ "memory": move_memory,

"default": move_data_by_same_tag,
}
--
2.25.1


[PATCH 1/3] upgrader:refine upgrader.py

Li, Ziheng
 

From 44c1a2739e8b6b136aeaad2e7a84a820396edb2d Mon Sep 17 00:00:00 2001
From: Ziheng Li <ziheng.li@...>
Date: Wed, 1 Jun 2022 15:24:50 +0800
Subject: [PATCH 1/3] upgrader:refine upgrader.py

In the process of upgrading the scenario XML, rename the "RELOC" and
"MULTIBOOT2" nodes to "RELOC_ENABLED" and "MULTIBOOT2_ENABLED".

Tracked-On: #7660
Signed-off-by: Ziheng Li <ziheng.li@...>
---
misc/config_tools/scenario_config/upgrader.py | 26 +++++++++++++++++++
1 file changed, 26 insertions(+)

diff --git a/misc/config_tools/scenario_config/upgrader.py b/misc/config_tools/scenario_config/upgrader.py
index 50a4422f5..d20c220cc 100755
--- a/misc/config_tools/scenario_config/upgrader.py
+++ b/misc/config_tools/scenario_config/upgrader.py
@@ -395,6 +395,30 @@ class ScenarioUpgrader(ScenarioTransformer):
self.move_data_by_xpath(".//BUILD_TYPE", xsd_element_node, xml_parent_node, new_nodes)
return False

+ def move_reloc(self, xsd_element_node, xml_parent_node, new_nodes):
+ old_reloc_node = self.get_node(self.old_xml_etree, "//FEATURES//RELOC")
+ if old_reloc_node is not None:
+ new_node = etree.Element(xsd_element_node.get("name"))
+ new_node.text = old_reloc_node.text
+ new_nodes.append(new_node)
+ self.old_data_nodes.discard(old_reloc_node)
+ else:
+ self.move_data_by_same_tag(xsd_element_node, xml_parent_node, new_nodes)
+
+ return False
+
+ def move_multiboot2(self, xsd_element_node, xml_parent_node, new_nodes):
+ old_multiboot2_node = self.get_node(self.old_xml_etree, "//FEATURES//MULTIBOOT2")
+ if old_multiboot2_node is not None:
+ new_node = etree.Element(xsd_element_node.get("name"))
+ new_node.text = old_multiboot2_node.text
+ new_nodes.append(new_node)
+ self.old_data_nodes.discard(old_multiboot2_node)
+ else:
+ self.move_data_by_same_tag(xsd_element_node, xml_parent_node, new_nodes)
+
+ return False
+
def move_virtio_devices(self, xsd_element_node, xml_parent_node, new_nodes):
virtio = VirtioDevices(self.old_xml_etree)
try:
@@ -711,6 +735,8 @@ class ScenarioUpgrader(ScenarioTransformer):
"pci_devs": partialmethod(create_node_if, ".//pci_devs", ".//passthrough_devices/*[text() != ''] | .//sriov/*[text() != '']"),

"BUILD_TYPE": move_build_type,
+ "RELOC_ENABLED": move_reloc,
+ "MULTIBOOT2_ENABLED": move_multiboot2,
"console_vuart": move_console_vuart,
"vuart_connections": move_vuart_connections,
"IVSHMEM": move_ivshmem,
--
2.25.1


[PATCH] hv: compile out unused function if CONFIG_MULTIBOOT2 is disabled

Calvin Zhang <calvinzhang.cool@...>
 

When CONFIG_MULTIBOOT2 is disabled, 'create_service_vm_efi_mmap_desc' is
unused and the build fails because [-Werror=unused-function] is set.

boot/guest/bzimage_loader.c:188:17: error: 'create_service_vm_efi_mmap_desc' defined but not used [-Werror=unused-function]
188 | static uint16_t create_service_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_memory_desc *efi_mmap_desc)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
cc1: all warnings being treated as errors

Tracked-On: #7634
Signed-off-by: Calvin Zhang <calvinzhang.cool@...>
---
hypervisor/boot/guest/bzimage_loader.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/hypervisor/boot/guest/bzimage_loader.c b/hypervisor/boot/guest/bzimage_loader.c
index a05f2a176..d55519657 100644
--- a/hypervisor/boot/guest/bzimage_loader.c
+++ b/hypervisor/boot/guest/bzimage_loader.c
@@ -182,6 +182,7 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
return load_addr;
}

+#ifdef CONFIG_MULTIBOOT2
/**
* @pre vm != NULL && efi_mmap_desc != NULL
*/
@@ -234,6 +235,7 @@ static uint16_t create_service_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_m

return desc_idx;
}
+#endif

/**
* @pre zp != NULL && vm != NULL
--
2.30.2


Re: [PATCH] config_tools: bugfix for saving all enum values

Junjie Mao
 

"Li, KunhuiX" <kunhuix.li@...> writes:

-----Original Message-----
From: Mao, Junjie <junjie.mao@...>
Sent: Thursday, May 26, 2022 2:23 PM
To: Li, KunhuiX <kunhuix.li@...>
Cc: acrn-dev@...; Xie, Nanlin <nanlin.xie@...>
Subject: Re: [PATCH] config_tools: bugfix for saving all enum values

Kunhui-Li <kunhuix.li@...> writes:

Fix the issue that all enum values are saved in the scenario XML file
if the user doesn't select any value in the configurator.

Tracked-On: #6690
Signed-off-by: Kunhui-Li <kunhuix.li@...>
I'm fine with this change. How about the other customized widgets?
I checked the other customized widgets just now (VUART, IVSHMEM, Virtio input/network, and CAT);
this issue was not found there.
OK. Please submit the PR then. Thanks.

--
Best Regards
Junjie Mao

--
Best Regards
Junjie Mao

---
.../pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
b/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
index 49f7b73d7..f2d88444e 100644
--- a/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
+++ b/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
@@ -137,8 +137,8 @@ export default {
this.defaultVal = []
}
this.defaultVal.push({
- "use_type": this.ConsoleUseType,
- "backend_type": this.ConsoleBackendType,
+ "use_type": "",
+ "backend_type": "",
"output_file_path": "",
"sock_file_path": "",
"tty_device_path": "",


Re: [PATCH] config_tools: bugfix for saving all enum values

Kunhui Li
 

-----Original Message-----
From: Mao, Junjie <junjie.mao@...>
Sent: Thursday, May 26, 2022 2:23 PM
To: Li, KunhuiX <kunhuix.li@...>
Cc: acrn-dev@...; Xie, Nanlin <nanlin.xie@...>
Subject: Re: [PATCH] config_tools: bugfix for saving all enum values

Kunhui-Li <kunhuix.li@...> writes:

Fix the issue that all enum values are saved in the scenario XML file
if the user doesn't select any value in the configurator.

Tracked-On: #6690
Signed-off-by: Kunhui-Li <kunhuix.li@...>
I'm fine with this change. How about the other customized widgets?
I checked the other customized widgets just now (VUART, IVSHMEM, Virtio input/network, and CAT);
this issue was not found there.

--
Best Regards
Junjie Mao

---
.../pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
b/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
index 49f7b73d7..f2d88444e 100644
--- a/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
+++ b/misc/config_tools/configurator/packages/configurator/src/pages/Config/ConfigForm/CustomWidget/Virtio/Console.vue
@@ -137,8 +137,8 @@ export default {
this.defaultVal = []
}
this.defaultVal.push({
- "use_type": this.ConsoleUseType,
- "backend_type": this.ConsoleBackendType,
+ "use_type": "",
+ "backend_type": "",
"output_file_path": "",
"sock_file_path": "",
"tty_device_path": "",
