[PATCH] config-tools: solve hv and vm memory address conflict


Li, Ziheng
 

From c849de5679200ad9bd212a119b876a3a490d0450 Mon Sep 17 00:00:00 2001
From: zihengL1 <ziheng.li@...>
Date: Wed, 10 Aug 2022 13:23:50 +0800
Subject: [PATCH] config-tools: solve hv and vm memory address conflict

Fix the problem that ACRN could still be built successfully when the
memory addresses of the HV and a VM conflict, which caused the
hypervisor to hang at runtime.
Also define a class that processes the memory information to obtain
the available memory ranges.

Tracked-On: #7913
Signed-off-by: Ziheng Li <ziheng.li@...>
---
.../static_allocators/memory_allocator.py | 44 ++++++++++++-------
1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/misc/config_tools/static_allocators/memory_allocator.py b/misc/config_tools/static_allocators/memory_allocator.py
index cbfc9e138..5156f75c2 100644
--- a/misc/config_tools/static_allocators/memory_allocator.py
+++ b/misc/config_tools/static_allocators/memory_allocator.py
@@ -11,14 +11,23 @@ import lib.error
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, math, logging

-def import_memory_info(board_etree):
+class RamRange():
ram_range = {}
- for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
- start = int(memory_range.get("start"), base=16)
- size = int(memory_range.get("size"), base=10)
- ram_range[start] = size

- return ram_range
+ @classmethod
+ def import_memory_info(cls, board_etree, allocation_etree):
+ hv_start = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_START/text()", allocation_etree), 16)
+ hv_size = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE/text()", allocation_etree), 16)
+ for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
+ start = int(memory_range.get("start"), base=16)
+ size = int(memory_range.get("size"), base=10)
+ if start < hv_start and start + size > hv_start + hv_size:
+ cls.ram_range[start] = hv_start - start
+ cls.ram_range[hv_start + hv_size] = start + size - hv_start - hv_size
+ else:
+ cls.ram_range[start] = size
+
+ return cls.ram_range

def check_hpa(vm_node_info):
hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*")
@@ -77,20 +86,22 @@ def alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list):
mem_key = sorted(ram_range_info)
for mem_start in mem_key:
mem_size = ram_range_info[mem_start]
+ mem_end = mem_start + mem_size
for hpa_start in hpa_key:
hpa_size = mem_info_list[vm_index][hpa_start]
+ hpa_end = hpa_start + hpa_size
if hpa_start != 0:
- if mem_start < hpa_start and mem_start + mem_size > hpa_start + hpa_size:
+ if mem_start < hpa_start and mem_end > hpa_end:
ram_range_info[mem_start] = hpa_start - mem_start
- ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
- elif mem_start == hpa_start and mem_start + mem_size > hpa_start + hpa_size:
+ ram_range_info[hpa_end] = mem_end - hpa_end
+ elif mem_start == hpa_start and mem_end > hpa_end:
del ram_range_info[mem_start]
- ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
- elif mem_start < hpa_start and mem_start + mem_size == hpa_start + hpa_size:
+ ram_range_info[hpa_end] = mem_end - hpa_end
+ elif mem_start < hpa_start and mem_end == hpa_end:
ram_range_info[mem_start] = hpa_start - mem_start
- elif mem_start == hpa_start and mem_start + mem_size == hpa_start + hpa_size:
+ elif mem_start == hpa_start and mem_end == hpa_end:
del ram_range_info[mem_start]
- elif mem_start > hpa_start or mem_start + mem_size < hpa_start + hpa_size:
+ elif mem_start > hpa_start or mem_end < hpa_end:
raise lib.error.ResourceError(f"Start address of HPA is out of available memory range: vm id: {vm_index}, hpa_start: {hpa_start}.")
elif mem_size < hpa_size:
raise lib.error.ResourceError(f"Size of HPA is out of available memory range: vm id: {vm_index}, hpa_size: {hpa_size}.")
@@ -147,14 +158,17 @@ def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list):
region_index = region_index + 1

def alloc_vm_memory(board_etree, scenario_etree, allocation_etree):
- ram_range_info = import_memory_info(board_etree)
+ ram_range_info = RamRange().import_memory_info(board_etree, allocation_etree)
+ print(RamRange().ram_range)
ram_range_info, mem_info_list, vm_node_index_list = alloc_memory(scenario_etree, ram_range_info)
+ print(RamRange().ram_range)
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list)

def allocate_hugepages(board_etree, scenario_etree, allocation_etree):
hugepages_1gb = 0
hugepages_2mb = 0
- ram_range_info = import_memory_info(board_etree)
+ ram_range_info = RamRange().ram_range
total_hugepages = int(sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)*0.98/(1024*1024*1024) \
- sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \
- 5 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu")))
--
2.25.1


Junjie Mao
 

"Li, Ziheng" <ziheng.li@...> writes:

From c849de5679200ad9bd212a119b876a3a490d0450 Mon Sep 17 00:00:00 2001
From: zihengL1 <ziheng.li@...>
Date: Wed, 10 Aug 2022 13:23:50 +0800
Subject: [PATCH] config-tools: solve hv and vm memory address conflict

Fix the problem that ACRN could still be built successfully when the
memory addresses of the HV and a VM conflict, which caused the
hypervisor to hang at runtime.
Also define a class that processes the memory information to obtain
the available memory ranges.

Tracked-On: #7913
Signed-off-by: Ziheng Li <ziheng.li@...>
---
.../static_allocators/memory_allocator.py | 44 ++++++++++++-------
1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/misc/config_tools/static_allocators/memory_allocator.py b/misc/config_tools/static_allocators/memory_allocator.py
index cbfc9e138..5156f75c2 100644
--- a/misc/config_tools/static_allocators/memory_allocator.py
+++ b/misc/config_tools/static_allocators/memory_allocator.py
@@ -11,14 +11,23 @@ import lib.error
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, math, logging

-def import_memory_info(board_etree):
+class RamRange():
ram_range = {}
- for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
- start = int(memory_range.get("start"), base=16)
- size = int(memory_range.get("size"), base=10)
- ram_range[start] = size

- return ram_range
+ @classmethod
+ def import_memory_info(cls, board_etree, allocation_etree):
+ hv_start = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_START/text()", allocation_etree), 16)
+ hv_size = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE/text()", allocation_etree), 16)
+ for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
+ start = int(memory_range.get("start"), base=16)
+ size = int(memory_range.get("size"), base=10)
+ if start < hv_start and start + size > hv_start + hv_size:
+ cls.ram_range[start] = hv_start - start
+ cls.ram_range[hv_start + hv_size] = start + size - hv_start - hv_size
+ else:
+ cls.ram_range[start] = size
+
+ return cls.ram_range
Since you have wrapped up the ram_range dict as a class (or should it be an
object instead?), you can also abstract the allocation and checking
functions into methods of that class, to separate the low-level range-based
operations from the allocation algorithms.
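To make that concrete, here is a rough, self-contained sketch of how the
low-level range operations could be separated from the allocators. The
constructor and the method names (reserve, find_fit) are illustrative only,
not the API this patch has to use:

class RamRange:
    """Free physical RAM, kept as a dict of {start_address: size} (sketch only)."""

    def __init__(self, ranges):
        # ranges: dict mapping start address -> size in bytes
        self.ranges = dict(ranges)

    def reserve(self, start, size):
        """Carve [start, start + size) out of the free ranges, splitting if needed."""
        end = start + size
        for r_start in sorted(self.ranges):
            r_end = r_start + self.ranges[r_start]
            if r_start <= start and end <= r_end:
                del self.ranges[r_start]
                if r_start < start:      # keep the part below the reservation
                    self.ranges[r_start] = start - r_start
                if end < r_end:          # keep the part above the reservation
                    self.ranges[end] = r_end - end
                return
        raise ValueError(f"[{start:#x}, {end:#x}) is not inside a free range")

    def find_fit(self, size, floor=0):
        """Return the lowest start address >= floor with at least `size` free bytes."""
        for r_start in sorted(self.ranges):
            base = max(r_start, floor)
            if base + size <= r_start + self.ranges[r_start]:
                return base
        return None

# Example: a 4 GiB range with the HV image carved out, then a VM reservation.
ram = RamRange({0x100000000: 0x100000000})
ram.reserve(0x140000000, 0x10000000)    # hypothetical HV_RAM_START/HV_RAM_SIZE
vm_start = ram.find_fit(0x40000000)     # place a 1 GiB pre-launched VM region
ram.reserve(vm_start, 0x40000000)

That way alloc_hpa_region() and the HV carve-out in import_memory_info()
could share the same splitting logic instead of duplicating it.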


def check_hpa(vm_node_info):
hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*")
@@ -77,20 +86,22 @@ def alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list):
mem_key = sorted(ram_range_info)
for mem_start in mem_key:
mem_size = ram_range_info[mem_start]
+ mem_end = mem_start + mem_size
for hpa_start in hpa_key:
hpa_size = mem_info_list[vm_index][hpa_start]
+ hpa_end = hpa_start + hpa_size
if hpa_start != 0:
- if mem_start < hpa_start and mem_start + mem_size > hpa_start + hpa_size:
+ if mem_start < hpa_start and mem_end > hpa_end:
ram_range_info[mem_start] = hpa_start - mem_start
- ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
- elif mem_start == hpa_start and mem_start + mem_size > hpa_start + hpa_size:
+ ram_range_info[hpa_end] = mem_end - hpa_end
+ elif mem_start == hpa_start and mem_end > hpa_end:
del ram_range_info[mem_start]
- ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
- elif mem_start < hpa_start and mem_start + mem_size == hpa_start + hpa_size:
+ ram_range_info[hpa_end] = mem_end - hpa_end
+ elif mem_start < hpa_start and mem_end == hpa_end:
ram_range_info[mem_start] = hpa_start - mem_start
- elif mem_start == hpa_start and mem_start + mem_size == hpa_start + hpa_size:
+ elif mem_start == hpa_start and mem_end == hpa_end:
del ram_range_info[mem_start]
- elif mem_start > hpa_start or mem_start + mem_size < hpa_start + hpa_size:
+ elif mem_start > hpa_start or mem_end < hpa_end:
IIRC I saw similar changes in a previous patch. Is this a second version
of that?

raise lib.error.ResourceError(f"Start address of HPA is out of available memory range: vm id: {vm_index}, hpa_start: {hpa_start}.")
elif mem_size < hpa_size:
raise lib.error.ResourceError(f"Size of HPA is out of available memory range: vm id: {vm_index}, hpa_size: {hpa_size}.")
@@ -147,14 +158,17 @@ def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list):
region_index = region_index + 1

def alloc_vm_memory(board_etree, scenario_etree, allocation_etree):
- ram_range_info = import_memory_info(board_etree)
+ ram_range_info = RamRange().import_memory_info(board_etree, allocation_etree)
+ print(RamRange().ram_range)
ram_range_info, mem_info_list, vm_node_index_list = alloc_memory(scenario_etree, ram_range_info)
+ print(RamRange().ram_range)
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
Please exclude debug logs from your patch.
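If any of those messages are still useful for debugging, the file already
imports logging, so something along these lines (just a sketch) would keep
them out of normal output:

logging.debug("available RAM ranges before allocation: %s", ram_range_info)
logging.debug("available RAM ranges after allocation: %s", ram_range_info)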

--
Best Regards
Junjie Mao

write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list)

def allocate_hugepages(board_etree, scenario_etree, allocation_etree):
hugepages_1gb = 0
hugepages_2mb = 0
- ram_range_info = import_memory_info(board_etree)
+ ram_range_info = RamRange().ram_range
total_hugepages = int(sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)*0.98/(1024*1024*1024) \
- sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \
- 5 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu")))
--
2.25.1