[PATCH v2 1/2] misc: refine CLOS module for expansibility


chenli.wei
 

This patch refines the CLOS module in the following aspects:

1 Unified CACHE_ID type to Hex Format
2 Rewrite policy merge with RDT Class
3 Modify the logic of generating the CPU mask

v1-->v2:
1. code format
2. add comment for RdtPolicy

Signed-off-by: Chenli Wei <chenli.wei@...>
---
misc/config_tools/board_config/board_c.py | 8 +-
misc/config_tools/schema/types.xsd | 2 +-
misc/config_tools/static_allocators/clos.py | 233 ++++++++++++--------
3 files changed, 144 insertions(+), 99 deletions(-)

diff --git a/misc/config_tools/board_config/board_c.py b/misc/config_tools/board_config/board_c.py
index 8082c4387..4be3ea4ec 100644
--- a/misc/config_tools/board_config/board_c.py
+++ b/misc/config_tools/board_config/board_c.py
@@ -173,7 +173,7 @@ def gen_rdt_str(cache, config):
return err_dic

cdp_enable = get_cdp_enabled()
- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)
if len(cat_mask_list) > int(clos_number):
err_dic['board config: Failed to generate board.c'] = "CLOS Mask Number too bigger then the supported of L2/L3 cache"
return err_dic;
@@ -207,7 +207,7 @@ def gen_rdt_str(cache, config):

cpu_mask = 0
for processor in processor_list:
- core_id = common.get_node(f"//core[@id = '{processor}']/thread/cpu_id/text()", board_etree)
+ core_id = common.get_node(f"//thread[apic_id = '{processor}']/cpu_id/text()", board_etree)
if core_id is None:
continue
else:
@@ -240,7 +240,7 @@ def gen_clos_array(cache_list, config):
clos_number = common.get_node(f"./capability/clos_number/text()", cache)
if cache_level == "2":

- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)
array_size = len(cat_mask_list)

print("union clos_config platform_l2_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)
@@ -250,7 +250,7 @@ def gen_clos_array(cache_list, config):
print("};\n", file=config)
res_present[RDT.L2.value] += 1
elif cache_level == "3":
- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)

print("union clos_config platform_l3_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index 36a4aae09..cec4b84a5 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -355,7 +355,7 @@ RDT, setting this option to ``y`` is ignored.</xs:documentation>

<xs:complexType name="CacheAllocationType">
<xs:sequence>
- <xs:element name="CACHE_ID" type="xs:integer"/>
+ <xs:element name="CACHE_ID" type="HexFormat"/>
<xs:element name="CACHE_LEVEL" type="xs:integer"/>
<xs:element name="POLICY" type="CachePolicyType" minOccurs="1" maxOccurs="unbounded"/>
</xs:sequence>
diff --git a/misc/config_tools/static_allocators/clos.py b/misc/config_tools/static_allocators/clos.py
index 1f0476422..80b59435d 100644
--- a/misc/config_tools/static_allocators/clos.py
+++ b/misc/config_tools/static_allocators/clos.py
@@ -12,114 +12,159 @@ import re
from collections import defaultdict
from itertools import combinations

-def create_clos_node(etree, vm_id, index_list):
- allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
+class Identifier:
+ vm_name = ""
+ vcpu = ""
+ cache_type = ""
+
+ def __init__(self, vm_name, vcpu, cache_type):
+ self.vm_name = vm_name
+ self.vcpu = vcpu
+ self.cache_type = cache_type
+
+ def __eq__(self, other):
+ return (self.vm_name == other.vm_name) and (self.vcpu == other.vcpu) and (self.cache_type == other.cache_type)
+
+class RdtPolicy:
+
+# a dictionary to save the CLOS policy from user setting
+ policy_dict = {}
+
+# a list stored the vCPU or VM info
+ policy_owner = []
+
+# a list stored the L2 cache IDs
+ cache2_id_list = []
+
+#L2 cache have more then one section, this function find which one have been set
+ def find_cache2_id(self, mask):
+ for cache2 in self.cache2_id_list:
+ if mask[cache2] != None:
+ return cache2
+ return None
+
+ def __init__(self, policy_dict, owner):
+ self.policy_dict = policy_dict
+ self.policy_owner = [owner]
+
+#check whether the src could be merged, if yes, add the src owner to policy_owner list and return True
+ def merge_policy(self, src):
+ if self.policy_dict["l3"] == src.policy_dict["l3"]:
+ cache2_id = self.find_cache2_id(src.policy_dict)
+ if (cache2_id == None) or (self.policy_dict[cache2_id] == src.policy_dict[cache2_id]):
+ self.policy_owner.append(src.policy_owner[0])
+ return True
+ elif self.policy_dict[cache2_id] == None:
+ self.policy_dict[cache2_id] = src.policy_dict[cache2_id]
+ return True
+ return False
+
+#check whether a VM/vCPU could adapt this policy
+ def adapt_policy(self, policy_identifier):
+ for owner in self.policy_owner:
+ if owner == policy_identifier:
+ return True
+ return False
+
+class vCatPolicy(RdtPolicy):
+ def merge_policy(self, src):
+ return False
+
+def create_clos_node(scenario_etree, vm_id, index_list):
+ allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", scenario_etree)
if allocation_vm_node is None:
- allocation_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
+ allocation_vm_node = common.append_node("/acrn-config/vm", None, scenario_etree, id = vm_id)
if common.get_node("./clos", allocation_vm_node) is None:
clos_node = common.append_node("./clos", None, allocation_vm_node)
for index in index_list:
common.append_node(f"./vcpu_clos", str(index), clos_node)

-def find_cache2_id(mask, cache2_id_list):
- for cache2 in cache2_id_list:
- if mask[cache2] != "None":
- return cache2
- return "None"
-
-def merge_policy_list(mask_list, cache2_id_list):
- index = 0
+def merge_policy_list(policy_list):
result_list = []
- for index,mask in enumerate(mask_list):
- merged = 0
+ for index,p in enumerate(policy_list):
+ merged = False
if index == 0:
- result_list.append(mask)
+ result_list.append(p)
continue
for result in result_list:
- if result["l3"] != mask["l3"]:
- continue
- else:
- cache2_id = find_cache2_id(mask, cache2_id_list)
- if cache2_id == "None" or result[cache2_id] == mask[cache2_id]:
- merged = 1
- break
- if result[cache2_id] == "None":
- merged = 1
- result[cache2_id] = mask[cache2_id]
- break
- if merged == 0:
- result_list.append(mask)
+ if result.merge_policy(p):
+ merged = True
+ break;
+ if merged == False:
+ result_list.append(p)
return result_list

-def gen_all_clos_index(board_etree, scenario_etree, allocation_etree):
- policy_list = []
- allocation_list = scenario_etree.xpath(f"//POLICY")
+def gen_identifier_list(scenario_etree):
+ identifier_list = []
+ vm_list = scenario_etree.xpath("//POLICY/VM")
+ for vm in vm_list:
+ vm_name = common.get_node("./text()", vm)
+ vcpu = common.get_node("../VCPU/text()", vm)
+ cache_type = common.get_node("../TYPE/text()", vm)
+ identifier_list.append(Identifier(vm_name, vcpu, cache_type))
+ return identifier_list
+
+def vm_vcat_enable(scenario_etree, vm_name):
+ vcat_enable = common.get_node(f"//VCAT_ENABLED/text()", scenario_etree)
+ virtual_cat_support = common.get_node(f"//vm[name = '{vm_name}']/virtual_cat_support/text()", scenario_etree)
+ return (vcat_enable == "y") and (virtual_cat_support == "y")
+
+def get_policy_list(board_etree, scenario_etree, allocation_etree):
cache2_id_list = scenario_etree.xpath("//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
cache2_id_list.sort()

- for policy in allocation_list:
- cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
- cache_id = common.get_node("../CACHE_ID/text()", policy)
- vcpu = common.get_node("./VCPU/text()", policy)
- mask = common.get_node("./CLOS_MASK/text()", policy)
- tmp = (cache_level, cache_id, vcpu, mask)
- policy_list.append(tmp)
-
- vCPU_list = scenario_etree.xpath(f"//POLICY/VCPU/text()")
- l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY/CLOS_MASK")
- mask_list = []
- for vCPU in vCPU_list:
+ RdtPolicy.cache2_id_list = cache2_id_list
+ identifier_list = gen_identifier_list(scenario_etree)
+
+ result_list = []
+ for ident in identifier_list:
dict_tmp = {}
- l3_mask = l2_mask = "None"
- l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY[VCPU = '{vCPU}']/CLOS_MASK/text()")
- if len(l3_mask_list) > 0:
- l3_mask = l3_mask_list[0]
+ policy_list = scenario_etree.xpath(f"//POLICY[VM = '{ident.vm_name}' and VCPU = '{ident.vcpu}' and TYPE = '{ident.cache_type}']")
+ l3_mask = l2_mask = None
+ cache2_id = None
+ for policy in policy_list:
+ cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
+ cache_id = common.get_node("../CACHE_ID/text()", policy)
+ clos_mask = common.get_node("./CLOS_MASK/text()", policy)
+ if cache_level == "2":
+ l2_mask = clos_mask
+ cache2_id = cache_id
+ else:
+ l3_mask = clos_mask
dict_tmp["l3"] = l3_mask
-
- l2_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2]/POLICY[VCPU = '{vCPU}']/CLOS_MASK")
- if len(l2_mask_list) > 0:
- l2_mask = l2_mask_list[0].text
- cache_id = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2 and POLICY/VCPU = '{vCPU}']/CACHE_ID/text()")[0]
for cache2 in cache2_id_list:
- if cache2 == cache_id:
+ if cache2 == cache2_id:
dict_tmp[cache_id] = l2_mask
else:
- dict_tmp[cache2] = "None"
- mask_list.append(dict_tmp)
- mask_list = merge_policy_list(mask_list, cache2_id_list)
- return mask_list
-
-def get_clos_index(cache_level, cache_id, clos_mask):
- mask_list = common.get_mask_list(cache_level, cache_id)
- idx = 0
- for mask in mask_list:
- idx += 1
- if mask == clos_mask:
- break
- return idx
-def get_clos_id(mask_list, l2_id, l2_mask, l3_mask):
- for mask in mask_list:
- if mask[l2_id] == l2_mask and mask["l3"] == l3_mask:
- return mask_list.index(mask)
+ dict_tmp[cache2] = None
+ if vm_vcat_enable(scenario_etree, ident.vm_name):
+ result_list.append(vCatPolicy(dict_tmp, ident))
+ else:
+ result_list.append(RdtPolicy(dict_tmp, ident))
+ return merge_policy_list(result_list)
+
+def get_clos_id(rdt_list, mask_identifier):
+ for index,rdt in enumerate(rdt_list):
+ if rdt.adapt_policy(mask_identifier):
+ return index
return 0

def alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list):
vm_node_list = scenario_etree.xpath("//vm")
for vm_node in vm_node_list:
- vmname = common.get_node("./name/text()", vm_node)
- allocation_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[POLICY/VM = '{vmname}']")
- for allocation in allocation_list:
- index_list = []
- cache_level = common.get_node("./CACHE_LEVEL/text()", allocation)
- cache_id = common.get_node("./CACHE_ID/text()", allocation)
- clos_mask_list = allocation.xpath(f".//POLICY[VM = '{vmname}']/CLOS_MASK/text()")
-
- for clos_mask in clos_mask_list:
- index = get_clos_id(mask_list, cache_id, clos_mask, "None")
+ vm_name = common.get_node("./name/text()", vm_node)
+ vcpu_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}']/VCPU/text()")
+ index_list = []
+ for vcpu in sorted(list(set(vcpu_list))):
+ type_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}' and VCPU = '{vcpu}']/TYPE/text()")
+ for cache_type in sorted(list(set(type_list))):
+ if cache_type == "Data":
+ continue
+ index = get_clos_id(mask_list, Identifier(vm_name, vcpu, cache_type))
index_list.append(index)
- create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)
+ create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)

-def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list):
+def create_mask_list_node(board_etree, scenario_etree, allocation_etree, rdt_policy_list):
allocation_hv_node = common.get_node(f"//hv", allocation_etree)
if allocation_hv_node is None:
allocation_hv_node = common.append_node(f"/acrn-config/hv", None, allocation_etree)
@@ -127,24 +172,24 @@ def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_lis
cache2_id_list.sort()
if common.get_node("./clos_mask[@id = l3]", allocation_hv_node) is None:
clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id="l3")
- for i in range(0, len(mask_list)):
- if mask_list[i]["l3"] == "None":
- value = "0xffff"
- else:
- value = str(mask_list[i]["l3"])
+ length = common.get_node(f"//cache[@level='3']/capability/capacity_mask_length/text()", board_etree)
+ value = hex((1 << int(length)) - 1)
+ for i in range(0, len(rdt_policy_list)):
+ if rdt_policy_list[i].policy_dict["l3"] is not None:
+ value = str(rdt_policy_list[i].policy_dict["l3"])
common.append_node(f"./clos", value, clos_mask)

for cache2 in cache2_id_list:
+ length = common.get_node(f"//cache[@level='2' and @id = '{cache2}']/capability/capacity_mask_length/text()", board_etree)
+ value = hex((1 << int(length)) - 1)
if common.get_node("./clos_mask[@id = '{cache2}']", allocation_hv_node) is None:
clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id=cache2)
- for i in range(0, len(mask_list)):
- if mask_list[i][cache2] == "None":
- value = "0xffff"
- else:
- value = str(mask_list[i][cache2] )
+ for i in range(0, len(rdt_policy_list)):
+ if rdt_policy_list[i].policy_dict[cache2] is not None:
+ value = str(rdt_policy_list[i].policy_dict[cache2] )
common.append_node(f"./clos", value, clos_mask)

def fn(board_etree, scenario_etree, allocation_etree):
- mask_list = gen_all_clos_index(board_etree, scenario_etree, allocation_etree)
- creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list)
- alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list)
+ policy_list = get_policy_list(board_etree, scenario_etree, allocation_etree)
+ create_mask_list_node(board_etree, scenario_etree, allocation_etree, policy_list)
+ alloc_clos_index(board_etree, scenario_etree, allocation_etree, policy_list)
--
2.17.1


Junjie Mao
 

Chenli Wei <chenli.wei@...> writes:

This patch refines the CLOS module in the following aspects:

1 Unified CACHE_ID type to Hex Format
2 Rewrite policy merge with RDT Class
3 Modify the logic of generating the CPU mask

v1-->v2:
1. code format
2. add comment for RdtPolicy

Signed-off-by: Chenli Wei <chenli.wei@...>
---
misc/config_tools/board_config/board_c.py | 8 +-
misc/config_tools/schema/types.xsd | 2 +-
misc/config_tools/static_allocators/clos.py | 233 ++++++++++++--------
3 files changed, 144 insertions(+), 99 deletions(-)

diff --git a/misc/config_tools/board_config/board_c.py b/misc/config_tools/board_config/board_c.py
index 8082c4387..4be3ea4ec 100644
--- a/misc/config_tools/board_config/board_c.py
+++ b/misc/config_tools/board_config/board_c.py
@@ -173,7 +173,7 @@ def gen_rdt_str(cache, config):
return err_dic

cdp_enable = get_cdp_enabled()
- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)
if len(cat_mask_list) > int(clos_number):
err_dic['board config: Failed to generate board.c'] = "CLOS Mask Number too bigger then the supported of L2/L3 cache"
return err_dic;
@@ -207,7 +207,7 @@ def gen_rdt_str(cache, config):

cpu_mask = 0
for processor in processor_list:
- core_id = common.get_node(f"//core[@id = '{processor}']/thread/cpu_id/text()", board_etree)
+ core_id = common.get_node(f"//thread[apic_id = '{processor}']/cpu_id/text()", board_etree)
if core_id is None:
continue
else:
@@ -240,7 +240,7 @@ def gen_clos_array(cache_list, config):
clos_number = common.get_node(f"./capability/clos_number/text()", cache)
if cache_level == "2":

- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)
array_size = len(cat_mask_list)

print("union clos_config platform_l2_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)
@@ -250,7 +250,7 @@ def gen_clos_array(cache_list, config):
print("};\n", file=config)
res_present[RDT.L2.value] += 1
elif cache_level == "3":
- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)

print("union clos_config platform_l3_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index 36a4aae09..cec4b84a5 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -355,7 +355,7 @@ RDT, setting this option to ``y`` is ignored.</xs:documentation>

<xs:complexType name="CacheAllocationType">
<xs:sequence>
- <xs:element name="CACHE_ID" type="xs:integer"/>
+ <xs:element name="CACHE_ID" type="HexFormat"/>
<xs:element name="CACHE_LEVEL" type="xs:integer"/>
<xs:element name="POLICY" type="CachePolicyType" minOccurs="1" maxOccurs="unbounded"/>
</xs:sequence>
diff --git a/misc/config_tools/static_allocators/clos.py b/misc/config_tools/static_allocators/clos.py
index 1f0476422..80b59435d 100644
--- a/misc/config_tools/static_allocators/clos.py
+++ b/misc/config_tools/static_allocators/clos.py
@@ -12,114 +12,159 @@ import re
from collections import defaultdict
from itertools import combinations

-def create_clos_node(etree, vm_id, index_list):
- allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
+class Identifier:
Use `namedtuple`. No need to write 10 lines only to reinvent the wheel.

Also, this class represents an identifier of what? Have you considered
CDP here as well?

+ vm_name = ""
+ vcpu = ""
+ cache_type = ""
+
+ def __init__(self, vm_name, vcpu, cache_type):
+ self.vm_name = vm_name
+ self.vcpu = vcpu
+ self.cache_type = cache_type
+
+ def __eq__(self, other):
+ return (self.vm_name == other.vm_name) and (self.vcpu == other.vcpu) and (self.cache_type == other.cache_type)
+
+class RdtPolicy:
+
+# a dictionary to save the CLOS policy from user setting
Indent the comments to the same level as the code you are commenting on.

+ policy_dict = {}
+
+# a list stored the vCPU or VM info
+ policy_owner = []
+
+# a list stored the L2 cache IDs
+ cache2_id_list = []
+
+#L2 cache have more then one section, this function find which one have been set
Typo: then -> than

+ def find_cache2_id(self, mask):
+ for cache2 in self.cache2_id_list:
+ if mask[cache2] != None:
+ return cache2
+ return None
+
+ def __init__(self, policy_dict, owner):
+ self.policy_dict = policy_dict
+ self.policy_owner = [owner]
+
+#check whether the src could be merged, if yes, add the src owner to policy_owner list and return True
+ def merge_policy(self, src):
+ if self.policy_dict["l3"] == src.policy_dict["l3"]:
While dicts in Python can use arbitrary keys (even with different
types), the code is much less maintainable if you do so.

If you want to wrap RDT policies as objects, do the following:

1. Instead of using an all-in-one dict, split L2 and L3 configurations
as different fields.

2. Add an interface that takes an XML element and updates the objects,
so that when walking through the policies you do not need to create
any more dictionaries or lists as another level of intermediate
representation.

+ cache2_id = self.find_cache2_id(src.policy_dict)
+ if (cache2_id == None) or (self.policy_dict[cache2_id] == src.policy_dict[cache2_id]):
+ self.policy_owner.append(src.policy_owner[0])
In theory you should extend `self.policy_owner` using
`src.policy_owner`. While today `src` may have at most one owner, that
relies on how you walk through the policies, which this class had better
not make any assumptions about.

+ return True
+ elif self.policy_dict[cache2_id] == None:
+ self.policy_dict[cache2_id] = src.policy_dict[cache2_id]
+ return True
+ return False
+
+#check whether a VM/vCPU could adapt this policy
+ def adapt_policy(self, policy_identifier):
"adapt" means "make sth. suitable", but this function is only a
predicate with no side effects.

+ for owner in self.policy_owner:
+ if owner == policy_identifier:
+ return True
+ return False
Do you mean `return policy_identifier in self.policy_owner` by the above
four lines?

+
+class vCatPolicy(RdtPolicy):
+ def merge_policy(self, src):
+ return False
+
+def create_clos_node(scenario_etree, vm_id, index_list):
+ allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", scenario_etree)
if allocation_vm_node is None:
- allocation_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
+ allocation_vm_node = common.append_node("/acrn-config/vm", None, scenario_etree, id = vm_id)
if common.get_node("./clos", allocation_vm_node) is None:
clos_node = common.append_node("./clos", None, allocation_vm_node)
for index in index_list:
common.append_node(f"./vcpu_clos", str(index), clos_node)

-def find_cache2_id(mask, cache2_id_list):
- for cache2 in cache2_id_list:
- if mask[cache2] != "None":
- return cache2
- return "None"
-
-def merge_policy_list(mask_list, cache2_id_list):
- index = 0
+def merge_policy_list(policy_list):
result_list = []
- for index,mask in enumerate(mask_list):
- merged = 0
+ for index,p in enumerate(policy_list):
+ merged = False
if index == 0:
Why do we need this branch? When `index` is 0, `result_list` is empty and
`merged` will always be `False` after you walk through that empty
list. Adding this specific branch does little (if anything) to help
performance but hurts readability a lot.

- result_list.append(mask)
+ result_list.append(p)
continue
for result in result_list:
- if result["l3"] != mask["l3"]:
- continue
- else:
- cache2_id = find_cache2_id(mask, cache2_id_list)
- if cache2_id == "None" or result[cache2_id] == mask[cache2_id]:
- merged = 1
- break
- if result[cache2_id] == "None":
- merged = 1
- result[cache2_id] = mask[cache2_id]
- break
- if merged == 0:
- result_list.append(mask)
+ if result.merge_policy(p):
+ merged = True
+ break;
+ if merged == False:
Simply say `if not merged`.

+ result_list.append(p)
return result_list

-def gen_all_clos_index(board_etree, scenario_etree, allocation_etree):
- policy_list = []
- allocation_list = scenario_etree.xpath(f"//POLICY")
+def gen_identifier_list(scenario_etree):
+ identifier_list = []
+ vm_list = scenario_etree.xpath("//POLICY/VM")
+ for vm in vm_list:
+ vm_name = common.get_node("./text()", vm)
+ vcpu = common.get_node("../VCPU/text()", vm)
+ cache_type = common.get_node("../TYPE/text()", vm)
+ identifier_list.append(Identifier(vm_name, vcpu, cache_type))
+ return identifier_list
+
+def vm_vcat_enable(scenario_etree, vm_name):
+ vcat_enable = common.get_node(f"//VCAT_ENABLED/text()", scenario_etree)
+ virtual_cat_support = common.get_node(f"//vm[name = '{vm_name}']/virtual_cat_support/text()", scenario_etree)
+ return (vcat_enable == "y") and (virtual_cat_support == "y")
+
+def get_policy_list(board_etree, scenario_etree, allocation_etree):
cache2_id_list = scenario_etree.xpath("//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
cache2_id_list.sort()

- for policy in allocation_list:
- cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
- cache_id = common.get_node("../CACHE_ID/text()", policy)
- vcpu = common.get_node("./VCPU/text()", policy)
- mask = common.get_node("./CLOS_MASK/text()", policy)
- tmp = (cache_level, cache_id, vcpu, mask)
- policy_list.append(tmp)
-
- vCPU_list = scenario_etree.xpath(f"//POLICY/VCPU/text()")
- l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY/CLOS_MASK")
- mask_list = []
- for vCPU in vCPU_list:
+ RdtPolicy.cache2_id_list = cache2_id_list
+ identifier_list = gen_identifier_list(scenario_etree)
+
+ result_list = []
+ for ident in identifier_list:
dict_tmp = {}
- l3_mask = l2_mask = "None"
- l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY[VCPU = '{vCPU}']/CLOS_MASK/text()")
- if len(l3_mask_list) > 0:
- l3_mask = l3_mask_list[0]
+ policy_list = scenario_etree.xpath(f"//POLICY[VM = '{ident.vm_name}' and VCPU = '{ident.vcpu}' and TYPE = '{ident.cache_type}']")
+ l3_mask = l2_mask = None
+ cache2_id = None
+ for policy in policy_list:
+ cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
+ cache_id = common.get_node("../CACHE_ID/text()", policy)
+ clos_mask = common.get_node("./CLOS_MASK/text()", policy)
+ if cache_level == "2":
+ l2_mask = clos_mask
+ cache2_id = cache_id
+ else:
+ l3_mask = clos_mask
dict_tmp["l3"] = l3_mask
-
- l2_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2]/POLICY[VCPU = '{vCPU}']/CLOS_MASK")
- if len(l2_mask_list) > 0:
- l2_mask = l2_mask_list[0].text
- cache_id = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2 and POLICY/VCPU = '{vCPU}']/CACHE_ID/text()")[0]
for cache2 in cache2_id_list:
- if cache2 == cache_id:
+ if cache2 == cache2_id:
dict_tmp[cache_id] = l2_mask
else:
- dict_tmp[cache2] = "None"
- mask_list.append(dict_tmp)
- mask_list = merge_policy_list(mask_list, cache2_id_list)
- return mask_list
-
-def get_clos_index(cache_level, cache_id, clos_mask):
- mask_list = common.get_mask_list(cache_level, cache_id)
- idx = 0
- for mask in mask_list:
- idx += 1
- if mask == clos_mask:
- break
- return idx
-def get_clos_id(mask_list, l2_id, l2_mask, l3_mask):
- for mask in mask_list:
- if mask[l2_id] == l2_mask and mask["l3"] == l3_mask:
- return mask_list.index(mask)
+ dict_tmp[cache2] = None
+ if vm_vcat_enable(scenario_etree, ident.vm_name):
+ result_list.append(vCatPolicy(dict_tmp, ident))
+ else:
+ result_list.append(RdtPolicy(dict_tmp, ident))
+ return merge_policy_list(result_list)
+
+def get_clos_id(rdt_list, mask_identifier):
+ for index,rdt in enumerate(rdt_list):
+ if rdt.adapt_policy(mask_identifier):
Please, be consistent with your names. What's the difference between
"identifier" and "owner"? If they are the same, unify the noun you use
to refer to them; otherwise, why you compare identifiers with owners in
the `adapt_policy` member function?

I'll stop here, as this patch still needs a fundamental refactoring to
make the overall structure easy to read.

--
Best Regards
Junjie Mao

+ return index
return 0

def alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list):
vm_node_list = scenario_etree.xpath("//vm")
for vm_node in vm_node_list:
- vmname = common.get_node("./name/text()", vm_node)
- allocation_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[POLICY/VM = '{vmname}']")
- for allocation in allocation_list:
- index_list = []
- cache_level = common.get_node("./CACHE_LEVEL/text()", allocation)
- cache_id = common.get_node("./CACHE_ID/text()", allocation)
- clos_mask_list = allocation.xpath(f".//POLICY[VM = '{vmname}']/CLOS_MASK/text()")
-
- for clos_mask in clos_mask_list:
- index = get_clos_id(mask_list, cache_id, clos_mask, "None")
+ vm_name = common.get_node("./name/text()", vm_node)
+ vcpu_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}']/VCPU/text()")
+ index_list = []
+ for vcpu in sorted(list(set(vcpu_list))):
+ type_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}' and VCPU = '{vcpu}']/TYPE/text()")
+ for cache_type in sorted(list(set(type_list))):
+ if cache_type == "Data":
+ continue
+ index = get_clos_id(mask_list, Identifier(vm_name, vcpu, cache_type))
index_list.append(index)
- create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)
+ create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)

-def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list):
+def create_mask_list_node(board_etree, scenario_etree, allocation_etree, rdt_policy_list):
allocation_hv_node = common.get_node(f"//hv", allocation_etree)
if allocation_hv_node is None:
allocation_hv_node = common.append_node(f"/acrn-config/hv", None, allocation_etree)
@@ -127,24 +172,24 @@ def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_lis
cache2_id_list.sort()
if common.get_node("./clos_mask[@id = l3]", allocation_hv_node) is None:
clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id="l3")
- for i in range(0, len(mask_list)):
- if mask_list[i]["l3"] == "None":
- value = "0xffff"
- else:
- value = str(mask_list[i]["l3"])
+ length = common.get_node(f"//cache[@level='3']/capability/capacity_mask_length/text()", board_etree)
+ value = hex((1 << int(length)) - 1)
+ for i in range(0, len(rdt_policy_list)):
+ if rdt_policy_list[i].policy_dict["l3"] is not None:
+ value = str(rdt_policy_list[i].policy_dict["l3"])
common.append_node(f"./clos", value, clos_mask)

for cache2 in cache2_id_list:
+ length = common.get_node(f"//cache[@level='2' and @id = '{cache2}']/capability/capacity_mask_length/text()", board_etree)
+ value = hex((1 << int(length)) - 1)
if common.get_node("./clos_mask[@id = '{cache2}']", allocation_hv_node) is None:
clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id=cache2)
- for i in range(0, len(mask_list)):
- if mask_list[i][cache2] == "None":
- value = "0xffff"
- else:
- value = str(mask_list[i][cache2] )
+ for i in range(0, len(rdt_policy_list)):
+ if rdt_policy_list[i].policy_dict[cache2] is not None:
+ value = str(rdt_policy_list[i].policy_dict[cache2] )
common.append_node(f"./clos", value, clos_mask)

def fn(board_etree, scenario_etree, allocation_etree):
- mask_list = gen_all_clos_index(board_etree, scenario_etree, allocation_etree)
- creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list)
- alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list)
+ policy_list = get_policy_list(board_etree, scenario_etree, allocation_etree)
+ create_mask_list_node(board_etree, scenario_etree, allocation_etree, policy_list)
+ alloc_clos_index(board_etree, scenario_etree, allocation_etree, policy_list)


chenli.wei
 

On 5/7/2022 10:38 AM, Junjie Mao wrote:
Chenli Wei <chenli.wei@...> writes:

This patch refines the CLOS module in the following aspects:

1 Unified CACHE_ID type to Hex Format
2 Rewrite policy merge with RDT Class
3 Modify the logic of generating the CPU mask

v1-->v2:
1. code format
2. add comment for RdtPolicy

Signed-off-by: Chenli Wei <chenli.wei@...>
---
misc/config_tools/board_config/board_c.py | 8 +-
misc/config_tools/schema/types.xsd | 2 +-
misc/config_tools/static_allocators/clos.py | 233 ++++++++++++--------
3 files changed, 144 insertions(+), 99 deletions(-)

diff --git a/misc/config_tools/board_config/board_c.py b/misc/config_tools/board_config/board_c.py
index 8082c4387..4be3ea4ec 100644
--- a/misc/config_tools/board_config/board_c.py
+++ b/misc/config_tools/board_config/board_c.py
@@ -173,7 +173,7 @@ def gen_rdt_str(cache, config):
return err_dic

cdp_enable = get_cdp_enabled()
- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)
if len(cat_mask_list) > int(clos_number):
err_dic['board config: Failed to generate board.c'] = "CLOS Mask Number too bigger then the supported of L2/L3 cache"
return err_dic;
@@ -207,7 +207,7 @@ def gen_rdt_str(cache, config):

cpu_mask = 0
for processor in processor_list:
- core_id = common.get_node(f"//core[@id = '{processor}']/thread/cpu_id/text()", board_etree)
+ core_id = common.get_node(f"//thread[apic_id = '{processor}']/cpu_id/text()", board_etree)
if core_id is None:
continue
else:
@@ -240,7 +240,7 @@ def gen_clos_array(cache_list, config):
clos_number = common.get_node(f"./capability/clos_number/text()", cache)
if cache_level == "2":

- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)
array_size = len(cat_mask_list)

print("union clos_config platform_l2_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)
@@ -250,7 +250,7 @@ def gen_clos_array(cache_list, config):
print("};\n", file=config)
res_present[RDT.L2.value] += 1
elif cache_level == "3":
- cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+ cat_mask_list = get_mask_list(cache_level, cache_id)

print("union clos_config platform_l3_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)

diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index 36a4aae09..cec4b84a5 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -355,7 +355,7 @@ RDT, setting this option to ``y`` is ignored.</xs:documentation>

<xs:complexType name="CacheAllocationType">
<xs:sequence>
- <xs:element name="CACHE_ID" type="xs:integer"/>
+ <xs:element name="CACHE_ID" type="HexFormat"/>
<xs:element name="CACHE_LEVEL" type="xs:integer"/>
<xs:element name="POLICY" type="CachePolicyType" minOccurs="1" maxOccurs="unbounded"/>
</xs:sequence>
diff --git a/misc/config_tools/static_allocators/clos.py b/misc/config_tools/static_allocators/clos.py
index 1f0476422..80b59435d 100644
--- a/misc/config_tools/static_allocators/clos.py
+++ b/misc/config_tools/static_allocators/clos.py
@@ -12,114 +12,159 @@ import re
from collections import defaultdict
from itertools import combinations

-def create_clos_node(etree, vm_id, index_list):
- allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
+class Identifier:
Use `namedtuple`. No need to write 10 lines just to reinvent the wheel.
OK

Also, this class represents an identifier of what? Have you considered
CDP here as well?
The "vm_name", "vcpu" and "cache_type" together identify a policy.

The "cache_type " could be "Code" and "Data" if we enable CDP mode.


+ vm_name = ""
+ vcpu = ""
+ cache_type = ""
+
+ def __init__(self, vm_name, vcpu, cache_type):
+ self.vm_name = vm_name
+ self.vcpu = vcpu
+ self.cache_type = cache_type
+
+ def __eq__(self, other):
+ return (self.vm_name == other.vm_name) and (self.vcpu == other.vcpu) and (self.cache_type == other.cache_type)
+
+class RdtPolicy:
+
+# a dictionary to save the CLOS policy from user setting
Indent the comments to the same level as the code you are commenting on.
OK

+ policy_dict = {}
+
+# a list stored the vCPU or VM info
+ policy_owner = []
+
+# a list stored the L2 cache IDs
+ cache2_id_list = []
+
+#L2 cache have more then one section, this function find which one have been set
Typo: then -> than
Done

+ def find_cache2_id(self, mask):
+ for cache2 in self.cache2_id_list:
+ if mask[cache2] != None:
+ return cache2
+ return None
+
+ def __init__(self, policy_dict, owner):
+ self.policy_dict = policy_dict
+ self.policy_owner = [owner]
+
+#check whether the src could be merged, if yes, add the src owner to policy_owner list and return True
+ def merge_policy(self, src):
+ if self.policy_dict["l3"] == src.policy_dict["l3"]:
While dicts in Python can use arbitrary keys (even with different
types), the code is much less maintainable if you do so.

If you want to wrap RDT policies as objects, do the following:

1. Instead of using an all-in-one dict, split L2 and L3 configurations
as different fields.
OK, I will split it.

2. Add an interface that takes an XML element and updates the objects,
so that when walking through the policies you do not need to create
any more dictionaries or lists as another level of intermediate
representation.
OK, I will try to add new interface.

+ cache2_id = self.find_cache2_id(src.policy_dict)
+ if (cache2_id == None) or (self.policy_dict[cache2_id] == src.policy_dict[cache2_id]):
+ self.policy_owner.append(src.policy_owner[0])
In theory you should extend `self.policy_owner` using
`src.policy_owner`. While today `src` may have at most one owner, it
relies on how you walk through the policies, which this class had better
not make any assumptions about.
Yes, you are right, I will use a loop to append the src.policy_owner list.


+ return True
+ elif self.policy_dict[cache2_id] == None:
+ self.policy_dict[cache2_id] = src.policy_dict[cache2_id]
+ return True
+ return False
+
+#check whether a VM/vCPU could adapt this policy
+ def adapt_policy(self, policy_identifier):
"adapt" means "make sth. suitable", but this function is only a
predicate with no side effects.
Use "find" to replace it.
+ for owner in self.policy_owner:
+ if owner == policy_identifier:
+ return True
+ return False
Do you mean `return policy_identifier in self.policy_owner` by the above
four lines?
Yes.

policy_identifier is an object, so I use the Operator overloading "__eq__"
This code could be simpler after using "namedtuple".


+
+class vCatPolicy(RdtPolicy):
+ def merge_policy(self, src):
+ return False
+
+def create_clos_node(scenario_etree, vm_id, index_list):
+ allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", scenario_etree)
if allocation_vm_node is None:
- allocation_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
+ allocation_vm_node = common.append_node("/acrn-config/vm", None, scenario_etree, id = vm_id)
if common.get_node("./clos", allocation_vm_node) is None:
clos_node = common.append_node("./clos", None, allocation_vm_node)
for index in index_list:
common.append_node(f"./vcpu_clos", str(index), clos_node)

-def find_cache2_id(mask, cache2_id_list):
- for cache2 in cache2_id_list:
- if mask[cache2] != "None":
- return cache2
- return "None"
-
-def merge_policy_list(mask_list, cache2_id_list):
- index = 0
+def merge_policy_list(policy_list):
result_list = []
- for index,mask in enumerate(mask_list):
- merged = 0
+ for index,p in enumerate(policy_list):
+ merged = False
if index == 0:
Why do we need this branch? When `index` is 0, `result_list` is empty and
`merged` will always be `False` after you walk through that empty
list. Adding this specific branch does little (if any) good for
performance but hurts a lot in readability.
I will remove this branch.

- result_list.append(mask)
+ result_list.append(p)
continue
for result in result_list:
- if result["l3"] != mask["l3"]:
- continue
- else:
- cache2_id = find_cache2_id(mask, cache2_id_list)
- if cache2_id == "None" or result[cache2_id] == mask[cache2_id]:
- merged = 1
- break
- if result[cache2_id] == "None":
- merged = 1
- result[cache2_id] = mask[cache2_id]
- break
- if merged == 0:
- result_list.append(mask)
+ if result.merge_policy(p):
+ merged = True
+ break;
+ if merged == False:
Simply say `if not merged`.
OK

+ result_list.append(p)
return result_list

-def gen_all_clos_index(board_etree, scenario_etree, allocation_etree):
- policy_list = []
- allocation_list = scenario_etree.xpath(f"//POLICY")
+def gen_identifier_list(scenario_etree):
+ identifier_list = []
+ vm_list = scenario_etree.xpath("//POLICY/VM")
+ for vm in vm_list:
+ vm_name = common.get_node("./text()", vm)
+ vcpu = common.get_node("../VCPU/text()", vm)
+ cache_type = common.get_node("../TYPE/text()", vm)
+ identifier_list.append(Identifier(vm_name, vcpu, cache_type))
+ return identifier_list
+
+def vm_vcat_enable(scenario_etree, vm_name):
+ vcat_enable = common.get_node(f"//VCAT_ENABLED/text()", scenario_etree)
+ virtual_cat_support = common.get_node(f"//vm[name = '{vm_name}']/virtual_cat_support/text()", scenario_etree)
+ return (vcat_enable == "y") and (virtual_cat_support == "y")
+
+def get_policy_list(board_etree, scenario_etree, allocation_etree):
cache2_id_list = scenario_etree.xpath("//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
cache2_id_list.sort()

- for policy in allocation_list:
- cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
- cache_id = common.get_node("../CACHE_ID/text()", policy)
- vcpu = common.get_node("./VCPU/text()", policy)
- mask = common.get_node("./CLOS_MASK/text()", policy)
- tmp = (cache_level, cache_id, vcpu, mask)
- policy_list.append(tmp)
-
- vCPU_list = scenario_etree.xpath(f"//POLICY/VCPU/text()")
- l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY/CLOS_MASK")
- mask_list = []
- for vCPU in vCPU_list:
+ RdtPolicy.cache2_id_list = cache2_id_list
+ identifier_list = gen_identifier_list(scenario_etree)
+
+ result_list = []
+ for ident in identifier_list:
dict_tmp = {}
- l3_mask = l2_mask = "None"
- l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY[VCPU = '{vCPU}']/CLOS_MASK/text()")
- if len(l3_mask_list) > 0:
- l3_mask = l3_mask_list[0]
+ policy_list = scenario_etree.xpath(f"//POLICY[VM = '{ident.vm_name}' and VCPU = '{ident.vcpu}' and TYPE = '{ident.cache_type}']")
+ l3_mask = l2_mask = None
+ cache2_id = None
+ for policy in policy_list:
+ cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
+ cache_id = common.get_node("../CACHE_ID/text()", policy)
+ clos_mask = common.get_node("./CLOS_MASK/text()", policy)
+ if cache_level == "2":
+ l2_mask = clos_mask
+ cache2_id = cache_id
+ else:
+ l3_mask = clos_mask
dict_tmp["l3"] = l3_mask
-
- l2_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2]/POLICY[VCPU = '{vCPU}']/CLOS_MASK")
- if len(l2_mask_list) > 0:
- l2_mask = l2_mask_list[0].text
- cache_id = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2 and POLICY/VCPU = '{vCPU}']/CACHE_ID/text()")[0]
for cache2 in cache2_id_list:
- if cache2 == cache_id:
+ if cache2 == cache2_id:
dict_tmp[cache_id] = l2_mask
else:
- dict_tmp[cache2] = "None"
- mask_list.append(dict_tmp)
- mask_list = merge_policy_list(mask_list, cache2_id_list)
- return mask_list
-
-def get_clos_index(cache_level, cache_id, clos_mask):
- mask_list = common.get_mask_list(cache_level, cache_id)
- idx = 0
- for mask in mask_list:
- idx += 1
- if mask == clos_mask:
- break
- return idx
-def get_clos_id(mask_list, l2_id, l2_mask, l3_mask):
- for mask in mask_list:
- if mask[l2_id] == l2_mask and mask["l3"] == l3_mask:
- return mask_list.index(mask)
+ dict_tmp[cache2] = None
+ if vm_vcat_enable(scenario_etree, ident.vm_name):
+ result_list.append(vCatPolicy(dict_tmp, ident))
+ else:
+ result_list.append(RdtPolicy(dict_tmp, ident))
+ return merge_policy_list(result_list)
+
+def get_clos_id(rdt_list, mask_identifier):
+ for index,rdt in enumerate(rdt_list):
+ if rdt.adapt_policy(mask_identifier):
Please, be consistent with your names. What's the difference between
"identifier" and "owner"? If they are the same, unify the noun you use
to refer to them; otherwise, why do you compare identifiers with owners in
the `adapt_policy` member function?

I'll stop here, as this patch still needs a fundamental refactoring to
make the overall structure easy to read.
Many thanks for your review; I will refine it again.

--
Best Regards
Junjie Mao

+ return index
return 0

def alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list):
vm_node_list = scenario_etree.xpath("//vm")
for vm_node in vm_node_list:
- vmname = common.get_node("./name/text()", vm_node)
- allocation_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[POLICY/VM = '{vmname}']")
- for allocation in allocation_list:
- index_list = []
- cache_level = common.get_node("./CACHE_LEVEL/text()", allocation)
- cache_id = common.get_node("./CACHE_ID/text()", allocation)
- clos_mask_list = allocation.xpath(f".//POLICY[VM = '{vmname}']/CLOS_MASK/text()")
-
- for clos_mask in clos_mask_list:
- index = get_clos_id(mask_list, cache_id, clos_mask, "None")
+ vm_name = common.get_node("./name/text()", vm_node)
+ vcpu_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}']/VCPU/text()")
+ index_list = []
+ for vcpu in sorted(list(set(vcpu_list))):
+ type_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}' and VCPU = '{vcpu}']/TYPE/text()")
+ for cache_type in sorted(list(set(type_list))):
+ if cache_type == "Data":
+ continue
+ index = get_clos_id(mask_list, Identifier(vm_name, vcpu, cache_type))
index_list.append(index)
- create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)
+ create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)

-def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list):
+def create_mask_list_node(board_etree, scenario_etree, allocation_etree, rdt_policy_list):
allocation_hv_node = common.get_node(f"//hv", allocation_etree)
if allocation_hv_node is None:
allocation_hv_node = common.append_node(f"/acrn-config/hv", None, allocation_etree)
@@ -127,24 +172,24 @@ def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_lis
cache2_id_list.sort()
if common.get_node("./clos_mask[@id = l3]", allocation_hv_node) is None:
clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id="l3")
- for i in range(0, len(mask_list)):
- if mask_list[i]["l3"] == "None":
- value = "0xffff"
- else:
- value = str(mask_list[i]["l3"])
+ length = common.get_node(f"//cache[@level='3']/capability/capacity_mask_length/text()", board_etree)
+ value = hex((1 << int(length)) - 1)
+ for i in range(0, len(rdt_policy_list)):
+ if rdt_policy_list[i].policy_dict["l3"] is not None:
+ value = str(rdt_policy_list[i].policy_dict["l3"])
common.append_node(f"./clos", value, clos_mask)

for cache2 in cache2_id_list:
+ length = common.get_node(f"//cache[@level='2' and @id = '{cache2}']/capability/capacity_mask_length/text()", board_etree)
+ value = hex((1 << int(length)) - 1)
if common.get_node("./clos_mask[@id = '{cache2}']", allocation_hv_node) is None:
clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id=cache2)
- for i in range(0, len(mask_list)):
- if mask_list[i][cache2] == "None":
- value = "0xffff"
- else:
- value = str(mask_list[i][cache2] )
+ for i in range(0, len(rdt_policy_list)):
+ if rdt_policy_list[i].policy_dict[cache2] is not None:
+ value = str(rdt_policy_list[i].policy_dict[cache2] )
common.append_node(f"./clos", value, clos_mask)

def fn(board_etree, scenario_etree, allocation_etree):
- mask_list = gen_all_clos_index(board_etree, scenario_etree, allocation_etree)
- creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list)
- alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list)
+ policy_list = get_policy_list(board_etree, scenario_etree, allocation_etree)
+ create_mask_list_node(board_etree, scenario_etree, allocation_etree, policy_list)
+ alloc_clos_index(board_etree, scenario_etree, allocation_etree, policy_list)