[PATCH] config-tools: add progress bar for board_inspector
From 8e4d2c3be25e1936cd5af2a28ee7f45232bb39d6 Mon Sep 17 00:00:00 2001 From: zihengL1 <ziheng.li@...> Date: Thu, 11 Aug 2022 09:53:01 +0800 Subject: [PATCH] config-tools: add progress bar for board_inspector
Added a progress bar to board_inspector.py and added a timeout mechanism to the update-pciids command invocation.
Tracked-On: #7973 Signed-off-by: Ziheng Li <ziheng.li@...> --- .../board_inspector/board_inspector.py | 119 ++++++++++-------- 1 file changed, 64 insertions(+), 55 deletions(-)
diff --git a/misc/config_tools/board_inspector/board_inspector.py b/misc/config_tools/board_inspector/board_inspector.py index 7e4a406c6..4da400bea 100755 --- a/misc/config_tools/board_inspector/board_inspector.py +++ b/misc/config_tools/board_inspector/board_inspector.py @@ -12,6 +12,7 @@ import tempfile import subprocess # nosec import lxml.etree import argparse +from tqdm import tqdm from collections import namedtuple from importlib import import_module @@ -68,7 +69,7 @@ def check_deps(): try: logger.info("Updating pci.ids for latest PCI device descriptions.") res = subprocess.Popen(["update-pciids", "-q"], stderr=subprocess.DEVNULL) - if res.wait() != 0: + if res.wait(timeout=40) != 0: logger.warning(f"Failed to invoke update-pciids. No functional impact is foreseen, but descriptions of PCI devices may be inaccurate.") except Exception as e: logger.warning(f"Failed to invoke update-pciids: {e}. No functional impact is foreseen, but descriptions of PCI devices may be unavailable.") @@ -146,64 +147,71 @@ def summary_loginfo(board_xml): def main(board_name, board_xml, args): print(f"Generating board XML {board_name}. This may take a few minutes...") - # Check that the dependencies are met - check_deps() + with tqdm(total=100) as pbar: + # Check that the dependencies are met + check_deps() + pbar.update(20) - # Check if this is native os - native_check() + # Check if this is native os + native_check() + pbar.update(20) - # Check if there exists multiple PCI domains (which is not supported) - check_pci_domains() + # Check if there exists multiple PCI domains (which is not supported) + check_pci_domains() + pbar.update(20) - # Bring up all cores - bring_up_cores() + # Bring up all cores + bring_up_cores() + pbar.update(20) - try: - # First invoke the legacy board parser to create the board XML ... 
- legacy_parser = os.path.join(script_dir, "legacy", "board_parser.py") - env = { "PYTHONPATH": script_dir, "PATH": os.environ["PATH"] } - subprocess.run([sys.executable, legacy_parser, args.board_name, "--out", board_xml], check=True, env=env) - - # ... then load the created board XML and append it with additional data by invoking the extractors. - board_etree = lxml.etree.parse(board_xml) - root_node = board_etree.getroot() - - # Clear the whitespaces between adjacent children under the root node - root_node.text = None - for elem in root_node: - elem.tail = None - - # Create nodes for each kind of resource - root_node.append(lxml.etree.Element("processors")) - root_node.append(lxml.etree.Element("caches")) - root_node.append(lxml.etree.Element("memory")) - root_node.append(lxml.etree.Element("ioapics")) - root_node.append(lxml.etree.Element("devices")) - root_node.append(lxml.etree.Element("device-classes")) - - extractors_path = os.path.join(script_dir, "extractors") - extractors = [f for f in os.listdir(extractors_path) if f[:2].isdigit()] - for extractor in sorted(extractors): - module_name = os.path.splitext(extractor)[0] - module = import_module(f"extractors.{module_name}") - if args.basic and getattr(module, "advanced", False): - continue - module.extract(args, board_etree) - - # Validate the XML against XSD assertions - count = validator.validate_board(os.path.join(script_dir, 'schema', 'boardchecks.xsd'), board_etree) - if count == 0: - logger.info("All board checks passed.") - - #Format and out put the log info - summary_loginfo(board_xml) - - # Finally overwrite the output with the updated XML - board_etree.write(board_xml, pretty_print=True) - - except subprocess.CalledProcessError as e: - logger.critical(e) - sys.exit(1) + try: + # First invoke the legacy board parser to create the board XML ... 
+ legacy_parser = os.path.join(script_dir, "legacy", "board_parser.py") + env = { "PYTHONPATH": script_dir, "PATH": os.environ["PATH"] } + subprocess.run([sys.executable, legacy_parser, args.board_name, "--out", board_xml], check=True, env=env) + + # ... then load the created board XML and append it with additional data by invoking the extractors. + board_etree = lxml.etree.parse(board_xml) + root_node = board_etree.getroot() + + # Clear the whitespaces between adjacent children under the root node + root_node.text = None + for elem in root_node: + elem.tail = None + + # Create nodes for each kind of resource + root_node.append(lxml.etree.Element("processors")) + root_node.append(lxml.etree.Element("caches")) + root_node.append(lxml.etree.Element("memory")) + root_node.append(lxml.etree.Element("ioapics")) + root_node.append(lxml.etree.Element("devices")) + root_node.append(lxml.etree.Element("device-classes")) + + extractors_path = os.path.join(script_dir, "extractors") + extractors = [f for f in os.listdir(extractors_path) if f[:2].isdigit()] + for extractor in sorted(extractors): + module_name = os.path.splitext(extractor)[0] + module = import_module(f"extractors.{module_name}") + if args.basic and getattr(module, "advanced", False): + continue + module.extract(args, board_etree) + + # Validate the XML against XSD assertions + count = validator.validate_board(os.path.join(script_dir, 'schema', 'boardchecks.xsd'), board_etree) + if count == 0: + logger.info("All board checks passed.") + + #Format and out put the log info + summary_loginfo(board_xml) + + # Finally overwrite the output with the updated XML + board_etree.write(board_xml, pretty_print=True) + + except subprocess.CalledProcessError as e: + logger.critical(e) + sys.exit(1) + + pbar.update(20) if __name__ == "__main__": parser = argparse.ArgumentParser() @@ -238,3 +246,4 @@ if __name__ == "__main__": board_xml = args.out if args.out else f"{args.board_name}.xml" main(args.board_name, board_xml, args) + 
-- 2.25.1
|
|
[PATCH v2] config-tools: solve hv and vm memory address conflict
From e1b5adb295a2154a970ade09f46c62e76e2a4bbf Mon Sep 17 00:00:00 2001 From: zihengL1 <ziheng.li@...> Date: Wed, 10 Aug 2022 13:23:50 +0800 Subject: [PATCH v2] config-tools: solve hv and vm memory address conflict
Fixed the problem that ACRN could still build successfully even when the memory addresses of the hypervisor (HV) and a VM conflict, a situation which causes the hypervisor to hang at runtime.
At the same time, defined a class for memory processing that obtains and checks the available memory ranges.
The memory-range obtaining and checking functions are defined as class methods. (The original version of this patch was posted on July 29.)
Tracked-On: #7913 Signed-off-by: Ziheng Li <ziheng.li@...> --- .../static_allocators/memory_allocator.py | 109 ++++++++++-------- 1 file changed, 61 insertions(+), 48 deletions(-)
diff --git a/misc/config_tools/static_allocators/memory_allocator.py b/misc/config_tools/static_allocators/memory_allocator.py index cbfc9e138..c4d27bd82 100644 --- a/misc/config_tools/static_allocators/memory_allocator.py +++ b/misc/config_tools/static_allocators/memory_allocator.py @@ -11,46 +11,57 @@ import lib.error sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library')) import common, math, logging -def import_memory_info(board_etree): +class RamRange(): ram_range = {} - for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"): - start = int(memory_range.get("start"), base=16) - size = int(memory_range.get("size"), base=10) - ram_range[start] = size - - return ram_range - -def check_hpa(vm_node_info): - hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*") - hpa_node_list_new = [] - for hpa_node in hpa_node_list: - if int(hpa_node.text, 16) != 0: - hpa_node_list_new.append(hpa_node) - - return hpa_node_list_new - -def get_memory_info(vm_node_info): - start_hpa = [] - size_hpa = [] - hpa_info = {} - - size_node = common.get_node("./memory/size", vm_node_info) - if size_node is not None: - size_byte = int(size_node.text) * 0x100000 - hpa_info[0] = size_byte - hpa_node_list = check_hpa(vm_node_info) - if len(hpa_node_list) != 0: - for hpa_node in hpa_node_list: - if hpa_node.tag == "start_hpa": - start_hpa.append(int(hpa_node.text, 16)) - elif hpa_node.tag == "size_hpa": - size_byte = int(hpa_node.text) * 0x100000 - size_hpa.append(size_byte) - if len(start_hpa) != 0 and len(start_hpa) == len(start_hpa): - for i in range(len(start_hpa)): - hpa_info[start_hpa[i]] = size_hpa[i] - return hpa_info + @classmethod + def import_memory_info(cls, board_etree, allocation_etree): + hv_start = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_START/text()", allocation_etree), 16) + hv_size = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE/text()", allocation_etree), 16) + for memory_range in 
board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"): + start = int(memory_range.get("start"), base=16) + size = int(memory_range.get("size"), base=10) + if start < hv_start and start + size > hv_start + hv_size: + cls.ram_range[start] = hv_start - start + cls.ram_range[hv_start + hv_size] = start + size - hv_start - hv_size + else: + cls.ram_range[start] = size + + return cls.ram_range + + @classmethod + def check_hpa(cls, vm_node_info): + hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*") + hpa_node_list_new = [] + for hpa_node in hpa_node_list: + if int(hpa_node.text, base=16) != 0: + hpa_node_list_new.append(hpa_node) + + return hpa_node_list_new + + @classmethod + def get_memory_info(cls, vm_node_info): + start_hpa = [] + size_hpa = [] + hpa_info = {} + + size_node = common.get_node("./memory/size", vm_node_info) + if size_node is not None: + size_byte = int(size_node.text) * 0x100000 + hpa_info[0] = size_byte + hpa_node_list = RamRange().check_hpa(vm_node_info) + if len(hpa_node_list) != 0: + for hpa_node in hpa_node_list: + if hpa_node.tag == "start_hpa": + start_hpa.append(int(hpa_node.text, 16)) + elif hpa_node.tag == "size_hpa": + size_byte = int(hpa_node.text) * 0x100000 + size_hpa.append(size_byte) + if len(start_hpa) != 0 and len(start_hpa) == len(start_hpa): + for i in range(len(start_hpa)): + hpa_info[start_hpa[i]] = size_hpa[i] + + return hpa_info def alloc_memory(scenario_etree, ram_range_info): vm_node_list = scenario_etree.xpath("/acrn-config/vm[load_order = 'PRE_LAUNCHED_VM']") @@ -62,7 +73,7 @@ def alloc_memory(scenario_etree, ram_range_info): ram_range_info.pop(key) for vm_node in vm_node_list: - mem_info = get_memory_info(vm_node) + mem_info = RamRange().get_memory_info(vm_node) mem_info_list.append(mem_info) vm_node_index_list.append(vm_node.attrib["id"]) @@ -77,20 +88,22 @@ def alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list): mem_key = sorted(ram_range_info) for mem_start in mem_key: mem_size 
= ram_range_info[mem_start] + mem_end = mem_start + mem_size for hpa_start in hpa_key: hpa_size = mem_info_list[vm_index][hpa_start] + hpa_end = hpa_start + hpa_size if hpa_start != 0: - if mem_start < hpa_start and mem_start + mem_size > hpa_start + hpa_size: + if mem_start < hpa_start and mem_end > hpa_end: ram_range_info[mem_start] = hpa_start - mem_start - ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size - elif mem_start == hpa_start and mem_start + mem_size > hpa_start + hpa_size: + ram_range_info[hpa_end] = mem_end - hpa_end + elif mem_start == hpa_start and mem_end > hpa_end: del ram_range_info[mem_start] - ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size - elif mem_start < hpa_start and mem_start + mem_size == hpa_start + hpa_size: + ram_range_info[hpa_end] = mem_end - hpa_end + elif mem_start < hpa_start and mem_end == hpa_end: ram_range_info[mem_start] = hpa_start - mem_start - elif mem_start == hpa_start and mem_start + mem_size == hpa_start + hpa_size: + elif mem_start == hpa_start and mem_end == hpa_end: del ram_range_info[mem_start] - elif mem_start > hpa_start or mem_start + mem_size < hpa_start + hpa_size: + elif mem_start > hpa_start or mem_end < hpa_end: raise lib.error.ResourceError(f"Start address of HPA is out of available memory range: vm id: {vm_index}, hpa_start: {hpa_start}.") elif mem_size < hpa_size: raise lib.error.ResourceError(f"Size of HPA is out of available memory range: vm id: {vm_index}, hpa_size: {hpa_size}.") @@ -147,14 +160,14 @@ def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list): region_index = region_index + 1 def alloc_vm_memory(board_etree, scenario_etree, allocation_etree): - ram_range_info = import_memory_info(board_etree) + ram_range_info = RamRange().import_memory_info(board_etree, allocation_etree) ram_range_info, mem_info_list, vm_node_index_list = alloc_memory(scenario_etree, ram_range_info) write_hpa_info(allocation_etree, 
mem_info_list, vm_node_index_list) def allocate_hugepages(board_etree, scenario_etree, allocation_etree): hugepages_1gb = 0 hugepages_2mb = 0 - ram_range_info = import_memory_info(board_etree) + ram_range_info = RamRange().ram_range total_hugepages = int(sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)*0.98/(1024*1024*1024) \ - sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \ - 5 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu"))) -- 2.25.1
|
|
Re: [PATCH] config-tools: solve hv and vm memory address conflict
"Li, Ziheng" <ziheng.li@...> writes: From c849de5679200ad9bd212a119b876a3a490d0450 Mon Sep 17 00:00:00 2001 From: zihengL1 <ziheng.li@...> Date: Wed, 10 Aug 2022 13:23:50 +0800 Subject: [PATCH] config-tools: solve hv and vm memory address conflict
Fixed the problem that ACRN could still build successfully even when the memory addresses of the hypervisor (HV) and a VM conflict, which causes the hypervisor to hang. At the same time, defined a class for memory processing that obtains the available memory ranges.
Tracked-On: #7913 Signed-off-by: Ziheng Li <ziheng.li@...> --- .../static_allocators/memory_allocator.py | 44 ++++++++++++------- 1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/misc/config_tools/static_allocators/memory_allocator.py b/misc/config_tools/static_allocators/memory_allocator.py index cbfc9e138..5156f75c2 100644 --- a/misc/config_tools/static_allocators/memory_allocator.py +++ b/misc/config_tools/static_allocators/memory_allocator.py @@ -11,14 +11,23 @@ import lib.error sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library')) import common, math, logging
-def import_memory_info(board_etree): +class RamRange(): ram_range = {} - for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"): - start = int(memory_range.get("start"), base=16) - size = int(memory_range.get("size"), base=10) - ram_range[start] = size
- return ram_range + @classmethod + def import_memory_info(cls, board_etree, allocation_etree): + hv_start = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_START/text()", allocation_etree), 16) + hv_size = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE/text()", allocation_etree), 16) + for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"): + start = int(memory_range.get("start"), base=16) + size = int(memory_range.get("size"), base=10) + if start < hv_start and start + size > hv_start + hv_size: + cls.ram_range[start] = hv_start - start + cls.ram_range[hv_start + hv_size] = start + size - hv_start - hv_size + else: + cls.ram_range[start] = size + + return cls.ram_range Since you have wrap up the ram_range dict as a class (or should it be an object instead?), you can also abstract the allocation and checking functions as methods of that class to separate the low-level range-based operations from the allocation algorithms. def check_hpa(vm_node_info): hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*") @@ -77,20 +86,22 @@ def alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list): mem_key = sorted(ram_range_info) for mem_start in mem_key: mem_size = ram_range_info[mem_start] + mem_end = mem_start + mem_size for hpa_start in hpa_key: hpa_size = mem_info_list[vm_index][hpa_start] + hpa_end = hpa_start + hpa_size if hpa_start != 0: - if mem_start < hpa_start and mem_start + mem_size > hpa_start + hpa_size: + if mem_start < hpa_start and mem_end > hpa_end: ram_range_info[mem_start] = hpa_start - mem_start - ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size - elif mem_start == hpa_start and mem_start + mem_size > hpa_start + hpa_size: + ram_range_info[hpa_end] = mem_end - hpa_end + elif mem_start == hpa_start and mem_end > hpa_end: del ram_range_info[mem_start] - ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size - elif mem_start < 
hpa_start and mem_start + mem_size == hpa_start + hpa_size: + ram_range_info[hpa_end] = mem_end - hpa_end + elif mem_start < hpa_start and mem_end == hpa_end: ram_range_info[mem_start] = hpa_start - mem_start - elif mem_start == hpa_start and mem_start + mem_size == hpa_start + hpa_size: + elif mem_start == hpa_start and mem_end == hpa_end: del ram_range_info[mem_start] - elif mem_start > hpa_start or mem_start + mem_size < hpa_start + hpa_size: + elif mem_start > hpa_start or mem_end < hpa_end:
IIRC I saw similar changes in a previous patch. Is this a second version of that? raise lib.error.ResourceError(f"Start address of HPA is out of available memory range: vm id: {vm_index}, hpa_start: {hpa_start}.") elif mem_size < hpa_size: raise lib.error.ResourceError(f"Size of HPA is out of available memory range: vm id: {vm_index}, hpa_size: {hpa_size}.") @@ -147,14 +158,17 @@ def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list): region_index = region_index + 1
def alloc_vm_memory(board_etree, scenario_etree, allocation_etree): - ram_range_info = import_memory_info(board_etree) + ram_range_info = RamRange().import_memory_info(board_etree, allocation_etree) + print(RamRange().ram_range) ram_range_info, mem_info_list, vm_node_index_list = alloc_memory(scenario_etree, ram_range_info) + print(RamRange().ram_range) + print("!!!!!!!!!!!!!!!!!!!!!!!!!!") Please exclude debug logs from your patch. -- Best Regards Junjie Mao write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list)
def allocate_hugepages(board_etree, scenario_etree, allocation_etree): hugepages_1gb = 0 hugepages_2mb = 0 - ram_range_info = import_memory_info(board_etree) + ram_range_info = RamRange().ram_range total_hugepages = int(sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)*0.98/(1024*1024*1024) \ - sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \ - 5 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu"))) -- 2.25.1
|
|
[PATCH v4 2/2] dm: vdisplay: multi-vdisplay support.
From: Sun Peng <peng.p.sun@...>
Allow one VM to have more than one virtual display for output. For now, the maximum number of virtual displays is 2, so a guest VM can use dual displays in mirror or extended-desktop mode. To specify multiple virtual displays, use acrn-dm parameters like this: For fullscreen mode:
virtio-gpu,geometry=fullscreen:monitor_id1,geometry=fullscreen:monitor_id2
For window mode:
virtio-gpu,geometry=<width>x<height>+<x_off>+<y_off>,geometry=<width>x<height>+<x_off>+<y_off>
v1->v2: add pscreen_id outputs for easier debugging.
Signed-off-by: Sun Peng <peng.p.sun@...> Reviewed-by: Zhao Yakui <yakui.zhao@...> --- devicemodel/hw/vdisplay_sdl.c | 69 +++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 27 deletions(-)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 15eeab8f1..a0bf60075 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -32,7 +32,7 @@ #define VDPY_MIN_WIDTH 640 #define VDPY_MIN_HEIGHT 480 #define transto_10bits(color) (uint16_t)(color * 1024 + 0.5) -#define VSCREEN_MAX_NUM 1 +#define VSCREEN_MAX_NUM 2 static unsigned char default_raw_argb[VDPY_DEFAULT_WIDTH * VDPY_DEFAULT_HEIGHT * 4]; @@ -1312,7 +1312,7 @@ gfx_ui_deinit() int vdpy_parse_cmd_option(const char *opts) { - char *str; + char *str, *stropts, *tmp; int snum, error; struct vscreen *vscr; @@ -1320,35 +1320,50 @@ int vdpy_parse_cmd_option(const char *opts) vdpy.vscrs = calloc(VSCREEN_MAX_NUM, sizeof(struct vscreen)); vdpy.vscrs_num = 0; - str = strcasestr(opts, "geometry="); - vscr = vdpy.vscrs + vdpy.vscrs_num; - if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vscr->pscreen_id); - if (snum != 1) { + stropts = strdup(opts); + while ((str = strsep(&stropts, ",")) != NULL) { + vscr = vdpy.vscrs + vdpy.vscrs_num; + tmp = strcasestr(str, "geometry="); + if (str && strcasestr(str, "geometry=fullscreen")) { + snum = sscanf(tmp, "geometry=fullscreen:%d", &vscr->pscreen_id); + if (snum != 1) { + vscr->pscreen_id = 0; + } + vscr->width = VDPY_MAX_WIDTH; + vscr->height = VDPY_MAX_HEIGHT; + vscr->is_fullscreen = true; + pr_info("virtual display: fullscreen on monitor %d.\n", + vscr->pscreen_id); + vscr->info.xoff = vscr->org_x; + vscr->info.yoff = vscr->org_y; + vscr->info.width = vscr->width; + vscr->info.height = vscr->height; + vdpy.vscrs_num++; + } else if (str && strcasestr(str, "geometry=")) { + snum = sscanf(tmp, "geometry=%dx%d+%d+%d", + &vscr->width, &vscr->height, + &vscr->org_x, &vscr->org_y); + if (snum != 4) { + pr_err("incorrect geometry option. 
Should be" + " WxH+x+y\n"); + error = -1; + } + vscr->is_fullscreen = false; vscr->pscreen_id = 0; + pr_info("virtual display: windowed on monitor %d.\n", + vscr->pscreen_id); + vscr->info.xoff = vscr->org_x; + vscr->info.yoff = vscr->org_y; + vscr->info.width = vscr->width; + vscr->info.height = vscr->height; + vdpy.vscrs_num++; } - vscr->width = VDPY_MAX_WIDTH; - vscr->height = VDPY_MAX_HEIGHT; - vscr->is_fullscreen = true; - vdpy.vscrs_num++; - pr_info("virtual display: fullscreen.\n"); - } else if (opts && strcasestr(opts, "geometry=")) { - snum = sscanf(str, "geometry=%dx%d+%d+%d", - &vscr->width, &vscr->height, - &vscr->org_x, &vscr->org_y); - if (snum != 4) { - pr_err("incorrect geometry option. Should be" - " WxH+x+y\n"); - error = -1; + if (vdpy.vscrs_num > VSCREEN_MAX_NUM) { + pr_err("%d virtual displays are too many that acrn-dm can't support!\n"); + break; } - vscr->is_fullscreen = false; - vdpy.vscrs_num++; - pr_info("virtual display: windowed.\n"); } + free(stropts); - vscr->info.xoff = 0; - vscr->info.yoff = 0; - vscr->info.width = vdpy.vscrs->width; - vscr->info.height = vdpy.vscrs->height; return error; } -- 2.25.1
|
|
[PATCH v4 1/2] dm: vdisplay: refine vdisplay core concept abstractions
From: Sun Peng <peng.p.sun@...>
Add a new concept, "vscreen", to abstract all per-screen state in a display server. This makes it easier to add more screens for one VM.
v1->v2: remove vscr NULL checks. v2->v3: restore vscreen geometry calibration to the previous version; wrap geometry calibration and window/renderer creation into functions.
Signed-off-by: Sun Peng <peng.p.sun@...> --- devicemodel/hw/vdisplay_sdl.c | 480 +++++++++++++++++++++------------- 1 file changed, 296 insertions(+), 184 deletions(-)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 403047a3c..15eeab8f1 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -32,6 +32,7 @@ #define VDPY_MIN_WIDTH 640 #define VDPY_MIN_HEIGHT 480 #define transto_10bits(color) (uint16_t)(color * 1024 + 0.5) +#define VSCREEN_MAX_NUM 1 static unsigned char default_raw_argb[VDPY_DEFAULT_WIDTH * VDPY_DEFAULT_HEIGHT * 4]; @@ -51,27 +52,39 @@ struct egl_display_ops { PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES; }; -static struct display { +struct vscreen { + struct display_info info; int pscreen_id; SDL_Rect pscreen_rect; - struct display_info info; - struct state s; - SDL_Texture *dpy_texture; - SDL_Window *dpy_win; - SDL_Renderer *dpy_renderer; - pixman_image_t *dpy_img; - pthread_t tid; - int width, height; // Width/height of dpy_win - int org_x, org_y; - int guest_width, guest_height; + bool is_fullscreen; + int org_x; + int org_y; + int width; + int height; + int guest_width; + int guest_height; struct surface surf; struct cursor cur; - SDL_Texture *cursor_tex; + SDL_Texture *surf_tex; + SDL_Texture *cur_tex; + int surf_updates; + int cur_updates; + SDL_Window *win; + SDL_Renderer *renderer; + pixman_image_t *img; + EGLImage egl_img; + /* Record the update_time that is activated from guest_vm */ + struct timespec last_time; +}; + +static struct display { + struct state s; + struct vscreen *vscrs; + int vscrs_num; + pthread_t tid; /* Add one UI_timer(33ms) to render the buffers from guest_vm */ struct acrn_timer ui_timer; struct vdpy_display_bh ui_timer_bh; - /* Record the update_time that is activated from guest_vm */ - struct timespec last_time; // protect the request_list pthread_mutex_t vdisplay_mutex; // receive the signal that request is submitted @@ -82,14 +95,11 @@ static struct display { SDL_GLContext eglContext; EGLDisplay eglDisplay; struct egl_display_ops gl_ops; - EGLImage cur_egl_img; } vdpy = { .s.is_ui_realized = false, 
.s.is_active = false, .s.is_wayland = false, .s.is_x11 = false, - .s.is_fullscreen = false, - .s.updates = 0, .s.n_connect = 0 }; @@ -514,10 +524,16 @@ void vdpy_get_edid(int handle, int scanout_id, uint8_t *edid, size_t size) { struct edid_info edid_info; + struct vscreen *vscr; + + if (scanout_id >= vdpy.vscrs_num) + return; + + vscr = vdpy.vscrs + scanout_id; if (handle == vdpy.s.n_connect) { - edid_info.prefx = vdpy.info.width; - edid_info.prefy = vdpy.info.height; + edid_info.prefx = vscr->info.width; + edid_info.prefy = vscr->info.height; edid_info.maxx = VDPY_MAX_WIDTH; edid_info.maxy = VDPY_MAX_HEIGHT; } else { @@ -537,11 +553,18 @@ vdpy_get_edid(int handle, int scanout_id, uint8_t *edid, size_t size) void vdpy_get_display_info(int handle, int scanout_id, struct display_info *info) { + struct vscreen *vscr; + + if (scanout_id >= vdpy.vscrs_num) + return; + + vscr = vdpy.vscrs + scanout_id; + if (handle == vdpy.s.n_connect) { - info->xoff = vdpy.info.xoff; - info->yoff = vdpy.info.yoff; - info->width = vdpy.info.width; - info->height = vdpy.info.height; + info->xoff = vscr->info.xoff; + info->yoff = vscr->info.yoff; + info->width = vscr->info.width; + info->height = vscr->info.height; } else { info->xoff = 0; info->yoff = 0; @@ -554,6 +577,8 @@ static void sdl_gl_display_init(void) { struct egl_display_ops *gl_ops = &vdpy.gl_ops; + struct vscreen *vscr; + int i; /* obtain the eglDisplay/eglContext */ vdpy.eglDisplay = eglGetCurrentDisplay(); @@ -570,7 +595,11 @@ sdl_gl_display_init(void) gl_ops->glEGLImageTargetTexture2DOES = (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) eglGetProcAddress("glEGLImageTargetTexture2DOES"); - vdpy.cur_egl_img = EGL_NO_IMAGE_KHR; + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + vscr->egl_img = EGL_NO_IMAGE_KHR; + } + if ((gl_ops->eglCreateImageKHR == NULL) || (gl_ops->eglDestroyImageKHR == NULL) || (gl_ops->glEGLImageTargetTexture2DOES == NULL)) { @@ -588,6 +617,7 @@ vdpy_surface_set(int handle, int scanout_id, struct 
surface *surf) pixman_image_t *src_img; int format; int access, i; + struct vscreen *vscr; if (handle != vdpy.s.n_connect) { return; @@ -599,9 +629,15 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + + vscr = vdpy.vscrs + scanout_id; + if (surf == NULL ) { - vdpy.surf.width = 0; - vdpy.surf.height = 0; + vscr->surf.width = 0; + vscr->surf.height = 0; /* Need to use the default 640x480 for the SDL_Texture */ src_img = pixman_image_create_bits(PIXMAN_a8r8g8b8, VDPY_MIN_WIDTH, VDPY_MIN_HEIGHT, @@ -611,8 +647,8 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pr_err("failed to create pixman_image\n"); return; } - vdpy.guest_width = VDPY_MIN_WIDTH; - vdpy.guest_height = VDPY_MIN_HEIGHT; + vscr->guest_width = VDPY_MIN_WIDTH; + vscr->guest_height = VDPY_MIN_HEIGHT; } else if (surf->surf_type == SURFACE_PIXMAN) { src_img = pixman_image_create_bits(surf->surf_format, surf->width, surf->height, surf->pixel, @@ -621,21 +657,21 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pr_err("failed to create pixman_image\n"); return; } - vdpy.surf = *surf; - vdpy.guest_width = surf->width; - vdpy.guest_height = surf->height; + vscr->surf = *surf; + vscr->guest_width = surf->width; + vscr->guest_height = surf->height; } else if (surf->surf_type == SURFACE_DMABUF) { src_img = NULL; - vdpy.surf = *surf; - vdpy.guest_width = surf->width; - vdpy.guest_height = surf->height; + vscr->surf = *surf; + vscr->guest_width = surf->width; + vscr->guest_height = surf->height; } else { /* Unsupported type */ return; } - if (vdpy.dpy_texture) { - SDL_DestroyTexture(vdpy.dpy_texture); + if (vscr->surf_tex) { + SDL_DestroyTexture(vscr->surf_tex); } if (surf && (surf->surf_type == SURFACE_DMABUF)) { access = SDL_TEXTUREACCESS_STATIC; @@ -666,23 +702,23 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pixman_image_get_format(src_img)); } } - vdpy.dpy_texture = 
SDL_CreateTexture(vdpy.dpy_renderer, + vscr->surf_tex = SDL_CreateTexture(vscr->renderer, format, access, - vdpy.guest_width, vdpy.guest_height); + vscr->guest_width, vscr->guest_height); - if (vdpy.dpy_texture == NULL) { + if (vscr->surf_tex == NULL) { pr_err("Failed to create SDL_texture for surface.\n"); } /* For the surf_switch, it will be updated in surface_update */ if (!surf) { - SDL_UpdateTexture(vdpy.dpy_texture, NULL, + SDL_UpdateTexture(vscr->surf_tex, NULL, pixman_image_get_data(src_img), pixman_image_get_stride(src_img)); - SDL_RenderClear(vdpy.dpy_renderer); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.dpy_texture, NULL, NULL); - SDL_RenderPresent(vdpy.dpy_renderer); + SDL_RenderClear(vscr->renderer); + SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); + SDL_RenderPresent(vscr->renderer); } else if (surf->surf_type == SURFACE_DMABUF) { EGLImageKHR egl_img = EGL_NO_IMAGE_KHR; EGLint attrs[64]; @@ -713,46 +749,54 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) return; } - SDL_GL_BindTexture(vdpy.dpy_texture, NULL, NULL); + SDL_GL_BindTexture(vscr->surf_tex, NULL, NULL); gl_ops->glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_img); - if (vdpy.cur_egl_img != EGL_NO_IMAGE_KHR) + if (vscr->egl_img != EGL_NO_IMAGE_KHR) gl_ops->eglDestroyImageKHR(vdpy.eglDisplay, - vdpy.cur_egl_img); + vscr->egl_img); /* In theory the created egl_img can be released after it is bound * to texture. 
* Now it is released next time so that it is controlled correctly */ - vdpy.cur_egl_img = egl_img; + vscr->egl_img = egl_img; } - if (vdpy.dpy_img) - pixman_image_unref(vdpy.dpy_img); + if (vscr->img) + pixman_image_unref(vscr->img); if (surf == NULL) { - SDL_SetWindowTitle(vdpy.dpy_win, + SDL_SetWindowTitle(vscr->win, "Not activate display yet!"); } else { - SDL_SetWindowTitle(vdpy.dpy_win, + SDL_SetWindowTitle(vscr->win, "ACRN Virtual Monitor"); } /* Replace the cur_img with the created_img */ - vdpy.dpy_img = src_img; + vscr->img = src_img; } void -vdpy_cursor_position_transformation(struct display *vdpy, SDL_Rect *rect) +vdpy_cursor_position_transformation(struct display *vdpy, int scanout_id, SDL_Rect *rect) { - rect->x = (vdpy->cur.x * vdpy->width) / vdpy->guest_width; - rect->y = (vdpy->cur.y * vdpy->height) / vdpy->guest_height; - rect->w = (vdpy->cur.width * vdpy->width) / vdpy->guest_width; - rect->h = (vdpy->cur.height * vdpy->height) / vdpy->guest_height; + struct vscreen *vscr; + + if (scanout_id >= vdpy->vscrs_num) { + return; + } + + vscr = vdpy->vscrs + scanout_id; + rect->x = (vscr->cur.x * vscr->width) / vscr->width; + rect->y = (vscr->cur.y * vscr->height) / vscr->height; + rect->w = (vscr->cur.width * vscr->width) / vscr->width; + rect->h = (vscr->cur.height * vscr->height) / vscr->height; } void vdpy_surface_update(int handle, int scanout_id, struct surface *surf) { SDL_Rect cursor_rect; + struct vscreen *vscr; if (handle != vdpy.s.n_connect) { return; @@ -769,32 +813,39 @@ vdpy_surface_update(int handle, int scanout_id, struct surface *surf) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + + vscr = vdpy.vscrs + scanout_id; if (surf->surf_type == SURFACE_PIXMAN) - SDL_UpdateTexture(vdpy.dpy_texture, NULL, + SDL_UpdateTexture(vscr->surf_tex, NULL, surf->pixel, surf->stride); - SDL_RenderClear(vdpy.dpy_renderer); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.dpy_texture, NULL, NULL); + SDL_RenderClear(vscr->renderer); + 
SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); /* This should be handled after rendering the surface_texture. * Otherwise it will be hidden */ - if (vdpy.cursor_tex) { - vdpy_cursor_position_transformation(&vdpy, &cursor_rect); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.cursor_tex, + if (vscr->cur_tex) { + vdpy_cursor_position_transformation(&vdpy, scanout_id, &cursor_rect); + SDL_RenderCopy(vscr->renderer, vscr->cur_tex, NULL, &cursor_rect); } - SDL_RenderPresent(vdpy.dpy_renderer); + SDL_RenderPresent(vscr->renderer); /* update the rendering time */ - clock_gettime(CLOCK_MONOTONIC, &vdpy.last_time); + clock_gettime(CLOCK_MONOTONIC, &vscr->last_time); } void vdpy_cursor_define(int handle, int scanout_id, struct cursor *cur) { + struct vscreen *vscr; + if (handle != vdpy.s.n_connect) { return; } @@ -805,39 +856,52 @@ vdpy_cursor_define(int handle, int scanout_id, struct cursor *cur) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + if (cur->data == NULL) return; - if (vdpy.cursor_tex) - SDL_DestroyTexture(vdpy.cursor_tex); + vscr = vdpy.vscrs + scanout_id; - vdpy.cursor_tex = SDL_CreateTexture( - vdpy.dpy_renderer, + if (vscr->cur_tex) + SDL_DestroyTexture(vscr->cur_tex); + + vscr->cur_tex = SDL_CreateTexture( + vscr->renderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, cur->width, cur->height); - if (vdpy.cursor_tex == NULL) { + if (vscr->cur_tex == NULL) { pr_err("Failed to create sdl_cursor surface for %p.\n", cur); return; } - SDL_SetTextureBlendMode(vdpy.cursor_tex, SDL_BLENDMODE_BLEND); - vdpy.cur = *cur; - SDL_UpdateTexture(vdpy.cursor_tex, NULL, cur->data, cur->width * 4); + SDL_SetTextureBlendMode(vscr->cur_tex, SDL_BLENDMODE_BLEND); + vscr->cur = *cur; + SDL_UpdateTexture(vscr->cur_tex, NULL, cur->data, cur->width * 4); } void vdpy_cursor_move(int handle, int scanout_id, uint32_t x, uint32_t y) { + struct vscreen *vscr; + if (handle != vdpy.s.n_connect) { return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + 
+ vscr = vdpy.vscrs + scanout_id; /* Only move the position of the cursor. The cursor_texture * will be handled in surface_update */ - vdpy.cur.x = x; - vdpy.cur.y = y; + vscr->cur.x = x; + vscr->cur.y = y; } static void @@ -847,35 +911,41 @@ vdpy_sdl_ui_refresh(void *data) struct timespec cur_time; uint64_t elapsed_time; SDL_Rect cursor_rect; + struct vscreen *vscr; + int i; ui_vdpy = (struct display *)data; - /* Skip it if no surface needs to be rendered */ - if (ui_vdpy->dpy_texture == NULL) - return; + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = ui_vdpy->vscrs + i; - clock_gettime(CLOCK_MONOTONIC, &cur_time); + /* Skip it if no surface needs to be rendered */ + if (vscr->surf_tex == NULL) + continue; - elapsed_time = (cur_time.tv_sec - ui_vdpy->last_time.tv_sec) * 1000000000 + - cur_time.tv_nsec - ui_vdpy->last_time.tv_nsec; + clock_gettime(CLOCK_MONOTONIC, &cur_time); - /* the time interval is less than 10ms. Skip it */ - if (elapsed_time < 10000000) - return; + elapsed_time = (cur_time.tv_sec - vscr->last_time.tv_sec) * 1000000000 + + cur_time.tv_nsec - vscr->last_time.tv_nsec; - SDL_RenderClear(ui_vdpy->dpy_renderer); - SDL_RenderCopy(ui_vdpy->dpy_renderer, ui_vdpy->dpy_texture, NULL, NULL); + /* the time interval is less than 10ms. Skip it */ + if (elapsed_time < 10000000) + return; - /* This should be handled after rendering the surface_texture. - * Otherwise it will be hidden - */ - if (ui_vdpy->cursor_tex) { - vdpy_cursor_position_transformation(ui_vdpy, &cursor_rect); - SDL_RenderCopy(ui_vdpy->dpy_renderer, ui_vdpy->cursor_tex, - NULL, &cursor_rect); - } + SDL_RenderClear(vscr->renderer); + SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); - SDL_RenderPresent(ui_vdpy->dpy_renderer); + /* This should be handled after rendering the surface_texture. 
+ * Otherwise it will be hidden + */ + if (vscr->cur_tex) { + vdpy_cursor_position_transformation(ui_vdpy, i, &cursor_rect); + SDL_RenderCopy(vscr->renderer, vscr->cur_tex, + NULL, &cursor_rect); + } + + SDL_RenderPresent(vscr->renderer); + } } static void @@ -903,60 +973,84 @@ vdpy_sdl_ui_timer(void *data, uint64_t nexp) pthread_mutex_unlock(&ui_vdpy->vdisplay_mutex); } -static void * -vdpy_sdl_display_thread(void *data) +void +vdpy_calibrate_vscreen_geometry(struct vscreen *vscr) { - uint32_t win_flags; - struct vdpy_display_bh *bh; - struct itimerspec ui_timer_spec; - - if (vdpy.width && vdpy.height) { + if (vscr->width && vscr->height) { /* clip the region between (640x480) and (1920x1080) */ - if (vdpy.width < VDPY_MIN_WIDTH) - vdpy.width = VDPY_MIN_WIDTH; - if (vdpy.width > VDPY_MAX_WIDTH) - vdpy.width = VDPY_MAX_WIDTH; - if (vdpy.height < VDPY_MIN_HEIGHT) - vdpy.height = VDPY_MIN_HEIGHT; - if (vdpy.height > VDPY_MAX_HEIGHT) - vdpy.height = VDPY_MAX_HEIGHT; + if (vscr->width < VDPY_MIN_WIDTH) + vscr->width = VDPY_MIN_WIDTH; + if (vscr->width > VDPY_MAX_WIDTH) + vscr->width = VDPY_MAX_WIDTH; + if (vscr->height < VDPY_MIN_HEIGHT) + vscr->height = VDPY_MIN_HEIGHT; + if (vscr->height > VDPY_MAX_HEIGHT) + vscr->height = VDPY_MAX_HEIGHT; } else { /* the default window(1280x720) is created with undefined pos * when no geometry info is passed */ - vdpy.org_x = 0xFFFF; - vdpy.org_y = 0xFFFF; - vdpy.width = VDPY_DEFAULT_WIDTH; - vdpy.height = VDPY_DEFAULT_HEIGHT; + vscr->org_x = 0xFFFF; + vscr->org_y = 0xFFFF; + vscr->width = VDPY_DEFAULT_WIDTH; + vscr->height = VDPY_DEFAULT_HEIGHT; } +} + +int +vdpy_create_vscreen_window(struct vscreen *vscr) +{ + uint32_t win_flags; win_flags = SDL_WINDOW_OPENGL | - SDL_WINDOW_ALWAYS_ON_TOP | - SDL_WINDOW_SHOWN; - if (vdpy.s.is_fullscreen) { + SDL_WINDOW_ALWAYS_ON_TOP | + SDL_WINDOW_SHOWN; + if (vscr->is_fullscreen) { win_flags |= SDL_WINDOW_FULLSCREEN_DESKTOP; - vdpy.org_x = vdpy.pscreen_rect.x; - vdpy.org_y = vdpy.pscreen_rect.y; + 
vscr->org_x = vscr->pscreen_rect.x; + vscr->org_y = vscr->pscreen_rect.y; } - vdpy.dpy_win = NULL; - vdpy.dpy_renderer = NULL; - vdpy.dpy_img = NULL; + vscr->win = NULL; + vscr->renderer = NULL; + vscr->img = NULL; // Zoom to width and height of pscreen is fullscreen enabled - vdpy.dpy_win = SDL_CreateWindow("ACRN_DM", - vdpy.org_x, vdpy.org_y, - vdpy.width, vdpy.height, - win_flags); - if (vdpy.dpy_win == NULL) { + vscr->win = SDL_CreateWindow("ACRN_DM", + vscr->org_x, vscr->org_y, + vscr->width, vscr->height, + win_flags); + if (vscr->win == NULL) { pr_err("Failed to Create SDL_Window\n"); - goto sdl_fail; + return -1; } - pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vdpy.pscreen_id, - vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height); + pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vscr->pscreen_id, + vscr->org_x, vscr->org_y, vscr->width, vscr->height); - vdpy.dpy_renderer = SDL_CreateRenderer(vdpy.dpy_win, -1, 0); - if (vdpy.dpy_renderer == NULL) { + vscr->renderer = SDL_CreateRenderer(vscr->win, -1, 0); + if (vscr->renderer == NULL) { pr_err("Failed to Create GL_Renderer \n"); - goto sdl_fail; + return -1; + } + + return 0; +} + +static void * +vdpy_sdl_display_thread(void *data) +{ + struct vdpy_display_bh *bh; + struct itimerspec ui_timer_spec; + + struct vscreen *vscr; + int i; + + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + + vdpy_calibrate_vscreen_geometry(vscr); + if (vdpy_create_vscreen_window(vscr)) { + goto sdl_fail; + } + clock_gettime(CLOCK_MONOTONIC, &vscr->last_time); } sdl_gl_display_init(); pthread_mutex_init(&vdpy.vdisplay_mutex, NULL); @@ -966,7 +1060,6 @@ vdpy_sdl_display_thread(void *data) vdpy.ui_timer_bh.task_cb = vdpy_sdl_ui_refresh; vdpy.ui_timer_bh.data = &vdpy; - clock_gettime(CLOCK_MONOTONIC, &vdpy.last_time); vdpy.ui_timer.clockid = CLOCK_MONOTONIC; acrn_timer_init(&vdpy.ui_timer, vdpy_sdl_ui_timer, &vdpy); ui_timer_spec.it_interval.tv_sec = 0; @@ -1014,34 +1107,40 @@ 
vdpy_sdl_display_thread(void *data) /* SDL display_thread will exit because of DM request */ pthread_mutex_destroy(&vdpy.vdisplay_mutex); pthread_cond_destroy(&vdpy.vdisplay_signal); - if (vdpy.dpy_img) { - pixman_image_unref(vdpy.dpy_img); - vdpy.dpy_img = NULL; - } - /* Continue to thread cleanup */ - - if (vdpy.dpy_texture) { - SDL_DestroyTexture(vdpy.dpy_texture); - vdpy.dpy_texture = NULL; - } - if (vdpy.cursor_tex) { - SDL_DestroyTexture(vdpy.cursor_tex); - vdpy.cursor_tex = NULL; - } - if (vdpy.egl_dmabuf_supported && (vdpy.cur_egl_img != EGL_NO_IMAGE_KHR)) - vdpy.gl_ops.eglDestroyImageKHR(vdpy.eglDisplay, - vdpy.cur_egl_img); + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->img) { + pixman_image_unref(vscr->img); + vscr->img = NULL; + } + /* Continue to thread cleanup */ -sdl_fail: + if (vscr->surf_tex) { + SDL_DestroyTexture(vscr->surf_tex); + vscr->surf_tex = NULL; + } + if (vscr->cur_tex) { + SDL_DestroyTexture(vscr->cur_tex); + vscr->cur_tex = NULL; + } - if (vdpy.dpy_renderer) { - SDL_DestroyRenderer(vdpy.dpy_renderer); - vdpy.dpy_renderer = NULL; + if (vdpy.egl_dmabuf_supported && (vscr->egl_img != EGL_NO_IMAGE_KHR)) + vdpy.gl_ops.eglDestroyImageKHR(vdpy.eglDisplay, + vscr->egl_img); } - if (vdpy.dpy_win) { - SDL_DestroyWindow(vdpy.dpy_win); - vdpy.dpy_win = NULL; + +sdl_fail: + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->renderer) { + SDL_DestroyRenderer(vscr->renderer); + vscr->renderer = NULL; + } + if (vscr->win) { + SDL_DestroyWindow(vscr->win); + vscr->win = NULL; + } } /* This is used to workaround the TLS issue of libEGL + libGLdispatch @@ -1108,7 +1207,7 @@ vdpy_init(int *supported_wins) vdpy.s.n_connect++; if (supported_wins) - *supported_wins = 1; + *supported_wins = vdpy.vscrs_num; return vdpy.s.n_connect; } @@ -1142,6 +1241,8 @@ gfx_ui_init() { SDL_SysWMinfo info; int num_pscreen; + struct vscreen *vscr; + int i; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); 
setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1154,21 +1255,25 @@ gfx_ui_init() } num_pscreen = SDL_GetNumVideoDisplays(); - if (vdpy.pscreen_id >= num_pscreen) { - pr_err("Monitor id %d is out of avalble range [0~%d].\n", - vdpy.pscreen_id, num_pscreen); - SDL_Quit(); - return -1; - } - SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->pscreen_id >= num_pscreen) { + pr_err("Monitor id %d is out of avalble range [0~%d].\n", + vscr->pscreen_id, num_pscreen); + SDL_Quit(); + return -1; + } + + SDL_GetDisplayBounds(vscr->pscreen_id, &vscr->pscreen_rect); - if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || - vdpy.pscreen_rect.h < VDPY_MIN_HEIGHT) { - pr_err("Too small resolutions. Please check the " - " graphics system\n"); - SDL_Quit(); - return -1; + if (vscr->pscreen_rect.w < VDPY_MIN_WIDTH || + vscr->pscreen_rect.h < VDPY_MIN_HEIGHT) { + pr_err("Too small resolutions. Please check the " + " graphics system\n"); + SDL_Quit(); + return -1; + } } SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1"); @@ -1200,6 +1305,7 @@ gfx_ui_deinit() return; } + free(vdpy.vscrs); SDL_Quit(); pr_info("SDL_Quit\r\n"); } @@ -1208,35 +1314,41 @@ int vdpy_parse_cmd_option(const char *opts) { char *str; int snum, error; + struct vscreen *vscr; error = 0; + vdpy.vscrs = calloc(VSCREEN_MAX_NUM, sizeof(struct vscreen)); + vdpy.vscrs_num = 0; str = strcasestr(opts, "geometry="); + vscr = vdpy.vscrs + vdpy.vscrs_num; if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.pscreen_id); + snum = sscanf(str, "geometry=fullscreen:%d", &vscr->pscreen_id); if (snum != 1) { - vdpy.pscreen_id = 0; + vscr->pscreen_id = 0; } - vdpy.width = VDPY_MAX_WIDTH; - vdpy.height = VDPY_MAX_HEIGHT; - vdpy.s.is_fullscreen = true; + vscr->width = VDPY_MAX_WIDTH; + vscr->height = VDPY_MAX_HEIGHT; + vscr->is_fullscreen = true; + vdpy.vscrs_num++; pr_info("virtual display: fullscreen.\n"); } else if 
(opts && strcasestr(opts, "geometry=")) { snum = sscanf(str, "geometry=%dx%d+%d+%d", - &vdpy.width, &vdpy.height, - &vdpy.org_x, &vdpy.org_y); + &vscr->width, &vscr->height, + &vscr->org_x, &vscr->org_y); if (snum != 4) { pr_err("incorrect geometry option. Should be" " WxH+x+y\n"); error = -1; } - vdpy.s.is_fullscreen = false; + vscr->is_fullscreen = false; + vdpy.vscrs_num++; pr_info("virtual display: windowed.\n"); } - vdpy.info.xoff = 0; - vdpy.info.yoff = 0; - vdpy.info.width = vdpy.width; - vdpy.info.height = vdpy.height; + vscr->info.xoff = 0; + vscr->info.yoff = 0; + vscr->info.width = vdpy.vscrs->width; + vscr->info.height = vdpy.vscrs->height; return error; } -- 2.25.1
|
|
[PATCH v4 2/2] dm: vdisplay: Add physical monitor id check.
From: Sun Peng <peng.p.sun@...>
vdisplay uses the physical monitor id (pscreen index) to locate the monitor. The max index value is always the physical monitor number - 1. For example, when 4 physical monitors are connected, the monitor ids should be 0, 1, 2 and 3. We need to check the monitor id that the user inputs and make sure it is in the correct range.
Signed-off-by: Sun Peng <peng.p.sun@...> Reviewed-by: Zhao Yakui <yakui.zhao@...> --- devicemodel/hw/vdisplay_sdl.c | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index cea26d3ee..403047a3c 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -1141,6 +1141,7 @@ int gfx_ui_init() { SDL_SysWMinfo info; + int num_pscreen; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1152,6 +1153,14 @@ gfx_ui_init() return -1; } + num_pscreen = SDL_GetNumVideoDisplays(); + if (vdpy.pscreen_id >= num_pscreen) { + pr_err("Monitor id %d is out of avalble range [0~%d].\n", + vdpy.pscreen_id, num_pscreen); + SDL_Quit(); + return -1; + } + SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || -- 2.25.1
|
|
[PATCH v4 1/2] dm: vdisplay: multi-local-monitor support.
From: Sun Peng <peng.p.sun@...>
To support full screen mode on one of the multiple local monitors connected to the SOS, selected by a monitor ID that the customer specifies. The monitor ID is specified in acrn-dm's parameter like this:
virtio-gpu,geometry=fullscreen:monitor_id
For window mode, the vdisplay window will always be shown on monitor 0, because the customer can drag the window to any monitor. Besides, the customer can set the monitor position via the x_off and y_off parameters like this:
virtio-gpu,geometry=<width>x<height>+<x_off>+<y_off>
v1->v2: Add note about zoom to pscreen width & height in fullscreen.
Signed-off-by: Sun Peng <peng.p.sun@...> --- devicemodel/hw/vdisplay_sdl.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 65fd78d93..cea26d3ee 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -52,6 +52,8 @@ struct egl_display_ops { }; static struct display { + int pscreen_id; + SDL_Rect pscreen_rect; struct display_info info; struct state s; SDL_Texture *dpy_texture; @@ -62,7 +64,6 @@ static struct display { int width, height; // Width/height of dpy_win int org_x, org_y; int guest_width, guest_height; - int screen; struct surface surf; struct cursor cur; SDL_Texture *cursor_tex; @@ -933,11 +934,14 @@ vdpy_sdl_display_thread(void *data) SDL_WINDOW_ALWAYS_ON_TOP | SDL_WINDOW_SHOWN; if (vdpy.s.is_fullscreen) { - win_flags |= SDL_WINDOW_FULLSCREEN; + win_flags |= SDL_WINDOW_FULLSCREEN_DESKTOP; + vdpy.org_x = vdpy.pscreen_rect.x; + vdpy.org_y = vdpy.pscreen_rect.y; } vdpy.dpy_win = NULL; vdpy.dpy_renderer = NULL; vdpy.dpy_img = NULL; + // Zoom to width and height of pscreen is fullscreen enabled vdpy.dpy_win = SDL_CreateWindow("ACRN_DM", vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height, @@ -946,6 +950,9 @@ vdpy_sdl_display_thread(void *data) pr_err("Failed to Create SDL_Window\n"); goto sdl_fail; } + pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vdpy.pscreen_id, + vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height); + vdpy.dpy_renderer = SDL_CreateRenderer(vdpy.dpy_win, -1, 0); if (vdpy.dpy_renderer == NULL) { pr_err("Failed to Create GL_Renderer \n"); @@ -1134,7 +1141,6 @@ int gfx_ui_init() { SDL_SysWMinfo info; - SDL_Rect disp_rect; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1146,10 +1152,10 @@ gfx_ui_init() return -1; } - SDL_GetDisplayBounds(0, &disp_rect); + SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); - if (disp_rect.w < VDPY_MIN_WIDTH || - disp_rect.h < VDPY_MIN_HEIGHT) { + if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || + vdpy.pscreen_rect.h < VDPY_MIN_HEIGHT) { pr_err("Too small resolutions. 
Please check the " " graphics system\n"); SDL_Quit(); @@ -1198,9 +1204,9 @@ int vdpy_parse_cmd_option(const char *opts) str = strcasestr(opts, "geometry="); if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.screen); + snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.pscreen_id); if (snum != 1) { - vdpy.screen = 0; + vdpy.pscreen_id = 0; } vdpy.width = VDPY_MAX_WIDTH; vdpy.height = VDPY_MAX_HEIGHT; -- 2.25.1
|
|
[PATCH 6/6] hv: block guest eist/hwp cpuids and MSRs
This patch is to disable guests from controlling CPU frequency. Now ACRN owns CPU frequency control, so guests should no longer have it. This is done by blocking the eist/hwp cpuids and MSRs, as well as the hypercall on p-state info (which the DM would use to generate guest ACPI p-state related tables).
Alternatively, ACPI p-state can be passed through to VMs if they do not share pCPUs (or interfere with other VMs' CPU frequency). The offline tool will generate a flag to indicate whether the ACPI p-state is OK to pass through. When the flag is set, the eist cpuid/MSRs and hypercalls are not blocked.
Signed-off-by: Wu Zhou <wu.zhou@...> --- devicemodel/include/types.h | 1 + hypervisor/arch/x86/guest/vcpuid.c | 10 ++++++ hypervisor/arch/x86/guest/vmsr.c | 38 ++++++++++++++++++-- hypervisor/common/hypercall.c | 2 +- hypervisor/include/arch/x86/asm/cpuid.h | 2 ++ hypervisor/include/arch/x86/asm/guest/vcpu.h | 2 +- 6 files changed, 50 insertions(+), 5 deletions(-)
diff --git a/devicemodel/include/types.h b/devicemodel/include/types.h index 64527cc41..c32c6a262 100644 --- a/devicemodel/include/types.h +++ b/devicemodel/include/types.h @@ -10,6 +10,7 @@ #include <stdarg.h> #include <sched.h> #include <sys/types.h> +#include <stdbool.h> #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP PATH_MAX /* max interpreter file name length */ diff --git a/hypervisor/arch/x86/guest/vcpuid.c b/hypervisor/arch/x86/guest/vcpuid.c index 580098719..5900db99d 100644 --- a/hypervisor/arch/x86/guest/vcpuid.c +++ b/hypervisor/arch/x86/guest/vcpuid.c @@ -17,6 +17,7 @@ #include <logmsg.h> #include <asm/rdt.h> #include <asm/guest/vcat.h> +#include <asm/per_cpu.h> static inline const struct vcpuid_entry *local_find_vcpuid_entry(const struct acrn_vcpu *vcpu, uint32_t leaf, uint32_t subleaf) @@ -115,6 +116,12 @@ static void init_vcpuid_entry(uint32_t leaf, uint32_t subleaf, entry->flags = flags; switch (leaf) { + + case 0x06U: + cpuid_subleaf(leaf, subleaf, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); + entry->eax &= ~CPUID_06_EAX_HWP; + break; + case 0x07U: if (subleaf == 0U) { uint64_t cr4_reserved_mask = get_cr4_reserved_bits(); @@ -627,6 +634,9 @@ static void guest_cpuid_01h(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx /* mask Safer Mode Extension */ *ecx &= ~CPUID_ECX_SMX; + if (!(get_vm_config(vcpu->vm->vm_id)->pt_acpi_pstate)) + *ecx &= ~CPUID_ECX_EST; + /* mask SDBG for silicon debug */ *ecx &= ~CPUID_ECX_SDBG; diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c index 6509d34b0..d24767f3d 100644 --- a/hypervisor/arch/x86/guest/vmsr.c +++ b/hypervisor/arch/x86/guest/vmsr.c @@ -52,6 +52,9 @@ static uint32_t emulated_guest_msrs[NUM_EMULATED_MSRS] = { MSR_IA32_TIME_STAMP_COUNTER, MSR_IA32_APIC_BASE, MSR_IA32_PERF_CTL, + MSR_IA32_PM_ENABLE, + MSR_IA32_HWP_CAPABILITIES, + MSR_IA32_HWP_REQUEST, MSR_IA32_FEATURE_CONTROL, MSR_IA32_MCG_CAP, @@ -629,6 +632,21 @@ int32_t 
rdmsr_vmexit_handler(struct acrn_vcpu *vcpu) v = msr_read(msr); break; } + case MSR_IA32_PM_ENABLE: + { + vcpu_inject_gp(vcpu, 0U); + break; + } + case MSR_IA32_HWP_CAPABILITIES: + { + vcpu_inject_gp(vcpu, 0U); + break; + } + case MSR_IA32_HWP_REQUEST: + { + vcpu_inject_gp(vcpu, 0U); + break; + } case MSR_IA32_PAT: { /* @@ -1001,10 +1019,24 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu) } case MSR_IA32_PERF_CTL: { - if (validate_pstate(vcpu->vm, v) != 0) { - break; + if (get_vm_config(vcpu->vm->vm_id)->pt_acpi_pstate) { + msr_write(msr, v); } - msr_write(msr, v); + break; + } + case MSR_IA32_PM_ENABLE: + { + vcpu_inject_gp(vcpu, 0U); + break; + } + case MSR_IA32_HWP_CAPABILITIES: + { + vcpu_inject_gp(vcpu, 0U); + break; + } + case MSR_IA32_HWP_REQUEST: + { + vcpu_inject_gp(vcpu, 0U); break; } case MSR_IA32_PAT: diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c index e27eaf7e5..073e1be25 100644 --- a/hypervisor/common/hypercall.c +++ b/hypervisor/common/hypercall.c @@ -1058,7 +1058,7 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm * If it is stored as per-cpu in the future, * we need to check PMCMD_VCPUID_MASK in cmd. 
*/ - if (target_vm->pm.px_cnt == 0U) { + if (target_vm->pm.px_cnt == 0U || !(get_vm_config(target_vm->vm_id)->pt_acpi_pstate)) { break; } diff --git a/hypervisor/include/arch/x86/asm/cpuid.h b/hypervisor/include/arch/x86/asm/cpuid.h index 491758bab..6989a2637 100644 --- a/hypervisor/include/arch/x86/asm/cpuid.h +++ b/hypervisor/include/arch/x86/asm/cpuid.h @@ -72,6 +72,8 @@ #define CPUID_EDX_TM1 (1U<<29U) #define CPUID_EDX_IA64 (1U<<30U) #define CPUID_EDX_PBE (1U<<31U) +/* CPUID.06H:EAX.HWP*/ +#define CPUID_06_EAX_HWP (1U<<7U) /* CPUID.07H:EBX.FSGSBASE*/ #define CPUID_EBX_FSGSBASE (1U<<0U) /* CPUID.07H:EBX.TSC_ADJUST*/ diff --git a/hypervisor/include/arch/x86/asm/guest/vcpu.h b/hypervisor/include/arch/x86/asm/guest/vcpu.h index 87f6930a6..26f333f80 100644 --- a/hypervisor/include/arch/x86/asm/guest/vcpu.h +++ b/hypervisor/include/arch/x86/asm/guest/vcpu.h @@ -173,7 +173,7 @@ enum reset_mode; #define SECURE_WORLD 1 #define NUM_WORLD_MSRS 2U -#define NUM_COMMON_MSRS 23U +#define NUM_COMMON_MSRS 26U #ifdef CONFIG_VCAT_ENABLED #define NUM_CAT_L2_MSRS MAX_CACHE_CLOS_NUM_ENTRIES -- 2.25.1
|
|
[PATCH 5/6] hv: add CPU frequency driver in hv
This patch will enable ACRN as the CPU frequency controller.
The frequency driver uses governors to decide the frequency strategy. Currently we have two governors: - Performance: CPU can run at its max possible frequency (turbo boost will be activated if enabled). If the hardware performance states (HWP) mechanism is available, use it to automatically adjust frequency. - Nominal: CPU runs at its base frequency.
The governor is chosen by the user in the configurator. Besides, the user will have to choose one of the two frequency control interfaces: - HWP - ACPI p-state
The governors use an abstract layer called frequency policy to decide what the CPU's highest/lowest/base frequency is. The frequency policy is per-pCPU data. It is generated by offline tools.
The frequency driver provides 2 APIs for the hypervisor to call. - init_cpu_freq() called by boot CPU at start up to initialize the frequency driver. - cpu_freq_pcpu_online() called by pCPUs when online. Setting CPU frequency as its governor demands.
After a pCPU is online, the frequency driver no longer adjusts the value of its frequency registers. It is required for the VMs to give up CPU frequency control. This can be done by blocking VMs' eist/hwp cpuids and frequency control MSRs in the hv. Alternatively, ACPI p-state can be passed through to VMs if they do not share pCPUs (or interfere with other VMs' CPU frequency).
Signed-off-by: Wu Zhou <wu.zhou@...> --- hypervisor/arch/x86/cpu.c | 5 ++ hypervisor/arch/x86/pm.c | 59 +++++++++++++++++++++++ hypervisor/include/arch/x86/asm/board.h | 2 + hypervisor/include/arch/x86/asm/host_pm.h | 2 + hypervisor/include/arch/x86/asm/per_cpu.h | 1 + 5 files changed, 69 insertions(+)
diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index 13ffb3f59..b668439c8 100644 --- a/hypervisor/arch/x86/cpu.c +++ b/hypervisor/arch/x86/cpu.c @@ -24,6 +24,7 @@ #include <version.h> #include <asm/vmx.h> #include <asm/msr.h> +#include <asm/host_pm.h> #include <ptdev.h> #include <logmsg.h> #include <asm/rdt.h> @@ -156,6 +157,8 @@ void init_pcpu_pre(bool is_bsp) load_pcpu_state_data(); + init_cpu_freq(); + init_e820(); /* reserve ppt buffer from e820 */ @@ -315,6 +318,8 @@ void init_pcpu_post(uint16_t pcpu_id) panic("failed to initialize software SRAM!"); } + cpu_freq_pcpu_online(); + init_sched(pcpu_id); #ifdef CONFIG_RDT_ENABLED diff --git a/hypervisor/arch/x86/pm.c b/hypervisor/arch/x86/pm.c index 9af9362cc..abadb37ef 100644 --- a/hypervisor/arch/x86/pm.c +++ b/hypervisor/arch/x86/pm.c @@ -19,6 +19,7 @@ #include <asm/lapic.h> #include <asm/tsc.h> #include <delay.h> +#include <asm/board.h> struct cpu_context cpu_ctx; @@ -271,3 +272,61 @@ void reset_host(void) asm_pause(); } } + +/* + * set the cpu's performance level range + * The performance level is not necessary CPU's frequency ratio. + * When using HWP, the levels represents the level used in HWP_REQUEST MSR, while using ACPI p-state, it represents + * the Px level 'x' descripted in ACPI _PSS table. + * + * For ACPI p-state, it does not have the hardware automatic frequency adjust ablility, it can only set a fixed + * frequency. So this function assumes that highest_lvl = lowest_lvl when using ACPI p-state. 
+ */ +static void cpu_freq_set_performance(uint8_t highest_lvl, uint8_t lowest_lvl) +{ + uint64_t reg; + pr_acrnlog("3"); + if (cpufreq_info.interface_type == CPUFREQ_INTERFACE_HWP) { + reg = (0x80UL << 24) | (0x00UL << 16) | (((uint64_t)highest_lvl) << 8) | ((uint64_t)lowest_lvl); + msr_write(MSR_IA32_HWP_REQUEST, reg); + + } else if (cpufreq_info.interface_type == CPUFREQ_INTERFACE_ACPI) { + struct cpu_state_info *pm_s_state_data = get_cpu_pm_state_info(); + if (highest_lvl < pm_s_state_data->px_cnt) { + reg = pm_s_state_data->px_data[highest_lvl].control; + msr_write(MSR_IA32_PERF_CTL, reg); + } + } +} + +/* called by boot cpu at initializing phase */ +void init_cpu_freq(void) +{ + uint16_t pcpu_id; + + for (pcpu_id = 0; pcpu_id < MAX_PCPU_NUM; pcpu_id++) { + per_cpu(cpufreq_policy, pcpu_id) = &(cpufreq_policy_info[pcpu_id]); + } + + if (cpufreq_info.interface_type == CPUFREQ_INTERFACE_HWP) { + msr_write(MSR_IA32_PM_ENABLE, 1U); + } +} + +/* called by pcpu, setting its frequency at start up */ +void cpu_freq_pcpu_online(void) +{ + struct acrn_cpufreq_policy *policy = get_cpu_var(cpufreq_policy); + + /* + * Currently we have 2 governors: + * CPUFREQ_GOVERNOR_PERFORMANCE - CPU can run at up to its max frequency + * CPUFREQ_GOVERNOR_NOMINAL - CPU locked at its base/guaranteed frequency + * You can chose one from offline tool. 
+ */ + if (cpufreq_info.governor_type == CPUFREQ_GOVERNOR_PERFORMANCE && policy->available) { + cpu_freq_set_performance(policy->policy_highest_lvl, policy->policy_lowest_lvl); + } else if (cpufreq_info.governor_type == CPUFREQ_GOVERNOR_NOMINAL && policy->available) { + cpu_freq_set_performance(policy->policy_guaranteed_lvl, policy->policy_guaranteed_lvl); + } +} diff --git a/hypervisor/include/arch/x86/asm/board.h b/hypervisor/include/arch/x86/asm/board.h index 56bbeb9c8..f30457d25 100644 --- a/hypervisor/include/arch/x86/asm/board.h +++ b/hypervisor/include/arch/x86/asm/board.h @@ -34,6 +34,8 @@ extern struct rdt_type res_cap_info[RDT_NUM_RESOURCES]; #endif extern const struct cpu_state_table board_cpu_state_tbl; +extern struct acrn_cpufreq_policy cpufreq_policy_info[MAX_PCPU_NUM]; +extern const struct acrn_cpufreq_info cpufreq_info; extern const union pci_bdf plat_hidden_pdevs[MAX_HIDDEN_PDEVS_NUM]; extern const struct vmsix_on_msi_info vmsix_on_msi_devs[MAX_VMSIX_ON_MSI_PDEVS_NUM]; diff --git a/hypervisor/include/arch/x86/asm/host_pm.h b/hypervisor/include/arch/x86/asm/host_pm.h index b8fb8a307..e71895579 100644 --- a/hypervisor/include/arch/x86/asm/host_pm.h +++ b/hypervisor/include/arch/x86/asm/host_pm.h @@ -39,5 +39,7 @@ extern void restore_s3_context(void); struct cpu_state_info *get_cpu_pm_state_info(void); struct acpi_reset_reg *get_host_reset_reg_data(void); void reset_host(void); +void init_cpu_freq(void); +void cpu_freq_pcpu_online(void); #endif /* HOST_PM_H */ diff --git a/hypervisor/include/arch/x86/asm/per_cpu.h b/hypervisor/include/arch/x86/asm/per_cpu.h index 1c7c83d80..2cc7aa4b8 100644 --- a/hypervisor/include/arch/x86/asm/per_cpu.h +++ b/hypervisor/include/arch/x86/asm/per_cpu.h @@ -62,6 +62,7 @@ struct per_cpu_region { uint64_t shutdown_vm_bitmap; uint64_t tsc_suspend; struct acrn_vcpu *whose_iwkey; + struct acrn_cpufreq_policy *cpufreq_policy; /* * We maintain a per-pCPU array of vCPUs. vCPUs of a VM won't * share same pCPU. 
So the maximum possible # of vCPUs that can -- 2.25.1
|
|
[PATCH 4/6] config_tools & hv: generate CPU frequency info code
This patch is to generate CPU frequency info from allocation data.
Frequency policy struct and frequency info struct are added in acrn_common.h.
Frequency policy is generated for each CPU in board.c, and passthrough pstate flag is generated for each VM in vm_configuration.c.
Signed-off-by: Wu Zhou <wu.zhou@...> --- hypervisor/include/arch/x86/asm/vm_config.h | 2 ++ hypervisor/include/public/acrn_common.h | 29 +++++++++++++++ misc/config_tools/board_config/board_c.py | 35 +++++++++++++++++++ .../xforms/vm_configurations.c.xsl | 8 +++++ 4 files changed, 74 insertions(+)
diff --git a/hypervisor/include/arch/x86/asm/vm_config.h b/hypervisor/include/arch/x86/asm/vm_config.h index ef80701d6..c6ea4fbe9 100644 --- a/hypervisor/include/arch/x86/asm/vm_config.h +++ b/hypervisor/include/arch/x86/asm/vm_config.h @@ -207,6 +207,8 @@ struct acrn_vm_config { uint16_t pt_intx_num; /* number of pt_intx_config entries pointed by pt_intx */ struct pt_intx_config *pt_intx; /* stores the base address of struct pt_intx_config array */ + + bool pt_acpi_pstate; /* whether to passthru acpi pstate */ } __aligned(8); struct acrn_vm_config *get_vm_config(uint16_t vm_id); diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h index fee71a655..b2d3e3d68 100644 --- a/hypervisor/include/public/acrn_common.h +++ b/hypervisor/include/public/acrn_common.h @@ -522,6 +522,35 @@ struct acrn_pstate_data { uint64_t status; /* success indicator */ }; +struct acrn_cpufreq_policy { + bool available; + /* + * Frequency levels: + * for HWP, it is the continuous performance level for HWP_REQUEST msr; + * for ACPI p-state, it is the APCI Px performance level x + */ + uint8_t policy_guaranteed_lvl; + uint8_t policy_highest_lvl; + uint8_t policy_lowest_lvl; +}; + +enum cpufreq_governor_type { + CPUFREQ_GOVERNOR_PERFORMANCE, + CPUFREQ_GOVERNOR_NOMINAL, +}; + +enum cpufreq_interface_type { + CPUFREQ_INTERFACE_NONE = 0, + CPUFREQ_INTERFACE_HWP, + CPUFREQ_INTERFACE_ACPI, +}; + +struct acrn_cpufreq_info { + enum cpufreq_governor_type governor_type; + enum cpufreq_interface_type interface_type; + struct acrn_cpufreq_policy *policy; +}; + struct acpi_sx_pkg { uint8_t val_pm1a; uint8_t val_pm1b; diff --git a/misc/config_tools/board_config/board_c.py b/misc/config_tools/board_config/board_c.py index 655cfc6c4..2597877be 100644 --- a/misc/config_tools/board_config/board_c.py +++ b/misc/config_tools/board_config/board_c.py @@ -449,6 +449,39 @@ def gen_known_caps_pci_devs(config): if i == (bdf_list_len - 1): print("};", file=config) +def 
gen_cpufreq_info(config): + allocation_dir = os.path.split(common.SCENARIO_INFO_FILE)[0] + "/configs/allocation.xml" + allocation_etree = lxml.etree.parse(allocation_dir) + cpu_list = board_cfg_lib.get_processor_info() + max_cpu_num = len(cpu_list) + + print("\nstruct acrn_cpufreq_policy cpufreq_policy_info[MAX_PCPU_NUM] = {", file=config) + for cpu_id in range(max_cpu_num): + available = 'false' + policy_node = common.get_node(f"//cpufreq/CPU[@id='{cpu_id}']/policy", allocation_etree) + if policy_node != None: + available = "true" + policy_guaranteed_lvl = common.get_node("./policy_guaranteed_lvl/text()", policy_node) + policy_highest_lvl = common.get_node("./policy_highest_lvl/text()", policy_node) + policy_lowest_lvl = common.get_node("./policy_lowest_lvl/text()", policy_node) + + print("\t{", file=config) + print(f"\t\t.available = {available},", file=config) + if available == 'true': + print(f"\t\t.policy_guaranteed_lvl = {policy_guaranteed_lvl},", file=config) + print(f"\t\t.policy_highest_lvl = {policy_highest_lvl},", file=config) + print(f"\t\t.policy_lowest_lvl = {policy_lowest_lvl},", file=config) + print("\t},", file=config) + print("};", file=config) + + print("\nconst struct acrn_cpufreq_info cpufreq_info = {", file=config) + + governor = common.get_node(f"//cpufreq/governor/text()", allocation_etree) + interface = common.get_node(f"//cpufreq/interface/text()", allocation_etree) + print(f"\t.governor_type = {governor},", file=config) + print(f"\t.interface_type = {interface},", file=config) + print("\t.policy = cpufreq_policy_info,", file=config) + print("};", file=config) def generate_file(config): """ @@ -479,4 +512,6 @@ def generate_file(config): # gen known caps of pci dev info for platform gen_known_caps_pci_devs(config) + gen_cpufreq_info(config) + return err_dic diff --git a/misc/config_tools/xforms/vm_configurations.c.xsl b/misc/config_tools/xforms/vm_configurations.c.xsl index bb2c3e29d..9c35a1392 100644 --- 
a/misc/config_tools/xforms/vm_configurations.c.xsl +++ b/misc/config_tools/xforms/vm_configurations.c.xsl @@ -97,6 +97,7 @@ <xsl:call-template name="load_order" /> <xsl:apply-templates select="name" /> + <xsl:call-template name="pt_acpi_pstate" /> <xsl:if test="acrn:is-service-vm(load_order)"> <xsl:value-of select="acrn:comment('Allow Service VM to reboot the system since it is the highest priority VM.')" /> <xsl:value-of select="$newline" /> @@ -158,6 +159,13 @@ <xsl:value-of select="acrn:initializer('name', concat($quot, current(), $quot))" /> </xsl:template> + <xsl:template name="pt_acpi_pstate"> + <xsl:variable name="vm_id" select="@id" /> + <xsl:if test="//allocation-data/acrn-config/vm[@id=$vm_id]/vm_pt_acpi_pstate = 'y'"> + <xsl:value-of select="acrn:initializer('pt_acpi_pstate', 'true')" /> + </xsl:if> + </xsl:template> + <xsl:template name="cpu_affinity"> <xsl:choose> <xsl:when test="acrn:is-service-vm(load_order)"> -- 2.25.1
|
|
[PATCH 3/6] config_tools: allocate CPU frequency policy
Using CPU frequency info extracted in board_inspector, this patch generates frequency policy for each CPU. The frequency policy is an abstract layer which describes where the CPU's highest/lowest/base frequency levels are at, or whether to just pass through frequency control to the guests. ACRN CPU frequency driver can use those policy data to control CPU frequency more easily.
The frequency level is not necessarily equivalent to frequency ratio. For HWP, it stands for performance levels written to HWP_REQUEST. For ACPI p-state, it stands for the index for p-state entries described by ACPI _PSS.
The rules to generate frequency policy are: - For standard VMs, highest frequency level is just the highest level current CPU can run at. - For RTVMs, we want certainty in latency, so just let highest=lowest=base. - In some cases, CPUs are sharing frequency domain on hardware level (e.g. ADL e-cores in group of 4). Policies could be different between CPUs, but if they are in the same frequency domain, we need to make them aligned with each other. - If one VM's pCPUs are not shared with other VMs, and HV is configured to use ACPI p-state interface, we can pass through p-state control to the VM.
The policy data is temporarily stored in allocation_etree.
Signed-off-by: Wu Zhou <wu.zhou@...> --- misc/config_tools/library/board_cfg_lib.py | 37 ++++ .../static_allocators/cpu_freq.py | 198 ++++++++++++++++++ 2 files changed, 235 insertions(+) create mode 100644 misc/config_tools/static_allocators/cpu_freq.py
diff --git a/misc/config_tools/library/board_cfg_lib.py b/misc/config_tools/library/board_cfg_lib.py index 953d2d7d0..5d8fc8743 100644 --- a/misc/config_tools/library/board_cfg_lib.py +++ b/misc/config_tools/library/board_cfg_lib.py @@ -349,6 +349,43 @@ def get_pci_info(board_info): return (pci_desc, pci_bdf_vpid) +def get_p_state_count(): + """ + Get cpu p-state count + :return: p-state count + """ + px_info = get_info(common.BOARD_INFO_FILE, "<PX_INFO>", "</PX_INFO>") + if px_info != None: + for line in px_info: + if re.search("{.*}", line) == None: + px_info.remove(line) + + return len(px_info) + +def get_p_state_index_from_ratio(ratio): + """ + Get the closest p-state index lesser than given ratio + :return: p-state index + """ + px_info = get_info(common.BOARD_INFO_FILE, "<PX_INFO>", "</PX_INFO>") + if px_info != None: + for line in px_info: + if re.search("{.*}", line) == None: + px_info.remove(line) + + i = 0 + closest_index = 1 + for line in px_info: + l = re.search("0x(\w*)UL}", line) + if l != None: + state_ratio = int(l.group(1), 16) >> 8 + if state_ratio <= ratio: + closest_index = i + break + i += 1 + + return closest_index + HI_MMIO_OFFSET = 0 class Bar_Mem: diff --git a/misc/config_tools/static_allocators/cpu_freq.py b/misc/config_tools/static_allocators/cpu_freq.py new file mode 100644 index 000000000..787c2e135 --- /dev/null +++ b/misc/config_tools/static_allocators/cpu_freq.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import common, board_cfg_lib + +# CPU frequency dependency +# Some CPU cores may share the same clock domain/group with others, which makes them always run at +# the same frequency of the highest on in the group. Including those conditions: +# 1. CPU in the clock domain described in ACPI _PSD. +# 2. CPU threads sharing the same physical core. +# 3. E-cores residents in the same group. 
+def alloc_dependency(board_etree, scenario_etree, allocation_etree): + cpus = board_etree.xpath("//processors//thread") + for cpu in cpus: + cpu_id = common.get_node("./cpu_id/text()", cpu) + psd_cpus = common.get_node("./freqdomain_cpus/text()", cpu).split(' ') + apic_id = int(common.get_node("./apic_id/text()", cpu)[2:], base=16) + is_hybrid = (len(board_etree.xpath("//processors//capability[@id='hybrid']")) != 0) + core_type = common.get_node("./core_type/text()", cpu) + for other_cpu in cpus: + other_cpu_id = common.get_node("./cpu_id/text()", other_cpu) + if cpu_id != other_cpu_id: + other_apic_id = int(common.get_node("./apic_id/text()", other_cpu)[2:], base=16) + other_core_type = common.get_node("./core_type/text()", other_cpu) + # threads at same core + if (apic_id & ~1) == (other_apic_id & ~1): + psd_cpus.append(other_cpu_id) + # e-cores in the same group + if is_hybrid and core_type == 'Atom' and other_core_type == 'Atom' and (apic_id & ~7) == (other_apic_id & ~7): + psd_cpus.append(other_cpu_id) + + alloc_dep_node = common.append_node("/acrn-config/hv/cpufreq/CPU", None, allocation_etree, id=cpu_id) + if psd_cpus != None: + psd_cpus = list(set(psd_cpus)) + psd_cpus.sort() + common.append_node("./freq_dependency", " ".join(psd_cpus), alloc_dep_node) + +# CPU frequency policy: +# +# Frequency policy is used to determine in what range the governor could adjust the CPU's performance. +# eg. highest_performance_lvl, lowest_performance_lvl +# The performance level is not necessary CPU's frequency ratio. +# When using HWP, the levels represents the level used in HWP_REQUEST MSR, while using ACPI p-state, it represents +# the P-state index in ACPI _PSS table. +# +# When the CPU is assigned to a RTVM, we wish to have its frequency fixed, to get more certainty in latency. +# The way to fix the frequency is to set highest_lvl = lowest_lvl. +# +# Some CPU cores may have dependent frequency, as a frequency domain or group. 
Their frequency always stays +# the same with the highest one in the group. In this siduation, the CPU's frequency policy should be adjusted +# to align with other CPUs in its group. +def alloc_policy(board_etree, scenario_etree, allocation_etree): + cpu_has_eist = (len(board_etree.xpath("//processors//capability[@id='est']")) != 0) + cpu_has_hwp = (len(board_etree.xpath("//processors//capability[@id='hwp_supported']")) != 0) + cpu_has_turbo = (len(board_etree.xpath("//processors//capability[@id='turbo_boost_available']")) != 0) + governor = common.get_node("//CPUFREQ_GOVERNOR/text()", scenario_etree) + freq_interface = common.get_node("//CPUFREQ_INTERFACE/text()", scenario_etree) + rtvm_cpus = scenario_etree.xpath(f"//vm[vm_type = 'RTVM']//cpu_affinity//pcpu_id/text()") + cpus = board_etree.xpath("//processors//thread") + + common.append_node(f"/acrn-config/hv/cpufreq/governor", governor, allocation_etree) + + if cpu_has_hwp and freq_interface == 'CPUFREQ_INTERFACE_HWP': + common.append_node(f"/acrn-config/hv/cpufreq/interface", "CPUFREQ_INTERFACE_HWP", allocation_etree) + for cpu in cpus: + cpu_id = common.get_node("./cpu_id/text()", cpu) + guaranteed_performance_lvl = common.get_node("./guaranteed_performance_lvl/text()", cpu) + highest_performance_lvl = common.get_node("./highest_performance_lvl/text()", cpu) + lowest_performance_lvl = common.get_node("./lowest_performance_lvl/text()", cpu) + if cpu_id in rtvm_cpus: + # for CPUs in RTVM, fix to base performance + policy_lowest = guaranteed_performance_lvl + policy_highest = guaranteed_performance_lvl + policy_guaranteed = guaranteed_performance_lvl + elif cpu_has_turbo: + policy_lowest = lowest_performance_lvl + policy_highest = highest_performance_lvl + policy_guaranteed = guaranteed_performance_lvl + else: + policy_lowest = lowest_performance_lvl + policy_highest = guaranteed_performance_lvl + policy_guaranteed = guaranteed_performance_lvl + + cpu_node = common.get_node(f"//hv/cpufreq/CPU[@id='{cpu_id}']", 
allocation_etree) + policy_node = common.append_node("./policy", None, cpu_node) + common.append_node("./policy_guaranteed_lvl", policy_guaranteed, policy_node) + common.append_node("./policy_highest_lvl", policy_highest, policy_node) + common.append_node("./policy_lowest_lvl", policy_lowest, policy_node) + elif cpu_has_eist: + common.append_node(f"/acrn-config/hv/cpufreq/interface", "CPUFREQ_INTERFACE_ACPI", allocation_etree) + mntr = int(board_etree.xpath("//processors//attribute[@id='max_none_turbo_ratio']/text()")[0], 10) + p_count = board_cfg_lib.get_p_state_count() + none_turbo_p = board_cfg_lib.get_p_state_index_from_ratio(mntr) + if p_count != 0: + for cpu in cpus: + cpu_id = common.get_node("./cpu_id/text()", cpu) + # P0 is the highest stat + if cpu_id in rtvm_cpus: + # for CPUs in RTVM, fix to base performance(none turbo frequency if turbo on) + if cpu_has_turbo: + policy_highest = none_turbo_p + policy_guaranteed = none_turbo_p + policy_lowest = none_turbo_p + else: + policy_highest = 0 + policy_guaranteed = 0 + policy_lowest = 0 + else: + if cpu_has_turbo: + policy_highest = 0 + policy_guaranteed = none_turbo_p + policy_lowest = p_count -1 + else: + policy_highest = 0 + policy_guaranteed = 0 + policy_lowest = p_count -1 + + policy_node = common.append_node(f"/acrn-config/hv/cpufreq/CPU[@id='{cpu_id}']/policy", None, allocation_etree) + common.append_node("./policy_guaranteed_lvl", str(policy_guaranteed), policy_node) + common.append_node("./policy_highest_lvl", str(policy_highest), policy_node) + common.append_node("./policy_lowest_lvl", str(policy_lowest), policy_node) + else: + common.append_node(f"/acrn-config/hv/cpufreq/interface", "CPUFREQ_INTERFACE_NONE", allocation_etree) + + # Let CPUs in the same frequency dependency group have the same policy + for alloc_cpu in allocation_etree.xpath("//cpufreq/CPU"): + dependency_cpus = common.get_node("./freq_dependency/text()", alloc_cpu).split(" ") + if common.get_node("./policy", alloc_cpu) != None: + 
highest_lvl = int(common.get_node(".//policy_highest_lvl/text()", alloc_cpu)) + lowest_lvl = int(common.get_node(".//policy_lowest_lvl/text()", alloc_cpu)) + for dep_cpu_id in dependency_cpus: + dep_highest = int(common.get_node(f"//cpufreq/CPU[@id={dep_cpu_id}]//policy_highest_lvl/text()", allocation_etree)) + dep_lowest = int(common.get_node(f"//cpufreq/CPU[@id={dep_cpu_id}]//policy_lowest_lvl/text()", allocation_etree)) + if freq_interface == 'CPUFREQ_INTERFACE_HWP': + if highest_lvl > dep_highest: + highest_lvl = dep_highest + if lowest_lvl < dep_lowest: + lowest_lvl = dep_lowest + else: + if highest_lvl < dep_highest: + highest_lvl = dep_highest + if lowest_lvl > dep_lowest: + lowest_lvl = dep_lowest + + common.update_text("./policy/policy_highest_lvl", str(highest_lvl), alloc_cpu, True) + common.update_text("./policy/policy_lowest_lvl", str(lowest_lvl), alloc_cpu, True) + +# passthrough acpi p-state to vm when: +# 1. VM does not share CPU with none service vms +# 2. Configurated as ACPI p-state interace +# 3. 
CPU frequency is not dependent with other vm's CPU +def alloc_passthrough(board_etree, scenario_etree, allocation_etree): + policy_nodes = allocation_etree.xpath("/acrn-config/hv/cpufreq/CPU/policy") + all_cpu_ids = scenario_etree.xpath("//cpu_affinity//pcpu_id/text()") + freq_interface = common.get_node("//CPUFREQ_INTERFACE/text()", scenario_etree) + vms = scenario_etree.xpath("//vm[load_order!='SERVICE_VM']") + for vm in vms: + vm_cpu_shared = 0 + vm_cpu_ids = vm.xpath(".//cpu_affinity//pcpu_id/text()") + vm_id = common.get_node("./@id", vm) + + for cpu_id in vm_cpu_ids: + if all_cpu_ids.count(cpu_id) > 1: + vm_cpu_shared = 1 + + vm_cpus_dependency = list() + for cpu_id in vm.xpath(".//cpu_affinity//pcpu_id/text()"): + vm_cpus_dependency += common.get_node(f"//cpufreq/CPU[@id={cpu_id}]/freq_dependency/text()", allocation_etree).split(" ") + + other_vms = scenario_etree.xpath(f"//vm[load_order!='SERVICE_VM' and @id!={vm_id}]") + cpu_freq_dependent_to_other_vms = 0 + for other_vm in other_vms: + for cpu_id in other_vm.xpath(".//cpu_affinity//pcpu_id/text()"): + if cpu_id in vm_cpus_dependency: + cpu_freq_dependent_to_other_vms = 1 + + allocation_vm_node = common.get_node(f"/acrn-config/vm[@id='{vm_id}']", allocation_etree) + if allocation_vm_node is None: + allocation_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id) + + if vm_cpu_shared == 0 and cpu_freq_dependent_to_other_vms == 0 and freq_interface == "CPUFREQ_INTERFACE_ACPI": + print("pt acpi pstate ", vm_id) + common.append_node("./vm_pt_acpi_pstate", 'y', allocation_vm_node) + for cpu_id in vm.xpath(".//cpu_affinity//pcpu_id/text()"): + print(cpu_id) + policy_node = common.get_node(f"//cpufreq//CPU[@id='{cpu_id}']/policy", allocation_etree) + common.append_node("./pcpu_pt_acpi_pstate", 'y', policy_node) + print("passthrough", cpu_id) + +def fn(board_etree, scenario_etree, allocation_etree): + common.append_node("/acrn-config/hv/cpufreq", None, allocation_etree) + 
alloc_dependency(board_etree, scenario_etree, allocation_etree) + alloc_policy(board_etree, scenario_etree, allocation_etree) + alloc_passthrough(board_etree, scenario_etree, allocation_etree) -- 2.25.1
|
|
[PATCH 2/6] config_tools: extract CPU frequency info in board_inspector
This patch adds CPU frequency info for board_inspector. The info is to be used by ACRN CPU frequency driver. Including those: - Capabilities for HWP base regs and turbo boost, by reading cpuids. - Max none turbo ratio and max turbo ratio, by reading MSRs. - HWP capabilities, by reading IA32_HWP_CAPABILITIES. This register is part of HWP interface, and requires HWP enabled in IA32_PM_ENABLE. - ACPI _PSD info, by reading sys nodes of acpi-pstate driver. This table describes frequency domains in which CPUs shares the same frequency.
Signed-off-by: Wu Zhou <wu.zhou@...> --- .../board_inspector/board_inspector.py | 2 +- .../board_inspector/cpuparser/cpuids.py | 6 +++ .../board_inspector/cpuparser/msr.py | 31 ++++++++++++- .../board_inspector/cpuparser/platformbase.py | 12 +++++ .../extractors/10-processors.py | 45 ++++++++++++++++++- 5 files changed, 93 insertions(+), 3 deletions(-)
diff --git a/misc/config_tools/board_inspector/board_inspector.py b/misc/config_tools/board_inspector/board_inspector.py index 7e4a406c6..7869c8a35 100755 --- a/misc/config_tools/board_inspector/board_inspector.py +++ b/misc/config_tools/board_inspector/board_inspector.py @@ -37,7 +37,7 @@ class AddLLCCATAction(argparse.Action): def check_deps(): # Check that the required tools are installed on the system - BIN_LIST = ['cpuid', 'rdmsr', 'lspci', ' dmidecode', 'blkid', 'stty'] + BIN_LIST = ['cpuid', 'rdmsr', 'wrmsr', 'lspci', ' dmidecode', 'blkid', 'stty'] cpuid_min_ver = 20170122 had_error = False for execute in BIN_LIST: diff --git a/misc/config_tools/board_inspector/cpuparser/cpuids.py b/misc/config_tools/board_inspector/cpuparser/cpuids.py index 6013ffeb9..50cc473dd 100644 --- a/misc/config_tools/board_inspector/cpuparser/cpuids.py +++ b/misc/config_tools/board_inspector/cpuparser/cpuids.py @@ -285,6 +285,7 @@ class LEAF_6(CPUID): arat_supported = cpuidfield(EAX, 2, 2, doc = "APIC-Timer-always-running feature is supported if set") pln_supported = cpuidfield(EAX, 4, 4, doc = "Power limit notification controls are supported if set") ecmd_supported = cpuidfield(EAX, 5, 5, doc = "Clock modulation duty cycle extension is supported if set") + hwp_supported = cpuidfield(EAX, 7, 7, doc = "HWP base registers (IA32_PM_ENABLE[bit 0], IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS) are supported if set") package_thermal_management_supported = cpuidfield(EAX, 6, 6, doc = "Package thermal management is supported if set") num_interrupt_thresholds = cpuidfield(EBX, 3, 0, doc="Number of interrupt thresholds in digital thermal sensor") @@ -292,6 +293,11 @@ class LEAF_6(CPUID): hardware_coordination_feedback_capability = cpuidfield(ECX, 0, 0, doc="Hardware coordination feedback capability") performance_energy_bias = cpuidfield(ECX, 3, 3, doc="Performance-energy bias preference support") + capability_bits = [ + "turbo_boost_available", + "hwp_supported", + ] + class 
LEAF_7(CPUID): """Structured Extended Feature Flags Enumeration Leaf diff --git a/misc/config_tools/board_inspector/cpuparser/msr.py b/misc/config_tools/board_inspector/cpuparser/msr.py index 8fff698fa..2228d64e5 100644 --- a/misc/config_tools/board_inspector/cpuparser/msr.py +++ b/misc/config_tools/board_inspector/cpuparser/msr.py @@ -3,7 +3,8 @@ # SPDX-License-Identifier: BSD-3-Clause # -from cpuparser.platformbase import MSR, msrfield +from cgitb import enable +from cpuparser.platformbase import MSR, msrfield, msrfields class MSR_IA32_MISC_ENABLE(MSR): addr = 0x1a0 @@ -217,3 +218,31 @@ class MSR_IA32_VMX_ENTRY_CTLS(MSR): "vmx_entry_ctls_load_pat", "vmx_entry_ctls_ia32e_mode", ] + +class MSR_IA32_HWP_CAPABILITIES(MSR): + addr = 0x00000771 + highest_performance_lvl = msrfields(7, 0, doc=None) + guaranteed_performance_lvl = msrfields(15, 8, doc=None) + lowest_performance_lvl = msrfields(31, 24, doc=None) + + attribute_bits = [ + "highest_performance_lvl", + "guaranteed_performance_lvl", + "lowest_performance_lvl", + ] + +class MSR_TURBO_RATIO_LIMIT(MSR): + addr = 0x000001ad + max_ratio_1core = msrfields(7, 0, doc=None) + + attribute_bits = [ + "max_ratio_1core", + ] + +class MSR_TURBO_ACTIVATION_RATIO(MSR): + addr = 0x0000064c + max_none_turbo_ratio = msrfields(7, 0, doc=None) + + attribute_bits = [ + "max_none_turbo_ratio", + ] \ No newline at end of file diff --git a/misc/config_tools/board_inspector/cpuparser/platformbase.py b/misc/config_tools/board_inspector/cpuparser/platformbase.py index 28dba44d7..f0c2331b2 100644 --- a/misc/config_tools/board_inspector/cpuparser/platformbase.py +++ b/misc/config_tools/board_inspector/cpuparser/platformbase.py @@ -230,3 +230,15 @@ class msrfield(property): @staticmethod def is_ctrl_setting_allowed(msr_val, ctrl): return ((msr_val >> 32) & ctrl) == ctrl + +class msrfields(property): + def __init__(self, msb, lsb, doc="Bogus"): + self.msb = msb + self.lsb = lsb + + max_value = (1 << (msb - lsb + 1)) - 1 + field_mask = 
max_value << lsb + + def getter(self): + return (self.value & field_mask) >> lsb + super(msrfields, self).__init__(getter, doc=doc) \ No newline at end of file diff --git a/misc/config_tools/board_inspector/extractors/10-processors.py b/misc/config_tools/board_inspector/extractors/10-processors.py index 11c3dcd76..205227f27 100644 --- a/misc/config_tools/board_inspector/extractors/10-processors.py +++ b/misc/config_tools/board_inspector/extractors/10-processors.py @@ -4,6 +4,7 @@ # import logging +import subprocess import lxml.etree import re @@ -47,7 +48,7 @@ def extract_model(processors_node, cpu_id, family_id, model_id, core_type, nativ brandstring += leaf_data.brandstring n.set("description", re.sub('[^!-~]+', ' ', brandstring.decode()).strip()) - leaves = [(1, 0), (7, 0), (0x80000001, 0), (0x80000007, 0)] + leaves = [(1, 0), (6, 0), (7, 0), (0x80000001, 0), (0x80000007, 0)] for leaf in leaves: leaf_data = parse_cpuid(leaf[0], leaf[1], cpu_id) for cap in leaf_data.capability_bits: @@ -70,6 +71,12 @@ def extract_model(processors_node, cpu_id, family_id, model_id, core_type, nativ for cap in leaf_data.attribute_bits: add_child(n, "attribute", str(getattr(leaf_data, cap)), id=cap) + msr_regs = [MSR_TURBO_RATIO_LIMIT, MSR_TURBO_ACTIVATION_RATIO] + for msr_reg in msr_regs: + msr_data = msr_reg.rdmsr(cpu_id) + for attr in msr_data.attribute_bits: + add_child(n, "attribute", str(getattr(msr_data, attr)), id=attr) + def extract_topology(processors_node): cpu_ids = get_online_cpu_ids() for cpu_id in cpu_ids: @@ -130,6 +137,42 @@ def extract_topology(processors_node): last_shift = leaf_topo.num_bit_shift subleaf += 1 +def extract_hwp_info(processors_node): + if processors_node.xpath("//capability[@id = 'hwp_supported']") is None: + return + + try: + subprocess.check_call('/usr/sbin/wrmsr 0x770 1', shell=True, stdout=subprocess.PIPE) + except subprocess.CalledProcessError: + logging.debug("MSR 0x770 write failed!") + return + + threads = processors_node.xpath("//thread") 
+ for thread in threads: + cpu_id = get_node(thread, "cpu_id/text()") + msr_regs = [MSR_IA32_HWP_CAPABILITIES,] + for msr_reg in msr_regs: + msr_data = msr_reg.rdmsr(cpu_id) + for attr in msr_data.attribute_bits: + add_child(thread, attr, str(getattr(msr_data, attr))) + +def extract_psd_info(processors_node): + sysnode = '/sys/devices/system/cpu/' + threads = processors_node.xpath("//thread") + for thread in threads: + cpu_id = get_node(thread, "cpu_id/text()") + try: + with open(sysnode + "cpu{cpu_id}/cpufreq/freqdomain_cpus", 'r') as f_node: + freqdomain_cpus = f_node.read() + except IOError: + logging.info("No _PSD info for cpu {cpu_id}") + freqdomain_cpus = cpu_id + + freqdomain_cpus.replace('\n','') + add_child(thread, "freqdomain_cpus", freqdomain_cpus) + def extract(args, board_etree): processors_node = get_node(board_etree, "//processors") extract_topology(processors_node) + extract_hwp_info(processors_node) + extract_psd_info(processors_node) -- 2.25.1
|
|
[PATCH 1/6] config_tools: add HV CPU frequency options to configurator
Add 'CPU frequency governor' and 'CPU frequency interface' in hypervisor advanced options.
Signed-off-by: Wu Zhou <wu.zhou@...> --- misc/config_tools/schema/config.xsd | 23 ++++++++++++++++++ misc/config_tools/schema/types.xsd | 36 +++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+)
diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd index 040433dd3..db3ebfc63 100644 --- a/misc/config_tools/schema/config.xsd +++ b/misc/config_tools/schema/config.xsd @@ -118,6 +118,11 @@ <xs:documentation>Configure Software SRAM. This feature reserves memory buffers as always-cached memory to improve an application's real-time performance.</xs:documentation> </xs:annotation> </xs:element> + <xs:element name="CPU_PERFORMANCE" type="CPUPerformanceType"> + <xs:annotation acrn:title="CPU performance options" acrn:views="advanced"> + <xs:documentation>Specify the CPU performance setting.</xs:documentation> + </xs:annotation> + </xs:element> </xs:all> </xs:complexType> @@ -137,6 +142,24 @@ </xs:all> </xs:complexType> +<xs:complexType name="CPUPerformanceType"> + <xs:all> + <xs:element name="CPUFREQ_GOVERNOR" type="GovernorType" default="CPUFREQ_GOVERNOR_PERFORMANCE"> + <xs:annotation acrn:title="CPU frequency governor" acrn:views="advanced"> + <xs:documentation>Select the frequency governor for the hypervisor to administrate CPU performance. + +- ``Performance``: CPU may run at its maximum frequency. 
+- ``Nominal``: CPU runs at its guaranteed frequency.</xs:documentation> + </xs:annotation> + </xs:element> + <xs:element name="CPUFREQ_INTERFACE" type="FreqInterfaceType" default="CPUFREQ_INTERFACE_HWP"> + <xs:annotation acrn:title="CPU frequency interface" acrn:views="advanced"> + <xs:documentation>Select the hardware interface for the hypervisor to adjust CPU frequency.</xs:documentation> + </xs:annotation> + </xs:element> + </xs:all> +</xs:complexType> + <xs:complexType name="CapacitiesOptionsType"> <xs:annotation> <xs:documentation>Capacity limits for static assigned data structure or maximum supported resource.</xs:documentation> diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd index 4aa4c5cbc..5445f5e97 100644 --- a/misc/config_tools/schema/types.xsd +++ b/misc/config_tools/schema/types.xsd @@ -147,6 +147,42 @@ Read more about the available scheduling options in :ref:`cpu_sharing`.</xs:docu </xs:restriction> </xs:simpleType> +<xs:simpleType name="GovernorType"> + <xs:annotation> + <xs:documentation>A string specifying the CPU frequency governor option: + +- ``Performance``: CPU may run at its maximum frequency. +- ``Nominal``: CPU runs at its guaranteed frequency. + </xs:documentation> + </xs:annotation> + <xs:restriction base="xs:string"> + <xs:enumeration value="CPUFREQ_GOVERNOR_PERFORMANCE"> + <xs:annotation acrn:title="Performance" /> + </xs:enumeration> + <xs:enumeration value="CPUFREQ_GOVERNOR_NOMINAL"> + <xs:annotation acrn:title="Nominal" /> + </xs:enumeration> + </xs:restriction> +</xs:simpleType> + +<xs:simpleType name="FreqInterfaceType"> + <xs:annotation> + <xs:documentation>A string specifying the Select the hardware interface for the hypervisor to adjust CPU frequency: + +- ``HWP``: Use HWP interface. +- ``ACPI p-state``: Use ACPI p-state. 
+ </xs:documentation> + </xs:annotation> + <xs:restriction base="xs:string"> + <xs:enumeration value="CPUFREQ_INTERFACE_HWP"> + <xs:annotation acrn:title="HWP" /> + </xs:enumeration> + <xs:enumeration value="CPUFREQ_INTERFACE_ACPI"> + <xs:annotation acrn:title="ACPI p-state" /> + </xs:enumeration> + </xs:restriction> +</xs:simpleType> + <xs:simpleType name="PriorityType"> <xs:annotation> <xs:documentation>Two priorities are supported for priority based scheduler: -- 2.25.1
|
|
[PATCH 0/6] Add ACRN CPU frequency management
1. ACRN CPU frequency management's design The base design is to let ACRN own CPU frequency control with those two governors: - Performance: CPU can run at its max possible frequency (turbo boost will be activated if enabled). - Nominal: CPU runs at its base frequency. Users can choose which of the governors to use. Users also must choose which frequency interface to use: - HWP - ACPI p-state The HWP switch MSR IA32_PM_ENABLE is a global control (and it by-passes ACPI cpufreq interface once enabled), so the frequency interface has to be a global setting.
RTVMs need certainty in latency, so their CPUs always run at base/nominal frequency. Here are the combinations: Cores not running any RTVM: - Performance + HWP: Range from lowest to highest performance levels. - Nominal + HWP: Fix to nominal performance level. - Performance + P-state: Fix to highest p-state. - Nominal + P-state: Fix to base P-state. Cores running an RTVM: - P-state: Fix to base P-state - HWP: Fix to nominal performance level.
Just like the Linux cpufreq driver, a 'policy' object type is introduced to help ACRN manage CPU frequency. 'policy' is a per CPU data type which indicates “the highest/lowest/base CPU frequency limits under current HW and scenario setup". It is statically allocated by config-tools, and does not need to be configured. With policy given for each core, the hypervisor doesn’t have to deal with hardware or scenario settings. It only chooses highest or base frequency to run at.
The frequency management system is like this:
VM0 ... VM* (VMs have no CPU freq control)
ACRN Governor (Performance/Nominal) policy0 policy1 ... policy* | | | | | | HWP or p-state HWP or p-state ... HWP or p-state pCPU0 pCPU1 pCPU*
Major changes made to code are: Configuration: - Add a hypervisor config item which specifies the governor. - Add a Boolean config item which specifies the preference of P-state over HWP. - Parse and hand over P-states and HWP info from ACPI namespace to the hypervisor. Hypervisor: - Add the CPU frequency governors. - Hide P-state and HWP in guest CPUID. - Inject #GP(0) upon accesses to P-state and HWP related MSRs. Device Model: - Do not generate _PSS and _PPC for post-launched VMs.
2. ACPI p-state pass through ACPI p-state control can be passed through to guest if it is not sharing pCPUs with others. In this patch CPU sharing is detected by analyzing cpu_affinity config in config-tools.
3. Dealing with CPU frequency domains When CPUs are in a frequency domain, they share frequency on HW level, and would always be working on the same frequency (of the highest one in the group). A typical example is the group of 4 e-cores in ADL.
Those CPUs could be assigned to different VMs. This is no problem for non-RTVMs, because they would not mind running at a higher frequency. But if one of those VMs is an RTVM, we must choose between: - Let all those CPUs run at base frequency for certainty. - Let all those CPUs run at non-guaranteed turbo max frequency. This patch has chosen base frequency for certainty.
Signed-off-by: Wu Zhou <wu.zhou@...>
*** BLURB HERE ***
Wu Zhou (6): config_tools: add HV CPU frequency options to configurator config_tools: extract CPU frequency info in board_inspector config_tools: allocate CPU frequency policy config_tools & hv: generate CPU frequency info code hv: add CPU frequency driver in hv hv: block guest eist/hwp cpuids and MSRs
devicemodel/include/types.h | 1 + hypervisor/arch/x86/cpu.c | 5 + hypervisor/arch/x86/guest/vcpuid.c | 10 + hypervisor/arch/x86/guest/vmsr.c | 38 +++- hypervisor/arch/x86/pm.c | 59 ++++++ hypervisor/common/hypercall.c | 2 +- hypervisor/include/arch/x86/asm/board.h | 2 + hypervisor/include/arch/x86/asm/cpuid.h | 2 + hypervisor/include/arch/x86/asm/guest/vcpu.h | 2 +- hypervisor/include/arch/x86/asm/host_pm.h | 2 + hypervisor/include/arch/x86/asm/per_cpu.h | 1 + hypervisor/include/arch/x86/asm/vm_config.h | 2 + hypervisor/include/public/acrn_common.h | 29 +++ misc/config_tools/board_config/board_c.py | 35 ++++ .../board_inspector/board_inspector.py | 2 +- .../board_inspector/cpuparser/cpuids.py | 6 + .../board_inspector/cpuparser/msr.py | 31 ++- .../board_inspector/cpuparser/platformbase.py | 12 ++ .../extractors/10-processors.py | 45 +++- misc/config_tools/library/board_cfg_lib.py | 37 ++++ misc/config_tools/schema/config.xsd | 23 ++ misc/config_tools/schema/types.xsd | 36 ++++ .../static_allocators/cpu_freq.py | 198 ++++++++++++++++++ .../xforms/vm_configurations.c.xsl | 8 + 24 files changed, 580 insertions(+), 8 deletions(-) create mode 100644 misc/config_tools/static_allocators/cpu_freq.py
-- 2.25.1
|
|
Re: [PATCH v3 1/2] dm: vdisplay: refine vdisplay core concept abstractions
On 2022/8/10 16:13, Sun, Peng wrote: From: Sun Peng <peng.p.sun@...> Add new concept "vscreen" to abstract all specs about screen in a display server. This can provide convenience to add more screens for one VM.
The changelog between two version had better be added. This will highlight what is changed in this version. Signed-off-by: Sun Peng <peng.p.sun@...> --- devicemodel/hw/vdisplay_sdl.c | 480 +++++++++++++++++++++------------- 1 file changed, 296 insertions(+), 184 deletions(-) diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 89f89d16e..d384e38d4 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -32,6 +32,7 @@ #define VDPY_MIN_WIDTH 640 #define VDPY_MIN_HEIGHT 480 #define transto_10bits(color) (uint16_t)(color * 1024 + 0.5) +#define VSCREEN_MAX_NUM 1 static unsigned char default_raw_argb[VDPY_DEFAULT_WIDTH * VDPY_DEFAULT_HEIGHT * 4]; @@ -51,27 +52,39 @@ struct egl_display_ops { PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES; }; -static struct display { +struct vscreen { + struct display_info info; int pscreen_id; SDL_Rect pscreen_rect; - struct display_info info; - struct state s; - SDL_Texture *dpy_texture; - SDL_Window *dpy_win; - SDL_Renderer *dpy_renderer; - pixman_image_t *dpy_img; - pthread_t tid; - int width, height; // Width/height of dpy_win - int org_x, org_y; - int guest_width, guest_height; + bool is_fullscreen; + int org_x; + int org_y; + int width; + int height; + int guest_width; + int guest_height; struct surface surf; struct cursor cur; - SDL_Texture *cursor_tex; + SDL_Texture *surf_tex; + SDL_Texture *cur_tex; + int surf_updates; + int cur_updates; + SDL_Window *win; + SDL_Renderer *renderer; + pixman_image_t *img; + EGLImage egl_img; + /* Record the update_time that is activated from guest_vm */ + struct timespec last_time; +}; + +static struct display { + struct state s; + struct vscreen *vscrs; + int vscrs_num; + pthread_t tid; /* Add one UI_timer(33ms) to render the buffers from guest_vm */ struct acrn_timer ui_timer; struct vdpy_display_bh ui_timer_bh; - /* Record the update_time that is activated from guest_vm */ - struct timespec last_time; // protect the 
request_list pthread_mutex_t vdisplay_mutex; // receive the signal that request is submitted @@ -82,14 +95,11 @@ static struct display { SDL_GLContext eglContext; EGLDisplay eglDisplay; struct egl_display_ops gl_ops; - EGLImage cur_egl_img; } vdpy = { .s.is_ui_realized = false, .s.is_active = false, .s.is_wayland = false, .s.is_x11 = false, - .s.is_fullscreen = false, - .s.updates = 0, .s.n_connect = 0 }; @@ -514,10 +524,16 @@ void vdpy_get_edid(int handle, int scanout_id, uint8_t *edid, size_t size) { struct edid_info edid_info; + struct vscreen *vscr; + + if (scanout_id >= vdpy.vscrs_num) + return; + + vscr = vdpy.vscrs + scanout_id; if (handle == vdpy.s.n_connect) { - edid_info.prefx = vdpy.info.width; - edid_info.prefy = vdpy.info.height; + edid_info.prefx = vscr->info.width; + edid_info.prefy = vscr->info.height; edid_info.maxx = VDPY_MAX_WIDTH; edid_info.maxy = VDPY_MAX_HEIGHT; } else { @@ -537,11 +553,18 @@ vdpy_get_edid(int handle, int scanout_id, uint8_t *edid, size_t size) void vdpy_get_display_info(int handle, int scanout_id, struct display_info *info) { + struct vscreen *vscr; + + if (scanout_id >= vdpy.vscrs_num) + return; + + vscr = vdpy.vscrs + scanout_id; + if (handle == vdpy.s.n_connect) { - info->xoff = vdpy.info.xoff; - info->yoff = vdpy.info.yoff; - info->width = vdpy.info.width; - info->height = vdpy.info.height; + info->xoff = vscr->info.xoff; + info->yoff = vscr->info.yoff; + info->width = vscr->info.width; + info->height = vscr->info.height; } else { info->xoff = 0; info->yoff = 0; @@ -554,6 +577,8 @@ static void sdl_gl_display_init(void) { struct egl_display_ops *gl_ops = &vdpy.gl_ops; + struct vscreen *vscr; + int i; /* obtain the eglDisplay/eglContext */ vdpy.eglDisplay = eglGetCurrentDisplay(); @@ -570,7 +595,11 @@ sdl_gl_display_init(void) gl_ops->glEGLImageTargetTexture2DOES = (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) eglGetProcAddress("glEGLImageTargetTexture2DOES"); - vdpy.cur_egl_img = EGL_NO_IMAGE_KHR; + for (i = 0; i < vdpy.vscrs_num; 
i++) { + vscr = vdpy.vscrs + i; + vscr->egl_img = EGL_NO_IMAGE_KHR; + } + if ((gl_ops->eglCreateImageKHR == NULL) || (gl_ops->eglDestroyImageKHR == NULL) || (gl_ops->glEGLImageTargetTexture2DOES == NULL)) { @@ -588,6 +617,7 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pixman_image_t *src_img; int format; int access, i; + struct vscreen *vscr; if (handle != vdpy.s.n_connect) { return; @@ -599,9 +629,15 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + + vscr = vdpy.vscrs + scanout_id; + if (surf == NULL ) { - vdpy.surf.width = 0; - vdpy.surf.height = 0; + vscr->surf.width = 0; + vscr->surf.height = 0; /* Need to use the default 640x480 for the SDL_Texture */ src_img = pixman_image_create_bits(PIXMAN_a8r8g8b8, VDPY_MIN_WIDTH, VDPY_MIN_HEIGHT, @@ -611,8 +647,8 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pr_err("failed to create pixman_image\n"); return; } - vdpy.guest_width = VDPY_MIN_WIDTH; - vdpy.guest_height = VDPY_MIN_HEIGHT; + vscr->guest_width = VDPY_MIN_WIDTH; + vscr->guest_height = VDPY_MIN_HEIGHT; } else if (surf->surf_type == SURFACE_PIXMAN) { src_img = pixman_image_create_bits(surf->surf_format, surf->width, surf->height, surf->pixel, @@ -621,21 +657,21 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pr_err("failed to create pixman_image\n"); return; } - vdpy.surf = *surf; - vdpy.guest_width = surf->width; - vdpy.guest_height = surf->height; + vscr->surf = *surf; + vscr->guest_width = surf->width; + vscr->guest_height = surf->height; } else if (surf->surf_type == SURFACE_DMABUF) { src_img = NULL; - vdpy.surf = *surf; - vdpy.guest_width = surf->width; - vdpy.guest_height = surf->height; + vscr->surf = *surf; + vscr->guest_width = surf->width; + vscr->guest_height = surf->height; } else { /* Unsupported type */ return; } - if (vdpy.dpy_texture) { - SDL_DestroyTexture(vdpy.dpy_texture); + if 
(vscr->surf_tex) { + SDL_DestroyTexture(vscr->surf_tex); } if (surf && (surf->surf_type == SURFACE_DMABUF)) { access = SDL_TEXTUREACCESS_STATIC; @@ -666,23 +702,23 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pixman_image_get_format(src_img)); } } - vdpy.dpy_texture = SDL_CreateTexture(vdpy.dpy_renderer, + vscr->surf_tex = SDL_CreateTexture(vscr->renderer, format, access, - vdpy.guest_width, vdpy.guest_height); + vscr->guest_width, vscr->guest_height); - if (vdpy.dpy_texture == NULL) { + if (vscr->surf_tex == NULL) { pr_err("Failed to create SDL_texture for surface.\n"); } /* For the surf_switch, it will be updated in surface_update */ if (!surf) { - SDL_UpdateTexture(vdpy.dpy_texture, NULL, + SDL_UpdateTexture(vscr->surf_tex, NULL, pixman_image_get_data(src_img), pixman_image_get_stride(src_img)); - SDL_RenderClear(vdpy.dpy_renderer); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.dpy_texture, NULL, NULL); - SDL_RenderPresent(vdpy.dpy_renderer); + SDL_RenderClear(vscr->renderer); + SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); + SDL_RenderPresent(vscr->renderer); } else if (surf->surf_type == SURFACE_DMABUF) { EGLImageKHR egl_img = EGL_NO_IMAGE_KHR; EGLint attrs[64]; @@ -713,46 +749,54 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) return; } - SDL_GL_BindTexture(vdpy.dpy_texture, NULL, NULL); + SDL_GL_BindTexture(vscr->surf_tex, NULL, NULL); gl_ops->glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_img); - if (vdpy.cur_egl_img != EGL_NO_IMAGE_KHR) + if (vscr->egl_img != EGL_NO_IMAGE_KHR) gl_ops->eglDestroyImageKHR(vdpy.eglDisplay, - vdpy.cur_egl_img); + vscr->egl_img); /* In theory the created egl_img can be released after it is bound * to texture. 
* Now it is released next time so that it is controlled correctly */ - vdpy.cur_egl_img = egl_img; + vscr->egl_img = egl_img; } - if (vdpy.dpy_img) - pixman_image_unref(vdpy.dpy_img); + if (vscr->img) + pixman_image_unref(vscr->img); if (surf == NULL) { - SDL_SetWindowTitle(vdpy.dpy_win, + SDL_SetWindowTitle(vscr->win, "Not activate display yet!"); } else { - SDL_SetWindowTitle(vdpy.dpy_win, + SDL_SetWindowTitle(vscr->win, "ACRN Virtual Monitor"); } /* Replace the cur_img with the created_img */ - vdpy.dpy_img = src_img; + vscr->img = src_img; } void -vdpy_cursor_position_transformation(struct display *vdpy, SDL_Rect *rect) +vdpy_cursor_position_transformation(struct display *vdpy, int scanout_id, SDL_Rect *rect) { - rect->x = (vdpy->cur.x * vdpy->width) / vdpy->guest_width; - rect->y = (vdpy->cur.y * vdpy->height) / vdpy->guest_height; - rect->w = (vdpy->cur.width * vdpy->width) / vdpy->guest_width; - rect->h = (vdpy->cur.height * vdpy->height) / vdpy->guest_height; + struct vscreen *vscr; + + if (scanout_id >= vdpy->vscrs_num) { + return; + } + + vscr = vdpy->vscrs + scanout_id; + rect->x = (vscr->cur.x * vscr->width) / vscr->width; + rect->y = (vscr->cur.y * vscr->height) / vscr->height; + rect->w = (vscr->cur.width * vscr->width) / vscr->width; + rect->h = (vscr->cur.height * vscr->height) / vscr->height; } void vdpy_surface_update(int handle, int scanout_id, struct surface *surf) { SDL_Rect cursor_rect; + struct vscreen *vscr; if (handle != vdpy.s.n_connect) { return; @@ -769,32 +813,39 @@ vdpy_surface_update(int handle, int scanout_id, struct surface *surf) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + + vscr = vdpy.vscrs + scanout_id; if (surf->surf_type == SURFACE_PIXMAN) - SDL_UpdateTexture(vdpy.dpy_texture, NULL, + SDL_UpdateTexture(vscr->surf_tex, NULL, surf->pixel, surf->stride); - SDL_RenderClear(vdpy.dpy_renderer); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.dpy_texture, NULL, NULL); + SDL_RenderClear(vscr->renderer); + 
SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); /* This should be handled after rendering the surface_texture. * Otherwise it will be hidden */ - if (vdpy.cursor_tex) { - vdpy_cursor_position_transformation(&vdpy, &cursor_rect); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.cursor_tex, + if (vscr->cur_tex) { + vdpy_cursor_position_transformation(&vdpy, scanout_id, &cursor_rect); + SDL_RenderCopy(vscr->renderer, vscr->cur_tex, NULL, &cursor_rect); } - SDL_RenderPresent(vdpy.dpy_renderer); + SDL_RenderPresent(vscr->renderer); /* update the rendering time */ - clock_gettime(CLOCK_MONOTONIC, &vdpy.last_time); + clock_gettime(CLOCK_MONOTONIC, &vscr->last_time); } void vdpy_cursor_define(int handle, int scanout_id, struct cursor *cur) { + struct vscreen *vscr; + if (handle != vdpy.s.n_connect) { return; } @@ -805,39 +856,52 @@ vdpy_cursor_define(int handle, int scanout_id, struct cursor *cur) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + if (cur->data == NULL) return; - if (vdpy.cursor_tex) - SDL_DestroyTexture(vdpy.cursor_tex); + vscr = vdpy.vscrs + scanout_id; - vdpy.cursor_tex = SDL_CreateTexture( - vdpy.dpy_renderer, + if (vscr->cur_tex) + SDL_DestroyTexture(vscr->cur_tex); + + vscr->cur_tex = SDL_CreateTexture( + vscr->renderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, cur->width, cur->height); - if (vdpy.cursor_tex == NULL) { + if (vscr->cur_tex == NULL) { pr_err("Failed to create sdl_cursor surface for %p.\n", cur); return; } - SDL_SetTextureBlendMode(vdpy.cursor_tex, SDL_BLENDMODE_BLEND); - vdpy.cur = *cur; - SDL_UpdateTexture(vdpy.cursor_tex, NULL, cur->data, cur->width * 4); + SDL_SetTextureBlendMode(vscr->cur_tex, SDL_BLENDMODE_BLEND); + vscr->cur = *cur; + SDL_UpdateTexture(vscr->cur_tex, NULL, cur->data, cur->width * 4); } void vdpy_cursor_move(int handle, int scanout_id, uint32_t x, uint32_t y) { + struct vscreen *vscr; + if (handle != vdpy.s.n_connect) { return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + 
+ vscr = vdpy.vscrs + scanout_id; /* Only move the position of the cursor. The cursor_texture * will be handled in surface_update */ - vdpy.cur.x = x; - vdpy.cur.y = y; + vscr->cur.x = x; + vscr->cur.y = y; } static void @@ -847,35 +911,41 @@ vdpy_sdl_ui_refresh(void *data) struct timespec cur_time; uint64_t elapsed_time; SDL_Rect cursor_rect; + struct vscreen *vscr; + int i; ui_vdpy = (struct display *)data; - /* Skip it if no surface needs to be rendered */ - if (ui_vdpy->dpy_texture == NULL) - return; + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = ui_vdpy->vscrs + i; - clock_gettime(CLOCK_MONOTONIC, &cur_time); + /* Skip it if no surface needs to be rendered */ + if (vscr->surf_tex == NULL) + continue; - elapsed_time = (cur_time.tv_sec - ui_vdpy->last_time.tv_sec) * 1000000000 + - cur_time.tv_nsec - ui_vdpy->last_time.tv_nsec; + clock_gettime(CLOCK_MONOTONIC, &cur_time); - /* the time interval is less than 10ms. Skip it */ - if (elapsed_time < 10000000) - return; + elapsed_time = (cur_time.tv_sec - vscr->last_time.tv_sec) * 1000000000 + + cur_time.tv_nsec - vscr->last_time.tv_nsec; - SDL_RenderClear(ui_vdpy->dpy_renderer); - SDL_RenderCopy(ui_vdpy->dpy_renderer, ui_vdpy->dpy_texture, NULL, NULL); + /* the time interval is less than 10ms. Skip it */ + if (elapsed_time < 10000000) + return; - /* This should be handled after rendering the surface_texture. - * Otherwise it will be hidden - */ - if (ui_vdpy->cursor_tex) { - vdpy_cursor_position_transformation(ui_vdpy, &cursor_rect); - SDL_RenderCopy(ui_vdpy->dpy_renderer, ui_vdpy->cursor_tex, - NULL, &cursor_rect); - } + SDL_RenderClear(vscr->renderer); + SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); - SDL_RenderPresent(ui_vdpy->dpy_renderer); + /* This should be handled after rendering the surface_texture. 
+ * Otherwise it will be hidden + */ + if (vscr->cur_tex) { + vdpy_cursor_position_transformation(ui_vdpy, i, &cursor_rect); + SDL_RenderCopy(vscr->renderer, vscr->cur_tex, + NULL, &cursor_rect); + } + + SDL_RenderPresent(vscr->renderer); + } } static void @@ -903,60 +973,84 @@ vdpy_sdl_ui_timer(void *data, uint64_t nexp) pthread_mutex_unlock(&ui_vdpy->vdisplay_mutex); } -static void * -vdpy_sdl_display_thread(void *data) +void +vdpy_calibrate_vscreen_geometry(struct vscreen *vscr) { - uint32_t win_flags; - struct vdpy_display_bh *bh; - struct itimerspec ui_timer_spec; - - if (vdpy.width && vdpy.height) { + if (vscr->width && vscr->height) { /* clip the region between (640x480) and (1920x1080) */ - if (vdpy.width < VDPY_MIN_WIDTH) - vdpy.width = VDPY_MIN_WIDTH; - if (vdpy.width > VDPY_MAX_WIDTH) - vdpy.width = VDPY_MAX_WIDTH; - if (vdpy.height < VDPY_MIN_HEIGHT) - vdpy.height = VDPY_MIN_HEIGHT; - if (vdpy.height > VDPY_MAX_HEIGHT) - vdpy.height = VDPY_MAX_HEIGHT; + if (vscr->width < VDPY_MIN_WIDTH) + vscr->width = VDPY_MIN_WIDTH; + if (vscr->width > VDPY_MAX_WIDTH) + vscr->width = VDPY_MAX_WIDTH; + if (vscr->height < VDPY_MIN_HEIGHT) + vscr->height = VDPY_MIN_HEIGHT; + if (vscr->height > VDPY_MAX_HEIGHT) + vscr->height = VDPY_MAX_HEIGHT; } else { /* the default window(1280x720) is created with undefined pos * when no geometry info is passed */ - vdpy.org_x = 0xFFFF; - vdpy.org_y = 0xFFFF; - vdpy.width = VDPY_DEFAULT_WIDTH; - vdpy.height = VDPY_DEFAULT_HEIGHT; + vscr->org_x = 0xFFFF; + vscr->org_y = 0xFFFF; + vscr->width = VDPY_DEFAULT_WIDTH; + vscr->height = VDPY_DEFAULT_HEIGHT; } +} + +int +vdpy_create_vscreen_window(struct vscreen *vscr) +{ + uint32_t win_flags; win_flags = SDL_WINDOW_OPENGL | - SDL_WINDOW_ALWAYS_ON_TOP | - SDL_WINDOW_SHOWN; - if (vdpy.s.is_fullscreen) { + SDL_WINDOW_ALWAYS_ON_TOP | + SDL_WINDOW_SHOWN; + if (vscr->is_fullscreen) { win_flags |= SDL_WINDOW_FULLSCREEN_DESKTOP; } - vdpy.dpy_win = NULL; - vdpy.dpy_renderer = NULL; - vdpy.dpy_img = 
NULL; - vdpy.org_x = vdpy.pscreen_rect.x; - vdpy.org_y = vdpy.pscreen_rect.y; + vscr->win = NULL; + vscr->renderer = NULL; + vscr->img = NULL; + vscr->org_x = vscr->pscreen_rect.x; + vscr->org_y = vscr->pscreen_rect.y; // Zoom to width and height of pscreen is fullscreen enabled - vdpy.dpy_win = SDL_CreateWindow("ACRN_DM", - vdpy.org_x, vdpy.org_y, - vdpy.width, vdpy.height, - win_flags); - if (vdpy.dpy_win == NULL) { + vscr->win = SDL_CreateWindow("ACRN_DM", + vscr->org_x, vscr->org_y, + vscr->width, vscr->height, + win_flags); + if (vscr->win == NULL) { pr_err("Failed to Create SDL_Window\n"); - goto sdl_fail; + return -1; } - pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vdpy.pscreen_id, - vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height); + pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vscr->pscreen_id, + vscr->org_x, vscr->org_y, vscr->width, vscr->height); - vdpy.dpy_renderer = SDL_CreateRenderer(vdpy.dpy_win, -1, 0); - if (vdpy.dpy_renderer == NULL) { + vscr->renderer = SDL_CreateRenderer(vscr->win, -1, 0); + if (vscr->renderer == NULL) { pr_err("Failed to Create GL_Renderer \n"); - goto sdl_fail; + return -1; + } + + return 0; +} + +static void * +vdpy_sdl_display_thread(void *data) +{ + struct vdpy_display_bh *bh; + struct itimerspec ui_timer_spec; + + struct vscreen *vscr; + int i; + + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + + vdpy_calibrate_vscreen_geometry(vscr); + if (vdpy_create_vscreen_window(vscr)) { + goto sdl_fail; + } + clock_gettime(CLOCK_MONOTONIC, &vscr->last_time); } sdl_gl_display_init(); pthread_mutex_init(&vdpy.vdisplay_mutex, NULL); @@ -966,7 +1060,6 @@ vdpy_sdl_display_thread(void *data) vdpy.ui_timer_bh.task_cb = vdpy_sdl_ui_refresh; vdpy.ui_timer_bh.data = &vdpy; - clock_gettime(CLOCK_MONOTONIC, &vdpy.last_time); vdpy.ui_timer.clockid = CLOCK_MONOTONIC; acrn_timer_init(&vdpy.ui_timer, vdpy_sdl_ui_timer, &vdpy); ui_timer_spec.it_interval.tv_sec = 0; @@ -1014,34 +1107,40 @@ 
vdpy_sdl_display_thread(void *data) /* SDL display_thread will exit because of DM request */ pthread_mutex_destroy(&vdpy.vdisplay_mutex); pthread_cond_destroy(&vdpy.vdisplay_signal); - if (vdpy.dpy_img) { - pixman_image_unref(vdpy.dpy_img); - vdpy.dpy_img = NULL; - } - /* Continue to thread cleanup */ - - if (vdpy.dpy_texture) { - SDL_DestroyTexture(vdpy.dpy_texture); - vdpy.dpy_texture = NULL; - } - if (vdpy.cursor_tex) { - SDL_DestroyTexture(vdpy.cursor_tex); - vdpy.cursor_tex = NULL; - } - if (vdpy.egl_dmabuf_supported && (vdpy.cur_egl_img != EGL_NO_IMAGE_KHR)) - vdpy.gl_ops.eglDestroyImageKHR(vdpy.eglDisplay, - vdpy.cur_egl_img); + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->img) { + pixman_image_unref(vscr->img); + vscr->img = NULL; + } + /* Continue to thread cleanup */ -sdl_fail: + if (vscr->surf_tex) { + SDL_DestroyTexture(vscr->surf_tex); + vscr->surf_tex = NULL; + } + if (vscr->cur_tex) { + SDL_DestroyTexture(vscr->cur_tex); + vscr->cur_tex = NULL; + } - if (vdpy.dpy_renderer) { - SDL_DestroyRenderer(vdpy.dpy_renderer); - vdpy.dpy_renderer = NULL; + if (vdpy.egl_dmabuf_supported && (vscr->egl_img != EGL_NO_IMAGE_KHR)) + vdpy.gl_ops.eglDestroyImageKHR(vdpy.eglDisplay, + vscr->egl_img); } - if (vdpy.dpy_win) { - SDL_DestroyWindow(vdpy.dpy_win); - vdpy.dpy_win = NULL; + +sdl_fail: + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->renderer) { + SDL_DestroyRenderer(vscr->renderer); + vscr->renderer = NULL; + } + if (vscr->win) { + SDL_DestroyWindow(vscr->win); + vscr->win = NULL; + } } /* This is used to workaround the TLS issue of libEGL + libGLdispatch @@ -1108,7 +1207,7 @@ vdpy_init(int *supported_wins) vdpy.s.n_connect++; if (supported_wins) - *supported_wins = 1; + *supported_wins = vdpy.vscrs_num; return vdpy.s.n_connect; } @@ -1142,6 +1241,8 @@ gfx_ui_init() { SDL_SysWMinfo info; int num_pscreen; + struct vscreen *vscr; + int i; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); 
setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1154,21 +1255,25 @@ gfx_ui_init() } num_pscreen = SDL_GetNumVideoDisplays(); - if (vdpy.pscreen_id >= num_pscreen) { - pr_err("Monitor id %d is out of avalble range [0~%d].\n", - vdpy.pscreen_id, num_pscreen); - SDL_Quit(); - return -1; - } - SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->pscreen_id >= num_pscreen) { + pr_err("Monitor id %d is out of avalble range [0~%d].\n", + vscr->pscreen_id, num_pscreen); + SDL_Quit(); + return -1; + } + + SDL_GetDisplayBounds(vscr->pscreen_id, &vscr->pscreen_rect); - if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || - vdpy.pscreen_rect.h < VDPY_MIN_HEIGHT) { - pr_err("Too small resolutions. Please check the " - " graphics system\n"); - SDL_Quit(); - return -1; + if (vscr->pscreen_rect.w < VDPY_MIN_WIDTH || + vscr->pscreen_rect.h < VDPY_MIN_HEIGHT) { + pr_err("Too small resolutions. Please check the " + " graphics system\n"); + SDL_Quit(); + return -1; + } } SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1"); @@ -1200,6 +1305,7 @@ gfx_ui_deinit() return; } + free(vdpy.vscrs); SDL_Quit(); pr_info("SDL_Quit\r\n"); } @@ -1208,35 +1314,41 @@ int vdpy_parse_cmd_option(const char *opts) { char *str; int snum, error; + struct vscreen *vscr; error = 0; + vdpy.vscrs = calloc(VSCREEN_MAX_NUM, sizeof(struct vscreen)); + vdpy.vscrs_num = 0; str = strcasestr(opts, "geometry="); + vscr = vdpy.vscrs + vdpy.vscrs_num; if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.pscreen_id); + snum = sscanf(str, "geometry=fullscreen:%d", &vscr->pscreen_id); if (snum != 1) { - vdpy.pscreen_id = 0; + vscr->pscreen_id = 0; } - vdpy.width = VDPY_MAX_WIDTH; - vdpy.height = VDPY_MAX_HEIGHT; - vdpy.s.is_fullscreen = true; + vscr->width = VDPY_MAX_WIDTH; + vscr->height = VDPY_MAX_HEIGHT; + vscr->is_fullscreen = true; + vdpy.vscrs_num++; pr_info("virtual display: fullscreen.\n"); } else if 
(opts && strcasestr(opts, "geometry=")) { snum = sscanf(str, "geometry=%dx%d+%d+%d", - &vdpy.width, &vdpy.height, - &vdpy.org_x, &vdpy.org_y); + &vscr->width, &vscr->height, + &vscr->org_x, &vscr->org_y); if (snum != 4) { pr_err("incorrect geometry option. Should be" " WxH+x+y\n"); error = -1; } - vdpy.s.is_fullscreen = false; + vscr->is_fullscreen = false; + vdpy.vscrs_num++; pr_info("virtual display: windowed.\n"); } - vdpy.info.xoff = 0; - vdpy.info.yoff = 0; - vdpy.info.width = vdpy.width; - vdpy.info.height = vdpy.height; + vscr->info.xoff = 0; + vscr->info.yoff = 0; + vscr->info.width = vdpy.vscrs->width; + vscr->info.height = vdpy.vscrs->height; return error; }
|
|
Re: [PATCH v3 1/2] dm: vdisplay: multi-local-monitor support.
On 2022/8/10 16:11, peng.p.sun@... wrote: From: Sun Peng <peng.p.sun@...> To support full screen mode on one of multi-local-mornitor which connected to SOS by monitor ID that customer specify. The monitor ID is specified in acrn-dm's parameter like this: virtio-gpu,geometry=fullscreen:monitor_id For window mode, the vdisplay window always be shown on monitor 0, because the customer can drag the window to anyone monitor. Besides, the customer can set the monitor by x_off and y_off parameter like this: virtio-gpu,geometry=<width>x<height>+<x_off>+<y_off> It will be better that the change between the two version is added. This will highlight what is changed in this version. Signed-off-by: Sun Peng <peng.p.sun@...> --- devicemodel/hw/vdisplay_sdl.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 65fd78d93..32f929647 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -52,6 +52,8 @@ struct egl_display_ops { }; static struct display { + int pscreen_id; + SDL_Rect pscreen_rect; struct display_info info; struct state s; SDL_Texture *dpy_texture; @@ -62,7 +64,6 @@ static struct display { int width, height; // Width/height of dpy_win int org_x, org_y; int guest_width, guest_height; - int screen; struct surface surf; struct cursor cur; SDL_Texture *cursor_tex; @@ -933,11 +934,14 @@ vdpy_sdl_display_thread(void *data) SDL_WINDOW_ALWAYS_ON_TOP | SDL_WINDOW_SHOWN; if (vdpy.s.is_fullscreen) { - win_flags |= SDL_WINDOW_FULLSCREEN; + win_flags |= SDL_WINDOW_FULLSCREEN_DESKTOP; } vdpy.dpy_win = NULL; vdpy.dpy_renderer = NULL; vdpy.dpy_img = NULL; + vdpy.org_x = vdpy.pscreen_rect.x; + vdpy.org_y = vdpy.pscreen_rect.y; This should be used only under the fullscreen scenario. When it is booted with window_mode, we will prefer to use the input x/y. 
+ // Zoom to width and height of pscreen is fullscreen enabled vdpy.dpy_win = SDL_CreateWindow("ACRN_DM", vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height, @@ -946,6 +950,9 @@ vdpy_sdl_display_thread(void *data) pr_err("Failed to Create SDL_Window\n"); goto sdl_fail; } + pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vdpy.pscreen_id, + vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height); + vdpy.dpy_renderer = SDL_CreateRenderer(vdpy.dpy_win, -1, 0); if (vdpy.dpy_renderer == NULL) { pr_err("Failed to Create GL_Renderer \n"); @@ -1134,7 +1141,6 @@ int gfx_ui_init() { SDL_SysWMinfo info; - SDL_Rect disp_rect; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1146,10 +1152,10 @@ gfx_ui_init() return -1; } - SDL_GetDisplayBounds(0, &disp_rect); + SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); - if (disp_rect.w < VDPY_MIN_WIDTH || - disp_rect.h < VDPY_MIN_HEIGHT) { + if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || + vdpy.pscreen_rect.h < VDPY_MIN_HEIGHT) { pr_err("Too small resolutions. Please check the " " graphics system\n"); SDL_Quit(); @@ -1198,9 +1204,9 @@ int vdpy_parse_cmd_option(const char *opts) str = strcasestr(opts, "geometry="); if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.screen); + snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.pscreen_id); if (snum != 1) { - vdpy.screen = 0; + vdpy.pscreen_id = 0; } vdpy.width = VDPY_MAX_WIDTH; vdpy.height = VDPY_MAX_HEIGHT;
|
|
[PATCH v3 2/2] dm: vdisplay: multi-vdisplay support.
From: Sun Peng <peng.p.sun@...>
Allow one VM to have more than 1 virtual display for output. Until now, the maximum number of virtual displays is 2, so a guest VM can use dual displays for mirror and extended desktop modes. To specify multi-vdisplay, use acrn-dm parameters like this: For fullscreen mode:
virtio-gpu,geometry=fullscreen:monitor_id1,geometry=fullscreen:monitor_id2
For window mode:
virtio-gpu,geometry=<width>x<height>+<x_off>+<y_off>,geometry=<width>x<height>+<x_off>+<y_off>
Signed-off-by: Sun Peng <peng.p.sun@...> Reviewed-by: Zhao Yakui <yakui.zhao@...> --- devicemodel/hw/vdisplay_sdl.c | 69 +++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 27 deletions(-)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index d384e38d4..753fae7f8 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -32,7 +32,7 @@ #define VDPY_MIN_WIDTH 640 #define VDPY_MIN_HEIGHT 480 #define transto_10bits(color) (uint16_t)(color * 1024 + 0.5) -#define VSCREEN_MAX_NUM 1 +#define VSCREEN_MAX_NUM 2 static unsigned char default_raw_argb[VDPY_DEFAULT_WIDTH * VDPY_DEFAULT_HEIGHT * 4]; @@ -1312,7 +1312,7 @@ gfx_ui_deinit() int vdpy_parse_cmd_option(const char *opts) { - char *str; + char *str, *stropts, *tmp; int snum, error; struct vscreen *vscr; @@ -1320,35 +1320,50 @@ int vdpy_parse_cmd_option(const char *opts) vdpy.vscrs = calloc(VSCREEN_MAX_NUM, sizeof(struct vscreen)); vdpy.vscrs_num = 0; - str = strcasestr(opts, "geometry="); - vscr = vdpy.vscrs + vdpy.vscrs_num; - if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vscr->pscreen_id); - if (snum != 1) { + stropts = strdup(opts); + while ((str = strsep(&stropts, ",")) != NULL) { + vscr = vdpy.vscrs + vdpy.vscrs_num; + tmp = strcasestr(str, "geometry="); + if (str && strcasestr(str, "geometry=fullscreen")) { + snum = sscanf(tmp, "geometry=fullscreen:%d", &vscr->pscreen_id); + if (snum != 1) { + vscr->pscreen_id = 0; + } + vscr->width = VDPY_MAX_WIDTH; + vscr->height = VDPY_MAX_HEIGHT; + vscr->is_fullscreen = true; + pr_info("virtual display: fullscreen on monitor %d.\n", + vscr->pscreen_id); + vscr->info.xoff = vscr->org_x; + vscr->info.yoff = vscr->org_y; + vscr->info.width = vscr->width; + vscr->info.height = vscr->height; + vdpy.vscrs_num++; + } else if (str && strcasestr(str, "geometry=")) { + snum = sscanf(tmp, "geometry=%dx%d+%d+%d", + &vscr->width, &vscr->height, + &vscr->org_x, &vscr->org_y); + if (snum != 4) { + pr_err("incorrect geometry option. 
Should be" + " WxH+x+y\n"); + error = -1; + } + vscr->is_fullscreen = false; vscr->pscreen_id = 0; + pr_info("virtual display: windowed on monitor %d.\n", + vscr->pscreen_id); + vscr->info.xoff = vscr->org_x; + vscr->info.yoff = vscr->org_y; + vscr->info.width = vscr->width; + vscr->info.height = vscr->height; + vdpy.vscrs_num++; } - vscr->width = VDPY_MAX_WIDTH; - vscr->height = VDPY_MAX_HEIGHT; - vscr->is_fullscreen = true; - vdpy.vscrs_num++; - pr_info("virtual display: fullscreen.\n"); - } else if (opts && strcasestr(opts, "geometry=")) { - snum = sscanf(str, "geometry=%dx%d+%d+%d", - &vscr->width, &vscr->height, - &vscr->org_x, &vscr->org_y); - if (snum != 4) { - pr_err("incorrect geometry option. Should be" - " WxH+x+y\n"); - error = -1; + if (vdpy.vscrs_num > VSCREEN_MAX_NUM) { + pr_err("%d virtual displays are too many that acrn-dm can't support!\n"); + break; } - vscr->is_fullscreen = false; - vdpy.vscrs_num++; - pr_info("virtual display: windowed.\n"); } + free(stropts); - vscr->info.xoff = 0; - vscr->info.yoff = 0; - vscr->info.width = vdpy.vscrs->width; - vscr->info.height = vdpy.vscrs->height; return error; } -- 2.25.1
|
|
[PATCH v3 1/2] dm: vdisplay: refine vdisplay core concept abstractions
From: Sun Peng <peng.p.sun@...>
Add a new concept, "vscreen", to abstract all screen-related specifications in a display server. This makes it convenient to add more screens for one VM.
Signed-off-by: Sun Peng <peng.p.sun@...> --- devicemodel/hw/vdisplay_sdl.c | 480 +++++++++++++++++++++------------- 1 file changed, 296 insertions(+), 184 deletions(-)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 89f89d16e..d384e38d4 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -32,6 +32,7 @@ #define VDPY_MIN_WIDTH 640 #define VDPY_MIN_HEIGHT 480 #define transto_10bits(color) (uint16_t)(color * 1024 + 0.5) +#define VSCREEN_MAX_NUM 1 static unsigned char default_raw_argb[VDPY_DEFAULT_WIDTH * VDPY_DEFAULT_HEIGHT * 4]; @@ -51,27 +52,39 @@ struct egl_display_ops { PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES; }; -static struct display { +struct vscreen { + struct display_info info; int pscreen_id; SDL_Rect pscreen_rect; - struct display_info info; - struct state s; - SDL_Texture *dpy_texture; - SDL_Window *dpy_win; - SDL_Renderer *dpy_renderer; - pixman_image_t *dpy_img; - pthread_t tid; - int width, height; // Width/height of dpy_win - int org_x, org_y; - int guest_width, guest_height; + bool is_fullscreen; + int org_x; + int org_y; + int width; + int height; + int guest_width; + int guest_height; struct surface surf; struct cursor cur; - SDL_Texture *cursor_tex; + SDL_Texture *surf_tex; + SDL_Texture *cur_tex; + int surf_updates; + int cur_updates; + SDL_Window *win; + SDL_Renderer *renderer; + pixman_image_t *img; + EGLImage egl_img; + /* Record the update_time that is activated from guest_vm */ + struct timespec last_time; +}; + +static struct display { + struct state s; + struct vscreen *vscrs; + int vscrs_num; + pthread_t tid; /* Add one UI_timer(33ms) to render the buffers from guest_vm */ struct acrn_timer ui_timer; struct vdpy_display_bh ui_timer_bh; - /* Record the update_time that is activated from guest_vm */ - struct timespec last_time; // protect the request_list pthread_mutex_t vdisplay_mutex; // receive the signal that request is submitted @@ -82,14 +95,11 @@ static struct display { SDL_GLContext eglContext; EGLDisplay eglDisplay; struct egl_display_ops gl_ops; - EGLImage cur_egl_img; } vdpy = { .s.is_ui_realized = false, 
.s.is_active = false, .s.is_wayland = false, .s.is_x11 = false, - .s.is_fullscreen = false, - .s.updates = 0, .s.n_connect = 0 }; @@ -514,10 +524,16 @@ void vdpy_get_edid(int handle, int scanout_id, uint8_t *edid, size_t size) { struct edid_info edid_info; + struct vscreen *vscr; + + if (scanout_id >= vdpy.vscrs_num) + return; + + vscr = vdpy.vscrs + scanout_id; if (handle == vdpy.s.n_connect) { - edid_info.prefx = vdpy.info.width; - edid_info.prefy = vdpy.info.height; + edid_info.prefx = vscr->info.width; + edid_info.prefy = vscr->info.height; edid_info.maxx = VDPY_MAX_WIDTH; edid_info.maxy = VDPY_MAX_HEIGHT; } else { @@ -537,11 +553,18 @@ vdpy_get_edid(int handle, int scanout_id, uint8_t *edid, size_t size) void vdpy_get_display_info(int handle, int scanout_id, struct display_info *info) { + struct vscreen *vscr; + + if (scanout_id >= vdpy.vscrs_num) + return; + + vscr = vdpy.vscrs + scanout_id; + if (handle == vdpy.s.n_connect) { - info->xoff = vdpy.info.xoff; - info->yoff = vdpy.info.yoff; - info->width = vdpy.info.width; - info->height = vdpy.info.height; + info->xoff = vscr->info.xoff; + info->yoff = vscr->info.yoff; + info->width = vscr->info.width; + info->height = vscr->info.height; } else { info->xoff = 0; info->yoff = 0; @@ -554,6 +577,8 @@ static void sdl_gl_display_init(void) { struct egl_display_ops *gl_ops = &vdpy.gl_ops; + struct vscreen *vscr; + int i; /* obtain the eglDisplay/eglContext */ vdpy.eglDisplay = eglGetCurrentDisplay(); @@ -570,7 +595,11 @@ sdl_gl_display_init(void) gl_ops->glEGLImageTargetTexture2DOES = (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) eglGetProcAddress("glEGLImageTargetTexture2DOES"); - vdpy.cur_egl_img = EGL_NO_IMAGE_KHR; + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + vscr->egl_img = EGL_NO_IMAGE_KHR; + } + if ((gl_ops->eglCreateImageKHR == NULL) || (gl_ops->eglDestroyImageKHR == NULL) || (gl_ops->glEGLImageTargetTexture2DOES == NULL)) { @@ -588,6 +617,7 @@ vdpy_surface_set(int handle, int scanout_id, struct 
surface *surf) pixman_image_t *src_img; int format; int access, i; + struct vscreen *vscr; if (handle != vdpy.s.n_connect) { return; @@ -599,9 +629,15 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + + vscr = vdpy.vscrs + scanout_id; + if (surf == NULL ) { - vdpy.surf.width = 0; - vdpy.surf.height = 0; + vscr->surf.width = 0; + vscr->surf.height = 0; /* Need to use the default 640x480 for the SDL_Texture */ src_img = pixman_image_create_bits(PIXMAN_a8r8g8b8, VDPY_MIN_WIDTH, VDPY_MIN_HEIGHT, @@ -611,8 +647,8 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pr_err("failed to create pixman_image\n"); return; } - vdpy.guest_width = VDPY_MIN_WIDTH; - vdpy.guest_height = VDPY_MIN_HEIGHT; + vscr->guest_width = VDPY_MIN_WIDTH; + vscr->guest_height = VDPY_MIN_HEIGHT; } else if (surf->surf_type == SURFACE_PIXMAN) { src_img = pixman_image_create_bits(surf->surf_format, surf->width, surf->height, surf->pixel, @@ -621,21 +657,21 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pr_err("failed to create pixman_image\n"); return; } - vdpy.surf = *surf; - vdpy.guest_width = surf->width; - vdpy.guest_height = surf->height; + vscr->surf = *surf; + vscr->guest_width = surf->width; + vscr->guest_height = surf->height; } else if (surf->surf_type == SURFACE_DMABUF) { src_img = NULL; - vdpy.surf = *surf; - vdpy.guest_width = surf->width; - vdpy.guest_height = surf->height; + vscr->surf = *surf; + vscr->guest_width = surf->width; + vscr->guest_height = surf->height; } else { /* Unsupported type */ return; } - if (vdpy.dpy_texture) { - SDL_DestroyTexture(vdpy.dpy_texture); + if (vscr->surf_tex) { + SDL_DestroyTexture(vscr->surf_tex); } if (surf && (surf->surf_type == SURFACE_DMABUF)) { access = SDL_TEXTUREACCESS_STATIC; @@ -666,23 +702,23 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) pixman_image_get_format(src_img)); } } - vdpy.dpy_texture = 
SDL_CreateTexture(vdpy.dpy_renderer, + vscr->surf_tex = SDL_CreateTexture(vscr->renderer, format, access, - vdpy.guest_width, vdpy.guest_height); + vscr->guest_width, vscr->guest_height); - if (vdpy.dpy_texture == NULL) { + if (vscr->surf_tex == NULL) { pr_err("Failed to create SDL_texture for surface.\n"); } /* For the surf_switch, it will be updated in surface_update */ if (!surf) { - SDL_UpdateTexture(vdpy.dpy_texture, NULL, + SDL_UpdateTexture(vscr->surf_tex, NULL, pixman_image_get_data(src_img), pixman_image_get_stride(src_img)); - SDL_RenderClear(vdpy.dpy_renderer); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.dpy_texture, NULL, NULL); - SDL_RenderPresent(vdpy.dpy_renderer); + SDL_RenderClear(vscr->renderer); + SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); + SDL_RenderPresent(vscr->renderer); } else if (surf->surf_type == SURFACE_DMABUF) { EGLImageKHR egl_img = EGL_NO_IMAGE_KHR; EGLint attrs[64]; @@ -713,46 +749,54 @@ vdpy_surface_set(int handle, int scanout_id, struct surface *surf) return; } - SDL_GL_BindTexture(vdpy.dpy_texture, NULL, NULL); + SDL_GL_BindTexture(vscr->surf_tex, NULL, NULL); gl_ops->glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_img); - if (vdpy.cur_egl_img != EGL_NO_IMAGE_KHR) + if (vscr->egl_img != EGL_NO_IMAGE_KHR) gl_ops->eglDestroyImageKHR(vdpy.eglDisplay, - vdpy.cur_egl_img); + vscr->egl_img); /* In theory the created egl_img can be released after it is bound * to texture. 
* Now it is released next time so that it is controlled correctly */ - vdpy.cur_egl_img = egl_img; + vscr->egl_img = egl_img; } - if (vdpy.dpy_img) - pixman_image_unref(vdpy.dpy_img); + if (vscr->img) + pixman_image_unref(vscr->img); if (surf == NULL) { - SDL_SetWindowTitle(vdpy.dpy_win, + SDL_SetWindowTitle(vscr->win, "Not activate display yet!"); } else { - SDL_SetWindowTitle(vdpy.dpy_win, + SDL_SetWindowTitle(vscr->win, "ACRN Virtual Monitor"); } /* Replace the cur_img with the created_img */ - vdpy.dpy_img = src_img; + vscr->img = src_img; } void -vdpy_cursor_position_transformation(struct display *vdpy, SDL_Rect *rect) +vdpy_cursor_position_transformation(struct display *vdpy, int scanout_id, SDL_Rect *rect) { - rect->x = (vdpy->cur.x * vdpy->width) / vdpy->guest_width; - rect->y = (vdpy->cur.y * vdpy->height) / vdpy->guest_height; - rect->w = (vdpy->cur.width * vdpy->width) / vdpy->guest_width; - rect->h = (vdpy->cur.height * vdpy->height) / vdpy->guest_height; + struct vscreen *vscr; + + if (scanout_id >= vdpy->vscrs_num) { + return; + } + + vscr = vdpy->vscrs + scanout_id; + rect->x = (vscr->cur.x * vscr->width) / vscr->width; + rect->y = (vscr->cur.y * vscr->height) / vscr->height; + rect->w = (vscr->cur.width * vscr->width) / vscr->width; + rect->h = (vscr->cur.height * vscr->height) / vscr->height; } void vdpy_surface_update(int handle, int scanout_id, struct surface *surf) { SDL_Rect cursor_rect; + struct vscreen *vscr; if (handle != vdpy.s.n_connect) { return; @@ -769,32 +813,39 @@ vdpy_surface_update(int handle, int scanout_id, struct surface *surf) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + + vscr = vdpy.vscrs + scanout_id; if (surf->surf_type == SURFACE_PIXMAN) - SDL_UpdateTexture(vdpy.dpy_texture, NULL, + SDL_UpdateTexture(vscr->surf_tex, NULL, surf->pixel, surf->stride); - SDL_RenderClear(vdpy.dpy_renderer); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.dpy_texture, NULL, NULL); + SDL_RenderClear(vscr->renderer); + 
SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); /* This should be handled after rendering the surface_texture. * Otherwise it will be hidden */ - if (vdpy.cursor_tex) { - vdpy_cursor_position_transformation(&vdpy, &cursor_rect); - SDL_RenderCopy(vdpy.dpy_renderer, vdpy.cursor_tex, + if (vscr->cur_tex) { + vdpy_cursor_position_transformation(&vdpy, scanout_id, &cursor_rect); + SDL_RenderCopy(vscr->renderer, vscr->cur_tex, NULL, &cursor_rect); } - SDL_RenderPresent(vdpy.dpy_renderer); + SDL_RenderPresent(vscr->renderer); /* update the rendering time */ - clock_gettime(CLOCK_MONOTONIC, &vdpy.last_time); + clock_gettime(CLOCK_MONOTONIC, &vscr->last_time); } void vdpy_cursor_define(int handle, int scanout_id, struct cursor *cur) { + struct vscreen *vscr; + if (handle != vdpy.s.n_connect) { return; } @@ -805,39 +856,52 @@ vdpy_cursor_define(int handle, int scanout_id, struct cursor *cur) return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + if (cur->data == NULL) return; - if (vdpy.cursor_tex) - SDL_DestroyTexture(vdpy.cursor_tex); + vscr = vdpy.vscrs + scanout_id; - vdpy.cursor_tex = SDL_CreateTexture( - vdpy.dpy_renderer, + if (vscr->cur_tex) + SDL_DestroyTexture(vscr->cur_tex); + + vscr->cur_tex = SDL_CreateTexture( + vscr->renderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, cur->width, cur->height); - if (vdpy.cursor_tex == NULL) { + if (vscr->cur_tex == NULL) { pr_err("Failed to create sdl_cursor surface for %p.\n", cur); return; } - SDL_SetTextureBlendMode(vdpy.cursor_tex, SDL_BLENDMODE_BLEND); - vdpy.cur = *cur; - SDL_UpdateTexture(vdpy.cursor_tex, NULL, cur->data, cur->width * 4); + SDL_SetTextureBlendMode(vscr->cur_tex, SDL_BLENDMODE_BLEND); + vscr->cur = *cur; + SDL_UpdateTexture(vscr->cur_tex, NULL, cur->data, cur->width * 4); } void vdpy_cursor_move(int handle, int scanout_id, uint32_t x, uint32_t y) { + struct vscreen *vscr; + if (handle != vdpy.s.n_connect) { return; } + if (scanout_id >= vdpy.vscrs_num) { + return; + } + 
+ vscr = vdpy.vscrs + scanout_id; /* Only move the position of the cursor. The cursor_texture * will be handled in surface_update */ - vdpy.cur.x = x; - vdpy.cur.y = y; + vscr->cur.x = x; + vscr->cur.y = y; } static void @@ -847,35 +911,41 @@ vdpy_sdl_ui_refresh(void *data) struct timespec cur_time; uint64_t elapsed_time; SDL_Rect cursor_rect; + struct vscreen *vscr; + int i; ui_vdpy = (struct display *)data; - /* Skip it if no surface needs to be rendered */ - if (ui_vdpy->dpy_texture == NULL) - return; + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = ui_vdpy->vscrs + i; - clock_gettime(CLOCK_MONOTONIC, &cur_time); + /* Skip it if no surface needs to be rendered */ + if (vscr->surf_tex == NULL) + continue; - elapsed_time = (cur_time.tv_sec - ui_vdpy->last_time.tv_sec) * 1000000000 + - cur_time.tv_nsec - ui_vdpy->last_time.tv_nsec; + clock_gettime(CLOCK_MONOTONIC, &cur_time); - /* the time interval is less than 10ms. Skip it */ - if (elapsed_time < 10000000) - return; + elapsed_time = (cur_time.tv_sec - vscr->last_time.tv_sec) * 1000000000 + + cur_time.tv_nsec - vscr->last_time.tv_nsec; - SDL_RenderClear(ui_vdpy->dpy_renderer); - SDL_RenderCopy(ui_vdpy->dpy_renderer, ui_vdpy->dpy_texture, NULL, NULL); + /* the time interval is less than 10ms. Skip it */ + if (elapsed_time < 10000000) + return; - /* This should be handled after rendering the surface_texture. - * Otherwise it will be hidden - */ - if (ui_vdpy->cursor_tex) { - vdpy_cursor_position_transformation(ui_vdpy, &cursor_rect); - SDL_RenderCopy(ui_vdpy->dpy_renderer, ui_vdpy->cursor_tex, - NULL, &cursor_rect); - } + SDL_RenderClear(vscr->renderer); + SDL_RenderCopy(vscr->renderer, vscr->surf_tex, NULL, NULL); - SDL_RenderPresent(ui_vdpy->dpy_renderer); + /* This should be handled after rendering the surface_texture. 
+ * Otherwise it will be hidden + */ + if (vscr->cur_tex) { + vdpy_cursor_position_transformation(ui_vdpy, i, &cursor_rect); + SDL_RenderCopy(vscr->renderer, vscr->cur_tex, + NULL, &cursor_rect); + } + + SDL_RenderPresent(vscr->renderer); + } } static void @@ -903,60 +973,84 @@ vdpy_sdl_ui_timer(void *data, uint64_t nexp) pthread_mutex_unlock(&ui_vdpy->vdisplay_mutex); } -static void * -vdpy_sdl_display_thread(void *data) +void +vdpy_calibrate_vscreen_geometry(struct vscreen *vscr) { - uint32_t win_flags; - struct vdpy_display_bh *bh; - struct itimerspec ui_timer_spec; - - if (vdpy.width && vdpy.height) { + if (vscr->width && vscr->height) { /* clip the region between (640x480) and (1920x1080) */ - if (vdpy.width < VDPY_MIN_WIDTH) - vdpy.width = VDPY_MIN_WIDTH; - if (vdpy.width > VDPY_MAX_WIDTH) - vdpy.width = VDPY_MAX_WIDTH; - if (vdpy.height < VDPY_MIN_HEIGHT) - vdpy.height = VDPY_MIN_HEIGHT; - if (vdpy.height > VDPY_MAX_HEIGHT) - vdpy.height = VDPY_MAX_HEIGHT; + if (vscr->width < VDPY_MIN_WIDTH) + vscr->width = VDPY_MIN_WIDTH; + if (vscr->width > VDPY_MAX_WIDTH) + vscr->width = VDPY_MAX_WIDTH; + if (vscr->height < VDPY_MIN_HEIGHT) + vscr->height = VDPY_MIN_HEIGHT; + if (vscr->height > VDPY_MAX_HEIGHT) + vscr->height = VDPY_MAX_HEIGHT; } else { /* the default window(1280x720) is created with undefined pos * when no geometry info is passed */ - vdpy.org_x = 0xFFFF; - vdpy.org_y = 0xFFFF; - vdpy.width = VDPY_DEFAULT_WIDTH; - vdpy.height = VDPY_DEFAULT_HEIGHT; + vscr->org_x = 0xFFFF; + vscr->org_y = 0xFFFF; + vscr->width = VDPY_DEFAULT_WIDTH; + vscr->height = VDPY_DEFAULT_HEIGHT; } +} + +int +vdpy_create_vscreen_window(struct vscreen *vscr) +{ + uint32_t win_flags; win_flags = SDL_WINDOW_OPENGL | - SDL_WINDOW_ALWAYS_ON_TOP | - SDL_WINDOW_SHOWN; - if (vdpy.s.is_fullscreen) { + SDL_WINDOW_ALWAYS_ON_TOP | + SDL_WINDOW_SHOWN; + if (vscr->is_fullscreen) { win_flags |= SDL_WINDOW_FULLSCREEN_DESKTOP; } - vdpy.dpy_win = NULL; - vdpy.dpy_renderer = NULL; - vdpy.dpy_img = 
NULL; - vdpy.org_x = vdpy.pscreen_rect.x; - vdpy.org_y = vdpy.pscreen_rect.y; + vscr->win = NULL; + vscr->renderer = NULL; + vscr->img = NULL; + vscr->org_x = vscr->pscreen_rect.x; + vscr->org_y = vscr->pscreen_rect.y; // Zoom to width and height of pscreen is fullscreen enabled - vdpy.dpy_win = SDL_CreateWindow("ACRN_DM", - vdpy.org_x, vdpy.org_y, - vdpy.width, vdpy.height, - win_flags); - if (vdpy.dpy_win == NULL) { + vscr->win = SDL_CreateWindow("ACRN_DM", + vscr->org_x, vscr->org_y, + vscr->width, vscr->height, + win_flags); + if (vscr->win == NULL) { pr_err("Failed to Create SDL_Window\n"); - goto sdl_fail; + return -1; } - pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vdpy.pscreen_id, - vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height); + pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vscr->pscreen_id, + vscr->org_x, vscr->org_y, vscr->width, vscr->height); - vdpy.dpy_renderer = SDL_CreateRenderer(vdpy.dpy_win, -1, 0); - if (vdpy.dpy_renderer == NULL) { + vscr->renderer = SDL_CreateRenderer(vscr->win, -1, 0); + if (vscr->renderer == NULL) { pr_err("Failed to Create GL_Renderer \n"); - goto sdl_fail; + return -1; + } + + return 0; +} + +static void * +vdpy_sdl_display_thread(void *data) +{ + struct vdpy_display_bh *bh; + struct itimerspec ui_timer_spec; + + struct vscreen *vscr; + int i; + + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + + vdpy_calibrate_vscreen_geometry(vscr); + if (vdpy_create_vscreen_window(vscr)) { + goto sdl_fail; + } + clock_gettime(CLOCK_MONOTONIC, &vscr->last_time); } sdl_gl_display_init(); pthread_mutex_init(&vdpy.vdisplay_mutex, NULL); @@ -966,7 +1060,6 @@ vdpy_sdl_display_thread(void *data) vdpy.ui_timer_bh.task_cb = vdpy_sdl_ui_refresh; vdpy.ui_timer_bh.data = &vdpy; - clock_gettime(CLOCK_MONOTONIC, &vdpy.last_time); vdpy.ui_timer.clockid = CLOCK_MONOTONIC; acrn_timer_init(&vdpy.ui_timer, vdpy_sdl_ui_timer, &vdpy); ui_timer_spec.it_interval.tv_sec = 0; @@ -1014,34 +1107,40 @@ 
vdpy_sdl_display_thread(void *data) /* SDL display_thread will exit because of DM request */ pthread_mutex_destroy(&vdpy.vdisplay_mutex); pthread_cond_destroy(&vdpy.vdisplay_signal); - if (vdpy.dpy_img) { - pixman_image_unref(vdpy.dpy_img); - vdpy.dpy_img = NULL; - } - /* Continue to thread cleanup */ - - if (vdpy.dpy_texture) { - SDL_DestroyTexture(vdpy.dpy_texture); - vdpy.dpy_texture = NULL; - } - if (vdpy.cursor_tex) { - SDL_DestroyTexture(vdpy.cursor_tex); - vdpy.cursor_tex = NULL; - } - if (vdpy.egl_dmabuf_supported && (vdpy.cur_egl_img != EGL_NO_IMAGE_KHR)) - vdpy.gl_ops.eglDestroyImageKHR(vdpy.eglDisplay, - vdpy.cur_egl_img); + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->img) { + pixman_image_unref(vscr->img); + vscr->img = NULL; + } + /* Continue to thread cleanup */ -sdl_fail: + if (vscr->surf_tex) { + SDL_DestroyTexture(vscr->surf_tex); + vscr->surf_tex = NULL; + } + if (vscr->cur_tex) { + SDL_DestroyTexture(vscr->cur_tex); + vscr->cur_tex = NULL; + } - if (vdpy.dpy_renderer) { - SDL_DestroyRenderer(vdpy.dpy_renderer); - vdpy.dpy_renderer = NULL; + if (vdpy.egl_dmabuf_supported && (vscr->egl_img != EGL_NO_IMAGE_KHR)) + vdpy.gl_ops.eglDestroyImageKHR(vdpy.eglDisplay, + vscr->egl_img); } - if (vdpy.dpy_win) { - SDL_DestroyWindow(vdpy.dpy_win); - vdpy.dpy_win = NULL; + +sdl_fail: + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->renderer) { + SDL_DestroyRenderer(vscr->renderer); + vscr->renderer = NULL; + } + if (vscr->win) { + SDL_DestroyWindow(vscr->win); + vscr->win = NULL; + } } /* This is used to workaround the TLS issue of libEGL + libGLdispatch @@ -1108,7 +1207,7 @@ vdpy_init(int *supported_wins) vdpy.s.n_connect++; if (supported_wins) - *supported_wins = 1; + *supported_wins = vdpy.vscrs_num; return vdpy.s.n_connect; } @@ -1142,6 +1241,8 @@ gfx_ui_init() { SDL_SysWMinfo info; int num_pscreen; + struct vscreen *vscr; + int i; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); 
setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1154,21 +1255,25 @@ gfx_ui_init() } num_pscreen = SDL_GetNumVideoDisplays(); - if (vdpy.pscreen_id >= num_pscreen) { - pr_err("Monitor id %d is out of avalble range [0~%d].\n", - vdpy.pscreen_id, num_pscreen); - SDL_Quit(); - return -1; - } - SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); + for (i = 0; i < vdpy.vscrs_num; i++) { + vscr = vdpy.vscrs + i; + if (vscr->pscreen_id >= num_pscreen) { + pr_err("Monitor id %d is out of avalble range [0~%d].\n", + vscr->pscreen_id, num_pscreen); + SDL_Quit(); + return -1; + } + + SDL_GetDisplayBounds(vscr->pscreen_id, &vscr->pscreen_rect); - if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || - vdpy.pscreen_rect.h < VDPY_MIN_HEIGHT) { - pr_err("Too small resolutions. Please check the " - " graphics system\n"); - SDL_Quit(); - return -1; + if (vscr->pscreen_rect.w < VDPY_MIN_WIDTH || + vscr->pscreen_rect.h < VDPY_MIN_HEIGHT) { + pr_err("Too small resolutions. Please check the " + " graphics system\n"); + SDL_Quit(); + return -1; + } } SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1"); @@ -1200,6 +1305,7 @@ gfx_ui_deinit() return; } + free(vdpy.vscrs); SDL_Quit(); pr_info("SDL_Quit\r\n"); } @@ -1208,35 +1314,41 @@ int vdpy_parse_cmd_option(const char *opts) { char *str; int snum, error; + struct vscreen *vscr; error = 0; + vdpy.vscrs = calloc(VSCREEN_MAX_NUM, sizeof(struct vscreen)); + vdpy.vscrs_num = 0; str = strcasestr(opts, "geometry="); + vscr = vdpy.vscrs + vdpy.vscrs_num; if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.pscreen_id); + snum = sscanf(str, "geometry=fullscreen:%d", &vscr->pscreen_id); if (snum != 1) { - vdpy.pscreen_id = 0; + vscr->pscreen_id = 0; } - vdpy.width = VDPY_MAX_WIDTH; - vdpy.height = VDPY_MAX_HEIGHT; - vdpy.s.is_fullscreen = true; + vscr->width = VDPY_MAX_WIDTH; + vscr->height = VDPY_MAX_HEIGHT; + vscr->is_fullscreen = true; + vdpy.vscrs_num++; pr_info("virtual display: fullscreen.\n"); } else if 
(opts && strcasestr(opts, "geometry=")) { snum = sscanf(str, "geometry=%dx%d+%d+%d", - &vdpy.width, &vdpy.height, - &vdpy.org_x, &vdpy.org_y); + &vscr->width, &vscr->height, + &vscr->org_x, &vscr->org_y); if (snum != 4) { pr_err("incorrect geometry option. Should be" " WxH+x+y\n"); error = -1; } - vdpy.s.is_fullscreen = false; + vscr->is_fullscreen = false; + vdpy.vscrs_num++; pr_info("virtual display: windowed.\n"); } - vdpy.info.xoff = 0; - vdpy.info.yoff = 0; - vdpy.info.width = vdpy.width; - vdpy.info.height = vdpy.height; + vscr->info.xoff = 0; + vscr->info.yoff = 0; + vscr->info.width = vdpy.vscrs->width; + vscr->info.height = vdpy.vscrs->height; return error; } -- 2.25.1
|
|
[PATCH v3 2/2] dm: vdisplay: Add physical monitor id check.
From: Sun Peng <peng.p.sun@...>
vdisplay uses the physical monitor id (pscreen index) to locate the monitor. The max index value is always the physical monitor number - 1. For example, if there are 4 physical monitors connected, the monitor ids should be 0, 1, 2 and 3. We need to check the monitor id that the user inputs and make sure it is in the correct range.
Signed-off-by: Sun Peng <peng.p.sun@...> Reviewed-by: Zhao Yakui <yakui.zhao@...> --- devicemodel/hw/vdisplay_sdl.c | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 32f929647..89f89d16e 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -1141,6 +1141,7 @@ int gfx_ui_init() { SDL_SysWMinfo info; + int num_pscreen; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1152,6 +1153,14 @@ gfx_ui_init() return -1; } + num_pscreen = SDL_GetNumVideoDisplays(); + if (vdpy.pscreen_id >= num_pscreen) { + pr_err("Monitor id %d is out of avalble range [0~%d].\n", + vdpy.pscreen_id, num_pscreen); + SDL_Quit(); + return -1; + } + SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || -- 2.25.1
|
|
[PATCH v3 1/2] dm: vdisplay: multi-local-mornitor support.
From: Sun Peng <peng.p.sun@...>
To support full screen mode on one of the multiple local monitors connected to the SOS, selected by a monitor ID that the customer specifies. The monitor ID is specified in acrn-dm's parameter like this:
virtio-gpu,geometry=fullscreen:monitor_id
For window mode, the vdisplay window is always shown on monitor 0, because the customer can drag the window to any monitor. Besides, the customer can set the position with the x_off and y_off parameters like this:
virtio-gpu,geometry=<width>x<height>+<x_off>+<y_off>
Signed-off-by: Sun Peng <peng.p.sun@...> --- devicemodel/hw/vdisplay_sdl.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/devicemodel/hw/vdisplay_sdl.c b/devicemodel/hw/vdisplay_sdl.c index 65fd78d93..32f929647 100644 --- a/devicemodel/hw/vdisplay_sdl.c +++ b/devicemodel/hw/vdisplay_sdl.c @@ -52,6 +52,8 @@ struct egl_display_ops { }; static struct display { + int pscreen_id; + SDL_Rect pscreen_rect; struct display_info info; struct state s; SDL_Texture *dpy_texture; @@ -62,7 +64,6 @@ static struct display { int width, height; // Width/height of dpy_win int org_x, org_y; int guest_width, guest_height; - int screen; struct surface surf; struct cursor cur; SDL_Texture *cursor_tex; @@ -933,11 +934,14 @@ vdpy_sdl_display_thread(void *data) SDL_WINDOW_ALWAYS_ON_TOP | SDL_WINDOW_SHOWN; if (vdpy.s.is_fullscreen) { - win_flags |= SDL_WINDOW_FULLSCREEN; + win_flags |= SDL_WINDOW_FULLSCREEN_DESKTOP; } vdpy.dpy_win = NULL; vdpy.dpy_renderer = NULL; vdpy.dpy_img = NULL; + vdpy.org_x = vdpy.pscreen_rect.x; + vdpy.org_y = vdpy.pscreen_rect.y; + // Zoom to width and height of pscreen is fullscreen enabled vdpy.dpy_win = SDL_CreateWindow("ACRN_DM", vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height, @@ -946,6 +950,9 @@ vdpy_sdl_display_thread(void *data) pr_err("Failed to Create SDL_Window\n"); goto sdl_fail; } + pr_info("SDL display bind to screen %d: [%d,%d,%d,%d].\n", vdpy.pscreen_id, + vdpy.org_x, vdpy.org_y, vdpy.width, vdpy.height); + vdpy.dpy_renderer = SDL_CreateRenderer(vdpy.dpy_win, -1, 0); if (vdpy.dpy_renderer == NULL) { pr_err("Failed to Create GL_Renderer \n"); @@ -1134,7 +1141,6 @@ int gfx_ui_init() { SDL_SysWMinfo info; - SDL_Rect disp_rect; setenv("SDL_VIDEO_X11_FORCE_EGL", "1", 1); setenv("SDL_OPENGL_ES_DRIVER", "1", 1); @@ -1146,10 +1152,10 @@ gfx_ui_init() return -1; } - SDL_GetDisplayBounds(0, &disp_rect); + SDL_GetDisplayBounds(vdpy.pscreen_id, &vdpy.pscreen_rect); - if (disp_rect.w < VDPY_MIN_WIDTH || - disp_rect.h < VDPY_MIN_HEIGHT) { + if (vdpy.pscreen_rect.w < VDPY_MIN_WIDTH || + vdpy.pscreen_rect.h < VDPY_MIN_HEIGHT) { pr_err("Too small resolutions. 
Please check the " " graphics system\n"); SDL_Quit(); @@ -1198,9 +1204,9 @@ int vdpy_parse_cmd_option(const char *opts) str = strcasestr(opts, "geometry="); if (opts && strcasestr(opts, "geometry=fullscreen")) { - snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.screen); + snum = sscanf(str, "geometry=fullscreen:%d", &vdpy.pscreen_id); if (snum != 1) { - vdpy.screen = 0; + vdpy.pscreen_id = 0; } vdpy.width = VDPY_MAX_WIDTH; vdpy.height = VDPY_MAX_HEIGHT; -- 2.25.1
|
|