Re: [PATCH v3 2/4] hv: replace the CONFIG_PLATFORM_RAM_SIZE with get_e820_ram_size for mmu


Xu, Anthony
 

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On Behalf Of chenli.wei
Sent: Monday, February 14, 2022 6:24 PM
To: acrn-dev@...; Wang, Yu1 <yu1.wang@...>
Cc: Wei, Chenli <chenli.wei@...>
Subject: Re: [acrn-dev] [PATCH v3 2/4] hv: replace the CONFIG_PLATFORM_RAM_SIZE with get_e820_ram_size for
mmu


On 2/15/2022 3:52 AM, Xu, Anthony wrote:

-----Original Message-----
From: acrn-dev@... <acrn-dev@...> On Behalf Of chenli.wei
Sent: Friday, February 11, 2022 8:25 PM
To: Wang, Yu1 <yu1.wang@...>; Xu, Anthony <anthony.xu@...>; acrn-dev@...
Cc: Wei, Chenli <chenli.wei@...>; Chenli Wei <chenli.wei@...>
Subject: [acrn-dev] [PATCH v3 2/4] hv: replace the CONFIG_PLATFORM_RAM_SIZE with get_e820_ram_size for mmu

CONFIG_PLATFORM_RAM_SIZE is predefined by the config tool, and mmu uses it to
calculate the table size and predefine the ppt table.

This patch changes the ppt to be allocated dynamically and gets the table
size via the get_e820_ram_size interface, which can obtain the RAM
info at run time, replacing CONFIG_PLATFORM_RAM_SIZE.

v2-->v3:
1.use ram size replace the top address

v1-->v2:
1.update commit msg

Tracked-On: #6690
Signed-off-by: Chenli Wei <chenli.wei@...>
---
hypervisor/arch/x86/cpu.c | 3 +++
hypervisor/arch/x86/mmu.c | 39 +++++++++++++++------------
hypervisor/include/arch/x86/asm/mmu.h | 1 +
3 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 18c33c7d1..1b0f8f158 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -158,6 +158,9 @@ void init_pcpu_pre(bool is_bsp)

init_e820();

+ /* reserve ppt buffer from e820 */
+ reserve_buffer_for_ppt_pages();
+
/* Initialize the hypervisor paging */
init_paging();

diff --git a/hypervisor/arch/x86/mmu.c b/hypervisor/arch/x86/mmu.c
index 26e28b882..990880276 100644
--- a/hypervisor/arch/x86/mmu.c
+++ b/hypervisor/arch/x86/mmu.c
@@ -47,24 +47,19 @@ static uint8_t sanitized_page[PAGE_SIZE] __aligned(PAGE_SIZE);
/* PPT VA and PA are identical mapping */
#define PPT_PML4_PAGE_NUM PML4_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
#define PPT_PDPT_PAGE_NUM PDPT_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
-/* Please refer to how the EPT_PD_PAGE_NUM was calculated */
-#define PPT_PD_PAGE_NUM (PD_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + (MEM_4G)) + \
- CONFIG_MAX_PCI_DEV_NUM * 6U)
#define PPT_PT_PAGE_NUM 0UL /* not support 4K granularity page mapping */
-/* must be a multiple of 64 */
-#define PPT_PAGE_NUM (roundup((PPT_PML4_PAGE_NUM + PPT_PDPT_PAGE_NUM + \
- PPT_PD_PAGE_NUM + PPT_PT_PAGE_NUM), 64U))
-static struct page ppt_pages[PPT_PAGE_NUM];
-static uint64_t ppt_page_bitmap[PPT_PAGE_NUM / 64];
+
+/* Please refer to how the ept page num was calculated */
+uint64_t get_ppt_page_num(void)
+{
+ uint64_t ppt_pd_page_num = PD_PAGE_NUM(get_e820_ram_size() + MEM_4G) +
CONFIG_MAX_PCI_DEV_NUM * 6U;
+
+ /* must be a multiple of 64 */
+ return roundup((PPT_PML4_PAGE_NUM + PPT_PDPT_PAGE_NUM + ppt_pd_page_num +
PPT_PT_PAGE_NUM), 64U);
+}

/* ppt: primary page pool */
-static struct page_pool ppt_page_pool = {
- .start_page = ppt_pages,
- .bitmap_size = PPT_PAGE_NUM / 64,
- .bitmap = ppt_page_bitmap,
- .last_hint_id = 0UL,
- .dummy_page = NULL,
-};
+static struct page_pool ppt_page_pool;

/* @pre: The PPT and EPT have same page granularity */
static inline bool ppt_large_page_support(enum _page_table_level level, __unused uint64_t prot)
@@ -241,6 +236,18 @@ void set_paging_x(uint64_t base, uint64_t size)
base_aligned, size_aligned, 0UL, PAGE_NX, &ppt_pgtable, MR_MODIFY);
}

+void reserve_buffer_for_ppt_pages(void)
allocate_ppt_pages()?
Done



+{
+ uint64_t page_base;
+
+ ppt_page_pool.bitmap = (uint64_t *)e820_alloc_memory(get_ppt_page_num()/8, MEM_4G);
+ page_base = e820_alloc_memory(sizeof(struct page) * get_ppt_page_num(), MEM_4G);
Allocate the big one first?
OK
Check the return value?
The e820_alloc_memory function has already checked the return value before returning it.
Do we use e820_alloc_memory to allocate VM memory?
ACRN shouldn't panic on a VM memory allocation failure; it should just kill the VM.

It is not related; you can proceed with your patch.


Anthony









+
+ ppt_page_pool.start_page = (struct page *)(void *)page_base;
+ ppt_page_pool.bitmap_size = get_ppt_page_num() / 64;
+ ppt_page_pool.dummy_page = NULL;
+}
+
void init_paging(void)
{
uint64_t hv_hva;
@@ -248,7 +255,6 @@ void init_paging(void)
uint64_t low32_max_ram = 0UL;
uint64_t high64_min_ram = ~0UL;
uint64_t high64_max_ram = MEM_4G;
- uint64_t top_addr_space = CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE;

struct acrn_boot_info *abi = get_acrn_boot_info();
const struct abi_mmap *entry;
@@ -278,7 +284,6 @@ void init_paging(void)
}

low32_max_ram = round_pde_up(low32_max_ram);
- high64_max_ram = min(high64_max_ram, top_addr_space);
high64_max_ram = round_pde_down(high64_max_ram);

/* Map [0, low32_max_ram) and [high64_min_ram, high64_max_ram) RAM regions as WB attribute */
diff --git a/hypervisor/include/arch/x86/asm/mmu.h b/hypervisor/include/arch/x86/asm/mmu.h
index 7b657a8b0..64d0f0b13 100644
--- a/hypervisor/include/arch/x86/asm/mmu.h
+++ b/hypervisor/include/arch/x86/asm/mmu.h
@@ -200,6 +200,7 @@ void flush_tlb_range(uint64_t addr, uint64_t size);
void flush_invalidate_all_cache(void);
void flush_cacheline(const volatile void *p);
void flush_cache_range(const volatile void *p, uint64_t size);
+void reserve_buffer_for_ppt_pages(void);

/**
* @}
--
2.17.1









Join acrn-dev@lists.projectacrn.org to automatically receive all group messages.