[PATCH 1/2] hv: refine the vept module


chenli.wei
 

Now the vept module uses a mixture of "sept" and "vept" naming; it is better to
align the naming with the file name, vept.c.

So this patch renames these variables and functions accordingly.

Signed-off-by: Chenli Wei <chenli.wei@...>
---
hypervisor/arch/x86/cpu.c | 2 +-
hypervisor/arch/x86/guest/vept.c | 46 ++++++++++----------
hypervisor/include/arch/x86/asm/guest/vept.h | 4 +-
3 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 07dc044d2..3e2397b1a 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -289,7 +289,7 @@ void init_pcpu_post(uint16_t pcpu_id)
/*
* Reserve memory from platform E820 for shadow EPT 4K pages
*/
- allocate_buffer_for_sept_pages();
+ allocate_buffer_for_vept_pages();

pcpu_sync = ALL_CPUS_MASK;
/* Start all secondary cores */
diff --git a/hypervisor/arch/x86/guest/vept.c b/hypervisor/arch/x86/guest/vept.c
index 26c9ae29b..89db7d523 100644
--- a/hypervisor/arch/x86/guest/vept.c
+++ b/hypervisor/arch/x86/guest/vept.c
@@ -39,20 +39,20 @@ static uint64_t get_total_vept_4k_page_num(void)
return get_total_vept_4k_page_size() / PAGE_SIZE;
}

-static struct page_pool sept_page_pool;
-static struct page *sept_pages;
-static uint64_t *sept_page_bitmap;
+static struct page_pool vept_page_pool;
+static struct page *vept_pages;
+static uint64_t *vept_page_bitmap;

static void allocate_vept_bitmap(void)
{
- sept_page_bitmap = e820_alloc_memory((get_total_4k_page_num() / 64U), ~0UL);
+ vept_page_bitmap = (uint64_t *)e820_alloc_memory((get_total_vept_4k_page_num() / 64U), ~0UL);
}

/*
* @brief Reserve space for SEPT 4K pages from platform E820 table
* At moment, we only support nested VMX for Service VM.
*/
-void allocate_buffer_for_sept_pages(void)
+void allocate_buffer_for_vept_pages(void)
{
uint64_t page_base;

@@ -60,7 +60,7 @@ void allocate_buffer_for_sept_pages(void)

set_paging_supervisor(page_base, get_total_vept_4k_page_size());

- sept_pages = (struct page *)page_base;
+ vept_pages = (struct page *)page_base;
allocate_vept_bitmap();
}

@@ -77,7 +77,7 @@ static bool is_leaf_ept_entry(uint64_t ept_entry, enum _page_table_level pt_leve
/*
* @brief Release all pages except the PML4E page of a shadow EPT
*/
-static void free_sept_table(uint64_t *shadow_eptp)
+static void free_vept_table(uint64_t *shadow_eptp)
{
uint64_t *shadow_pml4e, *shadow_pdpte, *shadow_pde;
uint64_t i, j, k;
@@ -100,11 +100,11 @@ static void free_sept_table(uint64_t *shadow_eptp)
is_leaf_ept_entry(*shadow_pde, IA32E_PD)) {
continue;
}
- free_page(&sept_page_pool, (struct page *)((*shadow_pde) & EPT_ENTRY_PFN_MASK));
+ free_page(&vept_page_pool, (struct page *)((*shadow_pde) & EPT_ENTRY_PFN_MASK));
}
- free_page(&sept_page_pool, (struct page *)((*shadow_pdpte) & EPT_ENTRY_PFN_MASK));
+ free_page(&vept_page_pool, (struct page *)((*shadow_pdpte) & EPT_ENTRY_PFN_MASK));
}
- free_page(&sept_page_pool, (struct page *)((*shadow_pml4e) & EPT_ENTRY_PFN_MASK));
+ free_page(&vept_page_pool, (struct page *)((*shadow_pml4e) & EPT_ENTRY_PFN_MASK));
*shadow_pml4e = 0UL;
}
}
@@ -179,7 +179,7 @@ struct nept_desc *get_nept_desc(uint64_t guest_eptp)

/* A new nept_desc, initialize it */
if (desc->shadow_eptp == 0UL) {
- desc->shadow_eptp = (uint64_t)alloc_page(&sept_page_pool) | (guest_eptp & ~PAGE_MASK);
+ desc->shadow_eptp = (uint64_t)alloc_page(&vept_page_pool) | (guest_eptp & ~PAGE_MASK);
desc->guest_eptp = guest_eptp;
desc->ref_count = 1UL;

@@ -210,8 +210,8 @@ void put_nept_desc(uint64_t guest_eptp)
if (desc->ref_count == 0UL) {
dev_dbg(VETP_LOG_LEVEL, "[%s], nept_desc[%llx] ref[%d] shadow_eptp[%llx] guest_eptp[%llx]",
__func__, desc, desc->ref_count, desc->shadow_eptp, desc->guest_eptp);
- free_sept_table((void *)(desc->shadow_eptp & PAGE_MASK));
- free_page(&sept_page_pool, (struct page *)(desc->shadow_eptp & PAGE_MASK));
+ free_vept_table((void *)(desc->shadow_eptp & PAGE_MASK));
+ free_page(&vept_page_pool, (struct page *)(desc->shadow_eptp & PAGE_MASK));
/* Flush the hardware TLB */
invept((void *)(desc->shadow_eptp & PAGE_MASK));
desc->shadow_eptp = 0UL;
@@ -294,7 +294,7 @@ static uint64_t generate_shadow_ept_entry(struct acrn_vcpu *vcpu, uint64_t guest
} else {
/* Use a HPA of a new page in shadow EPT entry */
shadow_ept_entry = guest_ept_entry & ~EPT_ENTRY_PFN_MASK;
- shadow_ept_entry |= hva2hpa((void *)alloc_page(&sept_page_pool)) & EPT_ENTRY_PFN_MASK;
+ shadow_ept_entry |= hva2hpa((void *)alloc_page(&vept_page_pool)) & EPT_ENTRY_PFN_MASK;
}

return shadow_ept_entry;
@@ -516,7 +516,7 @@ int32_t invept_vmexit_handler(struct acrn_vcpu *vcpu)
* Since ACRN does not know which paging entries are changed,
* Remove all the shadow EPT entries that ACRN created for L2 VM
*/
- free_sept_table((void *)(desc->shadow_eptp & PAGE_MASK));
+ free_vept_table((void *)(desc->shadow_eptp & PAGE_MASK));
invept((void *)(desc->shadow_eptp & PAGE_MASK));
}
spinlock_release(&nept_desc_bucket_lock);
@@ -533,7 +533,7 @@ int32_t invept_vmexit_handler(struct acrn_vcpu *vcpu)
for (i = 0L; i < CONFIG_MAX_GUEST_EPT_NUM; i++) {
if (nept_desc_bucket[i].guest_eptp != 0UL) {
desc = &nept_desc_bucket[i];
- free_sept_table((void *)(desc->shadow_eptp & PAGE_MASK));
+ free_vept_table((void *)(desc->shadow_eptp & PAGE_MASK));
invept((void *)(desc->shadow_eptp & PAGE_MASK));
}
}
@@ -549,13 +549,13 @@ int32_t invept_vmexit_handler(struct acrn_vcpu *vcpu)

void init_vept(void)
{
- sept_page_pool.start_page = sept_pages;
- sept_page_pool.bitmap_size = get_total_vept_4k_page_num() / 64U;
- sept_page_pool.bitmap = sept_page_bitmap;
- sept_page_pool.dummy_page = NULL;
- spinlock_init(&sept_page_pool.lock);
- memset((void *)sept_page_pool.bitmap, 0, sept_page_pool.bitmap_size * sizeof(uint64_t));
- sept_page_pool.last_hint_id = 0UL;
+ vept_page_pool.start_page = vept_pages;
+ vept_page_pool.bitmap_size = get_total_vept_4k_page_num() / 64U;
+ vept_page_pool.bitmap = vept_page_bitmap;
+ vept_page_pool.dummy_page = NULL;
+ spinlock_init(&vept_page_pool.lock);
+ memset((void *)vept_page_pool.bitmap, 0, vept_page_pool.bitmap_size * sizeof(uint64_t));
+ vept_page_pool.last_hint_id = 0UL;

spinlock_init(&nept_desc_bucket_lock);
}
diff --git a/hypervisor/include/arch/x86/asm/guest/vept.h b/hypervisor/include/arch/x86/asm/guest/vept.h
index 159a62537..e8f62f514 100644
--- a/hypervisor/include/arch/x86/asm/guest/vept.h
+++ b/hypervisor/include/arch/x86/asm/guest/vept.h
@@ -38,7 +38,7 @@ struct nept_desc {
uint32_t ref_count;
};

-void allocate_buffer_for_sept_pages(void);
+void allocate_buffer_for_vept_pages(void);
void init_vept(void);
uint64_t get_shadow_eptp(uint64_t guest_eptp);
struct nept_desc *get_nept_desc(uint64_t guest_eptp);
@@ -46,6 +46,6 @@ void put_nept_desc(uint64_t guest_eptp);
bool handle_l2_ept_violation(struct acrn_vcpu *vcpu);
int32_t invept_vmexit_handler(struct acrn_vcpu *vcpu);
#else
-static inline void allocate_buffer_for_sept_pages(void) {};
+static inline void allocate_buffer_for_vept_pages(void) {};
#endif /* CONFIG_NVMX_ENABLED */
#endif /* VEPT_H */
--
2.17.1


Eddie Dong
 


Now the vept module uses a mixture of sept and vept, it's better to align the
file name vept.c.
The terminology of shadow EPT and virtual EPT means different things. Shadow EPT is the physical EPT page table used when a certain VM is running, while virtual EPT is the EPT that the L1 guest sees in a nested situation.

As for the file name, vept.c, I would say the code in this file is designed to support virtual EPT, i.e. guest EPT. It is mainly the logic of how to generate the shadow EPT. In general, the current code follows this approach.

Changing sept to vept in the API names and data instance names is the wrong way to go.

Thx Eddie