ptirq_remapping_info records which pGSI is mapped to which vGSI in a VM. As we need to know both whether a pGSI has been assigned to a VM and whether a vGSI in a VM has been used, there should be two hash tables to link and iterate ptirq_remapping_info:
- One is used to look up by pGSI, linking phys_link.
- The other is used to look up by vGSI within a VM, linking virt_link.
When assigning a pGSI to a Post-launched VM, if the pGSI has already been assigned to the Service VM, we should remove that mapping first to reset the IOAPIC pin state and RTE, and then build a new mapping for the Post-launched VM. Add ptirq_reassign_intx_remapping for this.
Tracked-On: #8370
Signed-off-by: Qiang Zhang <qiang4.zhang@...>
---
 hypervisor/arch/x86/guest/assign.c      | 41 ++++++++++----
 hypervisor/common/hypercall.c           |  2 +-
 hypervisor/common/ptdev.c               | 53 ++++++++++++-------
 .../include/arch/x86/asm/guest/assign.h | 21 ++++++++
 4 files changed, 85 insertions(+), 32 deletions(-)
diff --git a/hypervisor/arch/x86/guest/assign.c b/hypervisor/arch/x86/guest/assign.c
index a49b3314b..0baa9404b 100644
--- a/hypervisor/arch/x86/guest/assign.c
+++ b/hypervisor/arch/x86/guest/assign.c
@@ -382,15 +382,9 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
 			pr_err("INTX re-add vpin %d", virt_gsi);
 		}
 	} else if (entry->vm != vm) {
-		if (is_service_vm(entry->vm)) {
-			entry->vm = vm;
-			entry->virt_sid.value = virt_sid.value;
-			entry->polarity = 0U;
-		} else {
-			pr_err("INTX gsi%d already in vm%d with vgsi%d, not able to add into vm%d with vgsi%d",
-				phys_gsi, entry->vm->vm_id, entry->virt_sid.intx_id.gsi, vm->vm_id, virt_gsi);
-			entry = NULL;
-		}
+		pr_err("INTX gsi%d already in vm%d with vgsi%d, not able to add into vm%d with vgsi%d",
+			phys_gsi, entry->vm->vm_id, entry->virt_sid.intx_id.gsi, vm->vm_id, virt_gsi);
+		entry = NULL;
 	} else {
 		/* The mapping has already been added to the VM. No action
 		 * required.
@@ -410,7 +404,7 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
 	return entry;
 }

-/* deactive & remove mapping entry of vpin for vm */
+/* deactivate & remove mapping entry of vpin for vm */
 static void remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ctlr vgsi_ctlr)
 {
 	uint32_t phys_irq;
@@ -431,7 +425,7 @@ static void remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, e

 		dmar_free_irte(&intr_src, entry->irte_idx);
 		dev_dbg(DBG_LEVEL_IRQ,
-			"deactive %s intx entry:pgsi=%d, pirq=%d ",
+			"deactivate %s intx entry:pgsi=%d, pirq=%d ",
 			(vgsi_ctlr == INTX_CTLR_PIC) ? "vPIC" : "vIOAPIC",
 			entry->phys_sid.intx_id.gsi, phys_irq);
 		dev_dbg(DBG_LEVEL_IRQ, "from vm%d vgsi=%d\n",
@@ -442,6 +436,19 @@ static void remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, e
 	}
 }

+static struct ptirq_remapping_info *reassign_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi,
+		uint32_t phys_gsi, enum intx_ctlr vgsi_ctlr)
+{
+	struct ptirq_remapping_info *entry = NULL;
+	DEFINE_INTX_SID(phys_sid, phys_gsi, INTX_CTLR_IOAPIC);
+
+	entry = find_ptirq_entry(PTDEV_INTR_INTX, &phys_sid, NULL);
+	if (entry)
+		remove_intx_remapping(entry->vm, entry->virt_sid.intx_id.gsi, entry->virt_sid.intx_id.ctlr);
+
+	return add_intx_remapping(vm, virt_gsi, phys_gsi, vgsi_ctlr);
+}
+
 static void ptirq_handle_intx(struct acrn_vm *vm,
 		const struct ptirq_remapping_info *entry)
 {
@@ -815,6 +822,18 @@ void ptirq_remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, bo
 	spinlock_release(&ptdev_lock);
 }

+int32_t ptirq_reassign_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi, uint32_t phys_gsi, bool pic_pin)
+{
+	struct ptirq_remapping_info *entry;
+	enum intx_ctlr vgsi_ctlr = pic_pin ? INTX_CTLR_PIC : INTX_CTLR_IOAPIC;
+
+	spinlock_obtain(&ptdev_lock);
+	entry = reassign_intx_remapping(vm, virt_gsi, phys_gsi, vgsi_ctlr);
+	spinlock_release(&ptdev_lock);
+
+	return (entry != NULL) ? 0 : -ENODEV;
+}
+
 /*
  * @pre vm != NULL
  */
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 5269380c1..faec16545 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -1016,7 +1016,7 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target
 		if ((((!irq.intx.pic_pin) && (irq.intx.virt_pin < get_vm_gsicount(target_vm))) ||
 			((irq.intx.pic_pin) && (irq.intx.virt_pin < vpic_pincount()))) && is_gsi_valid(irq.intx.phys_pin)) {
-			ret = ptirq_add_intx_remapping(target_vm, irq.intx.virt_pin,
+			ret = ptirq_reassign_intx_remapping(target_vm, irq.intx.virt_pin,
 					irq.intx.phys_pin, irq.intx.pic_pin);
 		} else {
 			pr_err("%s: Invalid phys pin or virt pin\n", __func__);
diff --git a/hypervisor/common/ptdev.c b/hypervisor/common/ptdev.c
index e7e49934a..6308b7514 100644
--- a/hypervisor/common/ptdev.c
+++ b/hypervisor/common/ptdev.c
@@ -22,9 +22,14 @@ struct ptirq_remapping_info ptirq_entries[CONFIG_MAX_PT_IRQ_ENTRIES];
 static uint64_t ptirq_entry_bitmaps[PTIRQ_BITMAP_ARRAY_SIZE];
 spinlock_t ptdev_lock = { .head = 0U, .tail = 0U, };

-static struct ptirq_entry_head {
-	struct hlist_head list;
-} ptirq_entry_heads[PTIRQ_ENTRY_HASHSIZE];
+struct ptirq_hash_table {
+	struct hlist_head buckets[PTIRQ_ENTRY_HASHSIZE];
+};
+
+/* lookup mapping info from phyical gsi, hashing from gsi + vm address(0) */
+static struct ptirq_hash_table phy_gsi_htable;
+/* lookup mapping info from vgsi within a vm, hashing from vm address + vgsi */
+static struct ptirq_hash_table vm_vgsi_htable;

 static inline uint16_t ptirq_alloc_entry_id(void)
 {
@@ -40,28 +45,36 @@ static inline uint16_t ptirq_alloc_entry_id(void)
 	return (id < CONFIG_MAX_PT_IRQ_ENTRIES) ? id: INVALID_PTDEV_ENTRY_ID;
 }

+/* to find ptirq_remapping_info from phy gsi or vgsi in a vm */
 struct ptirq_remapping_info *find_ptirq_entry(uint32_t intr_type,
 		const union source_id *sid, const struct acrn_vm *vm)
 {
 	struct hlist_node *p;
+	struct hlist_head *b;
 	struct ptirq_remapping_info *n, *entry = NULL;
-	uint64_t key = hash64(sid->value, PTIRQ_ENTRY_HASHBITS);
-	struct ptirq_entry_head *b = &ptirq_entry_heads[key];
+	uint64_t key = hash64(sid->value + (uint64_t)vm, PTIRQ_ENTRY_HASHBITS);
+
+	if (vm == NULL) {
+		b = &phy_gsi_htable.buckets[key];

-	hlist_for_each(p, &b->list) {
-		if (vm == NULL) {
+		hlist_for_each(p, b) {
 			n = hlist_entry(p, struct ptirq_remapping_info, phys_link);
-		} else {
-			n = hlist_entry(p, struct ptirq_remapping_info, virt_link);
+			if (is_entry_active(n)) {
+				if ((intr_type == n->intr_type) && (sid->value == n->phys_sid.value)) {
+					entry = n;
+					break;
+				}
+			}
 		}
-
-		if (is_entry_active(n)) {
-			if ((intr_type == n->intr_type) &&
-				((vm == NULL) ?
-				(sid->value == n->phys_sid.value) :
-				((vm == n->vm) && (sid->value == n->virt_sid.value)))) {
-				entry = n;
-				break;
+	} else {
+		b = &vm_vgsi_htable.buckets[key];
+		hlist_for_each(p, b) {
+			n = hlist_entry(p, struct ptirq_remapping_info, virt_link);
+			if (is_entry_active(n)) {
+				if ((intr_type == n->intr_type) && (sid->value == n->virt_sid.value) && (vm == n->vm)) {
+					entry = n;
+					break;
+				}
 			}
 		}
 	}
@@ -212,9 +225,9 @@ int32_t ptirq_activate_entry(struct ptirq_remapping_info *entry, uint32_t phys_i
 		entry->active = true;

 		key = hash64(entry->phys_sid.value, PTIRQ_ENTRY_HASHBITS);
-		hlist_add_head(&entry->phys_link, &(ptirq_entry_heads[key].list));
-		key = hash64(entry->virt_sid.value, PTIRQ_ENTRY_HASHBITS);
-		hlist_add_head(&entry->virt_link, &(ptirq_entry_heads[key].list));
+		hlist_add_head(&entry->phys_link, &(phy_gsi_htable.buckets[key]));
+		key = hash64(entry->virt_sid.value + (uint64_t)entry->vm, PTIRQ_ENTRY_HASHBITS);
+		hlist_add_head(&entry->virt_link, &(vm_vgsi_htable.buckets[key]));
 	}

 	return ret;
diff --git a/hypervisor/include/arch/x86/asm/guest/assign.h b/hypervisor/include/arch/x86/asm/guest/assign.h
index 9b3b666af..1387b9825 100644
--- a/hypervisor/include/arch/x86/asm/guest/assign.h
+++ b/hypervisor/include/arch/x86/asm/guest/assign.h
@@ -125,6 +125,27 @@ int32_t ptirq_add_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi, uint32_t
  */
 void ptirq_remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, bool pic_pin);

+/**
+ * @brief Assign a INTx to Postlaunched VM.
+ *
+ * If the physical pin was assigned to another VM, remove it first.
+ * Then assign the physical pin to target virtual VM.
+ *
+ * @param[in] vm pointer to acrn_vm
+ * @param[in] virt_gsi virtual pin number associated with the passthrough device
+ * @param[in] phys_gsi physical pin number associated with the passthrough device
+ * @param[in] pic_pin true for pic, false for ioapic
+ *
+ * @return
+ * - 0: on success
+ * - \p -EINVAL: invalid virt_pin value
+ * - \p -ENODEV: failed to add the remapping entry
+ *
+ * @pre vm != NULL
+ *
+ */
+int32_t ptirq_reassign_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi, uint32_t phys_gsi, bool pic_pin);
+
 /**
  * @brief Remove interrupt remapping entry/entries for MSI/MSI-x.
  *
--
2.30.2
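A minimal, self-contained sketch of the keying scheme the patch introduces, for readers who want to see it outside the diff: the physical table (phy_gsi_htable) is keyed from the physical source id with a NULL vm, effectively the pGSI alone, while the virtual table (vm_vgsi_htable) is keyed from the vGSI plus the VM pointer, so the same vGSI number in two VMs hashes and matches as distinct entries. The toy_hash64 helper and the toy types below are invented stand-ins for ACRN's hash64(), hlist and acrn_vm; only the keying idea is taken from the patch.

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_HASHBITS 6U

/* stand-in for hash64(): fold a 64-bit key down to TOY_HASHBITS bits */
static uint64_t toy_hash64(uint64_t key)
{
	return (key * 0x9e3779b97f4a7c15ULL) >> (64U - TOY_HASHBITS);
}

struct toy_vm {
	uint16_t vm_id;
};

int main(void)
{
	struct toy_vm service_vm = { .vm_id = 0U };
	struct toy_vm post_vm = { .vm_id = 2U };
	uint32_t pgsi = 20U;	/* physical GSI of some passthrough device */
	uint32_t vgsi = 5U;	/* the same vGSI number used by both VMs */

	/* phys table: keyed by the physical GSI only (vm argument is NULL),
	 * so ownership of a pGSI can be checked without knowing the VM */
	uint64_t phys_bucket = toy_hash64((uint64_t)pgsi);

	/* virt table: keyed by vGSI plus the VM pointer, so "vGSI 5 in the
	 * Service VM" and "vGSI 5 in a post-launched VM" stay distinct */
	uint64_t virt_bucket_svm = toy_hash64((uint64_t)vgsi + (uint64_t)(uintptr_t)&service_vm);
	uint64_t virt_bucket_post = toy_hash64((uint64_t)vgsi + (uint64_t)(uintptr_t)&post_vm);

	printf("phys bucket for pGSI %u: %llu\n", pgsi, (unsigned long long)phys_bucket);
	printf("virt bucket for vGSI %u in vm%u: %llu\n", vgsi, service_vm.vm_id,
	       (unsigned long long)virt_bucket_svm);
	printf("virt bucket for vGSI %u in vm%u: %llu\n", vgsi, post_vm.vm_id,
	       (unsigned long long)virt_bucket_post);
	return 0;
}
```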
-----Original Message-----
From: Qiang Zhang <qiang4.zhang@...>
Sent: Monday, February 27, 2023 11:35 AM
To: acrn-dev@...
Cc: Mao, Junjie <junjie.mao@...>; Li, Fei1 <fei1.li@...>; Qiang Zhang <qiang4.zhang@...>
Subject: [PATCH v2 1/1] ptirq: Fix ptirq hash tables and uos intx assignment
> 	} else if (entry->vm != vm) {
> -		if (is_service_vm(entry->vm)) {
> -			entry->vm = vm;
> -			entry->virt_sid.value = virt_sid.value;
> -			entry->polarity = 0U;
> -		} else {
> [...]
> +		pr_err("INTX gsi%d already in vm%d with vgsi%d, not able to add into vm%d with vgsi%d",
> +			phys_gsi, entry->vm->vm_id, entry->virt_sid.intx_id.gsi, vm->vm_id, virt_gsi);
> +		entry = NULL;

With this change you also need to replace the call to ptirq_add_intx_remapping() with your "reassign" version in create_vm() in arch/x86/guest/vm.c. That piece of logic grants some legacy interrupt lines to a pre-launched VM, which may be created before or after the service VM starts, depending on the processing speed of different cores.

---
Best Regards
Junjie Mao
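To make the behavioral change under discussion concrete: before the patch, add_intx_remapping() silently took over a pGSI the Service VM already owned; after it, the add fails and callers that really want the pin (such as the hypercall path in the patch) must go through the reassign helper, which removes the old mapping first. The sketch below is a toy ownership model, not ACRN code; the toy_* names are invented stand-ins for add_intx_remapping(), reassign_intx_remapping() and remove_intx_remapping(), and only the before/after rule is taken from the hunk quoted above.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_PGSI 8

struct toy_vm {
	int vm_id;
};

/* owner[pgsi] == NULL means the physical GSI is currently unassigned */
static struct toy_vm *owner[NUM_PGSI];

/* models add_intx_remapping() after the patch: never steal from another VM */
static bool toy_add(struct toy_vm *vm, int pgsi)
{
	if ((owner[pgsi] != NULL) && (owner[pgsi] != vm)) {
		printf("add: pGSI %d already in vm%d, refusing\n", pgsi, owner[pgsi]->vm_id);
		return false;
	}
	owner[pgsi] = vm;
	return true;
}

/* models the reassign helper: drop the old owner's mapping, then add */
static bool toy_reassign(struct toy_vm *vm, int pgsi)
{
	if ((owner[pgsi] != NULL) && (owner[pgsi] != vm)) {
		printf("reassign: removing pGSI %d from vm%d first\n", pgsi, owner[pgsi]->vm_id);
		owner[pgsi] = NULL;	/* stands in for remove_intx_remapping() */
	}
	return toy_add(vm, pgsi);
}

int main(void)
{
	struct toy_vm service_vm = { .vm_id = 0 };
	struct toy_vm post_vm = { .vm_id = 2 };

	(void)toy_add(&service_vm, 5);		/* Service VM owns pGSI 5 via its vIOAPIC */
	(void)toy_add(&post_vm, 5);		/* now fails instead of silently stealing */
	(void)toy_reassign(&post_vm, 5);	/* the explicit path the patch adds */
	printf("pGSI 5 now owned by vm%d\n", owner[5]->vm_id);
	return 0;
}
```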
On Mon, Feb 27, 2023 at 05:36:31AM +0000, Mao, Junjie wrote:
> With this change you also need to replace the call to ptirq_add_intx_remapping() with your
> "reassign" version in create_vm() in arch/x86/guest/vm.c. That piece of logic grants some
> legacy interrupt lines to a pre-launched VM, which may be created before or after the
> service VM starts, depending on the processing speed of different cores.
INTx remappings are just added to Pre-launched VMs according to static configurations. In `prepare_vm`, INTx remappings for Pre-launched VMs are handled before the Service VM starts. So there are no reassign cases for Pre-launched VMs.

Thanks
Qiang
-----Original Message-----
From: Qiang Zhang <qiang4.zhang@...>
Sent: Monday, February 27, 2023 3:06 PM
To: Mao, Junjie <junjie.mao@...>
Cc: acrn-dev@...; Li, Fei1 <fei1.li@...>
Subject: Re: [PATCH v2 1/1] ptirq: Fix ptirq hash tables and uos intx assignment
> INTx remappings are just added to Pre-launched VMs according to static configurations.
> In `prepare_vm`, INTx remappings for Pre-launched VMs are handled before the Service VM
> starts. So there are no reassign cases for Pre-launched VMs.

I don't think we ever guarantee "INTx remappings for Pre-launched VMs are handled before the Service VM starts". The launch of pre-launched VM(s) and the service VM is simultaneous. While it may be highly probable, because INTx remappings for the service VM are established only when it attempts to access its vIOAPIC, there is no 100% guarantee on that.

Have we added any synchronization on VM launch order recently?

---
Best Regards
Junjie Mao
On Mon, Feb 27, 2023 at 07:09:09AM +0000, Mao, Junjie wrote:
> I don't think we ever guarantee "INTx remappings for Pre-launched VMs are handled before
> the Service VM starts". The launch of pre-launched VM(s) and the service VM is simultaneous.
> While it may be highly probable, because INTx remappings for the service VM are established
> only when it attempts to access its vIOAPIC, there is no 100% guarantee on that.
>
> Have we added any synchronization on VM launch order recently?
I think this synchronization already exists. The Service VM won't get launched until create_vm finishes setting ptirq_remapping_info for all Pre-launched VMs and loaded_pre_vm_nr reaches PRE_VM_NUM.

```c
int32_t prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
{
	int32_t err = 0;
	struct acrn_vm *vm = NULL;

#ifdef CONFIG_SECURITY_VM_FIXUP
	security_vm_fixup(vm_id);
#endif
	if (get_vmid_by_name(vm_config->name) != vm_id) {
		pr_err("Invalid VM name: %s", vm_config->name);
		err = -1;
	} else {
		/* Service VM and pre-launched VMs launch on all pCPUs defined in vm_config->cpu_affinity */
		err = create_vm(vm_id, vm_config->cpu_affinity, vm_config, &vm);
	}

	if (err == 0) {
		if (is_prelaunched_vm(vm)) {
			build_vrsdp(vm);
		}

		if (is_service_vm(vm)) {
			/* We need to ensure all modules of pre-launched VMs have been loaded already
			 * before loading Service VM modules, otherwise the module of pre-launched VMs could
			 * be corrupted because Service VM kernel might pick any usable RAM to extract kernel
			 * when KASLR enabled.
			 * In case the pre-launched VMs aren't loaded successfuly that cause deadlock here,
			 * use a 10000ms timer to break the waiting loop.
			 */
			uint64_t start_tick = cpu_ticks();

			while (loaded_pre_vm_nr != PRE_VM_NUM) {
				uint64_t timeout = ticks_to_ms(cpu_ticks() - start_tick);

				if (timeout > 10000U) {
					pr_err("Loading pre-launched VMs timeout!");
					break;
				}
			}
		}

		err = prepare_os_image(vm);

		if (is_prelaunched_vm(vm)) {
			loaded_pre_vm_nr++;
		}
	}

	return err;
}
```

Thanks
Qiang
-----Original Message-----
From: Qiang Zhang <qiang4.zhang@...>
Sent: Monday, February 27, 2023 3:25 PM
To: Mao, Junjie <junjie.mao@...>
Cc: acrn-dev@...; Li, Fei1 <fei1.li@...>
Subject: Re: [PATCH v2 1/1] ptirq: Fix ptirq hash tables and uos intx assignment
> I think this synchronization already exists. The Service VM won't get launched until
> create_vm finishes setting ptirq_remapping_info for all Pre-launched VMs and
> loaded_pre_vm_nr reaches PRE_VM_NUM.

I see. Wasn't aware that the synchronization had been added in another context. Thanks for the clarification.

---
Best Regards
Junjie Mao
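The ordering guarantee the thread converges on can also be modeled outside the hypervisor. The sketch below only illustrates the pattern the quoted prepare_vm() relies on: worker threads stand in for the cores loading pre-launched VMs, an atomic counter stands in for loaded_pre_vm_nr, and a wall-clock timeout stands in for the cpu_ticks()-based 10-second guard. None of the names or mechanisms here are taken from ACRN beyond that correspondence.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define PRE_VM_NUM 2
#define TIMEOUT_MS 10000L

static atomic_int loaded_pre_vm_nr;

/* stands in for the cores that create and load the pre-launched VMs */
static void *pre_vm_loader(void *arg)
{
	(void)arg;
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };
	nanosleep(&delay, NULL);	/* pretend loading takes a while */
	atomic_fetch_add(&loaded_pre_vm_nr, 1);
	return NULL;
}

static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000L + (now.tv_nsec - start->tv_nsec) / 1000000L;
}

int main(void)
{
	pthread_t workers[PRE_VM_NUM];
	struct timespec start;

	for (int i = 0; i < PRE_VM_NUM; i++) {
		pthread_create(&workers[i], NULL, pre_vm_loader, NULL);
	}

	/* the "Service VM" path: spin until every pre-launched VM is loaded,
	 * with a coarse timeout so a stuck loader cannot deadlock us */
	clock_gettime(CLOCK_MONOTONIC, &start);
	while (atomic_load(&loaded_pre_vm_nr) != PRE_VM_NUM) {
		if (elapsed_ms(&start) > TIMEOUT_MS) {
			printf("Loading pre-launched VMs timeout!\n");
			break;
		}
	}
	printf("loaded_pre_vm_nr = %d, safe to load the Service VM image\n",
	       atomic_load(&loaded_pre_vm_nr));

	for (int i = 0; i < PRE_VM_NUM; i++) {
		pthread_join(workers[i], NULL);
	}
	return 0;
}
```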