iommu/amd: Miscellaneous clean up when free domain

* Use the protection_domain_free() helper function to free the domain.
  The function has been modified to also free the memory used for the v1
  and v2 page tables. Also clear the gcr3 table in the v2 page table free
  path. (A condensed sketch of the resulting error-handling pattern
  follows the tags below.)

* Refactor code into cleanup_domain() for reusability. Change BUG_ON to
  WARN_ON in the cleanup path.

* Protection domain dev_cnt should only be read while the domain lock is
  held.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20230921092147.5930-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
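
For orientation, the error-handling shape this patch converges on can be sketched as follows. This is an illustrative outline only, not the upstream code: protection_domain_free() (extended in the diff below to also free the gcr3 table and the v1 page-table root) becomes safe to call on a partially initialized domain, so the allocation and init paths simply bail out and let it release whatever was actually set up. The setup_failed() check is a hypothetical stand-in for the real failure points (ID allocation, page-table setup, gcr3 setup).

	/* Illustrative sketch only, not verbatim kernel code. */
	static struct protection_domain *protection_domain_alloc(unsigned int type)
	{
		struct protection_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			return NULL;

		/*
		 * Domain ID allocation and page-table/gcr3 setup happen here;
		 * any failure jumps to out_err instead of unwinding piecemeal.
		 */
		if (setup_failed(domain))	/* hypothetical stand-in */
			goto out_err;

		return domain;

	out_err:
		/* Safe on a partially initialized domain: frees only what exists. */
		protection_domain_free(domain);
		return NULL;
	}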
commit 45677ab1e5 (parent 4c721d6a08)
Author: Suravee Suthikulpanit, 2023-09-21 09:21:40 +00:00
Committed by: Joerg Roedel
2 changed files with 27 additions and 27 deletions

drivers/iommu/amd/io_pgtable_v2.c

@@ -363,10 +363,10 @@ static void v2_free_pgtable(struct io_pgtable *iop)
 	if (!(pdom->flags & PD_IOMMUV2_MASK))
 		return;
 
-	/*
-	 * Make changes visible to IOMMUs. No need to clear gcr3 entry
-	 * as gcr3 table is already freed.
-	 */
+	/* Clear gcr3 entry */
+	amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
+
+	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(pdom);
 
 	/* Free page table */
drivers/iommu/amd/iommu.c

@@ -2047,9 +2047,11 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 static void cleanup_domain(struct protection_domain *domain)
 {
 	struct iommu_dev_data *entry;
-	unsigned long flags;
 
-	spin_lock_irqsave(&domain->lock, flags);
+	lockdep_assert_held(&domain->lock);
+
+	if (!domain->dev_cnt)
+		return;
 
 	while (!list_empty(&domain->dev_list)) {
 		entry = list_first_entry(&domain->dev_list,
@@ -2057,8 +2059,7 @@ static void cleanup_domain(struct protection_domain *domain)
 		BUG_ON(!entry->domain);
 		do_detach(entry);
 	}
-
-	spin_unlock_irqrestore(&domain->lock, flags);
+	WARN_ON(domain->dev_cnt != 0);
 }
 
 static void protection_domain_free(struct protection_domain *domain)
@@ -2069,6 +2070,12 @@ static void protection_domain_free(struct protection_domain *domain)
 	if (domain->iop.pgtbl_cfg.tlb)
 		free_io_pgtable_ops(&domain->iop.iop.ops);
 
+	if (domain->flags & PD_IOMMUV2_MASK)
+		free_gcr3_table(domain);
+
+	if (domain->iop.root)
+		free_page((unsigned long)domain->iop.root);
+
 	if (domain->id)
 		domain_id_free(domain->id);
 
@@ -2083,10 +2090,8 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
 
 	if (mode != PAGE_MODE_NONE) {
 		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-		if (!pt_root) {
-			domain_id_free(domain->id);
+		if (!pt_root)
 			return -ENOMEM;
-		}
 	}
 
 	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2100,10 +2105,8 @@ static int protection_domain_init_v2(struct protection_domain *domain)
 
 	domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
 
-	if (setup_gcr3_table(domain, 1)) {
-		domain_id_free(domain->id);
+	if (setup_gcr3_table(domain, 1))
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -2162,14 +2165,12 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 		goto out_err;
 
 	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
-	if (!pgtbl_ops) {
-		domain_id_free(domain->id);
+	if (!pgtbl_ops)
 		goto out_err;
-	}
 
 	return domain;
 out_err:
-	kfree(domain);
+	protection_domain_free(domain);
 
 	return NULL;
 }
@@ -2207,19 +2208,18 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
-
-	domain = to_pdomain(dom);
-
-	if (domain->dev_cnt > 0)
-		cleanup_domain(domain);
-
-	BUG_ON(domain->dev_cnt != 0);
+	unsigned long flags;
 
 	if (!dom)
 		return;
 
-	if (domain->flags & PD_IOMMUV2_MASK)
-		free_gcr3_table(domain);
+	domain = to_pdomain(dom);
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	cleanup_domain(domain);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	protection_domain_free(domain);
 }
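
Putting the iommu.c hunks together, the freeing path now reads approximately as below. This is a condensed reconstruction assembled from the hunks above, not a verbatim copy of the tree.

	static void amd_iommu_domain_free(struct iommu_domain *dom)
	{
		struct protection_domain *domain;
		unsigned long flags;

		if (!dom)
			return;

		domain = to_pdomain(dom);

		/* Detach any remaining devices with the domain lock held. */
		spin_lock_irqsave(&domain->lock, flags);
		cleanup_domain(domain);	/* asserts the lock, warns if dev_cnt != 0 */
		spin_unlock_irqrestore(&domain->lock, flags);

		/* Releases page tables, gcr3 table, domain ID and the domain. */
		protection_domain_free(domain);
	}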