From: Jason Gunthorpe
Date: Fri, 30 Aug 2024 00:06:10 +0000 (-0300)
Subject: iommu/amd: Move allocation of the top table into v1_alloc_pgtable
X-Git-Tag: v6.12-rc1~149^2^4~15
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8d00b77a52ef4b2091696ca25753d0ab95e4d839;p=thirdparty%2Fkernel%2Flinux.git

iommu/amd: Move allocation of the top table into v1_alloc_pgtable

All the page table memory should be allocated/freed within the io_pgtable
struct. The v2 path is already doing this; make it consistent.

It is hard to see, but the free of the root in protection_domain_free() is
a NOP on the success path because v1_free_pgtable() does
amd_iommu_domain_clr_pt_root(). The root memory is already freed because
free_sub_pt() put it on the freelist. The free path in
protection_domain_free() is only used during error unwind of
protection_domain_alloc().

Reviewed-by: Vasant Hegde
Signed-off-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/1-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index bfbcec68efb94..03a3b09f05125 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -573,20 +573,24 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	       pgtable->mode > PAGE_MODE_6_LEVEL);
 
 	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+	iommu_put_pages_list(&freelist);
 
 	/* Update data structure */
 	amd_iommu_domain_clr_pt_root(dom);
 
 	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(dom);
-
-	iommu_put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
 
+	pgtable->root = iommu_alloc_page(GFP_KERNEL);
+	if (!pgtable->root)
+		return NULL;
+	pgtable->mode = PAGE_MODE_3_LEVEL;
+
 	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
 	cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index adb579030e682..70213f94e24c1 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -52,8 +52,6 @@
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)
 
-#define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL
-
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 
 LIST_HEAD(ioapic_map);
@@ -2260,30 +2258,15 @@ void protection_domain_free(struct protection_domain *domain)
 	if (domain->iop.pgtbl_cfg.tlb)
 		free_io_pgtable_ops(&domain->iop.iop.ops);
 
-	if (domain->iop.root)
-		iommu_free_page(domain->iop.root);
-
 	if (domain->id)
 		domain_id_free(domain->id);
 
 	kfree(domain);
 }
 
-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+static int protection_domain_init_v1(struct protection_domain *domain)
 {
-	u64 *pt_root = NULL;
-
-	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
-	if (mode != PAGE_MODE_NONE) {
-		pt_root = iommu_alloc_page(GFP_KERNEL);
-		if (!pt_root)
-			return -ENOMEM;
-	}
-
 	domain->pd_mode = PD_MODE_V1;
-	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
-
 	return 0;
 }
 
@@ -2336,7 +2319,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
 
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
+		ret = protection_domain_init_v1(domain);
 		break;
 	case AMD_IOMMU_V2:
 		ret = protection_domain_init_v2(domain);
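
As an illustration of the ownership rule the patch describes (page table memory
allocated and freed entirely within the io_pgtable code), below is a minimal
standalone C sketch. It is not kernel code: the toy_* names, the struct layout,
and calloc()/free() are stand-ins for the real amd_io_pgtable structures and
the iommu_alloc_page()/iommu_free_page() helpers.

/*
 * Standalone sketch (not kernel code) of the ownership rule this patch
 * enforces: the page-table layer allocates its own root table when it is
 * created and frees it when it is destroyed, so callers never free the
 * root on their unwind paths.  All names and calloc()/free() below are
 * illustrative stand-ins, not the real AMD IOMMU helpers or structures.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_pgtable {
	void *root;	/* stand-in for the top-level page table page */
	int mode;	/* stand-in for PAGE_MODE_3_LEVEL etc. */
};

/* Analogous to v1_alloc_pgtable(): the allocation path owns the root. */
static int toy_pgtable_setup(struct toy_pgtable *pt)
{
	pt->root = calloc(1, 4096);	/* stand-in for one page */
	if (!pt->root)
		return -1;
	pt->mode = 3;
	return 0;
}

/* Analogous to v1_free_pgtable(): the same layer releases the root. */
static void toy_pgtable_teardown(struct toy_pgtable *pt)
{
	free(pt->root);
	pt->root = NULL;
	pt->mode = 0;
}

int main(void)
{
	struct toy_pgtable pt;

	/* The caller only propagates setup failure; it never frees root. */
	if (toy_pgtable_setup(&pt)) {
		fprintf(stderr, "setup failed\n");
		return 1;
	}
	toy_pgtable_teardown(&pt);
	return 0;
}

The point of the sketch is the division of responsibility: the setup routine
that allocates the root is the only place that can fail for lack of memory,
and the matching teardown routine is the only place that releases it, so a
caller's error unwind (as in protection_domain_free() after this patch) has
nothing page-table-related left to undo.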