git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
iommu/amd: Move allocation of the top table into v1_alloc_pgtable
author: Jason Gunthorpe <jgg@nvidia.com>
Fri, 30 Aug 2024 00:06:10 +0000 (21:06 -0300)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 4 Oct 2024 14:32:40 +0000 (16:32 +0200)
[ Upstream commit 8d00b77a52ef4b2091696ca25753d0ab95e4d839 ]

All the page table memory should be allocated/free within the io_pgtable
struct. The v2 path is already doing this, make it consistent.

It is hard to see but the free of the root in protection_domain_free() is
a NOP on the success path because v1_free_pgtable() does
amd_iommu_domain_clr_pt_root().

The root memory is already freed because free_sub_pt() put it on the
freelist. The free path in protection_domain_free() is only used during
error unwind of protection_domain_alloc().

Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Stable-dep-of: 7a41dcb52f9d ("iommu/amd: Set the pgsize_bitmap correctly")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c

index 1074ee25064d06024b2f7857522bbc791abed8ba..05aed3cb46f1bfe2ec92c95987266f2e1b61e924 100644 (file)
@@ -574,20 +574,24 @@ static void v1_free_pgtable(struct io_pgtable *iop)
               pgtable->mode > PAGE_MODE_6_LEVEL);
 
        free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+       iommu_put_pages_list(&freelist);
 
        /* Update data structure */
        amd_iommu_domain_clr_pt_root(dom);
 
        /* Make changes visible to IOMMUs */
        amd_iommu_domain_update(dom);
-
-       iommu_put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
        struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
 
+       pgtable->root = iommu_alloc_page(GFP_KERNEL);
+       if (!pgtable->root)
+               return NULL;
+       pgtable->mode = PAGE_MODE_3_LEVEL;
+
        cfg->pgsize_bitmap  = AMD_IOMMU_PGSIZES;
        cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
        cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE;
index fc660d4b10ac8d40a249e0056ee8abd844a9a999..edbd4ca1451a864ecc09a7d4b64a73d210d141b0 100644 (file)
@@ -52,8 +52,6 @@
 #define HT_RANGE_START         (0xfd00000000ULL)
 #define HT_RANGE_END           (0xffffffffffULL)
 
-#define DEFAULT_PGTABLE_LEVEL  PAGE_MODE_3_LEVEL
-
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 
 LIST_HEAD(ioapic_map);
@@ -2267,30 +2265,15 @@ void protection_domain_free(struct protection_domain *domain)
        if (domain->iop.pgtbl_cfg.tlb)
                free_io_pgtable_ops(&domain->iop.iop.ops);
 
-       if (domain->iop.root)
-               iommu_free_page(domain->iop.root);
-
        if (domain->id)
                domain_id_free(domain->id);
 
        kfree(domain);
 }
 
-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+static int protection_domain_init_v1(struct protection_domain *domain)
 {
-       u64 *pt_root = NULL;
-
-       BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
-       if (mode != PAGE_MODE_NONE) {
-               pt_root = iommu_alloc_page(GFP_KERNEL);
-               if (!pt_root)
-                       return -ENOMEM;
-       }
-
        domain->pd_mode = PD_MODE_V1;
-       amd_iommu_domain_set_pgtable(domain, pt_root, mode);
-
        return 0;
 }
 
@@ -2343,7 +2326,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
 
        switch (pgtable) {
        case AMD_IOMMU_V1:
-               ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
+               ret = protection_domain_init_v1(domain);
                break;
        case AMD_IOMMU_V2:
                ret = protection_domain_init_v2(domain);