iommu/amd: Pass page table type as param to pdom_setup_pgtable()
author		Vasant Hegde <vasant.hegde@amd.com>
		Mon, 28 Oct 2024 09:38:06 +0000 (09:38 +0000)
committer	Joerg Roedel <jroedel@suse.de>
		Tue, 29 Oct 2024 09:08:21 +0000 (10:08 +0100)
Current code forces the v1 page table for UNMANAGED domains and the global
page table type (amd_iommu_pgtable) for the rest of the paging domains.

The following patch series adds support for the domain_alloc_paging() op and
enhances domain_alloc_user() to allocate the page table based on 'flags'.

Hence pass the page table type as a parameter to pdom_setup_pgtable() so
that the caller can decide the right page table type. Also update
dma_max_address() to take pgtable as a parameter.
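
For reference, this is the post-patch shape of pdom_setup_pgtable(),
assembled from the hunks below. It is a sketch, not verbatim file contents:
the V1/V2 case bodies and the allocation-failure return are elided by the
hunk boundaries, so those parts (including the AMD_IOMMU_V2 case and the
-ENOMEM return) are assumptions.

	static int pdom_setup_pgtable(struct protection_domain *domain,
				      unsigned int type, int pgtable)
	{
		struct io_pgtable_ops *pgtbl_ops;

		/* No need to allocate io pgtable ops in passthrough mode */
		if (!(type & __IOMMU_DOMAIN_PAGING))
			return 0;

		/* Validate the caller-chosen type; case bodies elided here */
		switch (pgtable) {
		case AMD_IOMMU_V1:
		case AMD_IOMMU_V2:	/* assumed to mirror the V1 case */
			break;
		default:
			return -EINVAL;
		}

		pgtbl_ops = alloc_io_pgtable_ops(pgtable,
						 &domain->iop.pgtbl.cfg, domain);
		if (!pgtbl_ops)
			return -ENOMEM;	/* assumed error code */

		return 0;
	}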

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jacob Pan <jacob.pan@linux.microsoft.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20241028093810.5901-9-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 6285fd1afd50b9e22a45b19e9cb3a9e2724a7032..cb6a72564c239f415756d2b096e7f990f5e3ad74 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2286,28 +2286,13 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 }
 
 static int pdom_setup_pgtable(struct protection_domain *domain,
-                             unsigned int type)
+                             unsigned int type, int pgtable)
 {
        struct io_pgtable_ops *pgtbl_ops;
-       int pgtable;
 
-       switch (type) {
        /* No need to allocate io pgtable ops in passthrough mode */
-       case IOMMU_DOMAIN_IDENTITY:
+       if (!(type & __IOMMU_DOMAIN_PAGING))
                return 0;
-       case IOMMU_DOMAIN_DMA:
-               pgtable = amd_iommu_pgtable;
-               break;
-       /*
-        * Force IOMMU v1 page table when allocating
-        * domain for pass-through devices.
-        */
-       case IOMMU_DOMAIN_UNMANAGED:
-               pgtable = AMD_IOMMU_V1;
-               break;
-       default:
-               return -EINVAL;
-       }
 
        switch (pgtable) {
        case AMD_IOMMU_V1:
@@ -2319,6 +2304,7 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
        default:
                return -EINVAL;
        }
+
        pgtbl_ops =
                alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
        if (!pgtbl_ops)
@@ -2327,9 +2313,9 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
        return 0;
 }
 
-static inline u64 dma_max_address(void)
+static inline u64 dma_max_address(int pgtable)
 {
-       if (amd_iommu_pgtable == AMD_IOMMU_V1)
+       if (pgtable == AMD_IOMMU_V1)
                return ~0ULL;
 
        /* V2 with 4/5 level page table */
@@ -2342,7 +2328,8 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 }
 
 static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
-                                                 struct device *dev, u32 flags)
+                                                 struct device *dev,
+                                                 u32 flags, int pgtable)
 {
        bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
        struct protection_domain *domain;
@@ -2367,7 +2354,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
        if (!domain)
                return ERR_PTR(-ENOMEM);
 
-       ret = pdom_setup_pgtable(domain, type);
+       ret = pdom_setup_pgtable(domain, type, pgtable);
        if (ret) {
                domain_id_free(domain->id);
                kfree(domain);
@@ -2375,7 +2362,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
        }
 
        domain->domain.geometry.aperture_start = 0;
-       domain->domain.geometry.aperture_end   = dma_max_address();
+       domain->domain.geometry.aperture_end   = dma_max_address(pgtable);
        domain->domain.geometry.force_aperture = true;
        domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
@@ -2393,8 +2380,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 {
        struct iommu_domain *domain;
+       int pgtable = amd_iommu_pgtable;
+
+       /*
+        * Force IOMMU v1 page table when allocating
+        * domain for pass-through devices.
+        */
+       if (type == IOMMU_DOMAIN_UNMANAGED)
+               pgtable = AMD_IOMMU_V1;
 
-       domain = do_iommu_domain_alloc(type, NULL, 0);
+       domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
        if (IS_ERR(domain))
                return NULL;
 
@@ -2412,7 +2407,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
        if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
                return ERR_PTR(-EOPNOTSUPP);
 
-       return do_iommu_domain_alloc(type, dev, flags);
+       return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V1);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)
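
A note on why the old three-way switch on 'type' collapses to a single flag
test: the paging-capable domain types are composed from __IOMMU_DOMAIN_PAGING
in include/linux/iommu.h, while IDENTITY is not. Quoted from the mainline
header for reference (verify against the tree this commit applies to):

	#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
	#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
	#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
					 __IOMMU_DOMAIN_DMA_API)

So '!(type & __IOMMU_DOMAIN_PAGING)' returns early exactly for the
passthrough (IDENTITY) case, and every paging domain proceeds to the switch
on the caller-chosen pgtable.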