iommu/amd: Remove dev == NULL checks
author     Jason Gunthorpe <jgg@nvidia.com>
           Fri, 10 Jan 2025 16:35:01 +0000 (12:35 -0400)
committer  Joerg Roedel <jroedel@suse.de>
           Fri, 17 Jan 2025 07:59:29 +0000 (08:59 +0100)
This is no longer possible: amd_iommu_domain_alloc_paging_flags() is never
called with dev == NULL from the core code. Similarly,
get_amd_iommu_from_dev() can never return NULL either.

Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v2-9776c53c2966+1c7-amd_paging_flags_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
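
A note on the pattern, for readers outside the driver: this is the usual removal of a defensive NULL check once the calling contract guarantees a valid pointer, since the iommu core only reaches these ops through a device that is already bound to an AMD IOMMU. The sketch below is not kernel code; lookup_iommu() and alloc_paging_domain() are hypothetical stand-ins for get_amd_iommu_from_dev() and the driver's allocation path, used only to illustrate why the dev == NULL branches in the hunks below are dead code.

/*
 * Illustrative sketch only (not kernel code).  Assumed names:
 * lookup_iommu() and alloc_paging_domain() stand in for
 * get_amd_iommu_from_dev() and the paging-domain allocation op.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct dev    { const char *name; };
struct iommu  { const char *name; };
struct domain { const struct iommu *iommu; };

/* Stand-in for get_amd_iommu_from_dev(): with a valid, bound device it
 * always resolves to an IOMMU instance, so callers need no NULL check. */
static const struct iommu *lookup_iommu(const struct dev *dev)
{
	static const struct iommu iommu = { .name = "iommu0" };

	assert(dev);	/* contract: the core never passes NULL */
	return &iommu;
}

/* Stand-in for the driver op after this commit: no dev == NULL branch
 * and no iommu == NULL branch remain. */
static struct domain *alloc_paging_domain(const struct dev *dev)
{
	const struct iommu *iommu = lookup_iommu(dev);
	struct domain *domain = calloc(1, sizeof(*domain));

	if (!domain)
		return NULL;
	domain->iommu = iommu;
	return domain;
}

/* Stand-in for the core layer: the op is only ever invoked with a
 * device that is already bound to an IOMMU. */
int main(void)
{
	struct dev dev = { .name = "0000:00:01.0" };
	struct domain *domain = alloc_paging_domain(&dev);

	if (!domain)
		return 1;
	printf("%s uses %s\n", dev.name, domain->iommu->name);
	free(domain);
	return 0;
}
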
drivers/iommu/amd/iommu.c

index 8e9a0445e86f77275b0cdee2b1408cfc0abb59f1..b56b29b3a47d0aea5c0116f18076f6e427d12353 100644
@@ -2523,13 +2523,10 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
                                                  u32 flags, int pgtable)
 {
        bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+       struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
        struct protection_domain *domain;
-       struct amd_iommu *iommu = NULL;
        int ret;
 
-       if (dev)
-               iommu = get_amd_iommu_from_dev(dev);
-
        /*
         * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
         * default to use IOMMU_DOMAIN_DMA[_FQ].
@@ -2537,8 +2534,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
        if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
                return ERR_PTR(-EINVAL);
 
-       domain = protection_domain_alloc(type,
-                                        dev ? dev_to_node(dev) : NUMA_NO_NODE);
+       domain = protection_domain_alloc(type, dev_to_node(dev));
        if (!domain)
                return ERR_PTR(-ENOMEM);
 
@@ -2554,13 +2550,11 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
        domain->domain.geometry.force_aperture = true;
        domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
-       if (iommu) {
-               domain->domain.type = type;
-               domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+       domain->domain.type = type;
+       domain->domain.ops = iommu->iommu.ops->default_domain_ops;
 
-               if (dirty_tracking)
-                       domain->domain.dirty_ops = &amd_dirty_ops;
-       }
+       if (dirty_tracking)
+               domain->domain.dirty_ops = &amd_dirty_ops;
 
        return &domain->domain;
 }
@@ -2571,13 +2565,10 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
                                     const struct iommu_user_data *user_data)
 
 {
        unsigned int type = IOMMU_DOMAIN_UNMANAGED;
-       struct amd_iommu *iommu = NULL;
+       struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
        const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
                                                IOMMU_HWPT_ALLOC_PASID;
 
-       if (dev)
-               iommu = get_amd_iommu_from_dev(dev);
-
        if ((flags & ~supported_flags) || user_data)
                return ERR_PTR(-EOPNOTSUPP);
 
@@ -2591,10 +2582,9 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 
        /* Allocate domain with v1 page table for dirty tracking */
        if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
-               if (iommu && amd_iommu_hd_support(iommu)) {
-                       return do_iommu_domain_alloc(type, dev,
-                                                    flags, AMD_IOMMU_V1);
-               }
+               if (amd_iommu_hd_support(iommu))
+                       return do_iommu_domain_alloc(type, dev, flags,
+                                                    AMD_IOMMU_V1);
 
                return ERR_PTR(-EOPNOTSUPP);
        }