iommu/vt-d: Split intel_iommu_domain_alloc_paging_flags()
author     Jason Gunthorpe <jgg@nvidia.com>
           Mon, 14 Jul 2025 04:50:23 +0000 (12:50 +0800)
committer  Will Deacon <will@kernel.org>
           Mon, 14 Jul 2025 10:18:03 +0000 (11:18 +0100)
Create stage-specific functions that check the stage-specific conditions
to determine whether each stage can be supported.

Have intel_iommu_domain_alloc_paging_flags() try each stage in sequence
until one does not return EOPNOTSUPP, preferring the first stage when it
is available and suitable for the requested flags.
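
The fallback works because EOPNOTSUPP is the only error treated as "try
the next stage"; any other error pointer (e.g. -ENOMEM from
paging_domain_alloc()) is returned to the caller as-is. Below is a
minimal userspace sketch of this error-pointer dispatch pattern, with
stand-in macros mimicking the kernel's <linux/err.h> and hypothetical
try_first()/try_second() backends (neither is kernel code):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's <linux/err.h> helpers. */
    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static int first_obj, second_obj;

    /* Hypothetical backend: rejects with -EOPNOTSUPP when it cannot
     * honor the flags; any other error it returned would be final. */
    static void *try_first(unsigned int flags)
    {
            if (flags)
                    return ERR_PTR(-EOPNOTSUPP);
            return &first_obj;
    }

    static void *try_second(unsigned int flags)
    {
            return &second_obj;
    }

    /* Prefer the first backend; fall through to the second only on
     * -EOPNOTSUPP, mirroring the new alloc_paging_flags() dispatch. */
    static void *alloc(unsigned int flags)
    {
            void *ret = try_first(flags);

            if (ret != ERR_PTR(-EOPNOTSUPP))
                    return ret;
            return try_second(flags);
    }

    int main(void)
    {
            void *d = alloc(1); /* flags set: falls back to second */

            printf("%s stage\n", d == &first_obj ? "first" : "second");
            return IS_ERR(d);
    }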

Move second-stage-only operations, such as nested_parent and
dirty_tracking, into the second-stage function for clarity.

Move initialization of the iommu_domain members into paging_domain_alloc().

Drop initialization of domain->owner as the callers all do it.
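
The callers here are the iommu core allocation paths: once the driver's
domain_alloc_paging_flags() hook returns successfully, the core assigns
domain->owner itself. A simplified sketch of that pattern (paraphrased,
not the verbatim drivers/iommu/iommu.c code):

    /* Sketch of the core allocator's side of the contract: the core,
     * not the driver, records which ops own the new paging domain. */
    domain = ops->domain_alloc_paging_flags(dev, flags, user_data);
    if (IS_ERR(domain))
            return domain;
    domain->owner = ops; /* set once here for every driver */
    return domain;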

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/4-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250714045028.958850-7-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/intel/iommu.c

index 8c99f8bf2065625a0ccea0d0cfbb45b6ee4ccc76..553f0e32be9adfed2a3b6bbfeeb6d053e07cfa5d 100644
@@ -3281,10 +3281,15 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->cache_lock);
        xa_init(&domain->iommu_array);
+       INIT_LIST_HEAD(&domain->s1_domains);
+       spin_lock_init(&domain->s1_lock);
 
        domain->nid = dev_to_node(dev);
        domain->use_first_level = first_stage;
 
+       domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+       domain->domain.ops = intel_iommu_ops.default_domain_ops;
+
        /* calculate the address width */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
@@ -3326,62 +3331,73 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
 }
 
 static struct iommu_domain *
-intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
-                                     const struct iommu_user_data *user_data)
+intel_iommu_domain_alloc_first_stage(struct device *dev,
+                                    struct intel_iommu *iommu, u32 flags)
+{
+       struct dmar_domain *dmar_domain;
+
+       if (flags & ~IOMMU_HWPT_ALLOC_PASID)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       /* Only SL is available in legacy mode */
+       if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       dmar_domain = paging_domain_alloc(dev, true);
+       if (IS_ERR(dmar_domain))
+               return ERR_CAST(dmar_domain);
+       return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_second_stage(struct device *dev,
+                                     struct intel_iommu *iommu, u32 flags)
 {
-       struct device_domain_info *info = dev_iommu_priv_get(dev);
-       bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
-       bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
-       struct intel_iommu *iommu = info->iommu;
        struct dmar_domain *dmar_domain;
-       struct iommu_domain *domain;
-       bool first_stage;
 
        if (flags &
            (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
               IOMMU_HWPT_ALLOC_PASID)))
                return ERR_PTR(-EOPNOTSUPP);
-       if (nested_parent && !nested_supported(iommu))
-               return ERR_PTR(-EOPNOTSUPP);
-       if (user_data || (dirty_tracking && !ssads_supported(iommu)))
+
+       if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
+            !nested_supported(iommu)) ||
+           ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
+            !ssads_supported(iommu)))
                return ERR_PTR(-EOPNOTSUPP);
 
-       /*
-        * Always allocate the guest compatible page table unless
-        * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING
-        * is specified.
-        */
-       if (nested_parent || dirty_tracking) {
-               if (!sm_supported(iommu) || !ecap_slts(iommu->ecap))
-                       return ERR_PTR(-EOPNOTSUPP);
-               first_stage = false;
-       } else {
-               first_stage = first_level_by_default(iommu);
-       }
+       /* Legacy mode always supports second stage */
+       if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
+               return ERR_PTR(-EOPNOTSUPP);
 
-       dmar_domain = paging_domain_alloc(dev, first_stage);
+       dmar_domain = paging_domain_alloc(dev, false);
        if (IS_ERR(dmar_domain))
                return ERR_CAST(dmar_domain);
-       domain = &dmar_domain->domain;
-       domain->type = IOMMU_DOMAIN_UNMANAGED;
-       domain->owner = &intel_iommu_ops;
-       domain->ops = intel_iommu_ops.default_domain_ops;
-
-       if (nested_parent) {
-               dmar_domain->nested_parent = true;
-               INIT_LIST_HEAD(&dmar_domain->s1_domains);
-               spin_lock_init(&dmar_domain->s1_lock);
-       }
 
-       if (dirty_tracking) {
-               if (dmar_domain->use_first_level) {
-                       iommu_domain_free(domain);
-                       return ERR_PTR(-EOPNOTSUPP);
-               }
-               domain->dirty_ops = &intel_dirty_ops;
-       }
+       dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
 
-       return domain;
+       if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+               dmar_domain->domain.dirty_ops = &intel_dirty_ops;
+
+       return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+                                     const struct iommu_user_data *user_data)
+{
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct intel_iommu *iommu = info->iommu;
+       struct iommu_domain *domain;
+
+       if (user_data)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       /* Prefer first stage if possible by default. */
+       domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
+       if (domain != ERR_PTR(-EOPNOTSUPP))
+               return domain;
+       return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)