iommu/arm-smmu-v3: Convert to domain_alloc_paging()
author     Jason Gunthorpe <jgg@nvidia.com>
           Mon, 26 Feb 2024 17:07:27 +0000 (13:07 -0400)
committer  Will Deacon <will@kernel.org>
           Thu, 29 Feb 2024 15:12:23 +0000 (15:12 +0000)
Now that the BLOCKED and IDENTITY behaviors are managed with their own
domains, change to the domain_alloc_paging() op.

For now SVA continues to use the old interface; eventually it will get its
own op that can pass in the device and mm_struct, which will let us have a
sane lifetime for the mmu_notifier.

Call arm_smmu_domain_finalise() early if dev is available.
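
For reference, a minimal sketch of the two allocation hooks involved,
paraphrased from struct iommu_ops in include/linux/iommu.h around this
kernel version (not part of this patch; unrelated members are elided):

    /*
     * Paraphrased sketch, not the full definition. domain_alloc_paging()
     * receives the device being attached (which may be NULL) and returns a
     * paging domain, or an ERR_PTR() as this driver now does on failure,
     * while the legacy domain_alloc() only sees the domain type.
     */
    struct iommu_ops {
            /* ... */
            struct iommu_domain *(*domain_alloc)(unsigned int iommu_domain_type);
            struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
            /* ... */
    };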

Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Moritz Fischer <moritzf@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/16-v6-96275f25c39d+2d4-smmuv3_newapi_p1_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

index ebd8362c8aa3ac5089efa4fdfd9fde146c0d1afb..b7938f17222b4d91bff71652df66f998f7665c80 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2197,14 +2197,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
-       struct arm_smmu_domain *smmu_domain;
 
        if (type == IOMMU_DOMAIN_SVA)
                return arm_smmu_sva_domain_alloc();
+       return ERR_PTR(-EOPNOTSUPP);
+}
 
-       if (type != IOMMU_DOMAIN_UNMANAGED &&
-           type != IOMMU_DOMAIN_DMA)
-               return NULL;
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+       struct arm_smmu_domain *smmu_domain;
 
        /*
         * Allocate the domain and initialise some of its data structures.
@@ -2213,13 +2214,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
         */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        mutex_init(&smmu_domain->init_mutex);
        INIT_LIST_HEAD(&smmu_domain->devices);
        spin_lock_init(&smmu_domain->devices_lock);
        INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
 
+       if (dev) {
+               struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+               int ret;
+
+               ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+               if (ret) {
+                       kfree(smmu_domain);
+                       return ERR_PTR(ret);
+               }
+       }
        return &smmu_domain->domain;
 }
 
@@ -3083,6 +3094,7 @@ static struct iommu_ops arm_smmu_ops = {
        .blocked_domain         = &arm_smmu_blocked_domain,
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
+       .domain_alloc_paging    = arm_smmu_domain_alloc_paging,
        .probe_device           = arm_smmu_probe_device,
        .release_device         = arm_smmu_release_device,
        .device_group           = arm_smmu_device_group,
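
Because the new op reports failure with ERR_PTR() rather than NULL (note the
-ENOMEM and -EOPNOTSUPP returns above), callers are expected to check the
result with IS_ERR(). The fragment below is an illustrative sketch only:
example_alloc_paging_domain() is a made-up helper, and the real consumer of
this op is the IOMMU core in drivers/iommu/iommu.c.

    /*
     * Illustrative only: a hypothetical wrapper showing how the ERR_PTR()
     * convention used by arm_smmu_domain_alloc_paging() is consumed.
     */
    static struct iommu_domain *
    example_alloc_paging_domain(const struct iommu_ops *ops, struct device *dev)
    {
            struct iommu_domain *domain;

            if (!ops->domain_alloc_paging)
                    return ERR_PTR(-EOPNOTSUPP);

            /* May return ERR_PTR(-ENOMEM), e.g. if the kzalloc() above fails. */
            domain = ops->domain_alloc_paging(dev);
            if (IS_ERR(domain))
                    return domain;

            /* Assumption: the caller, not the driver, fills in the type. */
            domain->type = IOMMU_DOMAIN_UNMANAGED;
            return domain;
    }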