git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN
authorRob Clark <robdclark@chromium.org>
Sun, 29 Jun 2025 20:13:15 +0000 (13:13 -0700)
committerRob Clark <robin.clark@oss.qualcomm.com>
Sat, 5 Jul 2025 00:48:37 +0000 (17:48 -0700)
With user managed VMs and multiple queues, it is in theory possible to
trigger map/unmap errors.  These will (in a later patch) mark the VM as
unusable.  But we want to tell the io-pgtable helpers not to spam the
log.  In addition, in the unmap path, we don't want to bail early from
the unmap, to ensure we don't leave some dangling pages mapped.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661520/

drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_mmu.h

index 62b5f294a2aa361d35198ad2249ee7d75031d9f8..5e115abe769216f3b0d196d3686d3c9c4a1c2bb4 100644 (file)
@@ -2280,7 +2280,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
 {
        struct msm_mmu *mmu;
 
-       mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu);
+       mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
 
        if (IS_ERR(mmu))
                return ERR_CAST(mmu);
index a0c74ecdb11b8eb091a5d4c4f88e109ea6c92eed..bd67431cb25f198eba5826d064c0bb825f043ad4 100644 (file)
@@ -94,15 +94,24 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
 {
        struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
        struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       int ret = 0;
 
        while (size) {
-               size_t unmapped, pgsize, count;
+               size_t pgsize, count;
+               ssize_t unmapped;
 
                pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
 
                unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
-               if (!unmapped)
-                       break;
+               if (unmapped <= 0) {
+                       ret = -EINVAL;
+                       /*
+                        * Continue attempting to unmap the remainder of the
+                        * range, so we don't end up with some dangling
+                        * mapped pages
+                        */
+                       unmapped = PAGE_SIZE;
+               }
 
                iova += unmapped;
                size -= unmapped;
@@ -110,7 +119,7 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
 
        iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
 
-       return (size == 0) ? 0 : -EINVAL;
+       return ret;
 }
 
 static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
@@ -324,7 +333,7 @@ static const struct iommu_flush_ops tlb_ops = {
 static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg);
 
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
 {
        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
        struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -358,6 +367,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
        ttbr0_cfg.tlb = &tlb_ops;
 
+       if (!kernel_managed) {
+               ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
+       }
+
        pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
                &ttbr0_cfg, pagetable);
 
index 9d61999f4d42580ed2ceb196e23629c8ce32b9e9..04dce0faaa3a5479a6a4ce1b9d345ea30e0bc4ae 100644 (file)
@@ -51,7 +51,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
        mmu->handler = handler;
 }
 
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
 
 int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
                               int *asid);