drm/panfrost: Fix the error path in panfrost_mmu_map_fault_addr()
author Boris Brezillon <boris.brezillon@collabora.com>
Fri, 5 Jan 2024 18:46:11 +0000 (21:46 +0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Apr 2024 09:19:34 +0000 (11:19 +0200)
commit 1fc9af813b25e146d3607669247d0f970f5a87c3 upstream.

If some of the pages or the sgt allocation failed, we shouldn't release
the pages ref we got earlier; otherwise we will end up with unbalanced
get/put_pages() calls. We should instead leave everything in place and
let the BO release function deal with the extra cleanup when the object
is destroyed, or let the fault handler try again the next time it's called.

Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations")
Cc: <stable@vger.kernel.org>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Co-developed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240105184624.508603-18-dmitry.osipenko@collabora.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/gpu/drm/panfrost/panfrost_mmu.c

index c0123d09f699c74d6bd18b6ab0a18944268c7d4b..83fa384f6a24ce213832fb7b510557a89aafb538 100644
@@ -500,11 +500,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        mapping_set_unevictable(mapping);
 
        for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+               /* Can happen if the last fault only partially filled this
+                * section of the pages array before failing. In that case
+                * we skip already filled pages.
+                */
+               if (pages[i])
+                       continue;
+
                pages[i] = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(pages[i])) {
                        ret = PTR_ERR(pages[i]);
                        pages[i] = NULL;
-                       goto err_pages;
+                       goto err_unlock;
                }
        }
 
@@ -512,7 +519,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
        if (ret)
-               goto err_pages;
+               goto err_unlock;
 
        ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
@@ -534,8 +541,6 @@ out:
 
 err_map:
        sg_free_table(sgt);
-err_pages:
-       drm_gem_shmem_put_pages(&bo->base);
 err_unlock:
        dma_resv_unlock(obj->resv);
 err_bo:
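
For context, the sketch below is a minimal, hypothetical illustration of the rule the
commit message describes, not the panfrost driver itself: struct demo_bo and the helpers
demo_get_pages()/demo_read_page() are assumed stand-ins, loosely mirroring the roles of
drm_gem_shmem_get_pages() and shmem_read_mapping_page(). On a partial failure the pages
ref and the already-filled slots are kept, so either the next fault retries (skipping the
filled slots) or the object's release path drops everything exactly once.

#include <linux/err.h>
#include <linux/mm.h>

struct demo_bo {
	struct page **pages;	/* sparse array, filled on demand */
	/* pages refcount, locking, sgt bookkeeping, etc. elided */
};

/* Hypothetical stand-ins for the real helpers; declarations only. */
int demo_get_pages(struct demo_bo *bo);		/* allocates the array, takes a ref */
struct page *demo_read_page(struct demo_bo *bo, pgoff_t i);

int demo_map_fault_range(struct demo_bo *bo, pgoff_t first, pgoff_t n)
{
	pgoff_t i;
	int ret;

	if (!bo->pages) {
		ret = demo_get_pages(bo);
		if (ret)
			return ret;
	}

	for (i = first; i < first + n; i++) {
		/* Slots filled by a previous, partially failed fault are kept. */
		if (bo->pages[i])
			continue;

		bo->pages[i] = demo_read_page(bo, i);
		if (IS_ERR(bo->pages[i])) {
			ret = PTR_ERR(bo->pages[i]);
			bo->pages[i] = NULL;
			/* No put_pages() here: keep the ref and the filled
			 * slots so the next fault can retry, or so the BO
			 * release path can drop everything exactly once. */
			return ret;
		}
	}

	return 0;
}

The design choice in the real driver is the same: the pages ref has exactly one cleanup
owner (the BO release path), so the fault handler's error gotos only undo what the current
invocation did, which is why the err_pages label and its drm_gem_shmem_put_pages() call
are removed above.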