git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/xe: unconditionally apply PINNED for pin_map()
author: Matthew Auld <matthew.auld@intel.com>
Thu, 3 Apr 2025 10:24:46 +0000 (11:24 +0100)
committer: Matthew Auld <matthew.auld@intel.com>
Fri, 4 Apr 2025 10:41:08 +0000 (11:41 +0100)
Some users apply PINNED and some don't when using pin_map(). The pin in
pin_map() should imply PINNED so just unconditionally apply it and clean
up all users.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Link: https://lore.kernel.org/r/20250403102440.266113-14-matthew.auld@intel.com
drivers/gpu/drm/xe/display/intel_fbdev_fb.c
drivers/gpu/drm/xe/display/xe_plane_initial.c
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_ggtt.c
drivers/gpu/drm/xe/xe_lmtt.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_pt.c

index 3a1e505ff1820bf6719830dd1b1037f12898f6cc..267f31697343d53be0d40994a89ae98ba87bc926 100644 (file)
@@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
                                           NULL, size,
                                           ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
                                           XE_BO_FLAG_STOLEN |
-                                          XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+                                          XE_BO_FLAG_GGTT);
                if (!IS_ERR(obj))
                        drm_info(&xe->drm, "Allocated fbdev into stolen\n");
                else
@@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
                obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
                                           ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
                                           XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-                                          XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+                                          XE_BO_FLAG_GGTT);
        }
 
        if (IS_ERR(obj)) {
index 4ca0cb571194165310a4e68721d7ebc3b5c8ccf7..6502b82741732cae2aa99954eed00720c54dc834 100644 (file)
@@ -83,7 +83,7 @@ initial_plane_bo(struct xe_device *xe,
        if (plane_config->size == 0)
                return NULL;
 
-       flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+       flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
 
        base = round_down(plane_config->base, page_size);
        if (IS_DGFX(xe)) {
index d5fe0ea889ad84343b3e97ac808becb388ba2cb6..52f89476bf62529274769ecd1e15085ee7f1f242 100644 (file)
@@ -202,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 
        big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
                                   ttm_bo_type_kernel,
-                                  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                  XE_BO_FLAG_PINNED);
+                                  XE_BO_FLAG_VRAM_IF_DGFX(tile));
        if (IS_ERR(big)) {
                KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
                goto vunmap;
@@ -211,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 
        pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
-                                 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                 XE_BO_FLAG_PINNED);
+                                 XE_BO_FLAG_VRAM_IF_DGFX(tile));
        if (IS_ERR(pt)) {
                KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
                           PTR_ERR(pt));
@@ -222,8 +220,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
        tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
                                    2 * SZ_4K,
                                    ttm_bo_type_kernel,
-                                   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                   XE_BO_FLAG_PINNED);
+                                   XE_BO_FLAG_VRAM_IF_DGFX(tile));
        if (IS_ERR(tiny)) {
                KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
                           PTR_ERR(tiny));
index 2166087fca090217c57b12e22f4068b885917e06..29e5849a9ae3275cf4f8080df035e1a91425627a 100644 (file)
@@ -2024,7 +2024,7 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
                flags |= XE_BO_FLAG_GGTT;
 
        bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
                                       alignment);
        if (IS_ERR(bo))
                return bo;
index 5fcb2b4c2c1397bb1d27d7e5bacf8ee660042944..7062115909f2dfeacb2cc0d5d556dcb08616c28a 100644 (file)
@@ -365,7 +365,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
         * scratch entries, rather keep the scratch page in system memory on
         * platforms where 64K pages are needed for VRAM.
         */
-       flags = XE_BO_FLAG_PINNED;
+       flags = 0;
        if (ggtt->flags & XE_GGTT_FLAGS_64K)
                flags |= XE_BO_FLAG_SYSTEM;
        else
index 89393dcb53d9d639e309245b2f49b2e276231eae..63db66df064b5098e697d409d6b27987b7acf733 100644 (file)
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
                                             lmtt->ops->lmtt_pte_num(level)),
                                  ttm_bo_type_kernel,
                                  XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
-                                 XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
+                                 XE_BO_FLAG_NEEDS_64K);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto out_free_pt;
index 96b244aaf2d6eaccc6128c00386fb82b1647492c..3777cc30d688a81d59125ab9ba57b5138d39c426 100644 (file)
@@ -209,7 +209,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                  num_entries * XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
                                  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                 XE_BO_FLAG_PINNED |
                                  XE_BO_FLAG_PAGETABLE);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
index 49f2908f30d31d8db98655c5066d55cd765ac154..8966d5a188aad379581dc845d172688e52e9de5e 100644 (file)
@@ -117,7 +117,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
                return ERR_PTR(-ENOMEM);
 
        bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                  XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE | XE_BO_FLAG_PINNED |
+                  XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
                   XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
        if (vm->xef) /* userspace */
                bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;