]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/xe: Convert xe_bo_create_pin_map() for exhaustive eviction
authorThomas Hellström <thomas.hellstrom@linux.intel.com>
Mon, 8 Sep 2025 10:12:44 +0000 (12:12 +0200)
committerThomas Hellström <thomas.hellstrom@linux.intel.com>
Wed, 10 Sep 2025 07:16:06 +0000 (09:16 +0200)
Introduce an xe_bo_create_pin_map_novm() function that does not
take the drm_exec parameter to simplify the conversion of many
callsites.
For the rest, ensure that the same drm_exec context that was used
for locking the vm is passed down to validation.

Use xe_validation_guard() where appropriate.

v2:
- Avoid gotos from within xe_validation_guard(). (Matt Brost)
- Break out the change to pf_provision_vf_lmem8 to a separate
  patch.
- Adapt to signature change of xe_validation_guard().

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250908101246.65025-12-thomas.hellstrom@linux.intel.com
17 files changed:
drivers/gpu/drm/xe/display/intel_fbdev_fb.c
drivers/gpu/drm/xe/display/xe_dsb_buffer.c
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_gsc.c
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
drivers/gpu/drm/xe/xe_guc_engine_activity.c
drivers/gpu/drm/xe/xe_lmtt.c
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_oa.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_pt.h
drivers/gpu/drm/xe/xe_pxp_submit.c
drivers/gpu/drm/xe/xe_vm.c

index d96ba2b510655dee52d663b558ac0e5b34bf434f..8ea9a472113c419a9bf00987f5ab10eec4fb7ff3 100644 (file)
@@ -42,11 +42,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
        obj = ERR_PTR(-ENODEV);
 
        if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
-               obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
-                                          NULL, size,
-                                          ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
-                                          XE_BO_FLAG_STOLEN |
-                                          XE_BO_FLAG_GGTT);
+               obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+                                               size,
+                                               ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+                                               XE_BO_FLAG_STOLEN |
+                                               XE_BO_FLAG_GGTT, false);
                if (!IS_ERR(obj))
                        drm_info(&xe->drm, "Allocated fbdev into stolen\n");
                else
@@ -54,10 +54,10 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
        }
 
        if (IS_ERR(obj)) {
-               obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
-                                          ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
-                                          XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-                                          XE_BO_FLAG_GGTT);
+               obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size,
+                                               ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+                                               XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+                                               XE_BO_FLAG_GGTT, false);
        }
 
        if (IS_ERR(obj)) {
index 9f941fc2e36bb2cd0656ad21b21c10bedb869a98..58581d7aaae699cca14fc907e2e601c392db4202 100644 (file)
@@ -43,11 +43,11 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
                return false;
 
        /* Set scanout flag for WC mapping */
-       obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
-                                  NULL, PAGE_ALIGN(size),
-                                  ttm_bo_type_kernel,
-                                  XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-                                  XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
+       obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+                                       PAGE_ALIGN(size),
+                                       ttm_bo_type_kernel,
+                                       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+                                       XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
        if (IS_ERR(obj)) {
                kfree(vma);
                return false;
index 30f1073141fcbb1f325cd16fa4c9c9029d0252d0..4ae847b628e23025231f6d89c08d614131216048 100644 (file)
@@ -72,10 +72,10 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
        int ret = 0;
 
        /* allocate object of two page for HDCP command memory and store it */
-       bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
-                                 ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM |
-                                 XE_BO_FLAG_GGTT);
+       bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2,
+                                      ttm_bo_type_kernel,
+                                      XE_BO_FLAG_SYSTEM |
+                                      XE_BO_FLAG_GGTT, false);
 
        if (IS_ERR(bo)) {
                drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
index afa794e5606522be94169374069ee801d7cf5826..5904d658d1f2954db263a3bfdb7df8b489fc95e7 100644 (file)
@@ -204,7 +204,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
 
        big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
                                   ttm_bo_type_kernel,
-                                  XE_BO_FLAG_VRAM_IF_DGFX(tile));
+                                  XE_BO_FLAG_VRAM_IF_DGFX(tile),
+                                  exec);
        if (IS_ERR(big)) {
                KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
                goto vunmap;
@@ -212,7 +213,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
 
        pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
-                                 XE_BO_FLAG_VRAM_IF_DGFX(tile));
+                                 XE_BO_FLAG_VRAM_IF_DGFX(tile),
+                                 exec);
        if (IS_ERR(pt)) {
                KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
                           PTR_ERR(pt));
@@ -222,7 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
        tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
                                    2 * SZ_4K,
                                    ttm_bo_type_kernel,
-                                   XE_BO_FLAG_VRAM_IF_DGFX(tile));
+                                   XE_BO_FLAG_VRAM_IF_DGFX(tile),
+                                   exec);
        if (IS_ERR(tiny)) {
                KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
                           PTR_ERR(tiny));
index b7d7d14457905a16446b0fc531afb0d5d692be4b..1012655b65fd0fe9cd58f275adc5caf799782316 100644 (file)
@@ -2513,16 +2513,59 @@ xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
        return ret ? ERR_PTR(ret) : bo;
 }
 
+/**
+ * xe_bo_create_pin_map() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The vm to associate the buffer object with. The vm's resv must be locked
+ * with the transaction represented by @exec.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, and
+ * previously used for locking @vm's resv.
+ *
+ * Create a pinned and mapped bo. The bo will be associated with @vm if
+ * non-NULL; otherwise it will be external.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
+ * configured for interruptible locking.
+ */
 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
                                   struct xe_vm *vm, size_t size,
-                                  enum ttm_bo_type type, u32 flags)
+                                  enum ttm_bo_type type, u32 flags,
+                                  struct drm_exec *exec)
 {
-       struct drm_exec *exec = vm ? xe_vm_validation_exec(vm) : XE_VALIDATION_UNIMPLEMENTED;
-
        return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
                                               0, exec);
 }
 
+/**
+ * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and mapped bo. The bo will be external and not associated
+ * with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+                                       size_t size, enum ttm_bo_type type, u32 flags,
+                                       bool intr)
+{
+       return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
+}
+
 static void __xe_bo_unpin_map_no_vm(void *arg)
 {
        xe_bo_unpin_map_no_vm(arg);
@@ -2535,8 +2578,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
        int ret;
 
        KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
-
-       bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+       bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
        if (IS_ERR(bo))
                return bo;
 
index d9aef56ab8781aff7a5b4498c9f403291e70c5ff..143f59bcfb9352d608e05f3f43cde584c249614f 100644 (file)
@@ -108,7 +108,11 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t s
                                u16 cpu_caching, u32 flags, struct drm_exec *exec);
 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
                                   struct xe_vm *vm, size_t size,
-                                  enum ttm_bo_type type, u32 flags);
+                                  enum ttm_bo_type type, u32 flags,
+                                  struct drm_exec *exec);
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+                                       size_t size, enum ttm_bo_type type, u32 flags,
+                                       bool intr);
 struct xe_bo *
 xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
                             size_t size, u64 offset, enum ttm_bo_type type,
index f5ae28af60d46a5634c2befe4c27eb4c7fb8a5b2..83d61bf8ec62e82143de62254daffe7095da7b09 100644 (file)
@@ -136,10 +136,10 @@ static int query_compatibility_version(struct xe_gsc *gsc)
        u64 ggtt_offset;
        int err;
 
-       bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
-                                 ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM |
-                                 XE_BO_FLAG_GGTT);
+       bo = xe_bo_create_pin_map_novm(xe, tile, GSC_VER_PKT_SZ * 2,
+                                      ttm_bo_type_kernel,
+                                      XE_BO_FLAG_SYSTEM |
+                                      XE_BO_FLAG_GGTT, false);
        if (IS_ERR(bo)) {
                xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
                return PTR_ERR(bo);
index c712111aa30d59ee0ce8dda4d8bb62bd32ee9287..44cc612b0a7529173563fa81d726ce1b30929c3a 100644 (file)
@@ -55,12 +55,12 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
        xe_gt_assert(gt, size % sizeof(u32) == 0);
        xe_gt_assert(gt, size == ndwords * sizeof(u32));
 
-       bo = xe_bo_create_pin_map(xe, tile, NULL,
-                                 ALIGN(size, PAGE_SIZE),
-                                 ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM |
-                                 XE_BO_FLAG_GGTT |
-                                 XE_BO_FLAG_GGTT_INVALIDATE);
+       bo = xe_bo_create_pin_map_novm(xe, tile,
+                                      ALIGN(size, PAGE_SIZE),
+                                      ttm_bo_type_kernel,
+                                      XE_BO_FLAG_SYSTEM |
+                                      XE_BO_FLAG_GGTT |
+                                      XE_BO_FLAG_GGTT_INVALIDATE, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
@@ -91,12 +91,12 @@ static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
        xe_gt_assert(gt, size % sizeof(u32) == 0);
        xe_gt_assert(gt, size == ndwords * sizeof(u32));
 
-       bo = xe_bo_create_pin_map(xe, tile, NULL,
-                                 ALIGN(size, PAGE_SIZE),
-                                 ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM |
-                                 XE_BO_FLAG_GGTT |
-                                 XE_BO_FLAG_GGTT_INVALIDATE);
+       bo = xe_bo_create_pin_map_novm(xe, tile,
+                                      ALIGN(size, PAGE_SIZE),
+                                      ttm_bo_type_kernel,
+                                      XE_BO_FLAG_SYSTEM |
+                                      XE_BO_FLAG_GGTT |
+                                      XE_BO_FLAG_GGTT_INVALIDATE, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index 92e1f9f41b8c57c34a88e282b6cfb5aa2ebc83cf..2b99c1ebdd58683d502710e1b5d89351086fe47d 100644 (file)
@@ -94,16 +94,17 @@ static int allocate_engine_activity_buffers(struct xe_guc *guc,
        struct xe_tile *tile = gt_to_tile(gt);
        struct xe_bo *bo, *metadata_bo;
 
-       metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
-                                          ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
-                                          XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+       metadata_bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(metadata_size),
+                                               ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+                                               XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE,
+                                               false);
 
        if (IS_ERR(metadata_bo))
                return PTR_ERR(metadata_bo);
 
-       bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
-                                 ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                 XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+       bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(size),
+                                      ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                      XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, false);
 
        if (IS_ERR(bo)) {
                xe_bo_unpin_map_no_vm(metadata_bo);
index f2bfbfa3efa16b3c946bcb6673a9c55d87fe6ca3..62fc5a1a332d7789ee8d55e04961ba2491a3236c 100644 (file)
@@ -67,12 +67,12 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
                goto out;
        }
 
-       bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
-                                 PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
-                                            lmtt->ops->lmtt_pte_num(level)),
-                                 ttm_bo_type_kernel,
-                                 XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
-                                 XE_BO_FLAG_NEEDS_64K);
+       bo = xe_bo_create_pin_map_novm(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt),
+                                      PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+                                                 lmtt->ops->lmtt_pte_num(level)),
+                                      ttm_bo_type_kernel,
+                                      XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+                                      XE_BO_FLAG_NEEDS_64K, false);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto out_free_pt;
index 8f6c3ba47882863eb68a0922117eb781fd05ce09..6d52e0eb97f54f2110998776d47b7dc7a624bf03 100644 (file)
@@ -1340,9 +1340,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
        if (vm && vm->xef) /* userspace */
                bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
 
-       lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
-                                      ttm_bo_type_kernel,
-                                      bo_flags);
+       lrc->bo = xe_bo_create_pin_map_novm(xe, tile,
+                                           bo_size,
+                                           ttm_bo_type_kernel,
+                                           bo_flags, false);
        if (IS_ERR(lrc->bo))
                return PTR_ERR(lrc->bo);
 
index 3f0c8832120f33d6a2b61db7eb53f604453d3c15..6fad5d469629382172c5daac701a93d01a732c9e 100644 (file)
@@ -35,6 +35,7 @@
 #include "xe_sched_job.h"
 #include "xe_sync.h"
 #include "xe_trace_bo.h"
+#include "xe_validation.h"
 #include "xe_vm.h"
 #include "xe_vram.h"
 
@@ -173,7 +174,7 @@ static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm,
 }
 
 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
-                                struct xe_vm *vm)
+                                struct xe_vm *vm, struct drm_exec *exec)
 {
        struct xe_device *xe = tile_to_xe(tile);
        u16 pat_index = xe->pat.idx[XE_CACHE_WB];
@@ -200,7 +201,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                  num_entries * XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
                                  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-                                 XE_BO_FLAG_PAGETABLE);
+                                 XE_BO_FLAG_PAGETABLE, exec);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
@@ -404,6 +405,8 @@ int xe_migrate_init(struct xe_migrate *m)
        struct xe_tile *tile = m->tile;
        struct xe_gt *primary_gt = tile->primary_gt;
        struct xe_device *xe = tile_to_xe(tile);
+       struct xe_validation_ctx ctx;
+       struct drm_exec exec;
        struct xe_vm *vm;
        int err;
 
@@ -413,11 +416,16 @@ int xe_migrate_init(struct xe_migrate *m)
        if (IS_ERR(vm))
                return PTR_ERR(vm);
 
-       xe_vm_lock(vm, false);
-       err = xe_migrate_prepare_vm(tile, m, vm);
-       xe_vm_unlock(vm);
+       err = 0;
+       xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+               err = xe_vm_drm_exec_lock(vm, &exec);
+               drm_exec_retry_on_contention(&exec);
+               err = xe_migrate_prepare_vm(tile, m, vm, &exec);
+               drm_exec_retry_on_contention(&exec);
+               xe_validation_retry_on_oom(&ctx, &err);
+       }
        if (err)
-               goto err_out;
+               return err;
 
        if (xe->info.has_usm) {
                struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
index a188bad172ad298fd4556bfe02128c70bbbce530..a4894eb0d7f3f36e575e764bc1a84b0d7060f753 100644 (file)
@@ -883,9 +883,9 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
 {
        struct xe_bo *bo;
 
-       bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
-                                 size, ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
+       bo = xe_bo_create_pin_map_novm(stream->oa->xe, stream->gt->tile,
+                                      size, ttm_bo_type_kernel,
+                                      XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index 07ecf4132d41932f4c0b78f8d773e5b3cf3ef1ed..01eea8eb17795b5e65f3a24cde39a65d727ba594 100644 (file)
@@ -90,6 +90,7 @@ static void xe_pt_free(struct xe_pt *pt)
  * @vm: The vm to create for.
  * @tile: The tile to create for.
  * @level: The page-table level.
+ * @exec: The drm_exec object used to lock the vm.
  *
  * Allocate and initialize a single struct xe_pt metadata structure. Also
  * create the corresponding page-table bo, but don't initialize it. If the
@@ -101,7 +102,7 @@ static void xe_pt_free(struct xe_pt *pt)
  * error.
  */
 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
-                          unsigned int level)
+                          unsigned int level, struct drm_exec *exec)
 {
        struct xe_pt *pt;
        struct xe_bo *bo;
@@ -125,9 +126,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
                bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
 
        pt->level = level;
+
+       drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
        bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
                                  ttm_bo_type_kernel,
-                                 bo_flags);
+                                 bo_flags, exec);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto err_kfree;
@@ -591,7 +594,8 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
        if (covers || !*child) {
                u64 flags = 0;
 
-               xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
+               xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1,
+                                       xe_vm_validation_exec(vm));
                if (IS_ERR(xe_child))
                        return PTR_ERR(xe_child);
 
index 5ecf003d513c04a170e7e2649d5609f0dad37b4c..4daeebaab5a1f226581ece37ea1ccd75476d25e6 100644 (file)
@@ -10,6 +10,7 @@
 #include "xe_pt_types.h"
 
 struct dma_fence;
+struct drm_exec;
 struct xe_bo;
 struct xe_device;
 struct xe_exec_queue;
@@ -29,7 +30,7 @@ struct xe_vma_ops;
 unsigned int xe_pt_shift(unsigned int level);
 
 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
-                          unsigned int level);
+                          unsigned int level, struct drm_exec *exec);
 
 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
                          struct xe_pt *pt);
index ca95f2a4d4ef5def6f41a4411406e5ab06322114..e60526e30030945a253c7dd9cf40e900f2180ac3 100644 (file)
@@ -54,8 +54,9 @@ static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
         * Each termination is 16 DWORDS, so 4K is enough to contain a
         * termination for each sessions.
         */
-       bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+       bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
+                                      XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT,
+                                      false);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto out_queue;
@@ -87,7 +88,9 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
 {
        struct xe_tile *tile = gt_to_tile(gt);
        struct xe_device *xe = tile_to_xe(tile);
+       struct xe_validation_ctx ctx;
        struct xe_hw_engine *hwe;
+       struct drm_exec exec;
        struct xe_vm *vm;
        struct xe_bo *bo;
        struct xe_exec_queue *q;
@@ -106,15 +109,26 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
                return PTR_ERR(vm);
 
        /* We allocate a single object for the batch and the in/out memory */
-       xe_vm_lock(vm, false);
-       bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
-                                 ttm_bo_type_kernel,
-                                 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
-       xe_vm_unlock(vm);
-       if (IS_ERR(bo)) {
-               err = PTR_ERR(bo);
-               goto vm_out;
+
+       xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags){}, err) {
+               err = xe_vm_drm_exec_lock(vm, &exec);
+               drm_exec_retry_on_contention(&exec);
+               if (err)
+                       break;
+
+               bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
+                                         ttm_bo_type_kernel,
+                                         XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED |
+                                         XE_BO_FLAG_NEEDS_UC, &exec);
+               drm_exec_retry_on_contention(&exec);
+               if (IS_ERR(bo)) {
+                       err = PTR_ERR(bo);
+                       xe_validation_retry_on_oom(&ctx, &err);
+                       break;
+               }
        }
+       if (err)
+               goto vm_out;
 
        fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
        if (IS_ERR(fence)) {
index 785e81cf023d40b3d81b5ecb9374268daf7e5f3f..7b04b392b7296b570d4bec5d954c990845f73360 100644 (file)
@@ -1370,6 +1370,7 @@ static void vm_destroy_work_func(struct work_struct *w);
  * @xe: xe device.
  * @tile: tile to set up for.
  * @vm: vm to set up for.
+ * @exec: The struct drm_exec object used to lock the vm resv.
  *
  * Sets up a pagetable tree with one page-table per level and a single
  * leaf PTE. All pagetable entries point to the single page-table or,
@@ -1379,20 +1380,19 @@ static void vm_destroy_work_func(struct work_struct *w);
  * Return: 0 on success, negative error code on error.
  */
 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
-                               struct xe_vm *vm)
+                               struct xe_vm *vm, struct drm_exec *exec)
 {
        u8 id = tile->id;
        int i;
 
        for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
-               vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
+               vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec);
                if (IS_ERR(vm->scratch_pt[id][i])) {
                        int err = PTR_ERR(vm->scratch_pt[id][i]);
 
                        vm->scratch_pt[id][i] = NULL;
                        return err;
                }
-
                xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
        }
 
@@ -1420,9 +1420,26 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
        }
 }
 
+static void xe_vm_pt_destroy(struct xe_vm *vm)
+{
+       struct xe_tile *tile;
+       u8 id;
+
+       xe_vm_assert_held(vm);
+
+       for_each_tile(tile, vm->xe, id) {
+               if (vm->pt_root[id]) {
+                       xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+                       vm->pt_root[id] = NULL;
+               }
+       }
+}
+
 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 {
        struct drm_gem_object *vm_resv_obj;
+       struct xe_validation_ctx ctx;
+       struct drm_exec exec;
        struct xe_vm *vm;
        int err, number_tiles = 0;
        struct xe_tile *tile;
@@ -1507,49 +1524,68 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 
        drm_gem_object_put(vm_resv_obj);
 
-       err = xe_vm_lock(vm, true);
-       if (err)
-               goto err_close;
+       err = 0;
+       xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+                           err) {
+               err = xe_vm_drm_exec_lock(vm, &exec);
+               drm_exec_retry_on_contention(&exec);
 
-       if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
-               vm->flags |= XE_VM_FLAG_64K;
+               if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+                       vm->flags |= XE_VM_FLAG_64K;
 
-       for_each_tile(tile, xe, id) {
-               if (flags & XE_VM_FLAG_MIGRATION &&
-                   tile->id != XE_VM_FLAG_TILE_ID(flags))
-                       continue;
+               for_each_tile(tile, xe, id) {
+                       if (flags & XE_VM_FLAG_MIGRATION &&
+                           tile->id != XE_VM_FLAG_TILE_ID(flags))
+                               continue;
 
-               vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
-               if (IS_ERR(vm->pt_root[id])) {
-                       err = PTR_ERR(vm->pt_root[id]);
-                       vm->pt_root[id] = NULL;
-                       goto err_unlock_close;
+                       vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level,
+                                                      &exec);
+                       if (IS_ERR(vm->pt_root[id])) {
+                               err = PTR_ERR(vm->pt_root[id]);
+                               vm->pt_root[id] = NULL;
+                               xe_vm_pt_destroy(vm);
+                               drm_exec_retry_on_contention(&exec);
+                               xe_validation_retry_on_oom(&ctx, &err);
+                               break;
+                       }
                }
-       }
+               if (err)
+                       break;
 
-       if (xe_vm_has_scratch(vm)) {
-               for_each_tile(tile, xe, id) {
-                       if (!vm->pt_root[id])
-                               continue;
+               if (xe_vm_has_scratch(vm)) {
+                       for_each_tile(tile, xe, id) {
+                               if (!vm->pt_root[id])
+                                       continue;
 
-                       err = xe_vm_create_scratch(xe, tile, vm);
+                               err = xe_vm_create_scratch(xe, tile, vm, &exec);
+                               if (err) {
+                                       xe_vm_free_scratch(vm);
+                                       xe_vm_pt_destroy(vm);
+                                       drm_exec_retry_on_contention(&exec);
+                                       xe_validation_retry_on_oom(&ctx, &err);
+                                       break;
+                               }
+                       }
                        if (err)
-                               goto err_unlock_close;
+                               break;
+                       vm->batch_invalidate_tlb = true;
                }
-               vm->batch_invalidate_tlb = true;
-       }
 
-       if (vm->flags & XE_VM_FLAG_LR_MODE)
-               vm->batch_invalidate_tlb = false;
+               if (vm->flags & XE_VM_FLAG_LR_MODE) {
+                       INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+                       vm->batch_invalidate_tlb = false;
+               }
 
-       /* Fill pt_root after allocating scratch tables */
-       for_each_tile(tile, xe, id) {
-               if (!vm->pt_root[id])
-                       continue;
+               /* Fill pt_root after allocating scratch tables */
+               for_each_tile(tile, xe, id) {
+                       if (!vm->pt_root[id])
+                               continue;
 
-               xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+                       xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+               }
        }
-       xe_vm_unlock(vm);
+       if (err)
+               goto err_close;
 
        /* Kernel migration VM shouldn't have a circular loop.. */
        if (!(flags & XE_VM_FLAG_MIGRATION)) {
@@ -1582,7 +1618,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
                                      &xe->usm.next_asid, GFP_KERNEL);
                up_write(&xe->usm.lock);
                if (err < 0)
-                       goto err_unlock_close;
+                       goto err_close;
 
                vm->usm.asid = asid;
        }
@@ -1591,8 +1627,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 
        return vm;
 
-err_unlock_close:
-       xe_vm_unlock(vm);
 err_close:
        xe_vm_close_and_put(vm);
        return ERR_PTR(err);
@@ -1725,13 +1759,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
         * destroy the pagetables immediately.
         */
        xe_vm_free_scratch(vm);
-
-       for_each_tile(tile, xe, id) {
-               if (vm->pt_root[id]) {
-                       xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
-                       vm->pt_root[id] = NULL;
-               }
-       }
+       xe_vm_pt_destroy(vm);
        xe_vm_unlock(vm);
 
        /*
@@ -3781,7 +3809,6 @@ release_vm_lock:
  */
 int xe_vm_lock(struct xe_vm *vm, bool intr)
 {
-       struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
        int ret;
 
        if (intr)
@@ -3789,9 +3816,6 @@ int xe_vm_lock(struct xe_vm *vm, bool intr)
        else
                ret = dma_resv_lock(xe_vm_resv(vm), NULL);
 
-       if (!ret)
-               xe_vm_set_validation_exec(vm, exec);
-
        return ret;
 }
 
@@ -3803,7 +3827,6 @@ int xe_vm_lock(struct xe_vm *vm, bool intr)
  */
 void xe_vm_unlock(struct xe_vm *vm)
 {
-       xe_vm_set_validation_exec(vm, NULL);
        dma_resv_unlock(xe_vm_resv(vm));
 }