drm/xe: Attach last fence to TLB invalidation job queues
author    Matthew Brost <matthew.brost@intel.com>
          Fri, 31 Oct 2025 23:40:46 +0000 (16:40 -0700)
committer Matthew Brost <matthew.brost@intel.com>
          Tue, 4 Nov 2025 16:20:57 +0000 (08:20 -0800)
Add support for attaching the last fence to TLB invalidation job queues
to address serialization issues during bursts of unbind jobs. Ensure
that user fence signaling for a bind job reflects both the bind job
itself and the last fences of all related TLB invalidations. Maintain
submission order based solely on the state of the bind and TLB
invalidation queues.

Introduce support functions for last fence attachment to TLB
invalidation queues.

v3:
 - Fix assert in xe_exec_queue_tlb_inval_last_fence_set (CI)
 - Ensure migrate lock held for migrate queues (Testing)
v5:
 - Style nits (Thomas)
 - Rewrite commit message (Thomas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-3-matthew.brost@intel.com
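
For illustration, a minimal sketch (not part of this patch) of how a bind path
could gather the fences a user fence must reflect: the bind queue's last fence
plus the last fence of each TLB invalidation queue. The helper name
xe_vm_bind_composite_fence() and the error handling are hypothetical; only
xe_exec_queue_last_fence_get(), xe_exec_queue_tlb_inval_last_fence_get() and
for_each_tlb_inval() come from the driver/this series.

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

#include "xe_exec_queue.h"

static struct dma_fence *
xe_vm_bind_composite_fence(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;
	int i, n = 0;

	/* EXEC_QUEUE_FLAG_VM queues: last fences are protected by vm->lock */
	lockdep_assert_held(&vm->lock);

	/* One slot for the bind job, one per TLB invalidation queue */
	fences = kmalloc_array(1 + XE_EXEC_QUEUE_TLB_INVAL_COUNT,
			       sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	fences[n++] = xe_exec_queue_last_fence_get(q, vm);
	for_each_tlb_inval(i)
		fences[n++] = xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);

	/* dma_fence_array_create() takes ownership of fences[] on success */
	array = dma_fence_array_create(n, fences, dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (n--)
			dma_fence_put(fences[n]);
		kfree(fences);
		return ERR_PTR(-ENOMEM);
	}

	return &array->base;
}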
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_migrate.h
drivers/gpu/drm/xe/xe_vm.c

diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index e3414b337569c9e40f837b43823fc9c23637902c..aada199faf71862403241e6c7c704eac542f0e80 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -387,6 +387,7 @@ void xe_exec_queue_destroy(struct kref *ref)
 {
        struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
        struct xe_exec_queue *eq, *next;
+       int i;
 
        xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
 
@@ -397,6 +398,9 @@ void xe_exec_queue_destroy(struct kref *ref)
                xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
 
        xe_exec_queue_last_fence_put_unlocked(q);
+       for_each_tlb_inval(i)
+               xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);
+
        if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
                list_for_each_entry_safe(eq, next, &q->multi_gt_list,
                                         multi_gt_link)
@@ -1014,7 +1018,9 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
                                                    struct xe_vm *vm)
 {
-       if (q->flags & EXEC_QUEUE_FLAG_VM) {
+       if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
+               xe_migrate_job_lock_assert(q);
+       } else if (q->flags & EXEC_QUEUE_FLAG_VM) {
                lockdep_assert_held(&vm->lock);
        } else {
                xe_vm_assert_held(vm);
@@ -1113,6 +1119,7 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
                                  struct dma_fence *fence)
 {
        xe_exec_queue_last_fence_lockdep_assert(q, vm);
+       xe_assert(vm->xe, !dma_fence_is_container(fence));
 
        xe_exec_queue_last_fence_put(q, vm);
        q->last_fence = dma_fence_get(fence);
@@ -1141,6 +1148,100 @@ int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
        return err;
 }
 
+/**
+ * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind for
+ * @type: Either primary or media GT
+ */
+void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
+                                           struct xe_vm *vm,
+                                           unsigned int type)
+{
+       xe_exec_queue_last_fence_lockdep_assert(q, vm);
+       xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+                 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+
+       xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
+}
+
+/**
+ * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB
+ * invalidation fence unlocked
+ * @q: The exec queue
+ * @type: Either primary or media GT
+ *
+ * Only safe to be called from xe_exec_queue_destroy().
+ */
+void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
+                                                    unsigned int type)
+{
+       xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+                 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+
+       dma_fence_put(q->tlb_inval[type].last_fence);
+       q->tlb_inval[type].last_fence = NULL;
+}
+
+/**
+ * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind for
+ * @type: Either primary or media GT
+ *
+ * Get last fence, takes a ref
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
+                                                        struct xe_vm *vm,
+                                                        unsigned int type)
+{
+       struct dma_fence *fence;
+
+       xe_exec_queue_last_fence_lockdep_assert(q, vm);
+       xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+                 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+       xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
+                                     EXEC_QUEUE_FLAG_MIGRATE));
+
+       if (q->tlb_inval[type].last_fence &&
+           test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                    &q->tlb_inval[type].last_fence->flags))
+               xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
+
+       fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
+       dma_fence_get(fence);
+       return fence;
+}
+
+/**
+ * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind for
+ * @fence: The fence
+ * @type: Either primary or media GT
+ *
+ * Set the last fence for the TLB invalidation type on the queue. Takes a
+ * reference to the fence; xe_exec_queue_tlb_inval_last_fence_put() should
+ * be called when closing the queue.
+ */
+void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
+                                           struct xe_vm *vm,
+                                           struct dma_fence *fence,
+                                           unsigned int type)
+{
+       xe_exec_queue_last_fence_lockdep_assert(q, vm);
+       xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+                 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+       xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
+                                     EXEC_QUEUE_FLAG_MIGRATE));
+       xe_assert(vm->xe, !dma_fence_is_container(fence));
+
+       xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
+       q->tlb_inval[type].last_fence = dma_fence_get(fence);
+}
+
 /**
  * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
  * within all LRCs of a queue.
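
The new set/get helpers above document the locking and refcounting contract for
TLB invalidation last fences. As a minimal usage sketch (hypothetical caller,
helper and fence names, not part of this patch), a VM bind queue update could
look like:

#include "xe_exec_queue.h"

static void xe_vm_bind_record_fences(struct xe_exec_queue *q, struct xe_vm *vm,
				     struct dma_fence *bind_fence,
				     struct dma_fence *inval_fence[XE_EXEC_QUEUE_TLB_INVAL_COUNT])
{
	int i;

	/* EXEC_QUEUE_FLAG_VM queues: last fences are protected by vm->lock in write mode */
	lockdep_assert_held_write(&vm->lock);

	xe_exec_queue_last_fence_set(q, vm, bind_fence);
	for_each_tlb_inval(i)
		xe_exec_queue_tlb_inval_last_fence_set(q, vm, inval_fence[i], i);
}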
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index a4dfbe858bda2e384bb1cfac01e6771a3a1f7506..1ba7354b33d1b07504374d1cc5de031189d61a74 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -14,6 +14,10 @@ struct drm_file;
 struct xe_device;
 struct xe_file;
 
+#define for_each_tlb_inval(__i)        \
+       for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
+            __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)
+
 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
                                           u32 logical_mask, u16 width,
                                           struct xe_hw_engine *hw_engine, u32 flags,
@@ -86,6 +90,23 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
                                  struct dma_fence *fence);
 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
                                      struct xe_vm *vm);
+
+void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
+                                           struct xe_vm *vm,
+                                           unsigned int type);
+
+void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
+                                                    unsigned int type);
+
+struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
+                                                        struct xe_vm *vm,
+                                                        unsigned int type);
+
+void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
+                                           struct xe_vm *vm,
+                                           struct dma_fence *fence,
+                                           unsigned int type);
+
 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
 
 int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 9bda1148e17b5597e6c722b713f9275906ca9417..771ffe35cd0c6c54e6308643fdf69c6c56790aac 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -146,6 +146,11 @@ struct xe_exec_queue {
                 * dependency scheduler
                 */
                struct xe_dep_scheduler *dep_scheduler;
+               /**
+                * @last_fence: last fence for tlb invalidation, protected by
+                * vm->lock in write mode
+                */
+               struct dma_fence *last_fence;
        } tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];
 
        /** @pxp: PXP info tracking */
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 56a5804726e96864a4af7f8d26628c3a46a73814..5003e3c4dd1705f0612cd67fda6f4532fbd6dd5c 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -2333,6 +2333,20 @@ void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
                xe_vm_assert_held(q->vm);       /* User queues VM's should be locked */
 }
 
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+/**
+ * xe_migrate_job_lock_assert() - Assert that the queue's migrate job lock is held
+ * @q: Migrate queue
+ */
+void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
+{
+       struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
+
+       xe_gt_assert(q->gt, q == m->q);
+       lockdep_assert_held(&m->job_mutex);
+}
+#endif
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 #include "tests/xe_migrate.c"
 #endif
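
For migrate queues the protecting lock is the migrate job mutex rather than
vm->lock, which is what xe_migrate_job_lock_assert() checks above. A minimal
sketch of a caller honoring that rule (hypothetical helper and fence names,
not part of this patch):

#include "xe_exec_queue.h"
#include "xe_migrate.h"

static void xe_tlb_inval_record_migrate_fence(struct xe_migrate *m,
					      struct xe_exec_queue *q,
					      struct xe_vm *vm,
					      struct dma_fence *fence,
					      unsigned int type)
{
	/* Takes the migrate job lock for q, satisfying the lockdep assert */
	xe_migrate_job_lock(m, q);
	xe_exec_queue_tlb_inval_last_fence_set(q, vm, fence, type);
	xe_migrate_job_unlock(m, q);
}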
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 4fad324b625356c99d53da2e7fada4e996d06126..9b5791617f5e07001f09eb17ef948418345c5a0b 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -152,6 +152,14 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
 void xe_migrate_wait(struct xe_migrate *m);
 
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+void xe_migrate_job_lock_assert(struct xe_exec_queue *q);
+#else
+static inline void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
+{
+}
+#endif
+
 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);
 
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 6c67ab88a7c0fd232ab093b9a711befd21847b3b..7343f34757d249e5b139149f110dccd5270758c7 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1731,8 +1731,13 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 
        down_write(&vm->lock);
        for_each_tile(tile, xe, id) {
-               if (vm->q[id])
+               if (vm->q[id]) {
+                       int i;
+
                        xe_exec_queue_last_fence_put(vm->q[id], vm);
+                       for_each_tlb_inval(i)
+                               xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i);
+               }
        }
        up_write(&vm->lock);