drm/xe: Wait on in-syncs when switching to dma-fence mode
author     Matthew Brost <matthew.brost@intel.com>
           Fri, 12 Dec 2025 18:28:45 +0000 (10:28 -0800)
committer  Matthew Brost <matthew.brost@intel.com>
           Mon, 15 Dec 2025 22:02:54 +0000 (14:02 -0800)
If a dma-fence submission has in-fences and pagefault queues are running
work, there is little incentive to kick the pagefault queues off the
hardware until the dma-fence submission is ready to run. Therefore, wait
on the in-fences of the dma-fence submission before removing the
pagefault queues from the hardware.
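
In rough terms, the resulting flow in the exec path looks like the
sketch below (simplified from the patch that follows, not the exact
upstream code; error handling trimmed):

        /* dma-fence exec: pass the in-syncs down so the mode switch can wait */
        err = xe_hw_engine_group_get_mode(group, EXEC_MODE_DMA_FENCE,
                                          &previous_mode, syncs,
                                          num_in_sync ? num_syncs : 0);
        /*
         * Inside get_mode(): if the switch would suspend a non-idle
         * pagefault queue while this job still has unsignaled in-fences,
         * the suspend path returns -EAGAIN; get_mode() then drops the
         * lock, waits interruptibly on the in-fences and retries the
         * switch with has_deps == false.
         */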

v2:
 - Fix kernel doc (CI)
 - Don't wait under lock (Thomas)
 - Make wait interruptible

Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-6-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_hw_engine_group.c
drivers/gpu/drm/xe/xe_hw_engine_group.h
drivers/gpu/drm/xe/xe_sync.c
drivers/gpu/drm/xe/xe_sync.h

diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index fd9480031750694b80b605ef4b2668548cc9c2dd..730a5c9c26374bc02a2910de25f4dde6cdc99d0b 100644
@@ -121,7 +121,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
        struct drm_exec *exec = &vm_exec.exec;
-       u32 i, num_syncs, num_ufence = 0;
+       u32 i, num_syncs, num_in_sync = 0, num_ufence = 0;
        struct xe_validation_ctx ctx;
        struct xe_sched_job *job;
        struct xe_vm *vm;
@@ -183,6 +183,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
                if (xe_sync_is_ufence(&syncs[num_syncs]))
                        num_ufence++;
+
+               if (!num_in_sync && xe_sync_needs_wait(&syncs[num_syncs]))
+                       num_in_sync++;
        }
 
        if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
@@ -203,7 +206,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        mode = xe_hw_engine_group_find_exec_mode(q);
 
        if (mode == EXEC_MODE_DMA_FENCE) {
-               err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
+               err = xe_hw_engine_group_get_mode(group, mode, &previous_mode,
+                                                 syncs, num_in_sync ?
+                                                 num_syncs : 0);
                if (err)
                        goto err_syncs;
        }
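
A note on the all-or-nothing ternary above: if at least one in-sync
still needs a wait, the full syncs array is handed to
xe_hw_engine_group_get_mode() so every entry can be waited on
(out-syncs are skipped inside xe_sync_entry_wait(), and
already-signaled fences return immediately); otherwise 0 is passed
and the mode switch treats the job as dependency-free. An equivalent
restatement of the gating logic (sketch only, using a bool instead of
the counter):

        bool any_in_sync = false;

        for (i = 0; i < num_syncs; ++i)
                any_in_sync |= xe_sync_needs_wait(&syncs[i]);

        /* Hand over every sync, or none at all. */
        err = xe_hw_engine_group_get_mode(group, mode, &previous_mode,
                                          syncs, any_in_sync ? num_syncs : 0);
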
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 4d9263a1a20807efd81df72347869cc12855c860..40ce5d5f543c6e3464f0f459d4694f11a874fff3 100644
@@ -11,6 +11,7 @@
 #include "xe_gt.h"
 #include "xe_gt_stats.h"
 #include "xe_hw_engine_group.h"
+#include "xe_sync.h"
 #include "xe_vm.h"
 
 static void
@@ -21,7 +22,8 @@ hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
        int err;
        enum xe_hw_engine_group_execution_mode previous_mode;
 
-       err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
+       err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode,
+                                         NULL, 0);
        if (err)
                return;
 
@@ -189,10 +191,12 @@ void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group
 /**
  * xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
  * @group: The hw engine group
+ * @has_deps: dma-fence job triggering suspend has dependencies
  *
  * Return: 0 on success, negative error code on error.
  */
-static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
+static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group,
+                                                      bool has_deps)
 {
        int err;
        struct xe_exec_queue *q;
@@ -201,11 +205,18 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
        lockdep_assert_held_write(&group->mode_sem);
 
        list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+               bool idle_skip_suspend;
+
                if (!xe_vm_in_fault_mode(q->vm))
                        continue;
 
+               idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
+               if (!idle_skip_suspend && has_deps)
+                       return -EAGAIN;
+
                xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
-               need_resume |= !xe_exec_queue_idle_skip_suspend(q);
+
+               need_resume |= !idle_skip_suspend;
                q->ops->suspend(q);
        }
 
@@ -258,7 +269,7 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
        return 0;
 }
 
-static int switch_mode(struct xe_hw_engine_group *group)
+static int switch_mode(struct xe_hw_engine_group *group, bool has_deps)
 {
        int err = 0;
        enum xe_hw_engine_group_execution_mode new_mode;
@@ -268,7 +279,8 @@ static int switch_mode(struct xe_hw_engine_group *group)
        switch (group->cur_mode) {
        case EXEC_MODE_LR:
                new_mode = EXEC_MODE_DMA_FENCE;
-               err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
+               err = xe_hw_engine_group_suspend_faulting_lr_jobs(group,
+                                                                 has_deps);
                break;
        case EXEC_MODE_DMA_FENCE:
                new_mode = EXEC_MODE_LR;
@@ -284,19 +296,36 @@ static int switch_mode(struct xe_hw_engine_group *group)
        return 0;
 }
 
+static int wait_syncs(struct xe_sync_entry *syncs, int num_syncs)
+{
+       int err, i;
+
+       for (i = 0; i < num_syncs; ++i) {
+               err = xe_sync_entry_wait(syncs + i);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 /**
  * xe_hw_engine_group_get_mode() - Get the group to execute in the new mode
  * @group: The hw engine group
  * @new_mode: The new execution mode
  * @previous_mode: Pointer to the previous mode provided for use by caller
+ * @syncs: Syncs from exec IOCTL
+ * @num_syncs: Number of syncs from exec IOCTL
  *
  * Return: 0 if successful, -EINTR if locking failed.
  */
 int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
                                enum xe_hw_engine_group_execution_mode new_mode,
-                               enum xe_hw_engine_group_execution_mode *previous_mode)
+                               enum xe_hw_engine_group_execution_mode *previous_mode,
+                               struct xe_sync_entry *syncs, int num_syncs)
 __acquires(&group->mode_sem)
 {
+       bool has_deps = !!num_syncs;
        int err = down_read_interruptible(&group->mode_sem);
 
        if (err)
@@ -306,15 +335,25 @@ __acquires(&group->mode_sem)
 
        if (new_mode != group->cur_mode) {
                up_read(&group->mode_sem);
+retry:
                err = down_write_killable(&group->mode_sem);
                if (err)
                        return err;
 
                if (new_mode != group->cur_mode) {
-                       err = switch_mode(group);
+                       err = switch_mode(group, has_deps);
                        if (err) {
                                up_write(&group->mode_sem);
-                               return err;
+
+                               if (err != -EAGAIN)
+                                       return err;
+
+                               err = wait_syncs(syncs, num_syncs);
+                               if (err)
+                                       return err;
+
+                               has_deps = false;
+                               goto retry;
                        }
                }
                downgrade_write(&group->mode_sem);
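
The locking here follows the standard rwsem upgrade pattern: an
rw_semaphore has no atomic read-to-write upgrade, so the code drops the
read lock, takes the write lock, and must re-check the mode, since
another thread may have switched it in the window between up_read() and
down_write_killable(). A generic sketch of the pattern (not the exact
xe code):

        down_read(&sem);
        if (needs_change) {
                up_read(&sem);
                down_write(&sem);       /* others may have run in between, */
                if (needs_change)       /* so re-check under the write lock */
                        do_change();
                downgrade_write(&sem);  /* atomic demotion back to a read lock */
        }
        /* ... read-side critical section ... */
        up_read(&sem);
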
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.h b/drivers/gpu/drm/xe/xe_hw_engine_group.h
index 797ee81acbf25952480f419b78db4e79200adbd6..8b17ccd30b70a85a9d525f4c22fc07025660df6c 100644
@@ -11,6 +11,7 @@
 struct drm_device;
 struct xe_exec_queue;
 struct xe_gt;
+struct xe_sync_entry;
 
 int xe_hw_engine_setup_groups(struct xe_gt *gt);
 
@@ -19,7 +20,8 @@ void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct
 
 int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
                                enum xe_hw_engine_group_execution_mode new_mode,
-                               enum xe_hw_engine_group_execution_mode *previous_mode);
+                               enum xe_hw_engine_group_execution_mode *previous_mode,
+                               struct xe_sync_entry *syncs, int num_syncs);
 void xe_hw_engine_group_put(struct xe_hw_engine_group *group);
 
 enum xe_hw_engine_group_execution_mode
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 1fc4fa278b787ddbce73009e24ece8450067573b..ee1344a880b950c4fcbc3a17e5a6b21e7c0b3002 100644
@@ -228,6 +228,34 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
        return 0;
 }
 
+/**
+ * xe_sync_entry_wait() - Wait on in-sync
+ * @sync: Sync object
+ *
+ * If the sync is an in-sync (input fence), wait for it to signal.
+ *
+ * Return: 0 on success, -ERESTARTSYS on failure (interruption)
+ */
+int xe_sync_entry_wait(struct xe_sync_entry *sync)
+{
+       if (sync->flags & DRM_XE_SYNC_FLAG_SIGNAL)
+               return 0;
+
+       return dma_fence_wait(sync->fence, true);
+}
+
+/**
+ * xe_sync_needs_wait() - Sync needs a wait (input dma-fence not signaled)
+ * @sync: Sync object
+ *
+ * Return: True if sync needs a wait, False otherwise
+ */
+bool xe_sync_needs_wait(struct xe_sync_entry *sync)
+{
+       return !(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL) &&
+               !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &sync->fence->flags);
+}
+
 void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
 {
        if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
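
Note that xe_sync_needs_wait() tests DMA_FENCE_FLAG_SIGNALED_BIT
directly rather than calling dma_fence_is_signaled(); the latter also
invokes the fence's ->signaled() callback and may signal the fence as a
side effect. The raw bit test is the cheaper, conservative choice: at
worst it reports a wait for a fence that would already poll as
signaled, which only costs a (correct) no-op wait later. A sketch of
how the two helpers pair up in a caller, mirroring the exec path above:

        /* Count unsignaled in-fences, then wait them all out if any. */
        for (i = 0; i < num_syncs; ++i)
                if (xe_sync_needs_wait(&syncs[i]))
                        num_in_sync++;

        for (i = 0; num_in_sync && i < num_syncs; ++i) {
                err = xe_sync_entry_wait(&syncs[i]);
                if (err)        /* -ERESTARTSYS: interrupted by a signal */
                        return err;
        }
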
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 51f2d803e9776600a054022bc3dee74464a30019..6b949194acff112ac4c0901a1e72f8c9c7abf467 100644
@@ -29,6 +29,8 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
                           struct xe_sched_job *job);
 void xe_sync_entry_signal(struct xe_sync_entry *sync,
                          struct dma_fence *fence);
+int xe_sync_entry_wait(struct xe_sync_entry *sync);
+bool xe_sync_needs_wait(struct xe_sync_entry *sync);
 void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
 struct dma_fence *
 xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,