drm/xe: Add timeout to preempt fences
author		Matthew Brost <matthew.brost@intel.com>
		Wed, 26 Jun 2024 00:41:37 +0000 (17:41 -0700)
committer	Matthew Brost <matthew.brost@intel.com>
		Wed, 3 Jul 2024 22:27:50 +0000 (15:27 -0700)
To adhere to the dma-fencing rule that fences must signal within a
reasonable amount of time, add a 5 second timeout to preempt fences. If
this timeout occurs, kill the associated VM, as the timeout is fatal to
the VM.
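
For context, a minimal sketch of the pattern this change adopts (the
names wq, done, and fence are illustrative, not the driver's actual
code): bound the wait, convert a timeout into a fence error, and always
signal the fence.

	/* Hypothetical sketch: a bounded wait that still signals the fence. */
	if (!wait_event_timeout(wq, READ_ONCE(done), HZ * 5))
		dma_fence_set_error(fence, -ETIME);	/* timed out: mark fence errored */
	dma_fence_signal(fence);			/* the fence must signal regardless */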

v2:
 - Add comment for smp_wmb (Checkpatch)
 - Fix kernel doc typo (Inspection)
 - Add comment for killed check (Niranjana)
v3:
 - Drop smp_wmb (Matthew Auld)
 - Don't take vm->lock in preempt fence worker (Matthew Auld)
 - Drop RB given changes to patch
v4:
 - Add WRITE/READ_ONCE (Niranjana)
 - Don't export xe_vm_kill (Niranjana)

Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Tested-by: Stuart Summers <stuart.summers@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240626004137.4060806-1-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_execlist.c
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_preempt_fence.c
drivers/gpu/drm/xe/xe_vm.c

index 201588ec33c35955ba6223a960d94794653b6a8d..ded9f93964296c40af775558c86cdbbf4acff2ea 100644 (file)
@@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
        int (*suspend)(struct xe_exec_queue *q);
        /**
         * @suspend_wait: Wait for an exec queue to suspend executing, should be
-        * call after suspend.
+        * called after suspend. In the dma-fencing path this must return within
+        * a reasonable amount of time. An -ETIME return indicates an error
+        * waiting for suspend, resulting in the associated VM getting killed.
         */
-       void (*suspend_wait)(struct xe_exec_queue *q);
+       int (*suspend_wait)(struct xe_exec_queue *q);
        /**
         * @resume: Resume exec queue execution, exec queue must be in a suspended
         * state and dma fence returned from most recent suspend call must be
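
A hedged sketch of a caller honoring this new contract (hypothetical;
in the driver the error is routed through the preempt fence rather than
a direct call into the kill path):

	int err = q->ops->suspend_wait(q);
	if (err == -ETIME)
		kill_vm(vm);	/* hypothetical kill path; the timeout is fatal to the VM */
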
index db906117db6d69140b733b4224281d2612250f1f..7502e3486eafa7d5f5f0e024f752f8e26a06a3b8 100644 (file)
@@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
        return 0;
 }
 
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
 
 {
        /* NIY */
+       return 0;
 }
 
 static void execlist_exec_queue_resume(struct xe_exec_queue *q)
index 373447758a60253dde0c890802a2dd29379f8b91..6392381e8e697ceab0de445e9d11bda98b4eed23 100644 (file)
@@ -1301,6 +1301,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
        kfree(msg);
 }
 
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+       if (!q->guc->suspend_pending)
+               return;
+
+       WRITE_ONCE(q->guc->suspend_pending, false);
+       wake_up(&q->guc->suspend_wait);
+}
+
 static void suspend_fence_signal(struct xe_exec_queue *q)
 {
        struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1310,9 +1319,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
                  guc_read_stopped(guc));
        xe_assert(xe, q->guc->suspend_pending);
 
-       q->guc->suspend_pending = false;
-       smp_wmb();
-       wake_up(&q->guc->suspend_wait);
+       __suspend_fence_signal(q);
 }
 
 static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1465,6 +1472,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
 {
        trace_xe_exec_queue_kill(q);
        set_exec_queue_killed(q);
+       __suspend_fence_signal(q);
        xe_guc_exec_queue_trigger_cleanup(q);
 }
 
@@ -1561,12 +1569,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
        return 0;
 }
 
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
 {
        struct xe_guc *guc = exec_queue_to_guc(q);
+       int ret;
+
+       /*
+        * Likely don't need to check exec_queue_killed() as we clear
+        * suspend_pending upon kill, but to be paranoid about races in which
+        * suspend_pending is set after kill, also check for kill here.
+        */
+       ret = wait_event_timeout(q->guc->suspend_wait,
+                                !READ_ONCE(q->guc->suspend_pending) ||
+                                exec_queue_killed(q) ||
+                                guc_read_stopped(guc),
+                                HZ * 5);
 
-       wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
-                  guc_read_stopped(guc));
+       if (!ret) {
+               xe_gt_warn(guc_to_gt(guc),
+                          "Suspend fence, guc_id=%d, failed to respond",
+                          q->guc->id);
+               /* XXX: Trigger GT reset? */
+               return -ETIME;
+       }
+
+       return 0;
 }
 
 static void guc_exec_queue_resume(struct xe_exec_queue *q)
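
A note on the wait_event_timeout() semantics the hunk above relies on:
it returns 0 if the condition is still false once the timeout elapses,
and a positive value (remaining jiffies, or 1 if the condition turned
true right at expiry) otherwise, so a zero return maps directly to
-ETIME. A minimal illustration with a hypothetical wait queue and
condition:

	long remaining = wait_event_timeout(wq, READ_ONCE(cond), HZ * 5);
	if (!remaining)		/* condition still false after 5 seconds */
		return -ETIME;
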
index e8b8ae5c6485e3798d93e3f8aa072e482e850cfb..56e709d2fb30e514f5b47b5c3ce24c0e90555c9c 100644 (file)
@@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
                container_of(w, typeof(*pfence), preempt_work);
        struct xe_exec_queue *q = pfence->q;
 
-       if (pfence->error)
+       if (pfence->error) {
                dma_fence_set_error(&pfence->base, pfence->error);
-       else
-               q->ops->suspend_wait(q);
+       } else if (!q->ops->reset_status(q)) {
+               int err = q->ops->suspend_wait(q);
+
+               if (err)
+                       dma_fence_set_error(&pfence->base, err);
+       } else {
+               dma_fence_set_error(&pfence->base, -ENOENT);
+       }
 
        dma_fence_signal(&pfence->base);
        /*
index 5b166fa03684e24e05e0309976ee2d378af5dff2..0c764647a552dee91cfcbfbbc3b98487dad988eb 100644 (file)
@@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
                if (q->lr.pfence) {
                        long timeout = dma_fence_wait(q->lr.pfence, false);
 
-                       if (timeout < 0)
+                       /* Only -ETIME on the fence indicates the VM needs to be killed */
+                       if (timeout < 0 || q->lr.pfence->error == -ETIME)
                                return -ETIME;
+
                        dma_fence_put(q->lr.pfence);
                        q->lr.pfence = NULL;
                }
@@ -311,6 +313,14 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
 
 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
 
+/*
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicating the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer
+ * available for use. If in preempt fence mode, also kill all exec queues
+ * attached to the VM.
+ */
 static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
 {
        struct xe_exec_queue *q;