drm/xe: Add timeout to preempt fences
author    Matthew Brost <matthew.brost@intel.com>
          Wed, 26 Jun 2024 00:41:37 +0000 (17:41 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 10 Oct 2024 10:03:30 +0000 (12:03 +0200)
[ Upstream commit 627c961d672d3304564455ba471f5e4405170eec ]

To adhere to the dma-fencing rule that fences must signal within a
reasonable amount of time, add a 5 second timeout to preempt fences. If
this timeout occurs, kill the associated VM, as this is fatal to the VM.
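
As an illustration of the resulting flow, here is a minimal sketch
(simplified, not the actual driver code; the function name below is made
up, while the struct, field, and API names are taken from the patch):

  /* Illustrative sketch only; see xe_guc_submit.c and xe_preempt_fence.c
   * below for the real implementation.
   */
  static void preempt_fence_flow_sketch(struct xe_preempt_fence *pfence)
  {
          struct xe_exec_queue *q = pfence->q;

          /* suspend_wait() now returns an int and gives up after 5 seconds
           * (wait_event_timeout() with HZ * 5) instead of blocking forever.
           */
          int err = q->ops->suspend_wait(q);

          if (err)
                  /* Typically -ETIME; the rebind worker sees this error on
                   * the fence and kills the VM.
                   */
                  dma_fence_set_error(&pfence->base, err);

          /* The fence signals either way, keeping dma-fence rules intact. */
          dma_fence_signal(&pfence->base);
  }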

v2:
 - Add comment for smp_wmb (Checkpatch)
 - Fix kernel doc typo (Inspection)
 - Add comment for killed check (Niranjana)
v3:
 - Drop smp_wmb (Matthew Auld)
 - Don't take vm->lock in preempt fence worker (Matthew Auld)
 - Drop RB given changes to patch
v4:
 - Add WRITE/READ_ONCE (Niranjana)
 - Don't export xe_vm_kill (Niranjana)

Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Tested-by: Stuart Summers <stuart.summers@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240626004137.4060806-1-matthew.brost@intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_execlist.c
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_preempt_fence.c
drivers/gpu/drm/xe/xe_vm.c

diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index f6ee0ae80fd63dbe63ba461ec60c029bc5534bb5..fc2a1a20b7e4b0ebcfb87b27bc00c89607c4afd9 100644
@@ -169,9 +169,11 @@ struct xe_exec_queue_ops {
        int (*suspend)(struct xe_exec_queue *q);
        /**
         * @suspend_wait: Wait for an exec queue to suspend executing, should be
-        * call after suspend.
+        * called after suspend. In the dma-fencing path this must return
+        * within a reasonable amount of time; an -ETIME return indicates an
+        * error waiting for suspend, resulting in the associated VM being killed.
         */
-       void (*suspend_wait)(struct xe_exec_queue *q);
+       int (*suspend_wait)(struct xe_exec_queue *q);
        /**
         * @resume: Resume exec queue execution, exec queue must be in a suspended
         * state and dma fence returned from most recent suspend call must be
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d69140b733b4224281d2612250f1f..7502e3486eafa7d5f5f0e024f752f8e26a06a3b8 100644
@@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
        return 0;
 }
 
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
 
 {
        /* NIY */
+       return 0;
 }
 
 static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 64b3a7848f4ab6deb53aa1d8472bec64b640d8e7..fd4ac3899edd24eb77f6f1616f8aa949c3fec8af 100644
@@ -1318,6 +1318,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
        kfree(msg);
 }
 
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+       if (!q->guc->suspend_pending)
+               return;
+
+       WRITE_ONCE(q->guc->suspend_pending, false);
+       wake_up(&q->guc->suspend_wait);
+}
+
 static void suspend_fence_signal(struct xe_exec_queue *q)
 {
        struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1327,9 +1336,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
                  guc_read_stopped(guc));
        xe_assert(xe, q->guc->suspend_pending);
 
-       q->guc->suspend_pending = false;
-       smp_wmb();
-       wake_up(&q->guc->suspend_wait);
+       __suspend_fence_signal(q);
 }
 
 static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1486,6 +1493,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
 {
        trace_xe_exec_queue_kill(q);
        set_exec_queue_killed(q);
+       __suspend_fence_signal(q);
        xe_guc_exec_queue_trigger_cleanup(q);
 }
 
@@ -1584,12 +1592,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
        return 0;
 }
 
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
 {
        struct xe_guc *guc = exec_queue_to_guc(q);
+       int ret;
+
+       /*
+        * Likely don't need to check exec_queue_killed() as we clear
+        * suspend_pending upon kill, but to be paranoid about races in which
+        * suspend_pending is set after kill, also check for kill here.
+        */
+       ret = wait_event_timeout(q->guc->suspend_wait,
+                                !READ_ONCE(q->guc->suspend_pending) ||
+                                exec_queue_killed(q) ||
+                                guc_read_stopped(guc),
+                                HZ * 5);
 
-       wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
-                  guc_read_stopped(guc));
+       if (!ret) {
+               xe_gt_warn(guc_to_gt(guc),
+                          "Suspend fence, guc_id=%d, failed to respond",
+                          q->guc->id);
+               /* XXX: Trigger GT reset? */
+               return -ETIME;
+       }
+
+       return 0;
 }
 
 static void guc_exec_queue_resume(struct xe_exec_queue *q)
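
Side note on the return convention used by the timed wait above (sketch
only, with a made-up helper name; wait_event_timeout() itself is the
standard kernel macro):

  /* wait_event_timeout(wq, cond, timeout) returns 0 only if the timeout
   * elapsed with the condition still false, and the remaining jiffies
   * (at least 1) otherwise; hence the "!ret" check above maps a
   * timed-out suspend straight to -ETIME.
   */
  static int timed_wait_sketch(wait_queue_head_t *wq, bool *done)
  {
          long ret = wait_event_timeout(*wq, READ_ONCE(*done), HZ * 5);

          return ret ? 0 : -ETIME;
  }
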
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index c453f45328b1c57eac65f32341aee489bc071379..83fbeea5aa201a597cf9703834f78c4aab95d2ae 100644
@@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
                container_of(w, typeof(*pfence), preempt_work);
        struct xe_exec_queue *q = pfence->q;
 
-       if (pfence->error)
+       if (pfence->error) {
                dma_fence_set_error(&pfence->base, pfence->error);
-       else
-               q->ops->suspend_wait(q);
+       } else if (!q->ops->reset_status(q)) {
+               int err = q->ops->suspend_wait(q);
+
+               if (err)
+                       dma_fence_set_error(&pfence->base, err);
+       } else {
+               dma_fence_set_error(&pfence->base, -ENOENT);
+       }
 
        dma_fence_signal(&pfence->base);
        /*
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index b49bee0dfac5da481bba23c354b873ac7d79e2ef..743c8d79d79d21b5f2d13b64f8b186850c42101e 100644
@@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
                if (q->lr.pfence) {
                        long timeout = dma_fence_wait(q->lr.pfence, false);
 
-                       if (timeout < 0)
+                       /* Only -ETIME on the fence indicates the VM needs to be killed */
+                       if (timeout < 0 || q->lr.pfence->error == -ETIME)
                                return -ETIME;
+
                        dma_fence_put(q->lr.pfence);
                        q->lr.pfence = NULL;
                }
@@ -311,6 +313,14 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
 
 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
 
+/*
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicating the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, marking the VM as no longer usable.
+ * If in preempt fence mode, also kill all exec queues attached to the VM.
+ */
 static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
 {
        struct xe_exec_queue *q;