drm/xe: Add a reason string to the devcoredump
Author:     John Harrison <John.C.Harrison@Intel.com>
AuthorDate: Thu, 28 Nov 2024 21:08:22 +0000 (13:08 -0800)
Commit:     John Harrison <John.C.Harrison@Intel.com>
CommitDate: Mon, 2 Dec 2024 23:50:41 +0000 (15:50 -0800)
There are debug level prints giving more information about the cause
of the hang immediately before core dumps are created. However, not
everyone has debug level prints enabled or saves the dmesg log at all.
So include that information in the dump file itself. Also, at least
one of those prints included the pid as well as the process name, so
include that in the capture too.

v2: Fix kvfree vs kfree and missing kernel-doc (review feedback from
Matthew Brost)
v3: Use GFP_ATOMIC instead of GFP_KERNEL.

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241128210824.3302147-2-John.C.Harrison@Intel.com
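An illustrative example of the new dump header this patch produces, with made-up values (the kernel version, timestamps, process and reason text will all vary; the line formats come from __xe_devcoredump_read() below):

**** Xe Device Coredump ****
Reason: Timedout job - seqno=4660, lrc_seqno=4656, guc_id=3, flags=0x0
kernel: 6.13.0-rc1
module: xe
Snapshot time: 1732828102.123456789
Uptime: 842.987654321
Process: glxgears [2345]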
drivers/gpu/drm/xe/xe_devcoredump.c
drivers/gpu/drm/xe/xe_devcoredump.h
drivers/gpu/drm/xe/xe_devcoredump_types.h
drivers/gpu/drm/xe/xe_guc_submit.c

diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 0d3a4dc87a7fbd62f834ac92fd4a0495a19aeb38..baac50f6dd7e193743137159aaa0c4cbadccaea0 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -100,6 +100,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
        p = drm_coredump_printer(&iter);
 
        drm_puts(&p, "**** Xe Device Coredump ****\n");
+       drm_printf(&p, "Reason: %s\n", ss->reason);
        drm_puts(&p, "kernel: " UTS_RELEASE "\n");
        drm_puts(&p, "module: " KBUILD_MODNAME "\n");
 
@@ -107,7 +108,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
        drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
        ts = ktime_to_timespec64(ss->boot_time);
        drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
-       drm_printf(&p, "Process: %s\n", ss->process_name);
+       drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
        xe_device_snapshot_print(xe, &p);
 
        drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
@@ -139,6 +140,9 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
 {
        int i;
 
+       kfree(ss->reason);
+       ss->reason = NULL;
+
        xe_guc_log_snapshot_free(ss->guc.log);
        ss->guc.log = NULL;
 
@@ -259,8 +263,11 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
        ss->snapshot_time = ktime_get_real();
        ss->boot_time = ktime_get_boottime();
 
-       if (q->vm && q->vm->xef)
+       if (q->vm && q->vm->xef) {
                process_name = q->vm->xef->process_name;
+               ss->pid = q->vm->xef->pid;
+       }
+
        strscpy(ss->process_name, process_name);
 
        ss->gt = q->gt;
@@ -298,15 +305,18 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
  * xe_devcoredump - Take the required snapshots and initialize coredump device.
  * @q: The faulty xe_exec_queue, where the issue was detected.
  * @job: The faulty xe_sched_job, where the issue was detected.
+ * @fmt: Printf format + args to describe the reason for the core dump
  *
  * This function should be called at the crash time within the serialized
  * gt_reset. It is skipped if we still have the core dump device available
  * with the information of the 'first' snapshot.
  */
-void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job)
+__printf(3, 4)
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
 {
        struct xe_device *xe = gt_to_xe(q->gt);
        struct xe_devcoredump *coredump = &xe->devcoredump;
+       va_list varg;
 
        if (coredump->captured) {
                drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
@@ -314,6 +324,11 @@ void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job)
        }
 
        coredump->captured = true;
+
+       va_start(varg, fmt);
+       coredump->snapshot.reason = kvasprintf(GFP_ATOMIC, fmt, varg);
+       va_end(varg);
+
        devcoredump_snapshot(coredump, q, job);
 
        drm_info(&xe->drm, "Xe device coredump has been created\n");
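The reason string is formatted once at capture time with kvasprintf() under GFP_ATOMIC (per the v3 note above, since the dump can be taken from a timeout/reset path where sleeping is unsafe) and released with kfree() in xe_devcoredump_snapshot_free(). A minimal user-space sketch of the same capture-and-keep varargs pattern, with GNU vasprintf() standing in for the kernel helper (illustrative only, not kernel code):

#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Format a reason string once at capture time; the caller keeps it
 * until the snapshot is torn down, then frees it. */
static char *capture_reason(const char *fmt, ...)
{
	va_list varg;
	char *reason;

	va_start(varg, fmt);
	if (vasprintf(&reason, fmt, varg) < 0)	/* GNU extension */
		reason = NULL;
	va_end(varg);
	return reason;
}

int main(void)
{
	char *reason = capture_reason("Timedout job - guc_id=%d", 3);

	if (reason)
		printf("Reason: %s\n", reason);
	free(reason);	/* analogue of kfree(ss->reason) */
	return 0;
}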
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index c04a534e3384e46273df30ccd53111e39336bc8d..6a17e6d6010220efb8fa97fc7376e68e9d5a658f 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -14,11 +14,12 @@ struct xe_exec_queue;
 struct xe_sched_job;
 
 #ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job);
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...);
 int xe_devcoredump_init(struct xe_device *xe);
 #else
 static inline void xe_devcoredump(struct xe_exec_queue *q,
-                                 struct xe_sched_job *job)
+                                 struct xe_sched_job *job,
+                                 const char *fmt, ...)
 {
 }
 
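Note the CONFIG_DEV_COREDUMP=n stub simply swallows the new format string and arguments, so call sites need no #ifdef. The __printf(3, 4) annotation on the definition gives compile-time checking of every caller's format string; a rough sketch of what the kernel macro expands to (for illustration only):

/* Approximately what __printf(3, 4) expands to: */
__attribute__((__format__(printf, 3, 4)))
void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job,
		    const char *fmt, ...);

/*
 * A caller whose arguments do not match the format string, e.g.
 * passing an int where %s is expected, now draws a -Wformat
 * warning at compile time.
 */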
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index be4d59ea9ac8f2b3b21f3af15172c70970110798..e6234e887102b918979e66fba34add39d3237949 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -28,6 +28,10 @@ struct xe_devcoredump_snapshot {
        ktime_t boot_time;
        /** @process_name: Name of process that triggered this gpu hang */
        char process_name[TASK_COMM_LEN];
+       /** @pid: Process id of process that triggered this gpu hang */
+       pid_t pid;
+       /** @reason: The reason the coredump was triggered */
+       char *reason;
 
        /** @gt: Affected GT, used by forcewake for delayed capture */
        struct xe_gt *gt;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d00af8d8acbebcd01059bdae21ab4d84341248bd..9c36329fe8576e8e77babd1bf59d2ddf1d1d51a9 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -901,7 +901,8 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
                if (!ret) {
                        xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
                                   q->guc->id);
-                       xe_devcoredump(q, NULL);
+                       xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
+                                      q->guc->id);
                        xe_sched_submission_start(sched);
                        xe_gt_reset_async(q->gt);
                        return;
@@ -909,7 +910,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
        }
 
        if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
-               xe_devcoredump(q, NULL);
+               xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
 
        xe_sched_submission_start(sched);
 }
@@ -1136,7 +1137,9 @@ trigger_reset:
                                xe_gt_warn(guc_to_gt(guc),
                                           "Schedule disable failed to respond, guc_id=%d",
                                           q->guc->id);
-                       xe_devcoredump(q, job);
+                       xe_devcoredump(q, job,
+                                      "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
+                                      q->guc->id, ret, xe_guc_read_stopped(guc));
                        set_exec_queue_extra_ref(q);
                        xe_exec_queue_get(q);   /* GT reset owns this */
                        set_exec_queue_banned(q);
@@ -1166,7 +1169,10 @@ trigger_reset:
        trace_xe_sched_job_timedout(job);
 
        if (!exec_queue_killed(q))
-               xe_devcoredump(q, job);
+               xe_devcoredump(q, job,
+                              "Timedout job - seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+                              xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+                              q->guc->id, q->flags);
 
        /*
         * Kernel jobs should never fail, nor should VM jobs if they do
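Each converted call site passes a reason that mirrors the warning it already logs, so the dump stays useful even when dmesg is unavailable. Any future caller would follow the same pattern, e.g. (hypothetical call site, not part of this patch):

	/* Hypothetical new caller: reuse the existing warning text. */
	xe_devcoredump(q, job, "CAT error, guc_id=%d", q->guc->id);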