drm/xe: Add more GT stats around pagefault mode switch flows
author    Matthew Brost <matthew.brost@intel.com>
          Fri, 12 Dec 2025 18:28:47 +0000 (10:28 -0800)
committer Matthew Brost <matthew.brost@intel.com>
          Mon, 15 Dec 2025 22:02:56 +0000 (14:02 -0800)
Add GT stats to measure the time spent switching between pagefault mode
and dma-fence mode. Also add a GT stat to indicate when pagefault
suspend is skipped because the system is idle. These metrics will help
profile pagefault workloads while 3D and display are enabled.
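
For reference, the two elapsed-time stats are built on the
xe_gt_stats_ktime_get()/xe_gt_stats_ktime_us_delta() helpers used in the
diff below. Their definitions come from an earlier patch in this series;
a minimal sketch of their likely shape, assuming they are thin wrappers
around the standard ktime API, is:

  #include <linux/ktime.h>

  /* Snapshot a start time for a later microsecond-delta measurement. */
  static inline ktime_t xe_gt_stats_ktime_get(void)
  {
          return ktime_get();
  }

  /* Microseconds elapsed since @start, suitable for a *_US stat. */
  static inline s64 xe_gt_stats_ktime_us_delta(ktime_t start)
  {
          return ktime_us_delta(ktime_get(), start);
  }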

v2:
 - Use GT stats helper functions (Francois)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-8-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_gt_stats.c
drivers/gpu/drm/xe/xe_gt_stats_types.h
drivers/gpu/drm/xe/xe_hw_engine_group.c

diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 714045ad935483910a88446a237bb1a8eb5bde4e..fb2904bd0abd4d974ad148e8620c9f7aa8f4f930 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -68,8 +68,14 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
        DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
        DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
                     "hw_engine_group_suspend_lr_queue_count"),
+       DEF_STAT_STR(HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
+                    "hw_engine_group_skip_lr_queue_count"),
        DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
                     "hw_engine_group_wait_dma_queue_count"),
+       DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+                    "hw_engine_group_suspend_lr_queue_us"),
+       DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+                    "hw_engine_group_wait_dma_queue_us"),
 };
 
 /**
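
The names registered through DEF_STAT_STR() above are what a per-GT
stats dump prints next to each counter. As a usage illustration, a small
userspace reader might look like the sketch below; the debugfs path is
an assumption based on the usual per-GT layout (the DRM card and GT
indices vary by system):

  #include <stdio.h>
  #include <string.h>

  /* Hypothetical path; adjust the DRM card and GT indices as needed. */
  #define XE_GT_STATS_PATH "/sys/kernel/debug/dri/0/gt0/stats"

  int main(void)
  {
          char line[256];
          FILE *f = fopen(XE_GT_STATS_PATH, "r");

          if (!f) {
                  perror("fopen");
                  return 1;
          }

          /* Print only the hw_engine_group_* counters touched here. */
          while (fgets(line, sizeof(line), f))
                  if (strstr(line, "hw_engine_group_"))
                          fputs(line, stdout);

          fclose(f);
          return 0;
  }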
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index aada5df421e5ed73e516de9a57fba2db1ccfcb96..b92d013091d5e9542a7f74853a9bcc48816cbddd 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -45,7 +45,10 @@ enum xe_gt_stats_id {
        XE_GT_STATS_ID_SVM_64K_BIND_US,
        XE_GT_STATS_ID_SVM_2M_BIND_US,
        XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
+       XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
        XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
+       XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+       XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
        /* must be the last entry */
        __XE_GT_STATS_NUM_IDS,
 };
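
The __XE_GT_STATS_NUM_IDS sentinel keeps the ID enum and the name table
in xe_gt_stats.c sized in lockstep: new IDs can be added anywhere above
it and the description array grows automatically. A standalone
illustration of the pattern (hypothetical names, not driver code):

  #include <stdio.h>

  enum stat_id {
          STAT_SUSPEND_COUNT,
          STAT_SUSPEND_US,
          /* must be the last entry */
          __STAT_NUM_IDS,
  };

  /* Designated initializers keep each name aligned with its ID. */
  static const char *const stat_name[__STAT_NUM_IDS] = {
          [STAT_SUSPEND_COUNT] = "suspend_count",
          [STAT_SUSPEND_US]    = "suspend_us",
  };

  int main(void)
  {
          for (int id = 0; id < __STAT_NUM_IDS; id++)
                  printf("%d: %s\n", id, stat_name[id]);
          return 0;
  }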
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 40ce5d5f543c6e3464f0f459d4694f11a874fff3..f69a32c274580bd76b0be06cee298f6085044b6f 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -200,7 +200,9 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
 {
        int err;
        struct xe_exec_queue *q;
+       struct xe_gt *gt = NULL;
        bool need_resume = false;
+       ktime_t start = xe_gt_stats_ktime_get();
 
        lockdep_assert_held_write(&group->mode_sem);
 
@@ -215,9 +217,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
                        return -EAGAIN;
 
                xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
+               if (idle_skip_suspend)
+                       xe_gt_stats_incr(q->gt,
+                                        XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT, 1);
 
                need_resume |= !idle_skip_suspend;
                q->ops->suspend(q);
+               gt = q->gt;
        }
 
        list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
@@ -229,6 +235,12 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
                        return err;
        }
 
+       if (gt) {
+               xe_gt_stats_incr(gt,
+                                XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+                                xe_gt_stats_ktime_us_delta(start));
+       }
+
        if (need_resume)
                xe_hw_engine_group_resume_faulting_lr_jobs(group);
 
@@ -249,7 +261,9 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
 {
        long timeout;
        struct xe_exec_queue *q;
+       struct xe_gt *gt = NULL;
        struct dma_fence *fence;
+       ktime_t start = xe_gt_stats_ktime_get();
 
        lockdep_assert_held_write(&group->mode_sem);
 
@@ -261,11 +275,18 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
                fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
                timeout = dma_fence_wait(fence, false);
                dma_fence_put(fence);
+               gt = q->gt;
 
                if (timeout < 0)
                        return -ETIME;
        }
 
+       if (gt) {
+               xe_gt_stats_incr(gt,
+                                XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+                                xe_gt_stats_ktime_us_delta(start));
+       }
+
        return 0;
 }
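
Both instrumented paths follow the same shape: snapshot a start time,
perform the suspend or dma-fence wait, then charge the elapsed
microseconds to the GT of one of the group's queues (gt stays NULL, and
no time is recorded, when no queue is processed). A userspace analogue
of that pattern, using CLOCK_MONOTONIC in place of ktime (illustrative
only, not driver code):

  #include <stdio.h>
  #include <time.h>
  #include <unistd.h>

  /* Microseconds elapsed since @start, mirroring ktime_us_delta(). */
  static long us_since(const struct timespec *start)
  {
          struct timespec now;

          clock_gettime(CLOCK_MONOTONIC, &now);
          return (now.tv_sec - start->tv_sec) * 1000000L +
                 (now.tv_nsec - start->tv_nsec) / 1000L;
  }

  int main(void)
  {
          struct timespec start;

          clock_gettime(CLOCK_MONOTONIC, &start);
          usleep(1500);           /* stands in for the suspend/wait work */
          printf("elapsed: %ld us\n", us_since(&start));
          return 0;
  }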