Add GT stats to measure the time spent switching between pagefault mode
and dma-fence mode. Also add a GT stat to count how often a pagefault
suspend is skipped because the system is idle. These metrics will help
profile pagefault workloads while 3D and display are enabled.
v2:
- Use GT stats helper functions (Francois)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-8-matthew.brost@intel.com
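
The timing pattern in the hunks below relies on the GT stats ktime
helpers referenced in the v2 note above. As a minimal sketch, assuming
those helpers are thin wrappers around the kernel's ktime API (the
actual definitions in the driver may differ):

	/* Hypothetical helper definitions, for illustration only */
	static inline ktime_t xe_gt_stats_ktime_get(void)
	{
		/* monotonic timestamp marking the start of a measured section */
		return ktime_get();
	}

	static inline s64 xe_gt_stats_ktime_us_delta(ktime_t start)
	{
		/* microseconds elapsed since @start, passed to xe_gt_stats_incr() */
		return ktime_us_delta(ktime_get(), start);
	}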
DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
"hw_engine_group_suspend_lr_queue_count"),
+ DEF_STAT_STR(HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
+ "hw_engine_group_skip_lr_queue_count"),
DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
"hw_engine_group_wait_dma_queue_count"),
+ DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+ "hw_engine_group_suspend_lr_queue_us"),
+ DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+ "hw_engine_group_wait_dma_queue_us"),
};
/**
XE_GT_STATS_ID_SVM_64K_BIND_US,
XE_GT_STATS_ID_SVM_2M_BIND_US,
XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
{
int err;
struct xe_exec_queue *q;
+ struct xe_gt *gt = NULL;
bool need_resume = false;
+ ktime_t start = xe_gt_stats_ktime_get();
lockdep_assert_held_write(&group->mode_sem);
return -EAGAIN;
xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
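+	/* Count how often the suspend is skipped due to idleness */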
+ if (idle_skip_suspend)
+ xe_gt_stats_incr(q->gt,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT, 1);
need_resume |= !idle_skip_suspend;
q->ops->suspend(q);
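+	/* Remember a GT to attribute the elapsed-time stat to */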
+ gt = q->gt;
}
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
return err;
}
+ if (gt) {
+ xe_gt_stats_incr(gt,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+ xe_gt_stats_ktime_us_delta(start));
+ }
+
if (need_resume)
xe_hw_engine_group_resume_faulting_lr_jobs(group);
{
long timeout;
struct xe_exec_queue *q;
+ struct xe_gt *gt = NULL;
struct dma_fence *fence;
+ ktime_t start = xe_gt_stats_ktime_get();
lockdep_assert_held_write(&group->mode_sem);
fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
timeout = dma_fence_wait(fence, false);
dma_fence_put(fence);
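+	/* Remember a GT to attribute the wait-time stat to */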
+ gt = q->gt;
if (timeout < 0)
return -ETIME;
}
+ if (gt) {
+ xe_gt_stats_incr(gt,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+ xe_gt_stats_ktime_us_delta(start));
+ }
+
return 0;
}
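
With both the count and elapsed-time stats in place, the average cost of
a mode transition can be derived directly from the exposed counters,
e.g. hw_engine_group_suspend_lr_queue_us divided by
hw_engine_group_suspend_lr_queue_count for the suspend path, and
likewise for the wait_dma pair on the dma-fence wait path.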