git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/sched: Cleanup event names
Author: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Mon, 26 May 2025 12:54:49 +0000 (14:54 +0200)
Committer: Philipp Stanner <phasta@kernel.org>
Wed, 28 May 2025 14:16:13 +0000 (16:16 +0200)
All events now start with the same prefix (drm_sched_job_).

drm_sched_job_wait_dep was misleading because it wasn't waiting
at all. It's now replaced by trace_drm_sched_job_unschedulable,
which is only traced if the job cannot be scheduled.
For moot dependencies, nothing is traced.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://lore.kernel.org/r/20250526125505.2360-8-pierre-eric.pelloux-prayer@amd.com
drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c

index 38cdd659a286012f016fa0ebddf8f8ac78e4d5b3..4ce53e493fef48f0feaec1093c635166be4da255 100644 (file)
@@ -63,17 +63,17 @@ DECLARE_EVENT_CLASS(drm_sched_job,
                      __entry->job_count, __entry->hw_job_count, __entry->client_id)
 );
 
-DEFINE_EVENT(drm_sched_job, drm_sched_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
            TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
            TP_ARGS(sched_job, entity)
 );
 
-DEFINE_EVENT(drm_sched_job, drm_run_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
            TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
            TP_ARGS(sched_job, entity)
 );
 
-TRACE_EVENT(drm_sched_process_job,
+TRACE_EVENT(drm_sched_job_done,
            TP_PROTO(struct drm_sched_fence *fence),
            TP_ARGS(fence),
            TP_STRUCT__entry(
@@ -112,7 +112,7 @@ TRACE_EVENT(drm_sched_job_add_dep,
                  __entry->ctx, __entry->seqno)
 );
 
-TRACE_EVENT(drm_sched_job_wait_dep,
+TRACE_EVENT(drm_sched_job_unschedulable,
            TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
            TP_ARGS(sched_job, fence),
            TP_STRUCT__entry(
index 8c1589f476aeba141dabfda87b2fc8edb4dcc1c6..0b42e1aa70395c0c6a70980a95c4def92a1c1e6a 100644 (file)
@@ -476,10 +476,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 
        while ((entity->dependency =
                        drm_sched_job_dependency(sched_job, entity))) {
-               trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
-
-               if (drm_sched_entity_add_dependency_cb(entity))
+               if (drm_sched_entity_add_dependency_cb(entity)) {
+                       trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
                        return NULL;
+               }
        }
 
        /* skip jobs from entity that marked guilty */
@@ -585,7 +585,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
        bool first;
        ktime_t submit_ts;
 
-       trace_drm_sched_job(sched_job, entity);
+       trace_drm_sched_job_queue(sched_job, entity);
 
        if (trace_drm_sched_job_add_dep_enabled()) {
                struct dma_fence *entry;
index 34252bac83b9cb5b2e5d955f458c2ed0fe1e83ef..e3679f26f72485fb79b1086eebe43865d72dad81 100644 (file)
@@ -401,7 +401,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
        atomic_sub(s_job->credits, &sched->credit_count);
        atomic_dec(sched->score);
 
-       trace_drm_sched_process_job(s_fence);
+       trace_drm_sched_job_done(s_fence);
 
        dma_fence_get(&s_fence->finished);
        drm_sched_fence_finished(s_fence, result);
@@ -1234,7 +1234,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
        atomic_add(sched_job->credits, &sched->credit_count);
        drm_sched_job_begin(sched_job);
 
-       trace_drm_run_job(sched_job, entity);
+       trace_drm_sched_job_run(sched_job, entity);
        /*
         * The run_job() callback must by definition return a fence whose
         * refcount has been incremented for the scheduler already.