drm/sched: Fix outdated comments referencing thread
author     Philipp Stanner <phasta@kernel.org>
Fri, 14 Mar 2025 10:10:23 +0000 (11:10 +0100)
committer  Philipp Stanner <phasta@kernel.org>
Tue, 13 May 2025 13:39:48 +0000 (15:39 +0200)
The GPU scheduler's comments refer to a "thread" in various places.
Those are leftovers from commit a6149f039369 ("drm/sched: Convert drm
scheduler to use a work queue rather than kthread").

Replace all remaining references to the kthread.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://lore.kernel.org/r/20250314101023.111248-2-phasta@kernel.org
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

index bd39db7bb2408763d0388b62c108b1702e7a1e81..a9ec77a10d9312be2e435d3720707e43e4d360dc 100644 (file)
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -545,10 +545,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
                return;
 
        /*
-        * Only when the queue is empty are we guaranteed that the scheduler
-        * thread cannot change ->last_scheduled. To enforce ordering we need
-        * a read barrier here. See drm_sched_entity_pop_job() for the other
-        * side.
+        * Only when the queue is empty are we guaranteed that
+        * drm_sched_run_job_work() cannot change entity->last_scheduled. To
+        * enforce ordering we need a read barrier here. See
+        * drm_sched_entity_pop_job() for the other side.
         */
        smp_rmb();
 
index 829579c41c6b5d8b2abce5ad373c7017469b7680..f7118497e47a8bd71435fdca41d9d8ec08acecbc 100644 (file)
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -391,7 +391,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
  *
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
  */
 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 {
@@ -551,9 +551,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
 
        if (job) {
                /*
-                * Remove the bad job so it cannot be freed by concurrent
-                * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
-                * is parked at which point it's safe.
+                * Remove the bad job so it cannot be freed by a concurrent
+                * &struct drm_sched_backend_ops.free_job. It will be
+                * reinserted after the scheduler's work items have been
+                * cancelled, at which point it's safe.
                 */
                list_del_init(&job->list);
                spin_unlock(&sched->job_list_lock);
@@ -599,10 +600,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 
        /*
         * Reinsert back the bad job here - now it's safe as
-        * drm_sched_get_finished_job cannot race against us and release the
+        * drm_sched_get_finished_job() cannot race against us and release the
         * bad job at this point - we parked (waited for) any in progress
-        * (earlier) cleanups and drm_sched_get_finished_job will not be called
-        * now until the scheduler thread is unparked.
+        * (earlier) cleanups and drm_sched_get_finished_job() will not be
+        * called now until the scheduler's work items are submitted again.
         */
        if (bad && bad->sched == sched)
                /*
@@ -615,7 +616,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
         * Iterate the job list from later to  earlier one and either deactive
         * their HW callbacks or remove them from pending list if they already
         * signaled.
-        * This iteration is thread safe as sched thread is stopped.
+        * This iteration is thread safe as the scheduler's work items have been
+        * cancelled.
         */
        list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
                                         list) {
@@ -680,9 +682,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
        struct drm_sched_job *s_job, *tmp;
 
        /*
-        * Locking the list is not required here as the sched thread is parked
-        * so no new jobs are being inserted or removed. Also concurrent
-        * GPU recovers can't run in parallel.
+        * Locking the list is not required here as the scheduler's work items
+        * are currently not running, so no new jobs are being inserted or
+        * removed. Also concurrent GPU recovers can't run in parallel.
         */
        list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                struct dma_fence *fence = s_job->s_fence->parent;
index 1a7e377d4cbb4fc12ed93c548b236970217945e8..d860db087ea5f870ad5584027eaf5239ed26d300 100644 (file)
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -192,7 +192,7 @@ struct drm_sched_entity {
         * @last_scheduled:
         *
         * Points to the finished fence of the last scheduled job. Only written
-        * by the scheduler thread, can be accessed locklessly from
+        * by drm_sched_entity_pop_job(). Can be accessed locklessly from
         * drm_sched_job_arm() if the queue is empty.
         */
        struct dma_fence __rcu          *last_scheduled;