drm/sched: Consolidate drm_sched_rq_select_entity_rr
author    Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
          Tue, 8 Jul 2025 12:21:21 +0000 (13:21 +0100)
committer Philipp Stanner <phasta@kernel.org>
          Wed, 9 Jul 2025 09:48:30 +0000 (11:48 +0200)
Extract the two identical copies of the exit code in
drm_sched_rq_select_entity_rr() into a common epilogue to make the
function smaller and more readable.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Philipp Stanner <phasta@kernel.org>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
[phasta: commit message]
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://lore.kernel.org/r/20250708122121.75689-1-tvrtko.ursulin@igalia.com
drivers/gpu/drm/scheduler/sched_main.c

index 648b5d2feff886bc12e81af3c35335fa4e5dd050..81ad40d9582bce1e2e901ff8bd4c70862008878a 100644
@@ -263,38 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
-                       if (drm_sched_entity_is_ready(entity)) {
-                               /* If we can't queue yet, preserve the current
-                                * entity in terms of fairness.
-                                */
-                               if (!drm_sched_can_queue(sched, entity)) {
-                                       spin_unlock(&rq->lock);
-                                       return ERR_PTR(-ENOSPC);
-                               }
-
-                               rq->current_entity = entity;
-                               reinit_completion(&entity->entity_idle);
-                               spin_unlock(&rq->lock);
-                               return entity;
-                       }
+                       if (drm_sched_entity_is_ready(entity))
+                               goto found;
                }
        }
 
        list_for_each_entry(entity, &rq->entities, list) {
-               if (drm_sched_entity_is_ready(entity)) {
-                       /* If we can't queue yet, preserve the current entity in
-                        * terms of fairness.
-                        */
-                       if (!drm_sched_can_queue(sched, entity)) {
-                               spin_unlock(&rq->lock);
-                               return ERR_PTR(-ENOSPC);
-                       }
-
-                       rq->current_entity = entity;
-                       reinit_completion(&entity->entity_idle);
-                       spin_unlock(&rq->lock);
-                       return entity;
-               }
+               if (drm_sched_entity_is_ready(entity))
+                       goto found;
 
                if (entity == rq->current_entity)
                        break;
@@ -303,6 +279,22 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
        spin_unlock(&rq->lock);
 
        return NULL;
+
+found:
+       if (!drm_sched_can_queue(sched, entity)) {
+               /*
+                * If scheduler cannot take more jobs signal the caller to not
+                * consider lower priority queues.
+                */
+               entity = ERR_PTR(-ENOSPC);
+       } else {
+               rq->current_entity = entity;
+               reinit_completion(&entity->entity_idle);
+       }
+
+       spin_unlock(&rq->lock);
+
+       return entity;
 }
 
 /**
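
For illustration, below is a minimal, self-contained userspace sketch of the
control flow this patch produces: two search loops that jump to a single
shared "found:" epilogue. The types and helpers here (struct rq, struct
entity, sched_has_credits, select_entity_rr()) are simplified stand-ins
invented for the example, not the real DRM scheduler structures or API;
only the shape of the consolidated exit path mirrors the commit.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entity {
        int id;
        bool ready;
        struct entity *next;            /* simple singly linked list */
};

struct rq {
        struct entity *entities;        /* head of the entity list */
        struct entity *current_entity;  /* round-robin cursor */
        bool sched_has_credits;         /* stand-in for drm_sched_can_queue() */
};

static struct entity *select_entity_rr(struct rq *rq)
{
        struct entity *entity;

        /* First pass: continue after the current round-robin position. */
        if (rq->current_entity) {
                for (entity = rq->current_entity->next; entity; entity = entity->next)
                        if (entity->ready)
                                goto found;
        }

        /* Second pass: wrap around from the head of the list. */
        for (entity = rq->entities; entity; entity = entity->next) {
                if (entity->ready)
                        goto found;

                if (entity == rq->current_entity)
                        break;
        }

        return NULL;

found:
        /*
         * Shared epilogue, analogous to the new "found:" label in the patch.
         * The real function returns ERR_PTR(-ENOSPC) here when the scheduler
         * cannot take more jobs; this sketch simply returns NULL in that case.
         */
        if (!rq->sched_has_credits)
                return NULL;

        rq->current_entity = entity;
        return entity;
}

int main(void)
{
        struct entity c = { .id = 3, .ready = true,  .next = NULL };
        struct entity b = { .id = 2, .ready = false, .next = &c };
        struct entity a = { .id = 1, .ready = true,  .next = &b };
        struct rq rq = { .entities = &a, .current_entity = &a,
                         .sched_has_credits = true };
        struct entity *e = select_entity_rr(&rq);

        if (e)
                printf("selected entity %d\n", e->id);

        return 0;
}

The practical effect of the consolidation is that the capacity check and the
round-robin bookkeeping now live in exactly one place, so the two search
loops can no longer diverge in how they finish a successful lookup.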