drm/panthor: Fix the group priority rotation logic
author    Boris Brezillon <boris.brezillon@collabora.com>
          Fri, 28 Nov 2025 09:48:35 +0000 (10:48 +0100)
committer Boris Brezillon <boris.brezillon@collabora.com>
          Fri, 28 Nov 2025 09:56:44 +0000 (10:56 +0100)
When rotating group priorities, we want the group with the highest
priority to go back to the end of the queue, and all other active
groups to get their priority bumped; otherwise, some groups will never
get a chance to run with the highest priority. This implies moving the
rotation itself to tick_work(), and only dealing with old-group
ordering in tick_ctx_insert_old_group().
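
Not part of the patch, but a minimal userspace sketch of the rotation
the commit message describes; rotate(), ids[] and the tick loop are
invented for illustration (the driver operates on lists of
panthor_group, not an array):

#include <stdio.h>

/* ids[] holds group ids ordered by slot priority, highest first.
 * One rotation step bumps every other group up one slot and sends
 * the former top group to the end of the queue.
 */
static void rotate(int ids[], int count)
{
	int top = ids[0];

	for (int i = 0; i < count - 1; i++)
		ids[i] = ids[i + 1];	/* everyone else gets bumped */
	ids[count - 1] = top;		/* old top goes to the back */
}

int main(void)
{
	int ids[] = { 3, 2, 1, 0 };

	for (int tick = 1; tick <= 4; tick++) {
		rotate(ids, 4);
		printf("after tick %d: %d %d %d %d\n",
		       tick, ids[0], ids[1], ids[2], ids[3]);
	}
	/* After four ticks every group has held the top slot once. */
	return 0;
}

Each group holds the top slot exactly once per full cycle, which is the
fairness property the old logic failed to provide.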

v2:
- Add R-b
- Fix the commit message

v3:
- Drop the full_tick argument in tick_ctx_init()
- Collect R-b

Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: https://patch.msgid.link/20251128094839.3856402-5-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 5d280d9c82259421773f00cc15ca37471b3c149f..531b52ee3a92bcb9024830fafe283b06a147b23c 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2050,31 +2050,22 @@ tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
 static void
 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
                          struct panthor_sched_tick_ctx *ctx,
-                         struct panthor_group *group,
-                         bool full_tick)
+                         struct panthor_group *group)
 {
        struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
        struct panthor_group *other_group;
 
-       if (!full_tick) {
-               list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
-               return;
-       }
-
-       /* Rotate to make sure groups with lower CSG slot
-        * priorities have a chance to get a higher CSG slot
-        * priority next time they get picked. This priority
-        * has an impact on resource request ordering, so it's
-        * important to make sure we don't let one group starve
-        * all other groups with the same group priority.
-        */
+       /* Sort groups in descending priority order so we can easily rotate. */
        list_for_each_entry(other_group,
                            &ctx->old_groups[csg_slot->group->priority],
                            run_node) {
                struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
 
-               if (other_csg_slot->priority > csg_slot->priority) {
-                       list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
+               /* Our group has a higher prio than the one we're testing against,
+                * so place it just before.
+                */
+               if (csg_slot->priority > other_csg_slot->priority) {
+                       list_add_tail(&group->run_node, &other_group->run_node);
                        return;
                }
        }
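
For reference, list_add_tail(new, pos) links new immediately before
pos, which is why the loop above keeps the old-group list sorted in
descending slot priority: the group lands just before the first entry
with a lower priority, and a group that falls through the loop is
appended at the tail. A self-contained userspace sketch of that
insertion step, with struct node and both helpers standing in for the
kernel list API (illustrative, not the driver's types):

#include <stdio.h>

struct node {
	int prio;			/* plays the role of the CSG slot priority */
	struct node *prev, *next;
};

/* Equivalent of list_add_tail(new, pos): link "new" right before "pos". */
static void add_before(struct node *new, struct node *pos)
{
	new->prev = pos->prev;
	new->next = pos;
	pos->prev->next = new;
	pos->prev = new;
}

/* Insert before the first lower-priority entry; falling through to the
 * head appends at the tail, so equal priorities keep arrival order.
 */
static void insert_sorted(struct node *head, struct node *new)
{
	for (struct node *it = head->next; it != head; it = it->next) {
		if (new->prio > it->prio) {
			add_before(new, it);
			return;
		}
	}
	add_before(new, head);
}

int main(void)
{
	struct node head = { .prev = &head, .next = &head };
	struct node n[] = { { .prio = 1 }, { .prio = 3 }, { .prio = 2 } };

	for (int i = 0; i < 3; i++)
		insert_sorted(&head, &n[i]);

	for (struct node *it = head.next; it != &head; it = it->next)
		printf("%d ", it->prio);	/* prints: 3 2 1 */
	printf("\n");
	return 0;
}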
@@ -2084,8 +2075,7 @@ tick_ctx_insert_old_group(struct panthor_scheduler *sched,
 
 static void
 tick_ctx_init(struct panthor_scheduler *sched,
-             struct panthor_sched_tick_ctx *ctx,
-             bool full_tick)
+             struct panthor_sched_tick_ctx *ctx)
 {
        struct panthor_device *ptdev = sched->ptdev;
        struct panthor_csg_slots_upd_ctx upd_ctx;
@@ -2123,7 +2113,7 @@ tick_ctx_init(struct panthor_scheduler *sched,
                                group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
                }
 
-               tick_ctx_insert_old_group(sched, ctx, group, full_tick);
+               tick_ctx_insert_old_group(sched, ctx, group);
                csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
                                        csg_iface->output->ack ^ CSG_STATUS_UPDATE,
                                        CSG_STATUS_UPDATE);
@@ -2466,7 +2456,7 @@ static void tick_work(struct work_struct *work)
        if (panthor_device_reset_is_pending(sched->ptdev))
                goto out_unlock;
 
-       tick_ctx_init(sched, &ctx, full_tick);
+       tick_ctx_init(sched, &ctx);
        if (ctx.csg_upd_failed_mask)
                goto out_cleanup_ctx;
 
@@ -2492,9 +2482,29 @@ static void tick_work(struct work_struct *work)
        for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
             prio >= 0 && !tick_ctx_is_full(sched, &ctx);
             prio--) {
+               struct panthor_group *old_highest_prio_group =
+                       list_first_entry_or_null(&ctx.old_groups[prio],
+                                                struct panthor_group, run_node);
+
+               /* Pull out the group with the highest prio for rotation. */
+               if (old_highest_prio_group)
+                       list_del(&old_highest_prio_group->run_node);
+
+               /* Re-insert old active groups so they get a chance to run with higher prio. */
+               tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
+
+               /* Fill the remaining slots with runnable groups. */
                tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
                                               true, false);
-               tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
+
+               /* Re-insert the old group with the highest prio, and give it a chance to be
+                * scheduled again (but with a lower prio) if there's room left.
+                */
+               if (old_highest_prio_group) {
+                       list_add_tail(&old_highest_prio_group->run_node, &ctx.old_groups[prio]);
+                       tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
+                                                      true, true);
+               }
        }
 
        /* If we have free CSG slots left, pick idle groups */
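
Finally, a standalone simulation of the per-priority pick order above;
SLOTS, pick() and the group ids are invented for illustration (the real
code fills ctx.groups via tick_ctx_pick_groups_from_list()). The old
top group is pulled out first so the remaining old groups get picked
ahead of it, runnable groups fill what is left, and the former top
group is only re-picked if slots remain:

#include <stdio.h>

#define SLOTS 3	/* pretend we have three CSG slots at this priority */

static int picked[SLOTS], npicked;

/* Toy version of picking a single group into the tick context. */
static void pick(int id)
{
	if (npicked < SLOTS)
		picked[npicked++] = id;
}

int main(void)
{
	int old_groups[] = { 10, 11, 12 };	/* old top (10) first */
	int runnable[] = { 20, 21 };
	int old_top = old_groups[0];		/* pulled out for rotation */

	/* Old active groups (minus the top one) get their prio bumped. */
	for (int i = 1; i < 3; i++)
		pick(old_groups[i]);

	/* Remaining slots are filled with runnable groups. */
	for (int i = 0; i < 2; i++)
		pick(runnable[i]);

	/* The former top group only runs again if there is room left. */
	pick(old_top);

	for (int i = 0; i < npicked; i++)
		printf("slot %d -> group %d\n", i, picked[i]);
	return 0;
}

With three slots this prints groups 11, 12 and 20: the bumped old
groups run ahead of the fresh runnable one, while the former top group
10 sits this tick out, which is exactly the rotation the fix aims for.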