drm/panthor: Fix the logic that decides when to stop ticking
author	Boris Brezillon <boris.brezillon@collabora.com>
	Fri, 28 Nov 2025 09:48:37 +0000 (10:48 +0100)
committer	Boris Brezillon <boris.brezillon@collabora.com>
	Fri, 28 Nov 2025 09:56:44 +0000 (10:56 +0100)
When we have multiple active groups with the same priority, we need to
keep ticking for the priority rotation to take place. If we don't do
that, we might starve slots with lower priorities.
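
As a rough sketch of why the tick matters (plain C, not driver code: the
two-entry list and the rotate step below are stand-ins for panthor's
per-priority runnable lists and its CSG slot rotation), rotating the
list head on every tick is what lets both same-priority groups run;
without a periodic tick the head never changes and the second group
starves:

    #include <stdio.h>

    /* Hypothetical stand-in for a scheduling group; the real driver
     * uses struct panthor_group linked through run_node.
     */
    struct group {
            const char *name;
            struct group *next;
    };

    /* One tick worth of round-robin: move the current head (the group
     * that just got the slot) to the tail so the next same-priority
     * group runs. If no tick ever calls this, the head never changes.
     */
    static struct group *rotate(struct group *head)
    {
            struct group *first = head, *tail = head;

            if (!head || !head->next)
                    return head;

            while (tail->next)
                    tail = tail->next;

            head = first->next;
            first->next = NULL;
            tail->next = first;
            return head;
    }

    int main(void)
    {
            struct group b = { "B", NULL };
            struct group a = { "A", &b };
            struct group *runnable = &a;
            int tick;

            for (tick = 0; tick < 4; tick++) {
                    printf("tick %d: group %s runs\n", tick, runnable->name);
                    runnable = rotate(runnable);
            }
            return 0;
    }

Dropping the rotate() call (i.e. stopping the tick) leaves group A
winning every iteration, which is exactly the starvation described
above.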

It's annoying to deal with that in tick_ctx_update_resched_target(),
so let's add a ::stop_tick field to the tick context, which is
initialized to true and downgraded to false as soon as we detect
something that requires the tick to happen. This way we can
complement the current logic with extra conditions if needed.
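
A minimal sketch of that pattern, with everything except the stop_tick
name hypothetical: initialize the flag to true, and let every site that
spots a reason to keep ticking downgrade it, so a future condition only
needs one more clearing statement:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, trimmed-down tick context; the real field lives
     * in struct panthor_sched_tick_ctx in panthor_sched.c.
     */
    struct tick_ctx {
            bool stop_tick;
    };

    static void tick_ctx_scan(struct tick_ctx *ctx, bool same_prio_contention)
    {
            /* Default assumption: nothing needs another tick... */
            ctx->stop_tick = true;

            /* ...and each reason to keep ticking just clears the flag.
             * A new reason is one more "ctx->stop_tick = false;" site,
             * with no change to the code that consumes the flag.
             */
            if (same_prio_contention)
                    ctx->stop_tick = false;
    }

    int main(void)
    {
            struct tick_ctx ctx;

            tick_ctx_scan(&ctx, true);
            printf("stop_tick = %s\n", ctx.stop_tick ? "true" : "false");
            return 0;
    }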

v2:
- Add R-b

v3:
- Drop panthor_sched_tick_ctx::min_priority (no longer relevant)
- Collect R-b

Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: https://patch.msgid.link/20251128094839.3856402-7-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
drivers/gpu/drm/panthor/panthor_sched.c

index 1efeabc4b0ac7c2a78775ebc9e5abd824d1724ab..8846608dd12754a0ac7806947f2de6e7d129774d 100644 (file)
@@ -1985,10 +1985,10 @@ struct panthor_sched_tick_ctx {
        struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
        u32 idle_group_count;
        u32 group_count;
-       enum panthor_csg_priority min_priority;
        struct panthor_vm *vms[MAX_CS_PER_CSG];
        u32 as_count;
        bool immediate_tick;
+       bool stop_tick;
        u32 csg_upd_failed_mask;
 };
 
@@ -2031,17 +2031,21 @@ tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
                if (!owned_by_tick_ctx)
                        group_get(group);
 
-               list_move_tail(&group->run_node, &ctx->groups[group->priority]);
                ctx->group_count++;
+
+               /* If we have more than one active group with the same priority,
+                * we need to keep ticking to rotate the CSG priority.
+                */
                if (group_is_idle(group))
                        ctx->idle_group_count++;
+               else if (!list_empty(&ctx->groups[group->priority]))
+                       ctx->stop_tick = false;
+
+               list_move_tail(&group->run_node, &ctx->groups[group->priority]);
 
                if (i == ctx->as_count)
                        ctx->vms[ctx->as_count++] = group->vm;
 
-               if (ctx->min_priority > group->priority)
-                       ctx->min_priority = group->priority;
-
                if (tick_ctx_is_full(sched, ctx))
                        return;
        }
@@ -2085,7 +2089,7 @@ tick_ctx_init(struct panthor_scheduler *sched,
        memset(ctx, 0, sizeof(*ctx));
        csgs_upd_ctx_init(&upd_ctx);
 
-       ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
+       ctx->stop_tick = true;
        for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
                INIT_LIST_HEAD(&ctx->groups[i]);
                INIT_LIST_HEAD(&ctx->old_groups[i]);
@@ -2397,32 +2401,18 @@ static u64
 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
                               const struct panthor_sched_tick_ctx *ctx)
 {
-       /* We had space left, no need to reschedule until some external event happens. */
-       if (!tick_ctx_is_full(sched, ctx))
-               goto no_tick;
-
-       /* If idle groups were scheduled, no need to wake up until some external
-        * event happens (group unblocked, new job submitted, ...).
-        */
-       if (ctx->idle_group_count)
-               goto no_tick;
+       u64 resched_target;
 
-       if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
+       if (ctx->stop_tick)
                goto no_tick;
 
-       /* If there are groups of the same priority waiting, we need to
-        * keep the scheduler ticking, otherwise, we'll just wait for
-        * new groups with higher priority to be queued.
-        */
-       if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
-               u64 resched_target = sched->last_tick + sched->tick_period;
+       resched_target = sched->last_tick + sched->tick_period;
 
-               if (time_before64(sched->resched_target, sched->last_tick) ||
-                   time_before64(resched_target, sched->resched_target))
-                       sched->resched_target = resched_target;
+       if (time_before64(sched->resched_target, sched->last_tick) ||
+           time_before64(resched_target, sched->resched_target))
+               sched->resched_target = resched_target;
 
-               return sched->resched_target - sched->last_tick;
-       }
+       return sched->resched_target - sched->last_tick;
 
 no_tick:
        sched->resched_target = U64_MAX;