From 99820b4b7e50d9651f01d2d55b6b9ba92dcc5b99 Mon Sep 17 00:00:00 2001
From: Boris Brezillon <boris.brezillon@collabora.com>
Date: Fri, 28 Nov 2025 10:48:38 +0100
Subject: [PATCH] drm/panthor: Make sure we resume the tick when new jobs are submitted

If the group is already assigned a slot but was idle before this job
submission, we need to make sure the priority rotation happens in the
future. Extract the existing logic living in group_schedule_locked()
and call this new sched_resume_tick() helper from the "group is
assigned a slot" path.

v2:
- Add R-b

v3:
- Re-use queue_mask to clear the bit
- Collect R-b

Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
Reviewed-by: Steven Price
Reviewed-by: Chia-I Wu
Link: https://patch.msgid.link/20251128094839.3856402-8-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 43 +++++++++++++++++++------
 1 file changed, 34 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 8846608dd127..7759a8a8565e 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2624,14 +2624,33 @@ static void sync_upd_work(struct work_struct *work)
 		sched_queue_delayed_work(sched, tick, 0);
 }
 
+static void sched_resume_tick(struct panthor_device *ptdev)
+{
+	struct panthor_scheduler *sched = ptdev->scheduler;
+	u64 delay_jiffies, now;
+
+	drm_WARN_ON(&ptdev->base, sched->resched_target != U64_MAX);
+
+	/* Scheduler tick was off, recalculate the resched_target based on the
+	 * last tick event, and queue the scheduler work.
+	 */
+	now = get_jiffies_64();
+	sched->resched_target = sched->last_tick + sched->tick_period;
+	if (sched->used_csg_slot_count == sched->csg_slot_count &&
+	    time_before64(now, sched->resched_target))
+		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
+	else
+		delay_jiffies = 0;
+
+	sched_queue_delayed_work(sched, tick, delay_jiffies);
+}
+
 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
 {
 	struct panthor_device *ptdev = group->ptdev;
 	struct panthor_scheduler *sched = ptdev->scheduler;
 	struct list_head *queue = &sched->groups.runnable[group->priority];
-	u64 delay_jiffies = 0;
 	bool was_idle;
-	u64 now;
 
 	if (!group_can_run(group))
 		return;
@@ -2676,13 +2695,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
 	/* Scheduler tick was off, recalculate the resched_target based on the
 	 * last tick event, and queue the scheduler work.
 	 */
-	now = get_jiffies_64();
-	sched->resched_target = sched->last_tick + sched->tick_period;
-	if (sched->used_csg_slot_count == sched->csg_slot_count &&
-	    time_before64(now, sched->resched_target))
-		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
-
-	sched_queue_delayed_work(sched, tick, delay_jiffies);
+	sched_resume_tick(ptdev);
 }
 
 static void queue_stop(struct panthor_queue *queue,
@@ -3314,6 +3327,18 @@ queue_run_job(struct drm_sched_job *sched_job)
 	if (group->csg_id < 0) {
 		group_schedule_locked(group, BIT(job->queue_idx));
 	} else {
+		u32 queue_mask = BIT(job->queue_idx);
+		bool resume_tick = group_is_idle(group) &&
+				   (group->idle_queues & queue_mask) &&
+				   !(group->blocked_queues & queue_mask) &&
+				   sched->resched_target == U64_MAX;
+
+		/* We just added something to the queue, so it's no longer idle. */
+		group->idle_queues &= ~queue_mask;
+
+		if (resume_tick)
+			sched_resume_tick(ptdev);
+
 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
 		if (!sched->pm.has_ref &&
 		    !(group->blocked_queues & BIT(job->queue_idx))) {
-- 
2.47.3