drm/panthor: Make sure we resume the tick when new jobs are submitted
author    Boris Brezillon <boris.brezillon@collabora.com>
          Fri, 28 Nov 2025 09:48:38 +0000 (10:48 +0100)
committer Boris Brezillon <boris.brezillon@collabora.com>
          Fri, 28 Nov 2025 09:56:44 +0000 (10:56 +0100)
If the group is already assigned a slot but was idle before this job
submission, we need to make sure the priority rotation happens in the
future. Extract the existing tick-resume logic from
group_schedule_locked() into a new sched_resume_tick() helper and call
it from the "group is already assigned a slot" path.
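
In essence, the fix adds a second path that can restart the scheduler
tick: job submission to a group that already holds a CSG slot. The
standalone sketch below is a simplified userspace model of the check
queue_run_job() now performs, not the driver code itself; the names are
hypothetical, group_is_idle() is reduced to a precomputed flag, and the
kernel types are replaced with stdint ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_OFF UINT64_MAX	/* resched_target value while the tick is stopped */

/* Only resume the tick if the whole group was idle, the target queue
 * itself was idle but not blocked on a sync object, and the tick is
 * currently stopped. */
static bool needs_tick_resume(bool group_idle, uint32_t idle_queues,
			      uint32_t blocked_queues, uint32_t queue_idx,
			      uint64_t resched_target)
{
	uint32_t queue_mask = UINT32_C(1) << queue_idx;

	return group_idle &&
	       (idle_queues & queue_mask) &&
	       !(blocked_queues & queue_mask) &&
	       resched_target == TICK_OFF;
}

int main(void)
{
	/* Group and queue 0 idle, tick stopped: the tick must be resumed. */
	printf("%d\n", needs_tick_resume(true, 0x1, 0x0, 0, TICK_OFF));

	/* Same submission while the tick is already running: nothing to do. */
	printf("%d\n", needs_tick_resume(true, 0x1, 0x0, 0, 12345));
	return 0;
}

After the check, the submission path clears the queue's bit from
idle_queues, matching the "no longer idle" comment in the hunk below.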

v2:
- Add R-b

v3:
- Re-use queue_mask to clear the bit
- Collect R-b

Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: https://patch.msgid.link/20251128094839.3856402-8-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
drivers/gpu/drm/panthor/panthor_sched.c

index 8846608dd12754a0ac7806947f2de6e7d129774d..7759a8a8565e8e67368952472da356f179557cce 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2624,14 +2624,33 @@ static void sync_upd_work(struct work_struct *work)
                sched_queue_delayed_work(sched, tick, 0);
 }
 
+static void sched_resume_tick(struct panthor_device *ptdev)
+{
+       struct panthor_scheduler *sched = ptdev->scheduler;
+       u64 delay_jiffies, now;
+
+       drm_WARN_ON(&ptdev->base, sched->resched_target != U64_MAX);
+
+       /* Scheduler tick was off, recalculate the resched_target based on the
+        * last tick event, and queue the scheduler work.
+        */
+       now = get_jiffies_64();
+       sched->resched_target = sched->last_tick + sched->tick_period;
+       if (sched->used_csg_slot_count == sched->csg_slot_count &&
+           time_before64(now, sched->resched_target))
+               delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
+       else
+               delay_jiffies = 0;
+
+       sched_queue_delayed_work(sched, tick, delay_jiffies);
+}
+
 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
 {
        struct panthor_device *ptdev = group->ptdev;
        struct panthor_scheduler *sched = ptdev->scheduler;
        struct list_head *queue = &sched->groups.runnable[group->priority];
-       u64 delay_jiffies = 0;
        bool was_idle;
-       u64 now;
 
        if (!group_can_run(group))
                return;
@@ -2676,13 +2695,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
        /* Scheduler tick was off, recalculate the resched_target based on the
         * last tick event, and queue the scheduler work.
         */
-       now = get_jiffies_64();
-       sched->resched_target = sched->last_tick + sched->tick_period;
-       if (sched->used_csg_slot_count == sched->csg_slot_count &&
-           time_before64(now, sched->resched_target))
-               delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
-
-       sched_queue_delayed_work(sched, tick, delay_jiffies);
+       sched_resume_tick(ptdev);
 }
 
 static void queue_stop(struct panthor_queue *queue,
@@ -3314,6 +3327,18 @@ queue_run_job(struct drm_sched_job *sched_job)
        if (group->csg_id < 0) {
                group_schedule_locked(group, BIT(job->queue_idx));
        } else {
+               u32 queue_mask = BIT(job->queue_idx);
+               bool resume_tick = group_is_idle(group) &&
+                                  (group->idle_queues & queue_mask) &&
+                                  !(group->blocked_queues & queue_mask) &&
+                                  sched->resched_target == U64_MAX;
+
+               /* We just added something to the queue, so it's no longer idle. */
+               group->idle_queues &= ~queue_mask;
+
+               if (resume_tick)
+                       sched_resume_tick(ptdev);
+
                gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
                if (!sched->pm.has_ref &&
                    !(group->blocked_queues & BIT(job->queue_idx))) {
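
For reference, the delay computation that sched_resume_tick() inherits
from group_schedule_locked() behaves like the simplified model below
(a hypothetical userspace stand-in; jiffies arithmetic is reduced to
plain 64-bit math): schedule an immediate tick unless every CSG slot is
in use and the recomputed target still lies in the future.

#include <stdint.h>
#include <stdio.h>

/* Tick immediately unless all CSG slots are used and the next tick
 * target (last_tick + tick_period) is still ahead of "now". */
static uint64_t tick_delay(uint64_t now, uint64_t last_tick,
			   uint64_t tick_period,
			   uint32_t used_slots, uint32_t total_slots)
{
	uint64_t resched_target = last_tick + tick_period;

	if (used_slots == total_slots && now < resched_target)
		return resched_target - now;	/* wait for the rotation point */

	return 0;	/* a slot is free or the target has passed: tick now */
}

int main(void)
{
	/* All 8 slots busy, 30 jiffies until the next rotation: wait. */
	printf("%llu\n", (unsigned long long)tick_delay(100, 80, 50, 8, 8));

	/* One slot free: rotate right away. */
	printf("%llu\n", (unsigned long long)tick_delay(100, 80, 50, 7, 8));
	return 0;
}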