static void
tick_ctx_insert_old_group(struct panthor_scheduler *sched,
struct panthor_sched_tick_ctx *ctx,
- struct panthor_group *group,
- bool full_tick)
+ struct panthor_group *group)
{
struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
struct panthor_group *other_group;
- if (!full_tick) {
- list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
- return;
- }
-
- /* Rotate to make sure groups with lower CSG slot
- * priorities have a chance to get a higher CSG slot
- * priority next time they get picked. This priority
- * has an impact on resource request ordering, so it's
- * important to make sure we don't let one group starve
- * all other groups with the same group priority.
- */
+ /* Sort groups in descending priority order so we can easily rotate them. */
list_for_each_entry(other_group,
&ctx->old_groups[csg_slot->group->priority],
run_node) {
struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
- if (other_csg_slot->priority > csg_slot->priority) {
- list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
+ /* Our group has a higher priority than the one we're testing against,
+ * so place it just before to keep the list sorted.
+ */
+ if (csg_slot->priority > other_csg_slot->priority) {
+ list_add_tail(&group->run_node, &other_group->run_node);
return;
}
}
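
For illustration, a minimal self-contained sketch of the insertion above (hypothetical types, not the real panthor structs): walk a list kept sorted by descending priority and link the new entry just before the first entry with a strictly lower priority, falling back to the tail when no such entry exists.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int priority;
	struct node *prev, *next;
};

/* Circular list with a sentinel head, mimicking the kernel's list_head. */
static void insert_sorted(struct node *head, struct node *n)
{
	struct node *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		if (n->priority > pos->priority)
			break; /* found a lower-prio entry, insert before it */
	}

	/* pos == head when no lower-prio entry exists: append at the tail. */
	n->prev = pos->prev;
	n->next = pos;
	pos->prev->next = n;
	pos->prev = n;
}

int main(void)
{
	struct node head = { .prev = &head, .next = &head };
	int prios[] = { 2, 5, 3, 5, 1 };

	for (size_t i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->priority = prios[i];
		insert_sorted(&head, n);
	}

	for (struct node *pos = head.next; pos != &head; pos = pos->next)
		printf("%d ", pos->priority); /* prints: 5 5 3 2 1 */
	printf("\n");
	return 0;
}
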
static void
tick_ctx_init(struct panthor_scheduler *sched,
- struct panthor_sched_tick_ctx *ctx,
- bool full_tick)
+ struct panthor_sched_tick_ctx *ctx)
{
struct panthor_device *ptdev = sched->ptdev;
struct panthor_csg_slots_upd_ctx upd_ctx;
group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
}
- tick_ctx_insert_old_group(sched, ctx, group, full_tick);
+ tick_ctx_insert_old_group(sched, ctx, group);
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
csg_iface->output->ack ^ CSG_STATUS_UPDATE,
CSG_STATUS_UPDATE);
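
The `ack ^ CSG_STATUS_UPDATE` idiom posts a status-update request by toggling the request bit so it no longer matches the firmware's ack bit; the firmware completes the request by toggling ack back to match. A minimal sketch of this toggle handshake, with hypothetical helpers and an illustrative bit position (not the real CSG interface layout):

#include <stdint.h>
#include <stdio.h>

#define CSG_STATUS_UPDATE (1u << 5) /* illustrative bit, not the real layout */

struct csg_iface {
	uint32_t req; /* host-written */
	uint32_t ack; /* firmware-written */
};

static void host_request_status_update(struct csg_iface *iface)
{
	/* ack ^ CSG_STATUS_UPDATE yields the opposite of the current ack
	 * bit; copying it into req signals a pending request.
	 */
	iface->req = (iface->req & ~CSG_STATUS_UPDATE) |
		     ((iface->ack ^ CSG_STATUS_UPDATE) & CSG_STATUS_UPDATE);
}

static int update_pending(const struct csg_iface *iface)
{
	/* A request is pending as long as req and ack disagree. */
	return (iface->req ^ iface->ack) & CSG_STATUS_UPDATE;
}

int main(void)
{
	struct csg_iface iface = { 0 };

	host_request_status_update(&iface);
	printf("pending: %d\n", !!update_pending(&iface)); /* 1 */

	/* Firmware side: acknowledge by mirroring the req bit. */
	iface.ack ^= CSG_STATUS_UPDATE;
	printf("pending: %d\n", !!update_pending(&iface)); /* 0 */
	return 0;
}
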
if (panthor_device_reset_is_pending(sched->ptdev))
goto out_unlock;
- tick_ctx_init(sched, &ctx, full_tick);
+ tick_ctx_init(sched, &ctx);
if (ctx.csg_upd_failed_mask)
goto out_cleanup_ctx;
for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
prio >= 0 && !tick_ctx_is_full(sched, &ctx);
prio--) {
+ struct panthor_group *old_highest_prio_group =
+ list_first_entry_or_null(&ctx.old_groups[prio],
+ struct panthor_group, run_node);
+
+ /* Pull out the group with the highest prio for rotation. */
+ if (old_highest_prio_group)
+ list_del(&old_highest_prio_group->run_node);
+
+ /* Re-insert the old active groups first so they get a chance to run at a higher priority. */
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
+
+ /* Fill the remaining slots with runnable groups. */
tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
true, false);
- tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
+
+ /* Re-insert the old group that had the highest priority, and give it
+ * a chance to be scheduled again (but at a lower priority) if there's
+ * room left.
+ */
+ if (old_highest_prio_group) {
+ list_add_tail(&old_highest_prio_group->run_node, &ctx.old_groups[prio]);
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
+ true, true);
+ }
}
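
Net effect of this loop within one priority level: the group that held the top slot last tick is re-queued after everyone else, so groups of equal priority take the highest CSG slot priority in turns. A compressed sketch of that round-robin rotation (a plain array standing in for the real lists):

#include <stdio.h>

/* Move the head entry to the tail, shifting everyone else up one slot. */
static void rotate(int *ring, int count)
{
	int head = ring[0];

	for (int i = 0; i < count - 1; i++)
		ring[i] = ring[i + 1];
	ring[count - 1] = head;
}

int main(void)
{
	int groups[] = { 1, 2, 3 }; /* group IDs, all at the same priority */

	for (int tick = 0; tick < 3; tick++) {
		printf("tick %d: group %d gets the top slot\n", tick, groups[0]);
		rotate(groups, 3);
	}
	return 0;
}
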
/* If we have free CSG slots left, pick idle groups */