* When run to parity is disabled, we give a minimum quantum to the running
* entity to ensure progress.
*/
-static inline void set_protect_slice(struct sched_entity *se)
+static inline void set_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = normalized_sysctl_sched_base_slice;
	u64 vprot = se->deadline;

	if (sched_feat(RUN_TO_PARITY))
-		slice = cfs_rq_min_slice(cfs_rq_of(se));
+		slice = cfs_rq_min_slice(cfs_rq);

	slice = min(slice, se->slice);
+	/*
+	 * If a shorter slice than se's own applies, pull the protection
+	 * limit in to vruntime + that slice (weight-scaled by
+	 * calc_delta_fair), mirroring update_protect_slice(). Either way
+	 * se->vprot must be (re)set here: leaving it stale would protect
+	 * the entity with a limit from a previous pick.
+	 */
	if (slice != se->slice)
-		se->vprot = vprot;
+		vprot = min_vruntime(vprot, se->vruntime + calc_delta_fair(slice, se));
+
+	se->vprot = vprot;
}
+/*
+ * Re-clamp the protected window after the set of enqueued entities has
+ * changed: never extend se->vprot, only pull it in (min_vruntime) to
+ * se->vruntime plus the smallest enqueued slice, converted to vruntime
+ * scale via calc_delta_fair().
+ */
+static inline void update_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	u64 slice = cfs_rq_min_slice(cfs_rq);
+
+	se->vprot = min_vruntime(se->vprot, se->vruntime + calc_delta_fair(slice, se));
+}
+
+
/*
 * True while @se is still inside its protected window, i.e. vprot is ahead
 * of vruntime. The s64 cast makes the comparison of the u64 difference
 * signed, so the check stays correct across vruntime wraparound.
 */
static inline bool protect_slice(struct sched_entity *se)
{
	return ((s64)(se->vprot - se->vruntime) > 0);
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);
- set_protect_slice(se);
+ set_protect_slice(cfs_rq, se);
}
update_stats_curr_start(cfs_rq, se);
if (__pick_eevdf(cfs_rq, !do_preempt_short) == pse)
goto preempt;
+ if (sched_feat(RUN_TO_PARITY) && do_preempt_short)
+ update_protect_slice(cfs_rq, se);
+
return;
preempt: