sched/rt: Clean up usage of rt_task()
author Qais Yousef <qyousef@layalina.io>
Mon, 10 Jun 2024 19:20:16 +0000 (20:20 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 7 Aug 2024 16:32:37 +0000 (18:32 +0200)
rt_task() checks if a task has RT priority. But depending on your
dictionary, this could mean it belongs to the RT class, or that it is
a 'realtime' task, which includes both the RT and DL classes.

Since this has already caused some confusion in discussion [1], a
cleanup seemed due.

Define rt_task() to mean tasks that belong to the RT class. Make sure
it returns true only for the RT class, audit its users, and convert
those that required the old behavior to the new realtime_task(), which
returns true for both RT and DL classes. Introduce realtime_prio() to
create the same distinction relative to rt_prio() and update the users
that required the old behavior to use the new function.
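
To illustrate (a minimal sketch of the resulting semantics, not code
from the patch; the real definitions appear in the sched/rt.h hunk
below):

        rt_task(p)        /* true iff MAX_DL_PRIO <= p->prio < MAX_RT_PRIO */
        realtime_task(p)  /* true iff p->prio < MAX_RT_PRIO, i.e. RT or DL */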

Move MAX_DL_PRIO to prio.h so it can be used in the new definitions.
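
For reference, the priority ranges these macros delimit (a sketch
based on the values visible in the hunks below, where MAX_DL_PRIO is 0
and MAX_RT_PRIO is 100):

        prio < MAX_DL_PRIO                         /* DL: negative prios */
        prio >= MAX_DL_PRIO && prio < MAX_RT_PRIO  /* RT: 0..99          */
        prio >= MAX_RT_PRIO                        /* fair: nice levels  */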

Document the functions to make the difference between them more
obvious. PI boosting is a factor that must be taken into account when
choosing which function to use.

Rename task_is_realtime() to realtime_task_policy() as the old name is
easily confused with the new realtime_task().
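
For example, a SCHED_NORMAL task whose priority is currently
PI-boosted into the RT range by an rt_mutex satisfies the prio-based
helpers but not the policy-based one (an illustrative sketch, not code
from the patch):

        /* p: SCHED_NORMAL task, prio boosted into the RT range */
        rt_task(p);               /* true:  p->prio is in [0, MAX_RT_PRIO) */
        realtime_task(p);         /* true:  likewise                       */
        realtime_task_policy(p);  /* false: p->policy is SCHED_NORMAL;     */
                                  /*        PI boosting is ignored         */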

No functional changes were intended.

[1] https://lore.kernel.org/lkml/20240506100509.GL40213@noisy.programming.kicks-ass.net/

Signed-off-by: Qais Yousef <qyousef@layalina.io>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: "Steven Rostedt (Google)" <rostedt@goodmis.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20240610192018.1567075-2-qyousef@layalina.io
15 files changed:
fs/bcachefs/six.c
fs/select.c
include/linux/ioprio.h
include/linux/sched/deadline.h
include/linux/sched/prio.h
include/linux/sched/rt.h
kernel/locking/rtmutex.c
kernel/locking/rwsem.c
kernel/locking/ww_mutex.h
kernel/sched/core.c
kernel/sched/syscalls.c
kernel/time/hrtimer.c
kernel/trace/trace_sched_wakeup.c
mm/page-writeback.c
mm/page_alloc.c

diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 3a494c5d12478595c76bebc89fd15b517c5ed6d0..b30870bf7e4a050d53a50070afa03b08c8d9af40 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -335,7 +335,7 @@ static inline bool six_owner_running(struct six_lock *lock)
         */
        rcu_read_lock();
        struct task_struct *owner = READ_ONCE(lock->owner);
-       bool ret = owner ? owner_on_cpu(owner) : !rt_task(current);
+       bool ret = owner ? owner_on_cpu(owner) : !realtime_task(current);
        rcu_read_unlock();
 
        return ret;
diff --git a/fs/select.c b/fs/select.c
index 9515c3fa1a03e8a8576f90f8a4dee46509dd1954..8d5c1419416c9b78f8a875c42a2d94472e445921 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -82,7 +82,7 @@ u64 select_estimate_accuracy(struct timespec64 *tv)
         * Realtime tasks get a slack of 0 for obvious reasons.
         */
 
-       if (rt_task(current))
+       if (realtime_task(current))
                return 0;
 
        ktime_get_ts64(&now);
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index db1249cd9692080f495c8986826d96eaf56b7995..75859b78d540eb9ed184ad3e4705b080012cd329 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
 {
        if (task->policy == SCHED_IDLE)
                return IOPRIO_CLASS_IDLE;
-       else if (task_is_realtime(task))
+       else if (realtime_task_policy(task))
                return IOPRIO_CLASS_RT;
        else
                return IOPRIO_CLASS_BE;
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index df3aca89d4f5040d3809474578cfe12c953858eb..5cb88b748ad604a29af5e207b6c6746c0d309b19 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -10,8 +10,6 @@
 
 #include <linux/sched.h>
 
-#define MAX_DL_PRIO            0
-
 static inline int dl_prio(int prio)
 {
        if (unlikely(prio < MAX_DL_PRIO))
@@ -19,6 +17,10 @@ static inline int dl_prio(int prio)
        return 0;
 }
 
+/*
+ * Returns true if a task has a priority that belongs to DL class. PI-boosted
+ * tasks will return true. Use dl_policy() to ignore PI-boosted tasks.
+ */
 static inline int dl_task(struct task_struct *p)
 {
        return dl_prio(p->prio);
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183aa962e797e15e2d08fce6b976f84..6ab43b4f72f9ea48b162ae484706cbe919fcdf2d 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -14,6 +14,7 @@
  */
 
 #define MAX_RT_PRIO            100
+#define MAX_DL_PRIO            0
 
 #define MAX_PRIO               (MAX_RT_PRIO + NICE_WIDTH)
 #define DEFAULT_PRIO           (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index b2b9e6eb968302f59e9d9efe47d7a4b8987a515b..a055dd68a77cf04136aa3aaa7f4ef2e0d3ce8d1a 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -7,18 +7,43 @@
 struct task_struct;
 
 static inline int rt_prio(int prio)
+{
+       if (unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO))
+               return 1;
+       return 0;
+}
+
+static inline int realtime_prio(int prio)
 {
        if (unlikely(prio < MAX_RT_PRIO))
                return 1;
        return 0;
 }
 
+/*
+ * Returns true if a task has a priority that belongs to RT class. PI-boosted
+ * tasks will return true. Use rt_policy() to ignore PI-boosted tasks.
+ */
 static inline int rt_task(struct task_struct *p)
 {
        return rt_prio(p->prio);
 }
 
-static inline bool task_is_realtime(struct task_struct *tsk)
+/*
+ * Returns true if a task has a priority that belongs to RT or DL classes.
+ * PI-boosted tasks will return true. Use realtime_task_policy() to ignore
+ * PI-boosted tasks.
+ */
+static inline int realtime_task(struct task_struct *p)
+{
+       return realtime_prio(p->prio);
+}
+
+/*
+ * Returns true if a task has a policy that belongs to RT or DL classes.
+ * PI-boosted tasks will return false.
+ */
+static inline bool realtime_task_policy(struct task_struct *tsk)
 {
        int policy = tsk->policy;
 
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 88d08eeb8bc03df508146f30a8120d80654a1759..55c9dab37f337edfcd578eb9f7d0d4d041707538 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -347,7 +347,7 @@ static __always_inline int __waiter_prio(struct task_struct *task)
 {
        int prio = task->prio;
 
-       if (!rt_prio(prio))
+       if (!realtime_prio(prio))
                return DEFAULT_PRIO;
 
        return prio;
@@ -435,7 +435,7 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
         * Note that RT tasks are excluded from same priority (lateral)
         * steals to prevent the introduction of an unbounded latency.
         */
-       if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
+       if (realtime_prio(waiter->tree.prio))
                return false;
 
        return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 33cac79e3994602469ec71f2185eb1d6818e564e..516174a64fa5b98794ac02634a1a199f4e40b27a 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -631,7 +631,7 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
                         * if it is an RT task or wait in the wait queue
                         * for too long.
                         */
-                       if (has_handoff || (!rt_task(waiter->task) &&
+                       if (has_handoff || (!realtime_task(waiter->task) &&
                                            !time_after(jiffies, waiter->timeout)))
                                return false;
 
@@ -914,7 +914,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
                if (owner_state != OWNER_WRITER) {
                        if (need_resched())
                                break;
-                       if (rt_task(current) &&
+                       if (realtime_task(current) &&
                           (prev_owner_state != OWNER_WRITER))
                                break;
                }
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 3ad2cc4823e591f9a93c3ba9af6b24f93d78df06..fa4b416a1f621ed95d85a5ff140fc9d1c1ae22c3 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -237,7 +237,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
        int a_prio = a->task->prio;
        int b_prio = b->task->prio;
 
-       if (rt_prio(a_prio) || rt_prio(b_prio)) {
+       if (realtime_prio(a_prio) || realtime_prio(b_prio)) {
 
                if (a_prio > b_prio)
                        return true;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 29fde993d3f8ba247caed4512381518862182e72..673cbeb7ad488751eb046475dc581b48e0b2b149 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -166,7 +166,7 @@ static inline int __task_prio(const struct task_struct *p)
        if (p->dl_server)
                return -1; /* deadline */
 
-       if (rt_prio(p->prio)) /* includes deadline */
+       if (realtime_prio(p->prio)) /* includes deadline */
                return p->prio; /* [-1, 99] */
 
        if (p->sched_class == &idle_sched_class)
@@ -8590,7 +8590,7 @@ void normalize_rt_tasks(void)
                schedstat_set(p->stats.sleep_start, 0);
                schedstat_set(p->stats.block_start, 0);
 
-               if (!dl_task(p) && !rt_task(p)) {
+               if (!realtime_task(p)) {
                        /*
                         * Renice negative nice level userspace
                         * tasks back to 0:
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index ae1b42775ef95c5f0ffc41d0491c4903f213db69..6d60326d73e4a442c2c6792b6441a407b2d927b5 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -57,7 +57,7 @@ static int effective_prio(struct task_struct *p)
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
-       if (!rt_prio(p->prio))
+       if (!realtime_prio(p->prio))
                return p->normal_prio;
        return p->prio;
 }
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index b8ee320208d411743555947746a59e03a3fc027e..a1d1d8d886a89b3b6892e25b71ae9793dab2514d 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1975,7 +1975,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
         * expiry.
         */
        if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-               if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
+               if (realtime_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
                        mode |= HRTIMER_MODE_HARD;
        }
 
@@ -2075,7 +2075,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
        u64 slack;
 
        slack = current->timer_slack_ns;
-       if (rt_task(current))
+       if (realtime_task(current))
                slack = 0;
 
        hrtimer_init_sleeper_on_stack(&t, clockid, mode);
@@ -2280,7 +2280,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
         * Override any slack passed by the user if under
         * rt contraints.
         */
-       if (rt_task(current))
+       if (realtime_task(current))
                delta = 0;
 
        hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 130ca7e7787efb4df7e14f528d0023bd1d12e2cd..1824e17c93c7792f2b1989adbee98a8895d0b844 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -547,7 +547,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
         *  - wakeup_dl handles tasks belonging to sched_dl class only.
         */
        if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
-           (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
+           (wakeup_rt && !realtime_task(p)) ||
            (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
                return;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4430ac68e4c4144907e7c7d10ff6728f7711f6ca..78dcad72970374af1731d31ac1511a9cc166cfdc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -418,7 +418,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
        tsk = current;
-       if (rt_task(tsk)) {
+       if (realtime_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
                thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
        }
@@ -477,7 +477,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
        else
                dirty = vm_dirty_ratio * node_memory / 100;
 
-       if (rt_task(tsk))
+       if (realtime_task(tsk))
                dirty += dirty / 4;
 
        /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 28f80daf5c0418163fd111052e61b24ab2a7ba41..54274e468d5142f0f76869d445599077f5807073 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4002,7 +4002,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
                 */
                if (alloc_flags & ALLOC_MIN_RESERVE)
                        alloc_flags &= ~ALLOC_CPUSET;
-       } else if (unlikely(rt_task(current)) && in_task())
+       } else if (unlikely(realtime_task(current)) && in_task())
                alloc_flags |= ALLOC_MIN_RESERVE;
 
        alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);