git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched: Simplify yield_to()
author: Peter Zijlstra <peterz@infradead.org>
Fri, 9 Jun 2023 14:58:23 +0000 (16:58 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 13 Sep 2023 13:01:28 +0000 (15:01 +0200)
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index 1d5cbb3050570263be558bd0f57027a035fb63a1..6c8c40a545608cbd6b98b023f7fbffd104baa21b 100644 (file)
@@ -8888,55 +8888,46 @@ int __sched yield_to(struct task_struct *p, bool preempt)
 {
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
-       unsigned long flags;
        int yielded = 0;
 
-       local_irq_save(flags);
-       rq = this_rq();
+       scoped_guard (irqsave) {
+               rq = this_rq();
 
 again:
-       p_rq = task_rq(p);
-       /*
-        * If we're the only runnable task on the rq and target rq also
-        * has only one task, there's absolutely no point in yielding.
-        */
-       if (rq->nr_running == 1 && p_rq->nr_running == 1) {
-               yielded = -ESRCH;
-               goto out_irq;
-       }
+               p_rq = task_rq(p);
+               /*
+                * If we're the only runnable task on the rq and target rq also
+                * has only one task, there's absolutely no point in yielding.
+                */
+               if (rq->nr_running == 1 && p_rq->nr_running == 1)
+                       return -ESRCH;
 
-       double_rq_lock(rq, p_rq);
-       if (task_rq(p) != p_rq) {
-               double_rq_unlock(rq, p_rq);
-               goto again;
-       }
+               guard(double_rq_lock)(rq, p_rq);
+               if (task_rq(p) != p_rq)
+                       goto again;
 
-       if (!curr->sched_class->yield_to_task)
-               goto out_unlock;
+               if (!curr->sched_class->yield_to_task)
+                       return 0;
 
-       if (curr->sched_class != p->sched_class)
-               goto out_unlock;
+               if (curr->sched_class != p->sched_class)
+                       return 0;
 
-       if (task_on_cpu(p_rq, p) || !task_is_running(p))
-               goto out_unlock;
+               if (task_on_cpu(p_rq, p) || !task_is_running(p))
+                       return 0;
 
-       yielded = curr->sched_class->yield_to_task(rq, p);
-       if (yielded) {
-               schedstat_inc(rq->yld_count);
-               /*
-                * Make p's CPU reschedule; pick_next_entity takes care of
-                * fairness.
-                */
-               if (preempt && rq != p_rq)
-                       resched_curr(p_rq);
+               yielded = curr->sched_class->yield_to_task(rq, p);
+               if (yielded) {
+                       schedstat_inc(rq->yld_count);
+                       /*
+                        * Make p's CPU reschedule; pick_next_entity
+                        * takes care of fairness.
+                        */
+                       if (preempt && rq != p_rq)
+                               resched_curr(p_rq);
+               }
        }
 
-out_unlock:
-       double_rq_unlock(rq, p_rq);
-out_irq:
-       local_irq_restore(flags);
-
-       if (yielded > 0)
+       if (yielded)
                schedule();
 
        return yielded;