git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched/core: Dequeue PSI signals for blocked tasks that are delayed
author    Peter Zijlstra <peterz@infradead.org>
          Thu, 10 Oct 2024 08:28:36 +0000 (08:28 +0000)
committer Peter Zijlstra <peterz@infradead.org>
          Fri, 11 Oct 2024 08:49:33 +0000 (10:49 +0200)
psi_dequeue() for a blocked task expects psi_sched_switch() to clear
the TSK_.*RUNNING PSI flags and set the TSK_IOWAIT flag. However,
psi_sched_switch() uses "!task_on_rq_queued(prev)" to detect whether
the task is blocked or still runnable, which no longer holds with
DELAY_DEQUEUE, since a blocking task can be left queued on the
runqueue.

This can lead to PSI splats similar to:

    psi: inconsistent task state! task=... cpu=... psi_flags=4 clear=0 set=4

when the task is requeued, since the TSK_RUNNING flag was not cleared
when the task blocked.
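
For reference, the numbers decode with the TSK_* bits from
include/linux/sched.h: psi_flags=4 and set=4 are both TSK_RUNNING
(1 << 2), i.e. the requeue tries to set a flag that was never
cleared. A minimal stand-alone model of that situation is below; the
real consistency check lives in psi_flags_change() and the one here
only approximates it:

    /*
     * Stand-alone model, not kernel code; only the TSK_* bit values
     * and the message format follow the kernel sources.
     * Build with e.g. "cc -o psi-model psi-model.c".
     */
    #include <stdio.h>

    #define TSK_IOWAIT   (1 << 0)
    #define TSK_MEMSTALL (1 << 1)
    #define TSK_RUNNING  (1 << 2)

    int main(void)
    {
            /* TSK_RUNNING was never cleared when the task blocked ... */
            unsigned int psi_flags = TSK_RUNNING;
            /* ... and the requeue tries to set it again. */
            unsigned int clear = 0, set = TSK_RUNNING;

            if ((psi_flags & set) || (psi_flags & clear) != clear)
                    printf("psi: inconsistent task state! psi_flags=%x clear=%x set=%x\n",
                           psi_flags, clear, set);
            return 0;
    }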

Explicitly communicate to psi_sched_switch() that the task has
blocked, even if its dequeue was delayed and it is still on the
runqueue.

  [ prateek: Broke off the relevant part from [1], commit message ]

Fixes: 152e11f6df29 ("sched/fair: Implement delayed dequeue")
Closes: https://lore.kernel.org/lkml/20240830123458.3557-1-spasswolf@web.de/
Closes: https://lore.kernel.org/all/cd67fbcd-d659-4822-bb90-7e8fbb40a856@molgen.mpg.de/
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Not-yet-signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lore.kernel.org/lkml/20241004123506.GR18071@noisy.programming.kicks-ass.net/
kernel/sched/core.c

index a860996622a63029b0c7948e3740f27d9c673b6f..9e09140ccb34710fa5bfa93d0ed79a8f16dd08ea 100644 (file)
@@ -6537,6 +6537,7 @@ static void __sched notrace __schedule(int sched_mode)
         * as a preemption by schedule_debug() and RCU.
         */
        bool preempt = sched_mode > SM_NONE;
+       bool block = false;
        unsigned long *switch_count;
        unsigned long prev_state;
        struct rq_flags rf;
@@ -6622,6 +6623,7 @@ static void __sched notrace __schedule(int sched_mode)
                         * After this, schedule() must not care about p->state any more.
                         */
                        block_task(rq, prev, flags);
+                       block = true;
                }
                switch_count = &prev->nvcsw;
        }
@@ -6667,7 +6669,7 @@ picked:
 
                migrate_disable_switch(rq, prev);
                psi_account_irqtime(rq, prev, next);
-               psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+               psi_sched_switch(prev, next, block);
 
                trace_sched_switch(preempt, prev, next, prev_state);