git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched: idle: Respect the CPU system wakeup QoS limit for s2idle
author: Ulf Hansson <ulf.hansson@linaro.org>
Tue, 25 Nov 2025 11:26:45 +0000 (12:26 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 25 Nov 2025 18:01:29 +0000 (19:01 +0100)
A CPU system wakeup QoS limit may have been requested by user space. To
avoid breaking this constraint when entering a low power state during
s2idle, let's start to take into account the QoS limit.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dhruva Gole <d-gole@ti.com>
Reviewed-by: Kevin Hilman (TI) <khilman@baylibre.com>
Tested-by: Kevin Hilman (TI) <khilman@baylibre.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/20251125112650.329269-5-ulf.hansson@linaro.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
drivers/cpuidle/cpuidle.c
include/linux/cpuidle.h
kernel/sched/idle.c

index 56132e843c99192d4e48ea3d1378326aebcf95cb..c7876e9e024f9076663063ad21cfc69343fdbbe7 100644 (file)
@@ -184,20 +184,22 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
  * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
  * @drv: cpuidle driver for the given CPU.
  * @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
  *
  * If there are states with the ->enter_s2idle callback, find the deepest of
  * them and enter it with frozen tick.
  */
-int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                        u64 latency_limit_ns)
 {
        int index;
 
        /*
-        * Find the deepest state with ->enter_s2idle present, which guarantees
-        * that interrupts won't be enabled when it exits and allows the tick to
-        * be frozen safely.
+        * Find the deepest state with ->enter_s2idle present that meets the
+        * specified latency limit, which guarantees that interrupts won't be
+        * enabled when it exits and allows the tick to be frozen safely.
         */
-       index = find_deepest_state(drv, dev, U64_MAX, 0, true);
+       index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
        if (index > 0) {
                enter_s2idle_proper(drv, dev, index);
                local_irq_enable();
index a9ee4fe55dcfcca8200cc69b9651f91ca1b6b696..4073690504a732195bb50a5e78083e32e7255e7b 100644 (file)
@@ -248,7 +248,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                      struct cpuidle_device *dev,
                                      u64 latency_limit_ns);
 extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
-                               struct cpuidle_device *dev);
+                               struct cpuidle_device *dev,
+                               u64 latency_limit_ns);
 extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
@@ -256,7 +257,8 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                             u64 latency_limit_ns)
 {return -ENODEV; }
 static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
-                                      struct cpuidle_device *dev)
+                                      struct cpuidle_device *dev,
+                                      u64 latency_limit_ns)
 {return -ENODEV; }
 static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
index c39b089d4f09b6de75aa21a338296c5184fbd41f..c1c3d01666106846eac3532d9415ed0365d5f7b8 100644 (file)
@@ -131,12 +131,13 @@ void __cpuidle default_idle_call(void)
 }
 
 static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
-                              struct cpuidle_device *dev)
+                              struct cpuidle_device *dev,
+                              u64 max_latency_ns)
 {
        if (current_clr_polling_and_test())
                return -EBUSY;
 
-       return cpuidle_enter_s2idle(drv, dev);
+       return cpuidle_enter_s2idle(drv, dev, max_latency_ns);
 }
 
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
@@ -205,12 +206,13 @@ static void cpuidle_idle_call(void)
                u64 max_latency_ns;
 
                if (idle_should_enter_s2idle()) {
+                       max_latency_ns = cpu_wakeup_latency_qos_limit() *
+                                        NSEC_PER_USEC;
 
-                       entered_state = call_cpuidle_s2idle(drv, dev);
+                       entered_state = call_cpuidle_s2idle(drv, dev,
+                                                           max_latency_ns);
                        if (entered_state > 0)
                                goto exit_idle;
-
-                       max_latency_ns = U64_MAX;
                } else {
                        max_latency_ns = dev->forced_idle_latency_limit_ns;
                }