PM: s2idle: Drop redundant locks when entering s2idle
author     Ulf Hansson <ulf.hansson@linaro.org>
           Tue, 11 Mar 2025 16:08:22 +0000 (17:08 +0100)
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>
           Wed, 12 Mar 2025 20:14:53 +0000 (21:14 +0100)
The calls to cpus_read_lock|unlock() protect us from CPUs being hotplugged
while entering suspend-to-idle. However, by the time s2idle_enter() is
called we should be far beyond the point where CPUs may be hotplugged.
Let's therefore simplify the code and drop the use of the lock.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/20250311160827.1129643-2-ulf.hansson@linaro.org
[ rjw: Rewrote the new comment ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
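
For context on what the dropped lock provides, below is a minimal, hypothetical
sketch (the helper example_walk_online_cpus() is invented for illustration and
is not part of the patch) of the usual cpus_read_lock()/cpus_read_unlock()
pattern guarding code whose correctness depends on a stable set of online CPUs:

/*
 * Illustrative sketch only (not from the patch): holding the hotplug read
 * lock prevents CPUs from going online or offline across the loop. This is
 * the protection that s2idle_enter() no longer needs by the time it runs.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void example_walk_online_cpus(void)
{
	unsigned int cpu;

	cpus_read_lock();		/* block CPU hotplug */
	for_each_online_cpu(cpu)	/* the set of online CPUs is now stable */
		pr_info("CPU%u is online\n", cpu);
	cpus_read_unlock();		/* allow CPU hotplug again */
}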
kernel/power/suspend.c

index 09f8397bae15fb9c895d060d7708c9bca5ef62f7..1876abf1be15b4c509ea550226492389274ab81f 100644
@@ -91,6 +91,12 @@ static void s2idle_enter(void)
 {
        trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
 
+       /*
+        * The correctness of the code below depends on the number of online
+        * CPUs being stable, but CPUs cannot be taken offline or put online
+        * while it is running.
+        */
+
        raw_spin_lock_irq(&s2idle_lock);
        if (pm_wakeup_pending())
                goto out;
@@ -98,8 +104,6 @@ static void s2idle_enter(void)
        s2idle_state = S2IDLE_STATE_ENTER;
        raw_spin_unlock_irq(&s2idle_lock);
 
-       cpus_read_lock();
-
        /* Push all the CPUs into the idle loop. */
        wake_up_all_idle_cpus();
        /* Make the current CPU wait so it can enter the idle loop too. */
@@ -112,8 +116,6 @@ static void s2idle_enter(void)
         */
        wake_up_all_idle_cpus();
 
-       cpus_read_unlock();
-
        raw_spin_lock_irq(&s2idle_lock);
 
  out:
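
For readability, here is roughly how s2idle_enter() reads once the two hunks
above are applied; lines that the diff does not show are elided rather than
reconstructed:

static void s2idle_enter(void)
{
	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);

	/*
	 * The correctness of the code below depends on the number of online
	 * CPUs being stable, but CPUs cannot be taken offline or put online
	 * while it is running.
	 */

	raw_spin_lock_irq(&s2idle_lock);
	if (pm_wakeup_pending())
		goto out;

	s2idle_state = S2IDLE_STATE_ENTER;
	raw_spin_unlock_irq(&s2idle_lock);

	/* Push all the CPUs into the idle loop. */
	wake_up_all_idle_cpus();

	/* ... context not shown in the hunks above ... */

	wake_up_all_idle_cpus();

	raw_spin_lock_irq(&s2idle_lock);

 out:
	/* ... remainder of the function is unchanged and not shown ... */
}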