PM: sleep: Adjust check before setting power.must_resume
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Thu, 27 Feb 2025 10:53:50 +0000 (11:53 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 10 Apr 2025 12:39:11 +0000 (14:39 +0200)
[ Upstream commit eeb87d17aceab7803a5a5bcb6cf2817b745157cf ]

The check before setting power.must_resume in device_suspend_noirq()
does not take power.child_count into account, but it should do that, so
use pm_runtime_need_not_resume() in it for this purpose and adjust the
comment next to it accordingly.

Fixes: 107d47b2b95e ("PM: sleep: core: Simplify the SMART_SUSPEND flag handling")
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/3353728.44csPzL39Z@rjwysocki.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/base/power/main.c
drivers/base/power/runtime.c
include/linux/pm_runtime.h
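
For illustration only (not part of the commit): a minimal standalone C sketch of the logic the adjusted check relies on. The struct and function names below are made up for the example; only the boolean conditions mirror pm_runtime_need_not_resume() and the new test in device_suspend_noirq() shown in the diff that follows.

#include <stdbool.h>

/* Illustrative stand-ins for the relevant fields of struct dev_pm_info. */
struct pm_model {
	int usage_count;        /* runtime PM usage counter */
	int child_count;        /* number of active children */
	bool ignore_children;   /* pm_suspend_ignore_children() was set */
	bool may_skip_resume;   /* DPM_FLAG_MAY_SKIP_RESUME set and honored */
};

/*
 * Mirrors pm_runtime_need_not_resume(): the device has no extra users and
 * either has no active children or is allowed to ignore them.
 */
bool need_not_resume(const struct pm_model *p)
{
	return p->usage_count <= 1 &&
	       (p->child_count == 0 || p->ignore_children);
}

/*
 * Mirrors the adjusted check in device_suspend_noirq(): power.must_resume
 * gets set unless skipping the resume is both allowed and safe.
 */
bool must_resume(const struct pm_model *p)
{
	return !(p->may_skip_resume && need_not_resume(p));
}

Before this fix, the check only looked at the usage counter, so a device with active children (but a usage count of one or less) that was allowed to skip resume could be left suspended; folding in pm_runtime_need_not_resume() closes that gap.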

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e164cfcaf216004df3786de3597369..d4875c3712ede84f90bb1e1e1ceb9b4bf3683afa 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1254,14 +1254,13 @@ Skip:
        dev->power.is_noirq_suspended = true;
 
        /*
-        * Skipping the resume of devices that were in use right before the
-        * system suspend (as indicated by their PM-runtime usage counters)
-        * would be suboptimal.  Also resume them if doing that is not allowed
-        * to be skipped.
+        * Devices must be resumed unless they are explicitly allowed to be left
+        * in suspend, but even in that case skipping the resume of devices that
+        * were in use right before the system suspend (as indicated by their
+        * runtime PM usage counters and child counters) would be suboptimal.
         */
-       if (atomic_read(&dev->power.usage_count) > 1 ||
-           !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
-             dev->power.may_skip_resume))
+       if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+             dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
                dev->power.must_resume = true;
 
        if (dev->power.must_resume)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486bc73225b3e971164466647b3ce6d3..04113adb092b523ac0663303fd3cac43cbb07140 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1874,7 +1874,7 @@ void pm_runtime_drop_link(struct device_link *link)
        pm_request_idle(link->supplier);
 }
 
-static bool pm_runtime_need_not_resume(struct device *dev)
+bool pm_runtime_need_not_resume(struct device *dev)
 {
        return atomic_read(&dev->power.usage_count) <= 1 &&
                (atomic_read(&dev->power.child_count) == 0 ||
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d39dc863f612fe18dc34182117f87908d63c8e6d..d0b29cd1fd204e62a3a85cfeb6215e9b5e92c11e 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work)
 
 extern int pm_generic_runtime_suspend(struct device *dev);
 extern int pm_generic_runtime_resume(struct device *dev);
+extern bool pm_runtime_need_not_resume(struct device *dev);
 extern int pm_runtime_force_suspend(struct device *dev);
 extern int pm_runtime_force_resume(struct device *dev);
 
@@ -241,6 +242,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
 
 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
 static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
 static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
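
For context, a hedged driver-side sketch (not from this commit; the function name is hypothetical) of how a device opts into the path the adjusted check guards. dev_pm_set_driver_flags(), DPM_FLAG_SMART_SUSPEND and DPM_FLAG_MAY_SKIP_RESUME are existing kernel interfaces; with them set, whether the noirq resume is actually skipped now also depends on the usage and child counters consulted by pm_runtime_need_not_resume().

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/*
 * Hypothetical probe step (would be wired into a bus-specific probe): the
 * driver declares that its runtime-suspend state is adequate for system
 * suspend and that its noirq resume may be skipped.  device_suspend_noirq()
 * then applies the adjusted check above to decide whether power.must_resume
 * is set for this device.
 */
static int example_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
	pm_runtime_enable(dev);
	return 0;
}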