git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
PM: sleep: Adjust check before setting power.must_resume
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 27 Feb 2025 10:53:50 +0000 (11:53 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Apr 2025 12:30:55 +0000 (14:30 +0200)
[ Upstream commit eeb87d17aceab7803a5a5bcb6cf2817b745157cf ]

The check before setting power.must_resume in device_suspend_noirq()
does not take power.child_count into account, but it should do that, so
use pm_runtime_need_not_resume() in it for this purpose and adjust the
comment next to it accordingly.

Fixes: 107d47b2b95e ("PM: sleep: core: Simplify the SMART_SUSPEND flag handling")
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/3353728.44csPzL39Z@rjwysocki.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/base/power/main.c
drivers/base/power/runtime.c
include/linux/pm_runtime.h

index fbc57c4fcdd0107f510e88c8803dba081916a7d9..34f1969dab73b62aa6fb638d39a896f0d44db7b7 100644 (file)
@@ -1242,14 +1242,13 @@ Skip:
        dev->power.is_noirq_suspended = true;
 
        /*
-        * Skipping the resume of devices that were in use right before the
-        * system suspend (as indicated by their PM-runtime usage counters)
-        * would be suboptimal.  Also resume them if doing that is not allowed
-        * to be skipped.
+        * Devices must be resumed unless they are explicitly allowed to be left
+        * in suspend, but even in that case skipping the resume of devices that
+        * were in use right before the system suspend (as indicated by their
+        * runtime PM usage counters and child counters) would be suboptimal.
         */
-       if (atomic_read(&dev->power.usage_count) > 1 ||
-           !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
-             dev->power.may_skip_resume))
+       if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+             dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
                dev->power.must_resume = true;
 
        if (dev->power.must_resume)
index f5c9e6629f0c7fc50c06f17a9108f2b122cbb1c4..4950864d3ea5064139abf1d747e0d8d6b91eb4f1 100644 (file)
@@ -1811,7 +1811,7 @@ void pm_runtime_drop_link(struct device_link *link)
        pm_request_idle(link->supplier);
 }
 
-static bool pm_runtime_need_not_resume(struct device *dev)
+bool pm_runtime_need_not_resume(struct device *dev)
 {
        return atomic_read(&dev->power.usage_count) <= 1 &&
                (atomic_read(&dev->power.child_count) == 0 ||
index ca856e5829145d3bb8c1b411bd62c81836fd3740..96e3256738e4893b34480ad69425c38e84248d88 100644 (file)
@@ -32,6 +32,7 @@ static inline bool queue_pm_work(struct work_struct *work)
 
 extern int pm_generic_runtime_suspend(struct device *dev);
 extern int pm_generic_runtime_resume(struct device *dev);
+extern bool pm_runtime_need_not_resume(struct device *dev);
 extern int pm_runtime_force_suspend(struct device *dev);
 extern int pm_runtime_force_resume(struct device *dev);
 
@@ -220,6 +221,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
 
 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; }
 static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
 static inline int pm_runtime_force_resume(struct device *dev) { return 0; }