git.ipfire.org Git - thirdparty/linux.git/commitdiff
PM: sleep: Make async resume handle consumers like children
author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
	Mon, 23 Jun 2025 12:54:39 +0000 (14:54 +0200)
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
	Thu, 3 Jul 2025 14:53:33 +0000 (16:53 +0200)
Avoid starting "async" resume processing upfront for devices that have
suppliers, and start "async" resume processing for a device's consumers
right after resuming the device itself.

Suggested-by: Saravana Kannan <saravanak@google.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://patch.msgid.link/3378088.aeNJFYEL58@rjwysocki.net
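
For context on the "consumers" mentioned above: these are devices tied to a
supplier through device links created with device_link_add(). The sketch below
is illustrative only and not part of this commit; the helper name
example_bind_consumer() and the devices "con" and "sup" are hypothetical, and
the flag choice is just one plausible combination.

#include <linux/device.h>
#include <linux/errno.h>

static int example_bind_consumer(struct device *con, struct device *sup)
{
	struct device_link *link;

	/*
	 * Hypothetical example: make "con" a consumer of "sup".
	 * DL_FLAG_PM_RUNTIME additionally ties runtime PM of the two
	 * devices together, and DL_FLAG_AUTOREMOVE_CONSUMER drops the
	 * link automatically when the consumer driver unbinds.
	 */
	link = device_link_add(con, sup,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	/*
	 * With this commit, the consumer's "async" system resume is
	 * started right after "sup" has resumed, instead of being
	 * scheduled upfront only to wait for the supplier.
	 */
	return 0;
}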
drivers/base/power/main.c

index e6fc52b852956b427e27ae6e640be20210d87c60..d7ef5048a4524a0064322199b3a923c4faaf6815 100644 (file)
@@ -647,14 +647,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func)
        /*
         * Start processing "async" children of the device unless it's been
         * started already for them.
-        *
-        * This could have been done for the device's "async" consumers too, but
-        * they either need to wait for their parents or the processing has
-        * already started for them after their parents were processed.
         */
        device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+       struct device_link *link;
+       int idx;
+
+       dpm_async_resume_children(dev, func);
+
+       idx = device_links_read_lock();
+
+       /* Start processing the device's "async" consumers. */
+       list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+               if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+                       dpm_async_with_cleanup(link->consumer, func);
+
+       device_links_read_unlock(idx);
+}
+
 static void dpm_clear_async_state(struct device *dev)
 {
        reinit_completion(&dev->power.completion);
@@ -663,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev)
 
 static bool dpm_root_device(struct device *dev)
 {
-       return !dev->parent;
+       lockdep_assert_held(&dpm_list_mtx);
+
+       /*
+        * Since this function is required to run under dpm_list_mtx, the
+        * list_empty() below will only return true if the device's list of
+        * consumers is actually empty before calling it.
+        */
+       return !dev->parent && list_empty(&dev->links.suppliers);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -752,7 +772,7 @@ Out:
                pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
        }
 
-       dpm_async_resume_children(dev, async_resume_noirq);
+       dpm_async_resume_subordinate(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -895,7 +915,7 @@ Out:
                pm_dev_err(dev, state, async ? " async early" : " early", error);
        }
 
-       dpm_async_resume_children(dev, async_resume_early);
+       dpm_async_resume_subordinate(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -1071,7 +1091,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
                pm_dev_err(dev, state, async ? " async" : "", error);
        }
 
-       dpm_async_resume_children(dev, async_resume);
+       dpm_async_resume_subordinate(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
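
Not visible in this diff is the caller side in the same file: the dpm_resume*()
loops only schedule "async" handling upfront for devices that dpm_root_device()
reports as roots, i.e. devices with neither a parent nor suppliers; every other
device is now kicked off by dpm_async_resume_subordinate() from the resume path
of its parent or supplier. A simplified sketch of such a seeding loop
(paraphrased, not taken from this commit) could look like this:

	/*
	 * Simplified sketch of the seeding loop (not part of this diff).
	 * It runs under dpm_list_mtx, which is what the lockdep assertion
	 * added to dpm_root_device() above relies on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_noirq);
	}

Devices with suppliers are skipped here on purpose: starting them upfront would
only have them block in dpm_wait_for_superior() until their suppliers finish
resuming, so they are better started by dpm_async_resume_subordinate() once
that has happened.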