PM: core: Add two macros for walking device links
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Tue, 2 Sep 2025 13:45:14 +0000 (15:45 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Sat, 6 Sep 2025 15:16:32 +0000 (17:16 +0200)
Add separate macros for walking the links to suppliers and consumers of
a device, so that users of device links do not need to expose the
internals of struct dev_links_info in their code and are less likely to
make coding mistakes related to that.

Accordingly, use the new macros to replace open-coded device links list
walks in the core power management code (a brief usage sketch follows
the tags below).

No intentional functional impact.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Link: https://patch.msgid.link/1944671.tdWV9SEqCh@rafael.j.wysocki
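
For illustration, here is a minimal sketch (not part of the patch) of how a
caller inside drivers/base could use the new macro. The
device_links_read_lock()/device_links_read_unlock() bracketing and the
DL_STATE_DORMANT check mirror the hunks below; the helper
my_handle_supplier() is a hypothetical placeholder.

static void my_walk_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	/* Enter the SRCU read-side section that guards the device links lists. */
	idx = device_links_read_lock();

	/* Walk the links to suppliers without open-coding dev->links.suppliers. */
	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			my_handle_supplier(link->supplier); /* hypothetical helper */

	device_links_read_unlock(idx);
}

Because the macros are added to drivers/base/base.h, they are visible only to
driver core internals such as the power management code changed below.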
drivers/base/base.h
drivers/base/power/main.c
drivers/base/power/runtime.c

diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d916d230c054a553706c34773fd317..700aecd22fd34a9eed1ab14f7265c29676ef3e64 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
 void fw_devlink_drivers_done(void);
 void fw_devlink_probing_done(void);
 
+#define dev_for_each_link_to_supplier(__link, __dev)   \
+       list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+                                device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev)   \
+       list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+                                device_links_read_lock_held())
+
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2fa53896db82e2833defc328c58512aae91a16f7..e3a7f25bf8d39b947e08b0a5febb28a4afca28d9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
 
 typedef int (*pm_callback_t)(struct device *);
 
-#define list_for_each_entry_srcu_locked(pos, head, member) \
-       list_for_each_entry_srcu(pos, head, member, \
-                       device_links_read_lock_held())
-
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
         * callbacks freeing the link objects for the links in the list we're
         * walking.
         */
-       list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
+       dev_for_each_link_to_supplier(link, dev)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->supplier, async);
 
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
         * continue instead of trying to continue in parallel with its
         * unregistration).
         */
-       list_for_each_entry_srcu_locked(link, &dev->links.consumers, s_node)
+       dev_for_each_link_to_consumer(link, dev)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->consumer, async);
 
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
        idx = device_links_read_lock();
 
        /* Start processing the device's "async" consumers. */
-       list_for_each_entry_srcu_locked(link, &dev->links.consumers, s_node)
+       dev_for_each_link_to_consumer(link, dev)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_async_with_cleanup(link->consumer, func);
 
@@ -1330,7 +1326,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
        idx = device_links_read_lock();
 
        /* Start processing the device's "async" suppliers. */
-       list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
+       dev_for_each_link_to_supplier(link, dev)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_async_with_cleanup(link->supplier, func);
 
@@ -1384,7 +1380,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 
        idx = device_links_read_lock();
 
-       list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
+       dev_for_each_link_to_supplier(link, dev)
                link->supplier->power.must_resume = true;
 
        device_links_read_unlock(idx);
@@ -1813,7 +1809,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 
        idx = device_links_read_lock();
 
-       list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node) {
+       dev_for_each_link_to_supplier(link, dev) {
                spin_lock_irq(&link->supplier->power.lock);
                link->supplier->power.direct_complete = false;
                spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2061,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
 
        idx = device_links_read_lock();
 
-       list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node) {
+       dev_for_each_link_to_supplier(link, dev) {
                if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
                        continue;
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c23a11e80176432b698ae7b71d40d52c27af911..7420b9851fe0fda26dc7d973f87d28dd3fcce3eb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
 
        idx = device_links_read_lock();
 
-       list_for_each_entry_srcu(link, &dev->links.suppliers, c_node,
-                                device_links_read_lock_held())
+       dev_for_each_link_to_supplier(link, dev)
                if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
                        link->supplier_preactivated = true;
                        pm_runtime_get_sync(link->supplier);