git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: replace use of system_unbound_wq with system_dfl_wq
author: Marco Crivellari <marco.crivellari@suse.com>
Wed, 24 Dec 2025 14:47:05 +0000 (15:47 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 3 Apr 2026 17:52:12 +0000 (13:52 -0400)
This patch continues the effort to refactor workqueue APIs, which has begun
with the changes introducing new workqueues and a new alloc_workqueue flag:

   commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
   commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

The point of the refactoring is to eventually alter the default behavior of
workqueues to become unbound by default so that their workload placement is
optimized by the scheduler.

Before that can happen, each individual case must be carefully reviewed and
converted, so workqueue users are moved to the better-named new workqueues
with no intended behaviour changes:

   system_wq -> system_percpu_wq
   system_unbound_wq -> system_dfl_wq

This way the old obsolete workqueues (system_wq, system_unbound_wq) can be
removed in the future.

Suggested-by: Tejun Heo <tj@kernel.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c

index 938fb0b2368dfdea66af18b7b140563f891055ee..8686c6dc2c08f1c726a59fcf0bb7a0253b0f1535 100644 (file)
@@ -179,7 +179,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
        list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                /* For XGMI run all resets in parallel to speed up the process */
                if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-                       if (!queue_work(system_unbound_wq,
+                       if (!queue_work(system_dfl_wq,
                                        &tmp_adev->reset_cntl->reset_work))
                                r = -EALREADY;
                } else
index 9c936519bb2bf200c9e5f6ac7ef72ab59d8399b5..bf271a97d1e827e9cc40efd763831743618eea95 100644 (file)
@@ -5339,7 +5339,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                        /* For XGMI run all resets in parallel to speed up the process */
                        if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-                               if (!queue_work(system_unbound_wq,
+                               if (!queue_work(system_dfl_wq,
                                                &tmp_adev->xgmi_reset_work))
                                        r = -EALREADY;
                        } else
index 7a2fcb7ded1d55bb4b3f330935bf5af08c8790ff..1b982b803e6f3c8b98360093a179cf92d4c6eca6 100644 (file)
@@ -116,7 +116,7 @@ static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
        /* Mode1 reset needs to be triggered on all devices together */
        list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
                /* For XGMI run all resets in parallel to speed up the process */
-               if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
+               if (!queue_work(system_dfl_wq, &tmp_adev->xgmi_reset_work))
                        r = -EALREADY;
                if (r) {
                        dev_err(tmp_adev->dev,