git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/amdgpu/gfx12.1: add support for disable_kq
author: Alex Deucher <alexander.deucher@amd.com>
Wed, 28 Jan 2026 19:50:31 +0000 (14:50 -0500)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 6 Mar 2026 21:32:21 +0000 (16:32 -0500)
Plumb in support for disabling kernel queues and make it
the default.  For testing, kernel queues can be re-enabled
by setting amdgpu.user_queue=0

v2: integrate feedback from Lijo

Acked-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c

index 3fd489ea0b79da47e74fa8c7202a68d35c6b6e3a..4e02b62cdbb334721fec367c5a55678a3296f8f2 100644 (file)
@@ -1155,11 +1155,13 @@ static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
                break;
        }
 
-       /* recalculate compute rings to use based on hardware configuration */
-       num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
-                            adev->gfx.mec.num_queue_per_pipe) / 2;
-       adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
-                                         num_compute_rings);
+       if (adev->gfx.num_compute_rings) {
+               /* recalculate compute rings to use based on hardware configuration */
+               num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+                                    adev->gfx.mec.num_queue_per_pipe) / 2;
+               adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+                                                 num_compute_rings);
+       }
 
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 
@@ -2794,6 +2796,33 @@ static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
        gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
 }
 
+static int gfx_v12_1_set_userq_eop_interrupts(struct amdgpu_device *adev,
+                                             bool enable)
+{
+       unsigned int irq_type;
+       int m, p, r;
+
+       if (adev->gfx.disable_kq) {
+               for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+                       for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+                               irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+                                       + (m * adev->gfx.mec.num_pipe_per_mec)
+                                       + p;
+                               if (enable)
+                                       r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+                                                          irq_type);
+                               else
+                                       r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+                                                          irq_type);
+                               if (r)
+                                       return r;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -2801,6 +2830,7 @@ static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
 
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+       gfx_v12_1_set_userq_eop_interrupts(adev, false);
 
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        for (i = 0; i < num_xcc; i++) {
@@ -2868,10 +2898,26 @@ static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
 
+
+       switch (amdgpu_user_queue) {
+       case -1:
+       default:
+               adev->gfx.disable_kq = true;
+               adev->gfx.disable_uq = true;
+               break;
+       case 0:
+               adev->gfx.disable_kq = false;
+               adev->gfx.disable_uq = true;
+               break;
+       }
+
        adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
 
-       adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
-                                         AMDGPU_MAX_COMPUTE_RINGS);
+       if (adev->gfx.disable_kq)
+               adev->gfx.num_compute_rings = 0;
+       else
+               adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+                                                 AMDGPU_MAX_COMPUTE_RINGS);
 
        gfx_v12_1_set_kiq_pm4_funcs(adev);
        gfx_v12_1_set_ring_funcs(adev);
@@ -2898,6 +2944,10 @@ static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
        if (r)
                return r;
 
+       r = gfx_v12_1_set_userq_eop_interrupts(adev, true);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -3716,21 +3766,23 @@ static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
        if (xcc_id == -EINVAL)
                return;
 
-       switch (me_id) {
-       case 1:
-       case 2:
-               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-                       ring = &adev->gfx.compute_ring
+       if (!adev->gfx.disable_kq) {
+               switch (me_id) {
+               case 1:
+               case 2:
+                       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                               ring = &adev->gfx.compute_ring
                                        [i +
                                         xcc_id * adev->gfx.num_compute_rings];
-                       if (ring->me == me_id && ring->pipe == pipe_id &&
-                           ring->queue == queue_id)
-                               drm_sched_fault(&ring->sched);
+                               if (ring->me == me_id && ring->pipe == pipe_id &&
+                                   ring->queue == queue_id)
+                                       drm_sched_fault(&ring->sched);
+                       }
+                       break;
+               default:
+                       dev_dbg(adev->dev, "Unexpected me %d in priv_fault\n", me_id);
+                       break;
                }
-               break;
-       default:
-               dev_dbg(adev->dev, "Unexpected me %d in priv_fault\n", me_id);
-               break;
        }
 }