git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: clean up GC reset functions
author: Alex Deucher <alexander.deucher@amd.com>
date: Fri, 11 Jul 2025 18:01:42 +0000 (14:01 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
date: Wed, 16 Jul 2025 20:10:10 +0000 (16:10 -0400)
Make the GC (graphics core) queue-reset entry points consistent across gfx v9/v10/v11/v12: gate support via the per-IP reset-capability flags (AMDGPU_RESET_TYPE_PER_QUEUE / AMDGPU_RESET_TYPE_PER_PIPE) set at sw_init time, instead of ad-hoc SR-IOV checks in each reset function, and return -EOPNOTSUPP rather than -EINVAL when a reset type is unsupported.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c

index d739bfb20383e75b665c279d5385a07299b3bf2d..506454ed27bde68ca86be0f3d0c1a7b3e9a174fa 100644 (file)
@@ -4952,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
                        }
                }
        }
-       /* TODO: Add queue reset mask when FW fully supports it */
+
        adev->gfx.gfx_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
        adev->gfx.compute_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+       if (!amdgpu_sriov_vf(adev)) {
+               adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+               adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+       }
 
        r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
        if (r) {
@@ -9534,8 +9538,8 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
        u64 addr;
        int r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.gfx_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
@@ -9607,8 +9611,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
        unsigned long flags;
        int i, r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
index 37dcec2d078415849eaf8c08b2edd5bc81b55cac..372dceceff359ac358b43073f6eb5501a2a88463 100644 (file)
@@ -1806,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
        case IP_VERSION(11, 0, 2):
        case IP_VERSION(11, 0, 3):
                if ((adev->gfx.me_fw_version >= 2280) &&
-                           (adev->gfx.mec_fw_version >= 2410)) {
-                               adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
-                               adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                   (adev->gfx.mec_fw_version >= 2410) &&
+                   !amdgpu_sriov_vf(adev)) {
+                       adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                       adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
                }
                break;
        default:
+               if (!amdgpu_sriov_vf(adev)) {
+                       adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                       adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+               }
                break;
        }
 
@@ -6818,8 +6823,8 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        int r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.gfx_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        drm_sched_wqueue_stop(&ring->sched);
 
@@ -6989,8 +6994,8 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        int r = 0;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        drm_sched_wqueue_stop(&ring->sched);
 
index e4fc42470cf3ef2c08f87db28b88080751c25441..7220ed2fa2a33c1ff13b032de2dec21bf8f9d2c2 100644 (file)
@@ -1542,10 +1542,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
        case IP_VERSION(12, 0, 0):
        case IP_VERSION(12, 0, 1):
                if ((adev->gfx.me_fw_version >= 2660) &&
-                           (adev->gfx.mec_fw_version >= 2920)) {
-                               adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
-                               adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                   (adev->gfx.mec_fw_version >= 2920) &&
+                   !amdgpu_sriov_vf(adev)) {
+                       adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                       adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
                }
+               break;
+       default:
+               break;
        }
 
        if (!adev->enable_mes_kiq) {
@@ -5314,8 +5318,8 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        int r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.gfx_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        drm_sched_wqueue_stop(&ring->sched);
 
@@ -5437,8 +5441,8 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        int r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        drm_sched_wqueue_stop(&ring->sched);
 
index 4c61157405b078928e38fef4c2b2f61e5f241263..ac058697054f9a27f00595bb538beb4c78dd9a96 100644 (file)
@@ -2410,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
                amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
        adev->gfx.compute_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+       if (!amdgpu_sriov_vf(adev))
+               adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 
        r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
        if (r) {
@@ -7181,8 +7183,8 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
        unsigned long flags;
        int i, r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
index 5f92975cc3058197d04c008cb7c31a5c9166fcc1..3c10595125e0c4185accfe926b7a4f57b837f026 100644 (file)
@@ -1148,13 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 4, 3):
        case IP_VERSION(9, 4, 4):
-               if (adev->gfx.mec_fw_version >= 155) {
+               if ((adev->gfx.mec_fw_version >= 155) &&
+                   !amdgpu_sriov_vf(adev)) {
                        adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
                        adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
                }
                break;
        case IP_VERSION(9, 5, 0):
-               if (adev->gfx.mec_fw_version >= 21) {
+               if ((adev->gfx.mec_fw_version >= 21) &&
+                   !amdgpu_sriov_vf(adev)) {
                        adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
                        adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
                }
@@ -3561,8 +3563,8 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
        unsigned long flags;
        int r;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+               return -EOPNOTSUPP;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
@@ -3594,7 +3596,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
                dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
 
 pipe_reset:
-       if(r) {
+       if (r) {
+               if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
+                       return -EOPNOTSUPP;
                r = gfx_v9_4_3_reset_hw_pipe(ring);
                dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
                                r ? "failed" : "successfully");