]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/msm/a6xx: Switch to preemption safe AO counter
authorAkhil P Oommen <akhilpo@oss.qualcomm.com>
Fri, 27 Mar 2026 00:13:52 +0000 (05:43 +0530)
committerRob Clark <robin.clark@oss.qualcomm.com>
Tue, 31 Mar 2026 20:47:29 +0000 (13:47 -0700)
CP_ALWAYS_ON_COUNTER is not saved and restored during preemption, so it
won't provide accurate data about the 'submit' when preemption is
enabled. Switch to CP_ALWAYS_ON_CONTEXT, which is preemption-safe.

Fixes: e7ae83da4a28 ("drm/msm/a6xx: Implement preemption for a7xx targets")
Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/714657/
Message-ID: <20260327-a8xx-gpu-batch2-v2-3-2b53c38d2101@oss.qualcomm.com>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
drivers/gpu/drm/msm/adreno/a6xx_gpu.c

index 8013d6700c88ed5c846f18b42babf72456232bf2..29cbebbb46cb7325caa4fc437e5e31b05de6fb4a 100644 (file)
@@ -347,7 +347,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         * GPU registers so we need to add 0x1a800 to the register value on A630
         * to get the right value from PM4.
         */
-       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_CONTEXT,
                rbmemptr_stats(ring, index, alwayson_start));
 
        /* Invalidate CCU depth and color */
@@ -388,7 +388,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
                rbmemptr_stats(ring, index, cpcycles_end));
-       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_CONTEXT,
                rbmemptr_stats(ring, index, alwayson_end));
 
        /* Write the fence to the scratch register */
@@ -457,7 +457,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = submit->ring;
-       u32 rbbm_perfctr_cp0, cp_always_on_counter;
+       u32 rbbm_perfctr_cp0, cp_always_on_context;
        unsigned int i, ibs = 0;
 
        adreno_check_and_reenable_stall(adreno_gpu);
@@ -480,14 +480,14 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        if (adreno_is_a8xx(adreno_gpu)) {
                rbbm_perfctr_cp0 = REG_A8XX_RBBM_PERFCTR_CP(0);
-               cp_always_on_counter = REG_A8XX_CP_ALWAYS_ON_COUNTER;
+               cp_always_on_context = REG_A8XX_CP_ALWAYS_ON_CONTEXT;
        } else {
                rbbm_perfctr_cp0 = REG_A7XX_RBBM_PERFCTR_CP(0);
-               cp_always_on_counter = REG_A6XX_CP_ALWAYS_ON_COUNTER;
+               cp_always_on_context = REG_A6XX_CP_ALWAYS_ON_CONTEXT;
        }
 
        get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_start));
-       get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_start));
+       get_stats_counter(ring, cp_always_on_context, rbmemptr_stats(ring, index, alwayson_start));
 
        OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
        OUT_RING(ring, CP_SET_THREAD_BOTH);
@@ -535,7 +535,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        }
 
        get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_end));
-       get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_end));
+       get_stats_counter(ring, cp_always_on_context, rbmemptr_stats(ring, index, alwayson_end));
 
        /* Write the fence to the scratch register */
        if (adreno_is_a8xx(adreno_gpu)) {