drm/msm: Expose uche trap base via uapi
author    Danylo Piliaiev <danylo.piliaiev@gmail.com>
          Tue, 3 Dec 2024 09:59:20 +0000 (10:59 +0100)
committer Rob Clark <robdclark@chromium.org>
          Fri, 3 Jan 2025 15:20:27 +0000 (07:20 -0800)
This adds MSM_PARAM_UCHE_TRAP_BASE, which will be used by the Mesa
implementations of VK_KHR_shader_clock and GL_ARB_shader_clock.

Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Patchwork: https://patchwork.freedesktop.org/patch/627036/
Signed-off-by: Rob Clark <robdclark@chromium.org>
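
[Editor's note: for context, a userspace consumer such as Mesa would read
the new parameter through the existing DRM_MSM_GET_PARAM ioctl. The sketch
below is illustrative only and is not part of the patch; the ioctl, struct
drm_msm_param, and MSM_PIPE_3D0 come from include/uapi/drm/msm_drm.h, while
the render-node path and libdrm usage are assumptions.]

/*
 * Minimal sketch: query MSM_PARAM_UCHE_TRAP_BASE from userspace.
 * Assumes libdrm (drmIoctl/drmClose) and a render node at
 * /dev/dri/renderD128; both are illustrative assumptions.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>

#include <drm/msm_drm.h>
#include <xf86drm.h>

int main(void)
{
        int fd = open("/dev/dri/renderD128", O_RDWR);
        if (fd < 0)
                return 1;

        struct drm_msm_param req = {
                .pipe  = MSM_PIPE_3D0,
                .param = MSM_PARAM_UCHE_TRAP_BASE,
        };

        /* Kernels without this patch return -EINVAL for the new param. */
        if (drmIoctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req) == 0)
                printf("UCHE trap base: 0x%" PRIx64 "\n", (uint64_t)req.value);

        drmClose(fd);
        return 0;
}

The returned value is the per-generation trap base the kernel programs into
the UCHE registers below (e.g. 0xffff0000ffff0000 on a4xx).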
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
include/uapi/drm/msm_drm.h

index 50c490b492f08a1a7ebfe33b2f206cafd91a84ba..f1b18a6663f7b334e95d85195d40611aa8758e7d 100644 (file)
@@ -251,8 +251,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
                gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
 
        /* Disable L2 bypass to avoid UCHE out of bounds errors */
-       gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
-       gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+       gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+       gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
 
        gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
                        (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
@@ -693,6 +693,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
        if (ret)
                goto fail;
 
+       adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
+
        if (!gpu->aspace) {
                /* TODO we think it is possible to configure the GPU to
                 * restrict access to VRAM carveout.  But the required
index ee89db72e36e7c363381baa7dac61919e8a48950..caf2c0a7a29f16a280bc0450272daee117d1c6d8 100644 (file)
@@ -750,10 +750,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
 
        /* Disable L2 bypass in the UCHE */
-       gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
-       gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
-       gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
-       gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+       gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+       gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
+       gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+       gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
 
        /* Set the GMEM VA range (0 to gpu->gmem) */
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
@@ -1805,5 +1805,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
        adreno_gpu->ubwc_config.macrotile_mode = 0;
        adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
 
+       adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
+
        return gpu;
 }
index 019610341df1506c89f44e86b8d1deeb27d61857..0ae29a7c8a4d3f74236a35cc919f69d5c0a384a0 100644 (file)
@@ -1123,12 +1123,12 @@ static int hw_init(struct msm_gpu *gpu)
 
        /* Disable L2 bypass in the UCHE */
        if (adreno_is_a7xx(adreno_gpu)) {
-               gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
-               gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+               gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+               gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
        } else {
-               gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
-               gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
-               gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+               gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0);
+               gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+               gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
        }
 
        if (!(adreno_is_a650_family(adreno_gpu) ||
@@ -2533,6 +2533,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
                }
        }
 
+       adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
+
        if (gpu->aspace)
                msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
                                a6xx_fault_handler);
index 75f5367e73caace4648491b041f80b7c4d26bf89..c7454b6cfb7f752a23bd74b67d0eadbd02ac089b 100644 (file)
@@ -385,6 +385,9 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
        case MSM_PARAM_MACROTILE_MODE:
                *value = adreno_gpu->ubwc_config.macrotile_mode;
                return 0;
+       case MSM_PARAM_UCHE_TRAP_BASE:
+               *value = adreno_gpu->uche_trap_base;
+               return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
index f5d6087376f52c93648e136449cfd4f703ecfb7f..dcf454629ce037b2a8274a6699674ad754ce1f07 100644 (file)
@@ -253,6 +253,8 @@ struct adreno_gpu {
        bool gmu_is_wrapper;
 
        bool has_ray_tracing;
+
+       u64 uche_trap_base;
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 
index b916aab80ddea954b95044a6c1dcb0b4a9a9a480..2342cb90857e0ecfe1c1f43f795be21080535ddb 100644 (file)
@@ -90,6 +90,7 @@ struct drm_msm_timespec {
 #define MSM_PARAM_RAYTRACING 0x11 /* RO */
 #define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
 #define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
+#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
 
 /* For backwards compat.  The original support for preemption was based on
  * a single ring per priority level so # of priority levels equals the #