git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/msm/a6xx: Track current_ctx_seqno per ring
author: Antonino Maniscalco <antomani103@gmail.com>
Thu, 3 Oct 2024 16:12:51 +0000 (18:12 +0200)
committer: Rob Clark <robdclark@chromium.org>
Thu, 3 Oct 2024 20:18:34 +0000 (13:18 -0700)
With preemption it is not enough to track the current_ctx_seqno globally
as execution might switch between rings.

This is especially problematic when current_ctx_seqno is used to
determine whether a page table switch is necessary as it might lead to
security bugs.

Track current context per ring.

Tested-by: Rob Clark <robdclark@gmail.com>
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8650-QRD
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8550-QRD
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8450-HDK
Signed-off-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/618012/
Signed-off-by: Rob Clark <robdclark@chromium.org>
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_ringbuffer.h

index 0dc255ddf5ceba87090f64d5cb9f078b61104063..379a3d346c300f3ccc9e9bd08ef2a32aa3e24ceb 100644 (file)
@@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
index b46ff49f47cf07a4b9a3b4925a0d24478a8c1fae..b6df115bb5670a2b6012195a7eab548109485b00 100644 (file)
@@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
index 8b4cdf95f4453bb76e7efb93d86080ef678c9f68..50c490b492f08a1a7ebfe33b2f206cafd91a84ba 100644 (file)
@@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
index e09044930547fe9f338a3089fb526d93b45c6203..ee89db72e36e7c363381baa7dac61919e8a48950 100644 (file)
@@ -77,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -132,7 +132,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        unsigned int i, ibs = 0;
 
        if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
-               gpu->cur_ctx_seqno = 0;
+               ring->cur_ctx_seqno = 0;
                a5xx_submit_in_rb(gpu, submit);
                return;
        }
@@ -171,7 +171,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
index 40a3d18c5b1ebcc8bb47b8c425790e06eb8316c4..14904c4a602584bab39adbf183582a37d19069d3 100644 (file)
@@ -109,7 +109,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
        u32 asid;
        u64 memptr = rbmemptr(ring, ttbr0);
 
-       if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+       if (ctx->seqno == ring->cur_ctx_seqno)
                return;
 
        if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -219,7 +219,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -305,7 +305,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+                       if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -854,6 +854,7 @@ static int hw_init(struct msm_gpu *gpu)
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        u64 gmem_range_min;
+       unsigned int i;
        int ret;
 
        if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -1135,7 +1136,8 @@ static int hw_init(struct msm_gpu *gpu)
        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];
 
-       gpu->cur_ctx_seqno = 0;
+       for (i = 0; i < gpu->nr_rings; i++)
+               gpu->rb[i]->cur_ctx_seqno = 0;
 
        /* Enable the SQE_to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
index a274b846642374c47f690d85fe444096c41a5d8d..0d4a3744cfcbd2662d27f9929ff691581707325b 100644 (file)
@@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        mutex_unlock(&gpu->active_lock);
 
        gpu->funcs->submit(gpu, submit);
-       gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+       submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
 
        pm_runtime_put(&gpu->pdev->dev);
        hangcheck_timer_reset(gpu);
index 1f02bb9956be2720a2760646ccdf92f8bead7dd0..7cabc8480d7c5461ab8d8726fcc21690cbaf7366 100644 (file)
@@ -193,17 +193,6 @@ struct msm_gpu {
         */
        refcount_t sysprof_active;
 
-       /**
-        * cur_ctx_seqno:
-        *
-        * The ctx->seqno value of the last context to submit rendering,
-        * and the one with current pgtables installed (for generations
-        * that support per-context pgtables).  Tracked by seqno rather
-        * than pointer value to avoid dangling pointers, and cases where
-        * a ctx can be freed and a new one created with the same address.
-        */
-       int cur_ctx_seqno;
-
        /**
         * lock:
         *
index 40791b2ade46ef0e16e2a4088291a575d3be9e82..174f83137a49940ec80b1fbf548e214fa3c32784 100644 (file)
@@ -100,6 +100,16 @@ struct msm_ringbuffer {
         * preemption.  Can be aquired from irq context.
         */
        spinlock_t preempt_lock;
+
+       /**
+        * cur_ctx_seqno:
+        *
+        * The ctx->seqno value of the last context to submit to this ring
+        * Tracked by seqno rather than pointer value to avoid dangling
+        * pointers, and cases where a ctx can be freed and a new one created
+        * with the same address.
+        */
+       int cur_ctx_seqno;
 };
 
 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,