git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
Revert "drm/msm/gpu: Push gpu lock down past runpm"
author: Rob Clark <robdclark@chromium.org>
Tue, 9 Jan 2024 18:22:17 +0000 (10:22 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Feb 2024 08:51:36 +0000 (09:51 +0100)
commit 917e9b7c2350e3e53162fcf5035e5f2d68e2cbed upstream.

This reverts commit abe2023b4cea192ab266b351fd38dc9dbd846df0.

Changing the locking order means that scheduler/msm_job_run() can race
with the recovery kthread worker, with the result that the GPU gets an
extra runpm get when we are trying to power it off, leaving the GPU in
an unrecovered state.

I'll need to come up with a different scheme for appeasing lockdep.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/573835/
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_ringbuffer.c

index 7f64c66673002fa5b2c41fe3d857d7e25f0a97c4..5c10b559a5957ae0b8b9f270c82128885ecf2930 100644 (file)
@@ -749,12 +749,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        struct msm_ringbuffer *ring = submit->ring;
        unsigned long flags;
 
-       pm_runtime_get_sync(&gpu->pdev->dev);
+       WARN_ON(!mutex_is_locked(&gpu->lock));
 
-       mutex_lock(&gpu->lock);
+       pm_runtime_get_sync(&gpu->pdev->dev);
 
        msm_gpu_hw_init(gpu);
 
+       submit->seqno = submit->hw_fence->seqno;
+
        update_sw_cntrs(gpu);
 
        /*
@@ -779,11 +781,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        gpu->funcs->submit(gpu, submit);
        gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 
-       hangcheck_timer_reset(gpu);
-
-       mutex_unlock(&gpu->lock);
-
        pm_runtime_put(&gpu->pdev->dev);
+       hangcheck_timer_reset(gpu);
 }
 
 /*
index 95257ab0185dc4dde977b54908c714cf623c408a..a7e152f659a2cbdc7cfbda0e8923a04960e462f9 100644 (file)
@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
        msm_fence_init(submit->hw_fence, fctx);
 
-       submit->seqno = submit->hw_fence->seqno;
-
        mutex_lock(&priv->lru.lock);
 
        for (i = 0; i < submit->nr_bos; i++) {
@@ -34,8 +32,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
        mutex_unlock(&priv->lru.lock);
 
+       /* TODO move submit path over to using a per-ring lock.. */
+       mutex_lock(&gpu->lock);
+
        msm_gpu_submit(gpu, submit);
 
+       mutex_unlock(&gpu->lock);
+
        return dma_fence_get(submit->hw_fence);
 }