drm/msm: move wq handling to KMS code
author    Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
          Sat, 5 Jul 2025 10:02:26 +0000 (13:02 +0300)
committer Rob Clark <robin.clark@oss.qualcomm.com>
          Sat, 5 Jul 2025 14:13:35 +0000 (07:13 -0700)
The global workqueue is only used for vblanks inside KMS code. Move
allocation / flushing / deallocation of it to msm_kms.c

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/662573/
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
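
For orientation before reading the hunks: the net effect of this patch is that
the workqueue's whole lifecycle now lives with the KMS object instead of with
struct msm_drm_private. The sketch below condenses that lifecycle from the
hunks that follow; the sketch_* names are illustrative stand-ins, while the
real code places these steps in msm_kms_init(), msm_drm_kms_uninit() and
msm_kms_destroy().

#include <linux/workqueue.h>

/* Sketch only, not part of the patch: the wq now hangs off the KMS object. */
struct sketch_kms {
	/* ... other KMS state ... */
	struct workqueue_struct *wq;	/* moved here from struct msm_drm_private */
};

/* Allocation moves into KMS init (msm_kms_init() in the real code). */
static int sketch_kms_init(struct sketch_kms *kms)
{
	kms->wq = alloc_ordered_workqueue("msm", 0);
	if (!kms->wq)
		return -ENOMEM;
	return 0;
}

/* The flush moves into msm_drm_kms_uninit(): pending vblank work must run
 * before msm_irq_uninstall() so it cannot re-enable an IRQ that has already
 * been disabled. */
static void sketch_kms_uninit(struct sketch_kms *kms)
{
	flush_workqueue(kms->wq);
	/* ... msm_irq_uninstall(), event-thread cleanup ... */
}

/* Destruction moves into msm_kms_destroy(), alongside the pending timers. */
static void sketch_kms_destroy(struct sketch_kms *kms)
{
	destroy_workqueue(kms->wq);
}
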
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_kms.c
drivers/gpu/drm/msm/msm_kms.h

index 078d3674ff411cf07614ae68889d8d0147453d10..f7abe8ba73ef0899ff1985ebf26571b7c459a52f 100644 (file)
@@ -980,7 +980,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
                        return 0;
                }
 
-               queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+               queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
                                   msecs_to_jiffies(dpu_enc->idle_timeout));
 
                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
index 0133c0c01a0bcd619089a5565719d764d88b63f8..2ee03ce2fd398be4f5b101be09c6dfb495324128 100644 (file)
@@ -511,7 +511,7 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
        if (pending & PENDING_CURSOR) {
                update_cursor(crtc);
-               drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+               drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
        }
 }
 
index 298861f373b04cb4f7a37c42d0648a1c40d2aad1..4c4900a7beda8f7bd3184230a1c1b5f7ebd0c588 100644 (file)
@@ -1196,7 +1196,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
        }
 
        if (pending & PENDING_CURSOR)
-               drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+               drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->kms->wq);
 }
 
 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
index 2283a377cda14fc08aaab71e093b71f8e3954eec..8f79f5b9a61eb39dbd63b4ff225b96e63ee9a5dd 100644 (file)
@@ -77,13 +77,6 @@ static int msm_drm_uninit(struct device *dev)
                        drm_atomic_helper_shutdown(ddev);
        }
 
-       /* We must cancel and cleanup any pending vblank enable/disable
-        * work before msm_irq_uninstall() to avoid work re-enabling an
-        * irq after uninstall has disabled it.
-        */
-
-       flush_workqueue(priv->wq);
-
        msm_gem_shrinker_cleanup(ddev);
 
        msm_perf_debugfs_cleanup(priv);
@@ -97,8 +90,6 @@ static int msm_drm_uninit(struct device *dev)
        ddev->dev_private = NULL;
        drm_dev_put(ddev);
 
-       destroy_workqueue(priv->wq);
-
        return 0;
 }
 
@@ -119,12 +110,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        ddev->dev_private = priv;
        priv->dev = ddev;
 
-       priv->wq = alloc_ordered_workqueue("msm", 0);
-       if (!priv->wq) {
-               ret = -ENOMEM;
-               goto err_put_dev;
-       }
-
        INIT_LIST_HEAD(&priv->objects);
        mutex_init(&priv->obj_lock);
 
@@ -149,7 +134,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        if (priv->kms_init) {
                ret = drmm_mode_config_init(ddev);
                if (ret)
-                       goto err_destroy_wq;
+                       goto err_put_dev;
        }
 
        dma_set_max_seg_size(dev, UINT_MAX);
@@ -157,7 +142,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
        if (ret)
-               goto err_destroy_wq;
+               goto err_put_dev;
 
        ret = msm_gem_shrinker_init(ddev);
        if (ret)
@@ -194,8 +179,6 @@ err_msm_uninit:
 
        return ret;
 
-err_destroy_wq:
-       destroy_workqueue(priv->wq);
 err_put_dev:
        drm_dev_put(ddev);
 
index 2b49c4b800eef039bb49907058c426b152e0f7f1..33d668a18ff3613b40341df540d504ffff65b2a7 100644 (file)
@@ -175,8 +175,6 @@ struct msm_drm_private {
                struct mutex lock;
        } lru;
 
-       struct workqueue_struct *wq;
-
        unsigned int num_crtcs;
 
        struct msm_drm_thread event_thread[MAX_CRTCS];
index e82b8569a46846fbd212c987f9f0d3e7939e12a2..c6c4d3a89ba829e161b060b52c91f5323cb5a806 100644 (file)
@@ -137,7 +137,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
        vbl_work->enable = enable;
        vbl_work->priv = priv;
 
-       queue_work(priv->wq, &vbl_work->work);
+       queue_work(priv->kms->wq, &vbl_work->work);
 
        return 0;
 }
@@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
 
        BUG_ON(!kms);
 
+       /* We must cancel and cleanup any pending vblank enable/disable
+        * work before msm_irq_uninstall() to avoid work re-enabling an
+        * irq after uninstall has disabled it.
+        */
+
+       flush_workqueue(kms->wq);
+
        /* clean up event worker threads */
        for (i = 0; i < priv->num_crtcs; i++) {
                if (priv->event_thread[i].worker)
@@ -261,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
        ret = priv->kms_init(ddev);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to load kms\n");
-               return ret;
+               goto err_msm_uninit;
        }
 
        /* Enable normalization of plane zpos */
index 7cdb2eb6770035a8bbed548de3dcbebf94188fff..e48529c0a1554e8b9bf477dd71f59286b388de73 100644 (file)
@@ -153,6 +153,8 @@ struct msm_kms {
        struct mutex commit_lock[MAX_CRTCS];
        unsigned pending_crtc_mask;
        struct msm_pending_timer pending_timers[MAX_CRTCS];
+
+       struct workqueue_struct *wq;
 };
 
 static inline int msm_kms_init(struct msm_kms *kms,
@@ -165,6 +167,10 @@ static inline int msm_kms_init(struct msm_kms *kms,
 
        kms->funcs = funcs;
 
+       kms->wq = alloc_ordered_workqueue("msm", 0);
+       if (!kms->wq)
+               return -ENOMEM;
+
        for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
                ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
                if (ret) {
@@ -181,6 +187,8 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
 
        for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
                msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
+
+       destroy_workqueue(kms->wq);
 }
 
 #define for_each_crtc_mask(dev, crtc, crtc_mask) \