drm/vmwgfx: Update last_read_seqno under the fence lock
author     Ian Forbes <ian.forbes@broadcom.com>
           Fri, 30 May 2025 18:35:08 +0000 (13:35 -0500)
committer  Zack Rusin <zack.rusin@broadcom.com>
           Wed, 18 Jun 2025 02:49:31 +0000 (22:49 -0400)
There was a race in vmw_update_seqno(): two concurrent callers could
read the hardware seqno in one order but store it to last_read_seqno
in the other, so last_read_seqno could go backwards. Remove this
function and fold its work into vmw_fences_update(), which now sets
and returns last_read_seqno while holding the fence lock. Serializing
the update via the fence lock makes last_read_seqno monotonic again.
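
A minimal userspace sketch of the failure mode and of the fix (an
illustration only, using C11/pthread stand-ins for the kernel
primitives; hw_seqno here models what vmw_fence_read() returns):

  #include <inttypes.h>
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
  static atomic_uint last_read_seqno;  /* published to lock-free readers */
  static atomic_uint hw_seqno;         /* models vmw_fence_read() */

  /*
   * The old pattern read the hardware seqno and stored it with no
   * serialization: caller A reads 5, caller B reads 7, B stores 7,
   * then A stores 5 and last_read_seqno moves backwards.  Reading
   * and publishing under one lock totally orders the stores, so the
   * published value can only advance.
   */
  static uint32_t fences_update(void)
  {
      uint32_t seqno;

      pthread_mutex_lock(&fence_lock);
      seqno = atomic_load(&hw_seqno);
      /* release pairs with acquire loads done outside the lock */
      atomic_store_explicit(&last_read_seqno, seqno, memory_order_release);
      pthread_mutex_unlock(&fence_lock);
      return seqno;
  }

  int main(void)
  {
      atomic_store(&hw_seqno, 42);
      printf("passed seqno: %" PRIu32 "\n", fences_update());
      return 0;
  }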

Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://lore.kernel.org/r/20250530183510.733175-1-ian.forbes@broadcom.com
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index dd4ca6a9c690bd4d731558a3d67b67d1a7ab3c41..8fe02131a6c48c05a5f0241217a8d30808764141 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
        cmd_fence->fence = *seqno;
        vmw_cmd_commit_flush(dev_priv, bytes);
-       vmw_update_seqno(dev_priv);
+       vmw_fences_update(dev_priv->fman);
 
 out_err:
        return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 37b832e552a40551fbfdc83609d5d7d0c76ceab7..bc0342c58b4b2c14b314494a41405a9ac01b85ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -440,8 +440,10 @@ static int vmw_device_init(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
        }
 
-       dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
-       atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+       u32 seqno = vmw_fence_read(dev_priv);
+
+       atomic_set(&dev_priv->last_read_seqno, seqno);
+       atomic_set(&dev_priv->marker_seq, seqno);
        return 0;
 }
 
@@ -454,7 +456,7 @@ static void vmw_device_fini(struct vmw_private *vmw)
        while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
                ;
 
-       vmw->last_read_seqno = vmw_fence_read(vmw);
+       atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
 
        vmw_write(vmw, SVGA_REG_CONFIG_DONE,
                  vmw->config_done_state);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 594af8eb04c6d2b50aedd5caf6ef8b81c828a4a5..19565e4aa59cdab8da86b24a1b37f381338cdb08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -522,7 +522,7 @@ struct vmw_private {
        int cmdbuf_waiters; /* Protected by waiter_lock */
        int error_waiters; /* Protected by waiter_lock */
        int fifo_queue_waiters; /* Protected by waiter_lock */
-       uint32_t last_read_seqno;
+       atomic_t last_read_seqno;
        struct vmw_fence_manager *fman;
        uint32_t irq_mask; /* Updates protected by waiter_lock */
 
@@ -1006,7 +1006,6 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                             uint32_t seqno,
                             bool interruptible,
                             unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e831e324e7378d6bd544bb5d2a6c019b16d0423a..90ce5372343bfb7eee3642e3f54b9d5d52c5bb5e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 
                fence_rep.handle = fence_handle;
                fence_rep.seqno = fence->base.seqno;
-               vmw_update_seqno(dev_priv);
-               fence_rep.passed_seqno = dev_priv->last_read_seqno;
+               fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
        }
 
        /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 588d50ababf604933d68877c516441900dc1f831..136f6b816795c61a7826bb84d21634157146294a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -172,7 +172,7 @@ vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
        wake_up_process(wait->task);
 }
 
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
 
 static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 {
@@ -457,7 +457,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
        return true;
 }
 
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
 {
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;
@@ -495,13 +495,17 @@ rerun:
 
        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
+       atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+       return seqno;
 }
 
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
 {
+       u32 seqno;
        spin_lock(&fman->lock);
-       __vmw_fences_update(fman);
+       seqno = __vmw_fences_update(fman);
        spin_unlock(&fman->lock);
+       return seqno;
 }
 
 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -778,7 +782,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
-       struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
 
@@ -787,14 +790,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
-       fman = fman_from_fence(fence);
 
        arg->signaled = vmw_fence_obj_signaled(fence);
 
        arg->signaled_flags = arg->flags;
-       spin_lock(&fman->lock);
-       arg->passed_seqno = dev_priv->last_read_seqno;
-       spin_unlock(&fman->lock);
+       arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
 
        ttm_base_object_unref(&base);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index a7eee579c76a94583aa9a9c809606b0a40352fa6..10264dab5f6a75793a8d77199278e6f28aa94aa8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -86,7 +86,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
        return fence;
 }
 
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
 
 extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d426214e9a108efe5cb170cca273dd..592cd78e10e00117219714b1d3f9aad282889ca4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
-       uint32_t seqno = vmw_fence_read(dev_priv);
-
-       if (dev_priv->last_read_seqno != seqno) {
-               dev_priv->last_read_seqno = seqno;
-               vmw_fences_update(dev_priv->fman);
-       }
-}
-
 bool vmw_seqno_passed(struct vmw_private *dev_priv,
                         uint32_t seqno)
 {
        bool ret;
+       u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
 
-       if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+       if (last_read_seqno - seqno < VMW_FENCE_WRAP)
                return true;
 
-       vmw_update_seqno(dev_priv);
-       if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+       last_read_seqno = vmw_fences_update(dev_priv->fman);
+       if (last_read_seqno - seqno < VMW_FENCE_WRAP)
                return true;
 
        if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
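
Note on the comparison used above (not part of the patch): the test
"last_read_seqno - seqno < VMW_FENCE_WRAP" is wrap-safe because the
subtraction is done in unsigned 32-bit arithmetic, so a seqno counts
as passed when it lies at most VMW_FENCE_WRAP behind last_read_seqno,
even across a 32-bit rollover.  A standalone check (the wrap constant
below is assumed for illustration):

  #include <assert.h>
  #include <stdint.h>

  #define VMW_FENCE_WRAP (1u << 24)   /* assumed value for illustration */

  static int seqno_passed(uint32_t last_read, uint32_t seqno)
  {
      /* unsigned subtraction wraps modulo 2^32 */
      return (uint32_t)(last_read - seqno) < VMW_FENCE_WRAP;
  }

  int main(void)
  {
      assert(seqno_passed(10, 5));            /* 5 already passed */
      assert(!seqno_passed(5, 10));           /* 10 still pending */
      assert(seqno_passed(3, 0xfffffffeu));   /* passed across the wrap */
      return 0;
  }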