From e54333618cd70d9e0fab33af8f6e71ca70a42ec9 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Tue, 20 May 2025 12:33:18 +0200
Subject: [PATCH] 5.10-stable patches

added patches:
      drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch

---
 ...-a-deadlock-in-dma-buf-fence-polling.patch | 107 ++++++++++++++++++
 queue-5.10/series                             |   1 +
 2 files changed, 108 insertions(+)
 create mode 100644 queue-5.10/drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch

diff --git a/queue-5.10/drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch b/queue-5.10/drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch
new file mode 100644
index 0000000000..d9352565e1
--- /dev/null
+++ b/queue-5.10/drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch
@@ -0,0 +1,107 @@
+From e58337100721f3cc0c7424a18730e4f39844934f Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Mon, 22 Jul 2024 14:41:13 -0400
+Subject: drm/vmwgfx: Fix a deadlock in dma buf fence polling
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit e58337100721f3cc0c7424a18730e4f39844934f upstream.
+
+Introduce a version of the fence ops that on release doesn't remove
+the fence from the pending list, and thus doesn't require a lock to
+fix poll->fence wait->fence unref deadlocks.
+
+vmwgfx overrides the wait callback to iterate over the list of all
+fences and update their status; to do that it holds a lock to prevent
+list modifications from other threads. The fence destroy callback
+both deletes the fence and removes it from the list of pending
+fences, for which it takes the same lock.
+
+The dma-buf polling callback unrefs a fence after it has been
+signaled: the poll calls the wait, which signals the fences, which
+are then destroyed. The destruction tries to acquire the lock on the
+pending-fences list, which it can never get because it is still held
+by the wait from which it was called.
+
+Old bug, but not a lot of userspace apps were using dma-buf polling
+interfaces. Fix it; in particular, this fixes KDE stalls/deadlocks.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: 2298e804e96e ("drm/vmwgfx: rework to new fence interface, v2")
+Cc: Broadcom internal kernel review list
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.2+
+Reviewed-by: Maaz Mombasawala
+Reviewed-by: Martin Krastev
+Link: https://patchwork.freedesktop.org/patch/msgid/20240722184313.181318-2-zack.rusin@broadcom.com
+[Minor context change fixed]
+Signed-off-by: Zhi Yang
+Signed-off-by: He Zhe
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -32,7 +32,6 @@
+ #define VMW_FENCE_WRAP (1 << 31)
+ 
+ struct vmw_fence_manager {
+-	int num_fence_objects;
+ 	struct vmw_private *dev_priv;
+ 	spinlock_t lock;
+ 	struct list_head fence_list;
+@@ -113,13 +112,13 @@ static void vmw_fence_obj_destroy(struct
+ {
+ 	struct vmw_fence_obj *fence =
+ 		container_of(f, struct vmw_fence_obj, base);
+-
+ 	struct vmw_fence_manager *fman = fman_from_fence(fence);
+ 
+-	spin_lock(&fman->lock);
+-	list_del_init(&fence->head);
+-	--fman->num_fence_objects;
+-	spin_unlock(&fman->lock);
++	if (!list_empty(&fence->head)) {
++		spin_lock(&fman->lock);
++		list_del_init(&fence->head);
++		spin_unlock(&fman->lock);
++	}
+ 	fence->destroy(fence);
+ }
+ 
+@@ -250,7 +249,6 @@ static const struct dma_fence_ops vmw_fe
+ 	.release = vmw_fence_obj_destroy,
+ };
+ 
+-
+ /**
+  * Execute signal actions on fences recently signaled.
+  * This is done from a workqueue so we don't have to execute
+@@ -353,7 +351,6 @@ static int vmw_fence_obj_init(struct vmw
+ 		goto out_unlock;
+ 	}
+ 	list_add_tail(&fence->head, &fman->fence_list);
+-	++fman->num_fence_objects;
+ 
+ out_unlock:
+ 	spin_unlock(&fman->lock);
+@@ -402,7 +399,7 @@ static bool vmw_fence_goal_new_locked(st
+ {
+ 	u32 goal_seqno;
+ 	u32 *fifo_mem;
+-	struct vmw_fence_obj *fence;
++	struct vmw_fence_obj *fence, *next_fence;
+ 
+ 	if (likely(!fman->seqno_valid))
+ 		return false;
+@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(st
+ 		return false;
+ 
+ 	fman->seqno_valid = false;
+-	list_for_each_entry(fence, &fman->fence_list, head) {
++	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+ 		if (!list_empty(&fence->seq_passed_actions)) {
+ 			fman->seqno_valid = true;
+ 			vmw_mmio_write(fence->base.seqno,
diff --git a/queue-5.10/series b/queue-5.10/series
index bd10373d75..6d30c95220 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -103,3 +103,4 @@ dmaengine-ti-k3-udma-add-missing-locking.patch
 dmaengine-ti-k3-udma-use-cap_mask-directly-from-dma_device-structure-instead-of-a-local-copy.patch
 clocksource-i8253-use-raw_spinlock_irqsave-in-clockevent_i8253_disable.patch
 asoc-q6afe-clocks-fix-reprobing-of-the-driver.patch
+drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch
-- 
2.47.3
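
The deadlock is easier to see in miniature. The following standalone
userspace C sketch (build with cc -pthread) models the pattern the patch
fixes. It is an illustration only, not kernel code: fence_manager, fence,
on_list, wait_and_signal and the two release functions are hypothetical
stand-ins for vmw_fence_manager, vmw_fence_obj, the fence_list membership,
the vmwgfx wait callback and vmw_fence_obj_destroy, with a pthread mutex
standing in for fman->lock.

/* Illustrative sketch only -- NOT kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence_manager;

struct fence {
	struct fence_manager *fman;
	struct fence *next;
	int on_list;		/* models !list_empty(&fence->head) */
	int refcount;
};

struct fence_manager {
	pthread_mutex_t lock;	/* models fman->lock */
	struct fence *pending;	/* models fman->fence_list */
};

/* Old behaviour: release unconditionally takes fman->lock to unlink. */
static void fence_release_old(struct fence *f)
{
	pthread_mutex_lock(&f->fman->lock);	/* hangs if caller holds it */
	f->on_list = 0;
	pthread_mutex_unlock(&f->fman->lock);
	free(f);
}

/* Fixed behaviour: skip the lock when the fence was already unlinked. */
static void fence_release_new(struct fence *f)
{
	if (f->on_list) {	/* like the !list_empty(&fence->head) check */
		pthread_mutex_lock(&f->fman->lock);
		f->on_list = 0;	/* like list_del_init(&fence->head) */
		pthread_mutex_unlock(&f->fman->lock);
	}
	free(f);
}

static void fence_put(struct fence *f, void (*release)(struct fence *))
{
	if (--f->refcount == 0)
		release(f);
}

/*
 * Models the vmwgfx wait callback: walks the pending list under
 * fman->lock, signals each fence and, like a dma-buf poll callback,
 * drops the last reference from inside the signal, i.e. with
 * fman->lock still held.
 */
static void wait_and_signal(struct fence_manager *fman,
			    void (*release)(struct fence *))
{
	pthread_mutex_lock(&fman->lock);
	while (fman->pending) {
		struct fence *f = fman->pending;

		fman->pending = f->next;
		f->on_list = 0;		/* signaled fences leave the list */
		fence_put(f, release);	/* last unref under fman->lock */
	}
	pthread_mutex_unlock(&fman->lock);
}

int main(void)
{
	struct fence_manager fman = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct fence *f = calloc(1, sizeof(*f));

	f->fman = &fman;
	f->next = NULL;
	f->on_list = 1;
	f->refcount = 1;
	fman.pending = f;

	wait_and_signal(&fman, fence_release_new);
	puts("no deadlock");
	return 0;
}

Calling wait_and_signal(&fman, fence_release_old) instead would block
forever on fman.lock: the last fence_put() runs the release from inside
the signaling loop, with the lock already held, which is exactly the
poll -> wait -> fence unref -> fence destroy cycle described in the
commit message. The fixed release only touches the lock when the fence
is still on the pending list, mirroring the !list_empty(&fence->head)
check the patch adds.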