--- /dev/null
+From e379856b428acafb8ed689f31d65814da6447b2e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Adri=C3=A1n=20Larumbe?= <adrian.larumbe@collabora.com>
+Date: Mon, 3 Mar 2025 19:08:45 +0000
+Subject: drm/panthor: Replace sleep locks with spinlocks in fdinfo path
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Adrián Larumbe <adrian.larumbe@collabora.com>
+
+commit e379856b428acafb8ed689f31d65814da6447b2e upstream.
+
+Commit 0590c94c3596 ("drm/panthor: Fix race condition when gathering fdinfo
+group samples") introduced an xarray lock to deal with potential
+use-after-free errors when accessing a group's fdinfo figures. However,
+xa_lock() takes the xarray's spinlock, which puts the caller in atomic
+context, so the mutex lock nested inside it will raise a warning when the
+kernel is compiled with mutex debug options:
+
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_MUTEXES=y
+
+Replace Panthor's group fdinfo data mutex with a guarded spinlock.
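+
+For reference, a minimal sketch of what the <linux/cleanup.h> guards used in
+this change do; the comments describing the open-coded equivalents are
+illustrative only, not part of the patch:
+
+	/* Lock held for the braced block only; spin_unlock() runs on every
+	 * exit path from the block.
+	 */
+	scoped_guard(spinlock, &group->fdinfo.lock) {
+		fdinfo->cycles += data->cycles.after - data->cycles.before;
+	}
+
+	/* Lock held until the end of the enclosing block; inside the
+	 * xa_for_each() loop this means the unlock runs at the end of
+	 * each iteration.
+	 */
+	guard(spinlock)(&group->fdinfo.lock);
+
+Unlike a mutex, a spinlock may be acquired while the xarray's own spinlock is
+held, so the nested locking remains valid in atomic context.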
+
+Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
+Fixes: 0590c94c3596 ("drm/panthor: Fix race condition when gathering fdinfo group samples")
+Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Signed-off-by: Steven Price <steven.price@arm.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250303190923.1639985-1-adrian.larumbe@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/panthor/panthor_sched.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/panthor/panthor_sched.c
++++ b/drivers/gpu/drm/panthor/panthor_sched.c
+@@ -9,6 +9,7 @@
+ #include <drm/panthor_drm.h>
+
+ #include <linux/build_bug.h>
++#include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/dma-mapping.h>
+@@ -900,8 +901,6 @@ static void group_release_work(struct wo
+ release_work);
+ u32 i;
+
+- mutex_destroy(&group->fdinfo.lock);
+-
+ for (i = 0; i < group->queue_count; i++)
+ group_free_queue(group, group->queues[i]);
+
+@@ -2845,12 +2844,12 @@ static void update_fdinfo_stats(struct p
+ struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
+ struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
+
+- mutex_lock(&group->fdinfo.lock);
+- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+- fdinfo->cycles += data->cycles.after - data->cycles.before;
+- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+- fdinfo->time += data->time.after - data->time.before;
+- mutex_unlock(&group->fdinfo.lock);
++ scoped_guard(spinlock, &group->fdinfo.lock) {
++ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
++ fdinfo->cycles += data->cycles.after - data->cycles.before;
++ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
++ fdinfo->time += data->time.after - data->time.before;
++ }
+ }
+
+ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
+@@ -2864,12 +2863,11 @@ void panthor_fdinfo_gather_group_samples
+
+ xa_lock(&gpool->xa);
+ xa_for_each(&gpool->xa, i, group) {
+- mutex_lock(&group->fdinfo.lock);
++ guard(spinlock)(&group->fdinfo.lock);
+ pfile->stats.cycles += group->fdinfo.data.cycles;
+ pfile->stats.time += group->fdinfo.data.time;
+ group->fdinfo.data.cycles = 0;
+ group->fdinfo.data.time = 0;
+- mutex_unlock(&group->fdinfo.lock);
+ }
+ xa_unlock(&gpool->xa);
+ }