--- /dev/null
+From 5fc4cbd9fde5d4630494fd6ffc884148fb618087 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 8 Sep 2022 11:10:32 +0200
+Subject: mbcache: Avoid nesting of cache->c_list_lock under bit locks
+
+From: Jan Kara <jack@suse.cz>
+
+commit 5fc4cbd9fde5d4630494fd6ffc884148fb618087 upstream.
+
+Commit 307af6c87937 ("mbcache: automatically delete entries from cache
+on freeing") started nesting cache->c_list_lock under the bit locks
+protecting hash buckets of the mbcache hash table in
+mb_cache_entry_create(). This causes problems for real-time kernels,
+because on such kernels spinlocks are sleeping locks while bit locks
+remain atomic. Luckily the nesting is easy to avoid by holding an entry
+reference until the entry is added to the LRU list, which ensures we
+cannot race with entry deletion.
+
+Cc: stable@kernel.org
+Fixes: 307af6c87937 ("mbcache: automatically delete entries from cache on freeing")
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20220908091032.10513-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/mbcache.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
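+The sketch below is a minimal userspace model of the two-reference scheme
+this patch uses; it is not kernel code, and the names (bucket_lock,
+lru_lock, entry_create) are illustrative only. It shows the ordering the
+patch establishes in mb_cache_entry_create(): the hash-bucket lock is
+dropped before the LRU list lock is taken, and the creator's extra
+reference keeps a concurrent delete from freeing the entry in between.
+
+/* Build with: cc -pthread model.c */
+#include <stdatomic.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+struct entry {
+	atomic_int refcnt;
+	int key;
+	int on_lru;
+};
+
+static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER; /* models the hash bit lock */
+static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;    /* models cache->c_list_lock */
+
+static void entry_put(struct entry *e)
+{
+	/* Free on the last reference, like mb_cache_entry_put(). */
+	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
+		free(e);
+}
+
+static struct entry *entry_create(int key)
+{
+	struct entry *e = calloc(1, sizeof(*e));
+
+	if (!e)
+		return NULL;
+	/* One reference for the hash table, one to protect the setup window. */
+	atomic_init(&e->refcnt, 2);
+	e->key = key;
+
+	pthread_mutex_lock(&bucket_lock);
+	/* ... insert into the hash bucket here ... */
+	pthread_mutex_unlock(&bucket_lock);   /* bucket lock released first */
+
+	pthread_mutex_lock(&lru_lock);        /* never nested under bucket_lock */
+	e->on_lru = 1;
+	pthread_mutex_unlock(&lru_lock);
+
+	entry_put(e);                         /* drop the setup reference */
+	return e;
+}
+
+int main(void)
+{
+	struct entry *e = entry_create(42);
+
+	printf("key=%d refcnt=%d\n", e->key, atomic_load(&e->refcnt));
+	entry_put(e);                         /* drop the hash reference; frees */
+	return 0;
+}
+
+The design point is that on PREEMPT_RT the kernel spinlock behind
+cache->c_list_lock becomes a sleeping lock, so it must not be acquired
+while the (always atomic) hash bucket bit lock is held; the second
+reference is what makes releasing the bit lock early safe.
+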
+--- a/fs/mbcache.c
++++ b/fs/mbcache.c
+@@ -89,8 +89,14 @@ int mb_cache_entry_create(struct mb_cach
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->e_list);
+- /* Initial hash reference */
+- atomic_set(&entry->e_refcnt, 1);
++ /*
++ * We create entry with two references. One reference is kept by the
++ * hash table, the other reference is used to protect us from
++ * mb_cache_entry_delete_or_get() until the entry is fully setup. This
++ * avoids nesting of cache->c_list_lock into hash table bit locks which
++ * is problematic for RT.
++ */
++ atomic_set(&entry->e_refcnt, 2);
+ entry->e_key = key;
+ entry->e_value = value;
+ entry->e_flags = 0;
+@@ -106,15 +112,12 @@ int mb_cache_entry_create(struct mb_cach
+ }
+ }
+ hlist_bl_add_head(&entry->e_hash_list, head);
+- /*
+- * Add entry to LRU list before it can be found by
+- * mb_cache_entry_delete() to avoid races
+- */
++ hlist_bl_unlock(head);
+ spin_lock(&cache->c_list_lock);
+ list_add_tail(&entry->e_list, &cache->c_list);
+ cache->c_entry_count++;
+ spin_unlock(&cache->c_list_lock);
+- hlist_bl_unlock(head);
++ mb_cache_entry_put(cache, entry);
+
+ return 0;
+ }