From: Greg Kroah-Hartman
Date: Tue, 10 Jan 2023 16:48:10 +0000 (+0100)
Subject: 4.19-stable patches
X-Git-Tag: v5.15.87~11
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ae888487de688850f3def769994edfb9328a8c3e;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	mbcache-avoid-nesting-of-cache-c_list_lock-under-bit-locks.patch
---

diff --git a/queue-4.19/mbcache-avoid-nesting-of-cache-c_list_lock-under-bit-locks.patch b/queue-4.19/mbcache-avoid-nesting-of-cache-c_list_lock-under-bit-locks.patch
new file mode 100644
index 00000000000..9ab4e26d056
--- /dev/null
+++ b/queue-4.19/mbcache-avoid-nesting-of-cache-c_list_lock-under-bit-locks.patch
@@ -0,0 +1,66 @@
+From 5fc4cbd9fde5d4630494fd6ffc884148fb618087 Mon Sep 17 00:00:00 2001
+From: Jan Kara
+Date: Thu, 8 Sep 2022 11:10:32 +0200
+Subject: mbcache: Avoid nesting of cache->c_list_lock under bit locks
+
+From: Jan Kara
+
+commit 5fc4cbd9fde5d4630494fd6ffc884148fb618087 upstream.
+
+Commit 307af6c87937 ("mbcache: automatically delete entries from cache
+on freeing") started nesting cache->c_list_lock under the bit locks
+protecting hash buckets of the mbcache hash table in
+mb_cache_entry_create(). This causes problems for real-time kernels
+because there spinlocks are sleeping locks while bitlocks stay atomic.
+Luckily the nesting is easy to avoid by holding entry reference until
+the entry is added to the LRU list. This makes sure we cannot race with
+entry deletion.
+
+Cc: stable@kernel.org
+Fixes: 307af6c87937 ("mbcache: automatically delete entries from cache on freeing")
+Reported-by: Mike Galbraith
+Signed-off-by: Jan Kara
+Link: https://lore.kernel.org/r/20220908091032.10513-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/mbcache.c |   17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/fs/mbcache.c
++++ b/fs/mbcache.c
+@@ -89,8 +89,14 @@ int mb_cache_entry_create(struct mb_cach
+ 		return -ENOMEM;
+ 
+ 	INIT_LIST_HEAD(&entry->e_list);
+-	/* Initial hash reference */
+-	atomic_set(&entry->e_refcnt, 1);
++	/*
++	 * We create entry with two references. One reference is kept by the
++	 * hash table, the other reference is used to protect us from
++	 * mb_cache_entry_delete_or_get() until the entry is fully setup. This
++	 * avoids nesting of cache->c_list_lock into hash table bit locks which
++	 * is problematic for RT.
++	 */
++	atomic_set(&entry->e_refcnt, 2);
+ 	entry->e_key = key;
+ 	entry->e_value = value;
+ 	entry->e_flags = 0;
+@@ -106,15 +112,12 @@ int mb_cache_entry_create(struct mb_cach
+ 		}
+ 	}
+ 	hlist_bl_add_head(&entry->e_hash_list, head);
+-	/*
+-	 * Add entry to LRU list before it can be found by
+-	 * mb_cache_entry_delete() to avoid races
+-	 */
++	hlist_bl_unlock(head);
+ 	spin_lock(&cache->c_list_lock);
+ 	list_add_tail(&entry->e_list, &cache->c_list);
+ 	cache->c_entry_count++;
+ 	spin_unlock(&cache->c_list_lock);
+-	hlist_bl_unlock(head);
++	mb_cache_entry_put(cache, entry);
+ 
+ 	return 0;
+ }
diff --git a/queue-4.19/series b/queue-4.19/series
index 39011e581a4..4019e034c08 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -459,3 +459,4 @@ riscv-uaccess-fix-type-of-0-variable-on-error-in-get_user.patch
 ext4-don-t-allow-journal-inode-to-have-encrypt-flag.patch
 hfs-hfsplus-use-warn_on-for-sanity-check.patch
 hfs-hfsplus-avoid-warn_on-for-sanity-check-use-proper-error-handling.patch
+mbcache-avoid-nesting-of-cache-c_list_lock-under-bit-locks.patch