--- /dev/null
+From e21a2f17566cbd64926fb8f16323972f7a064444 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Sat, 17 Feb 2024 16:14:31 +0800
+Subject: cachefiles: fix memory leak in cachefiles_add_cache()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit e21a2f17566cbd64926fb8f16323972f7a064444 upstream.
+
+The following memory leak was reported after unbinding /dev/cachefiles:
+
+==================================================================
+unreferenced object 0xffff9b674176e3c0 (size 192):
+ comm "cachefilesd2", pid 680, jiffies 4294881224
+ hex dump (first 32 bytes):
+ 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace (crc ea38a44b):
+ [<ffffffff8eb8a1a5>] kmem_cache_alloc+0x2d5/0x370
+ [<ffffffff8e917f86>] prepare_creds+0x26/0x2e0
+ [<ffffffffc002eeef>] cachefiles_determine_cache_security+0x1f/0x120
+ [<ffffffffc00243ec>] cachefiles_add_cache+0x13c/0x3a0
+ [<ffffffffc0025216>] cachefiles_daemon_write+0x146/0x1c0
+ [<ffffffff8ebc4a3b>] vfs_write+0xcb/0x520
+ [<ffffffff8ebc5069>] ksys_write+0x69/0xf0
+ [<ffffffff8f6d4662>] do_syscall_64+0x72/0x140
+ [<ffffffff8f8000aa>] entry_SYSCALL_64_after_hwframe+0x6e/0x76
+==================================================================
+
+Put the reference count of cache_cred in cachefiles_daemon_unbind() to
+fix the problem, and also put cache_cred in the error branch of
+cachefiles_add_cache() to avoid the same leak on the failure path.
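+
+As a minimal sketch (simplified, not the literal kernel source), the
+intended ownership of cache->cache_cred is:
+
+	/* bind: cachefiles_determine_cache_security() allocates the
+	 * cache credentials via prepare_creds() and stores them */
+	cache->cache_cred = prepare_creds();
+
+	/* teardown: both cachefiles_daemon_unbind() and the error path
+	 * of cachefiles_add_cache() must drop that reference again */
+	put_cred(cache->cache_cred);
+	cache->cache_cred = NULL;
+
+Without the put_cred() calls, the cred object allocated by
+prepare_creds() is never released after unbinding, which is exactly the
+object kmemleak reports above.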
+
+Fixes: 9ae326a69004 ("CacheFiles: A cache that backs onto a mounted filesystem")
+CC: stable@vger.kernel.org
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Link: https://lore.kernel.org/r/20240217081431.796809-1-libaokun1@huawei.com
+Acked-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cachefiles/bind.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/cachefiles/bind.c
++++ b/fs/cachefiles/bind.c
+@@ -245,6 +245,8 @@ error_open_root:
+ kmem_cache_free(cachefiles_object_jar, fsdef);
+ error_root_object:
+ cachefiles_end_secure(cache, saved_cred);
++ put_cred(cache->cache_cred);
++ cache->cache_cred = NULL;
+ pr_err("Failed to register: %d\n", ret);
+ return ret;
+ }
+@@ -265,6 +267,7 @@ void cachefiles_daemon_unbind(struct cac
+
+ dput(cache->graveyard);
+ mntput(cache->mnt);
++ put_cred(cache->cache_cred);
+
+ kfree(cache->rootdirname);
+ kfree(cache->secctx);
--- /dev/null
+From 2331fd4a49864e1571b4f50aa3aa1536ed6220d0 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Thu, 4 Jan 2024 22:20:36 +0800
+Subject: ext4: avoid bb_free and bb_fragments inconsistency in mb_free_blocks()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit 2331fd4a49864e1571b4f50aa3aa1536ed6220d0 upstream.
+
+After bb_free has been updated in mb_free_blocks(), it is possible to
+return without updating bb_fragments, because the block being freed is
+found to have already been freed; this leaves bb_free and bb_fragments
+inconsistent.
+
+Since the group may be unlocked in ext4_grp_locked_error(), this can lead
+to problems such as a division by zero when calculating the average
+fragment length. Hence, move the update of bb_free to after the block
+double-free check; this guarantees that the corresponding statistics are
+updated only after the core block bitmap has actually been modified.
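+
+Sketched as the resulting control flow (simplified and abridged, not
+the literal code), mb_free_blocks() then does roughly:
+
+	if (unlikely(block != -1)) {	/* some block was already free */
+		if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+			mb_regenerate_buddy(e4b);
+			goto check;
+		}
+		ext4_grp_locked_error(...);	/* may unlock the group */
+		ext4_mark_group_bitmap_corrupted(...);
+		return;		/* bb_free and bb_fragments untouched */
+	}
+
+	/* only reached once the block bitmap has really been modified */
+	this_cpu_inc(discard_pa_seq);
+	e4b->bd_info->bb_free += count;
+	if (first < e4b->bd_info->bb_first_free)
+		e4b->bd_info->bb_first_free = first;
+	/* bb_fragments is adjusted a few lines further down */
+
+so a rejected double free can no longer bump bb_free while leaving
+bb_fragments (and the derived average fragment length) stale.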
+
+Fixes: eabe0444df90 ("ext4: speed-up releasing blocks on commit")
+CC: <stable@vger.kernel.org> # 3.10
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20240104142040.2835097-5-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 39 +++++++++++++++++++++------------------
+ 1 file changed, 21 insertions(+), 18 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1494,11 +1494,6 @@ static void mb_free_blocks(struct inode
+ mb_check_buddy(e4b);
+ mb_free_blocks_double(inode, e4b, first, count);
+
+- this_cpu_inc(discard_pa_seq);
+- e4b->bd_info->bb_free += count;
+- if (first < e4b->bd_info->bb_first_free)
+- e4b->bd_info->bb_first_free = first;
+-
+ /* access memory sequentially: check left neighbour,
+ * clear range and then check right neighbour
+ */
+@@ -1512,23 +1507,31 @@ static void mb_free_blocks(struct inode
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t blocknr;
+
++ /*
++ * Fastcommit replay can free already freed blocks which
++ * corrupts allocation info. Regenerate it.
++ */
++ if (sbi->s_mount_state & EXT4_FC_REPLAY) {
++ mb_regenerate_buddy(e4b);
++ goto check;
++ }
++
+ blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
+ blocknr += EXT4_C2B(sbi, block);
+- if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+- ext4_grp_locked_error(sb, e4b->bd_group,
+- inode ? inode->i_ino : 0,
+- blocknr,
+- "freeing already freed block (bit %u); block bitmap corrupt.",
+- block);
+- ext4_mark_group_bitmap_corrupted(
+- sb, e4b->bd_group,
++ ext4_grp_locked_error(sb, e4b->bd_group,
++ inode ? inode->i_ino : 0, blocknr,
++ "freeing already freed block (bit %u); block bitmap corrupt.",
++ block);
++ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+- } else {
+- mb_regenerate_buddy(e4b);
+- }
+- goto done;
++ return;
+ }
+
++ this_cpu_inc(discard_pa_seq);
++ e4b->bd_info->bb_free += count;
++ if (first < e4b->bd_info->bb_first_free)
++ e4b->bd_info->bb_first_free = first;
++
+ /* let's maintain fragments counter */
+ if (left_is_free && right_is_free)
+ e4b->bd_info->bb_fragments--;
+@@ -1553,8 +1556,8 @@ static void mb_free_blocks(struct inode
+ if (first <= last)
+ mb_buddy_mark_free(e4b, first >> 1, last >> 1);
+
+-done:
+ mb_set_largest_free_order(sb, e4b->bd_info);
++check:
+ mb_check_buddy(e4b);
+ }
+