--- /dev/null
+From caixinchen1@huawei.com Mon Mar 20 02:20:45 2023
+From: Cai Xinchen <caixinchen1@huawei.com>
+Date: Mon, 20 Mar 2023 01:15:07 +0000
+Subject: cgroup: Add missing cpus_read_lock() to cgroup_attach_task_all()
+To: <longman@redhat.com>, <lizefan.x@bytedance.com>, <tj@kernel.org>, <hannes@cmpxchg.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <mkoutny@suse.com>, <zhangqiao22@huawei.com>, <juri.lelli@redhat.com>, <penguin-kernel@I-love.SAKURA.ne.jp>, <stable@vger.kernel.org>, <cgroups@vger.kernel.org>, <linux-kernel@vger.kernel.org>
+Message-ID: <20230320011507.129441-4-caixinchen1@huawei.com>
+
+From: Cai Xinchen <caixinchen1@huawei.com>
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit 43626dade36fa74d3329046f4ae2d7fdefe401c6 upstream.
+
+syzbot is hitting a percpu_rwsem_assert_held(&cpu_hotplug_lock) warning at
+cpuset_attach() [1], because commit 4f7e7236435ca0ab ("cgroup: Fix
+threadgroup_rwsem <-> cpus_read_lock() deadlock") missed that
+cpuset_attach() is also called from cgroup_attach_task_all().
+Add cpus_read_lock(), as cgroup_procs_write_start() does.
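+
+A minimal sketch of the nesting this fix enforces in
+cgroup_attach_task_all() (the 4.19 backport uses the older
+get_online_cpus()/put_online_cpus() names for cpus_read_lock()/
+cpus_read_unlock()):
+
+  mutex_lock(&cgroup_mutex);
+  get_online_cpus();                /* CPU hotplug disabled first */
+  percpu_down_write(&cgroup_threadgroup_rwsem);
+  /* migrate the task; cpuset_attach() may assume hotplug is off */
+  percpu_up_write(&cgroup_threadgroup_rwsem);
+  put_online_cpus();
+  mutex_unlock(&cgroup_mutex);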
+
+Link: https://syzkaller.appspot.com/bug?extid=29d3a3b4d86c8136ad9e [1]
+Reported-by: syzbot <syzbot+29d3a3b4d86c8136ad9e@syzkaller.appspotmail.com>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Fixes: 4f7e7236435ca0ab ("cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock")
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Cai Xinchen <caixinchen1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cgroup/cgroup-v1.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -13,6 +13,7 @@
+ #include <linux/delayacct.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/cgroupstats.h>
++#include <linux/cpu.h>
+
+ #include <trace/events/cgroup.h>
+
+@@ -55,6 +56,7 @@ int cgroup_attach_task_all(struct task_s
+ int retval = 0;
+
+ mutex_lock(&cgroup_mutex);
++ get_online_cpus();
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+ for_each_root(root) {
+ struct cgroup *from_cgrp;
+@@ -71,6 +73,7 @@ int cgroup_attach_task_all(struct task_s
+ break;
+ }
+ percpu_up_write(&cgroup_threadgroup_rwsem);
++ put_online_cpus();
+ mutex_unlock(&cgroup_mutex);
+
+ return retval;
--- /dev/null
+From caixinchen1@huawei.com Mon Mar 20 02:21:45 2023
+From: Cai Xinchen <caixinchen1@huawei.com>
+Date: Mon, 20 Mar 2023 01:15:05 +0000
+Subject: cgroup/cpuset: Change cpuset_rwsem and hotplug lock order
+To: <longman@redhat.com>, <lizefan.x@bytedance.com>, <tj@kernel.org>, <hannes@cmpxchg.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <mkoutny@suse.com>, <zhangqiao22@huawei.com>, <juri.lelli@redhat.com>, <penguin-kernel@I-love.SAKURA.ne.jp>, <stable@vger.kernel.org>, <cgroups@vger.kernel.org>, <linux-kernel@vger.kernel.org>
+Message-ID: <20230320011507.129441-2-caixinchen1@huawei.com>
+
+From: Juri Lelli <juri.lelli@redhat.com>
+
+commit d74b27d63a8bebe2fe634944e4ebdc7b10db7a39 upstream.
+
+commit 1243dc518c9da ("cgroup/cpuset: Convert cpuset_mutex to
+percpu_rwsem") is a performance patch which has not been backported,
+so convert the percpu_rwsem usage back to cpuset_mutex.
+
+commit aa44002e7db25 ("cpuset: Fix unsafe lock order between
+cpuset lock and cpuslock") keeps the lock order as cpuset_mutex ->
+cpu_hotplug_lock. We should change the lock order in cpuset_attach().
+
+original commit message:
+
+cpuset_rwsem is going to be acquired from sched_setscheduler() with a
+following patch. There are however paths (e.g., spawn_ksoftirqd) in
+which sched_setscheduler() is eventually called while holding hotplug lock;
+this creates a dependency between hotplug lock (to be always acquired
+first) and cpuset_rwsem (to be always acquired after hotplug lock).
+
+Fix paths which currently take the two locks in the wrong order (after
+a following patch is applied).
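+
+With this applied, the cpuset control paths touched here all take the
+two locks in the same order; a minimal sketch:
+
+  get_online_cpus();           /* cpu_hotplug_lock, always first */
+  mutex_lock(&cpuset_mutex);   /* cpuset lock, always second */
+  /* modify cpusets, possibly rebuild sched domains */
+  mutex_unlock(&cpuset_mutex);
+  put_online_cpus();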
+
+Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bristot@redhat.com
+Cc: claudio@evidence.eu.com
+Cc: lizefan@huawei.com
+Cc: longman@redhat.com
+Cc: luca.abeni@santannapisa.it
+Cc: mathieu.poirier@linaro.org
+Cc: rostedt@goodmis.org
+Cc: tj@kernel.org
+Cc: tommaso.cucinotta@santannapisa.it
+Link: https://lkml.kernel.org/r/20190719140000.31694-7-juri.lelli@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Cai Xinchen <caixinchen1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+v2:
+ * Take get_online_cpus()/put_online_cpus() before cpuset_mutex in
+   cpuset_attach() to preserve the hotplug lock -> cpuset_mutex order
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cpuset.h | 8 ++++----
+ kernel/cgroup/cpuset.c | 24 +++++++++++++++++-------
+ 2 files changed, 21 insertions(+), 11 deletions(-)
+
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -40,14 +40,14 @@ static inline bool cpusets_enabled(void)
+
+ static inline void cpuset_inc(void)
+ {
+- static_branch_inc(&cpusets_pre_enable_key);
+- static_branch_inc(&cpusets_enabled_key);
++ static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
++ static_branch_inc_cpuslocked(&cpusets_enabled_key);
+ }
+
+ static inline void cpuset_dec(void)
+ {
+- static_branch_dec(&cpusets_enabled_key);
+- static_branch_dec(&cpusets_pre_enable_key);
++ static_branch_dec_cpuslocked(&cpusets_enabled_key);
++ static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
+ }
+
+ extern int cpuset_init(void);
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -830,8 +830,8 @@ static void rebuild_sched_domains_locked
+ cpumask_var_t *doms;
+ int ndoms;
+
++ lockdep_assert_cpus_held();
+ lockdep_assert_held(&cpuset_mutex);
+- get_online_cpus();
+
+ /*
+ * We have raced with CPU hotplug. Don't do anything to avoid
+@@ -839,15 +839,13 @@ static void rebuild_sched_domains_locked
+ * Anyways, hotplug work item will rebuild sched domains.
+ */
+ if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+- goto out;
++ return;
+
+ /* Generate domain masks and attrs */
+ ndoms = generate_sched_domains(&doms, &attr);
+
+ /* Have scheduler rebuild the domains */
+ partition_sched_domains(ndoms, doms, attr);
+-out:
+- put_online_cpus();
+ }
+ #else /* !CONFIG_SMP */
+ static void rebuild_sched_domains_locked(void)
+@@ -857,9 +855,11 @@ static void rebuild_sched_domains_locked
+
+ void rebuild_sched_domains(void)
+ {
++ get_online_cpus();
+ mutex_lock(&cpuset_mutex);
+ rebuild_sched_domains_locked();
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ }
+
+ /**
+@@ -1528,13 +1528,13 @@ static void cpuset_attach(struct cgroup_
+ cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
+
+- mutex_lock(&cpuset_mutex);
+-
+ /*
+ * It should hold cpus lock because a cpu offline event can
+ * cause set_cpus_allowed_ptr() failed.
+ */
+ get_online_cpus();
++ mutex_lock(&cpuset_mutex);
++
+ /* prepare for attach */
+ if (cs == &top_cpuset)
+ cpumask_copy(cpus_attach, cpu_possible_mask);
+@@ -1553,7 +1553,6 @@ static void cpuset_attach(struct cgroup_
+ cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+ cpuset_update_task_spread_flag(cs, task);
+ }
+- put_online_cpus();
+
+ /*
+ * Change mm for all threadgroup leaders. This is expensive and may
+@@ -1589,6 +1588,7 @@ static void cpuset_attach(struct cgroup_
+ wake_up(&cpuset_attach_wq);
+
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ }
+
+ /* The various types of files and directories in a cpuset file system */
+@@ -1617,6 +1617,7 @@ static int cpuset_write_u64(struct cgrou
+ cpuset_filetype_t type = cft->private;
+ int retval = 0;
+
++ get_online_cpus();
+ mutex_lock(&cpuset_mutex);
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
+@@ -1654,6 +1655,7 @@ static int cpuset_write_u64(struct cgrou
+ }
+ out_unlock:
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ return retval;
+ }
+
+@@ -1664,6 +1666,7 @@ static int cpuset_write_s64(struct cgrou
+ cpuset_filetype_t type = cft->private;
+ int retval = -ENODEV;
+
++ get_online_cpus();
+ mutex_lock(&cpuset_mutex);
+ if (!is_cpuset_online(cs))
+ goto out_unlock;
+@@ -1678,6 +1681,7 @@ static int cpuset_write_s64(struct cgrou
+ }
+ out_unlock:
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ return retval;
+ }
+
+@@ -1716,6 +1720,7 @@ static ssize_t cpuset_write_resmask(stru
+ kernfs_break_active_protection(of->kn);
+ flush_work(&cpuset_hotplug_work);
+
++ get_online_cpus();
+ mutex_lock(&cpuset_mutex);
+ if (!is_cpuset_online(cs))
+ goto out_unlock;
+@@ -1741,6 +1746,7 @@ static ssize_t cpuset_write_resmask(stru
+ free_trial_cpuset(trialcs);
+ out_unlock:
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ kernfs_unbreak_active_protection(of->kn);
+ css_put(&cs->css);
+ flush_workqueue(cpuset_migrate_mm_wq);
+@@ -1985,6 +1991,7 @@ static int cpuset_css_online(struct cgro
+ if (!parent)
+ return 0;
+
++ get_online_cpus();
+ mutex_lock(&cpuset_mutex);
+
+ set_bit(CS_ONLINE, &cs->flags);
+@@ -2035,6 +2042,7 @@ static int cpuset_css_online(struct cgro
+ spin_unlock_irq(&callback_lock);
+ out_unlock:
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ return 0;
+ }
+
+@@ -2048,6 +2056,7 @@ static void cpuset_css_offline(struct cg
+ {
+ struct cpuset *cs = css_cs(css);
+
++ get_online_cpus();
+ mutex_lock(&cpuset_mutex);
+
+ if (is_sched_load_balance(cs))
+@@ -2057,6 +2066,7 @@ static void cpuset_css_offline(struct cg
+ clear_bit(CS_ONLINE, &cs->flags);
+
+ mutex_unlock(&cpuset_mutex);
++ put_online_cpus();
+ }
+
+ static void cpuset_css_free(struct cgroup_subsys_state *css)
--- /dev/null
+From caixinchen1@huawei.com Mon Mar 20 02:21:15 2023
+From: Cai Xinchen <caixinchen1@huawei.com>
+Date: Mon, 20 Mar 2023 01:15:06 +0000
+Subject: cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
+To: <longman@redhat.com>, <lizefan.x@bytedance.com>, <tj@kernel.org>, <hannes@cmpxchg.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <mkoutny@suse.com>, <zhangqiao22@huawei.com>, <juri.lelli@redhat.com>, <penguin-kernel@I-love.SAKURA.ne.jp>, <stable@vger.kernel.org>, <cgroups@vger.kernel.org>, <linux-kernel@vger.kernel.org>
+Message-ID: <20230320011507.129441-3-caixinchen1@huawei.com>
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 4f7e7236435ca0abe005c674ebd6892c6e83aeb3 upstream.
+
+Add #include <linux/cpu.h> to avoid a compile error on some architectures.
+
+commit 9a3284fad42f6 ("cgroup: Optimize single thread migration") and
+commit 671c11f0619e5 ("cgroup: Elide write-locking threadgroup_rwsem
+when updating csses on an empty subtree") have not been backported, so
+the lock_threadgroup input parameter of cgroup_attach_lock() and
+cgroup_attach_unlock() is dropped.
+
+original commit message:
+
+Bringing up a CPU may involve creating and destroying tasks which requires
+read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+cpus_read_lock(). However, cpuset's ->attach(), which may be called with
+threadgroup_rwsem write-locked, also wants to disable CPU hotplug and
+acquires cpus_read_lock(), leading to a deadlock.
+
+Fix it by guaranteeing that ->attach() is always called with CPU hotplug
+disabled and removing cpus_read_lock() call from cpuset_attach().
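+
+The inversion can be sketched as two tasks blocking each other:
+
+  attach path                         CPU hotplug path
+  -----------                         ----------------
+  percpu_down_write(
+      &cgroup_threadgroup_rwsem);
+                                      cpus_write_lock();
+                                      fork of a per-CPU kthread needs
+                                      threadgroup_rwsem (read): blocks
+  cpuset_attach()
+    -> cpus_read_lock(): blocks
+       behind the hotplug writer, so neither side can make progress.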
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reviewed-and-tested-by: Imran Khan <imran.f.khan@oracle.com>
+Reported-and-tested-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Fixes: 05c7b7a92cc8 ("cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug")
+Cc: stable@vger.kernel.org # v5.17+
+Signed-off-by: Cai Xinchen <caixinchen1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+v2:
+ - Add #include <linux/cpu.h> in kernel/cgroup/cgroup.c to avoid a
+   compile error on some architectures
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cgroup/cgroup.c | 48 +++++++++++++++++++++++++++++++++++++++++++++-----
+ kernel/cgroup/cpuset.c | 7 ------
+ 2 files changed, 44 insertions(+), 11 deletions(-)
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -55,6 +55,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/file.h>
+ #include <linux/sched/cputime.h>
++#include <linux/cpu.h>
+ #include <net/sock.h>
+
+ #define CREATE_TRACE_POINTS
+@@ -2210,6 +2211,43 @@ int task_cgroup_path(struct task_struct
+ EXPORT_SYMBOL_GPL(task_cgroup_path);
+
+ /**
++ * cgroup_attach_lock - Lock for ->attach()
++ *
++ * cgroup migration sometimes needs to stabilize threadgroups against forks and
++ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
++ * implementations (e.g. cpuset), also need to disable CPU hotplug.
++ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
++ * lead to deadlocks.
++ *
++ * Bringing up a CPU may involve creating and destroying tasks which requires
++ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
++ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
++ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
++ * waiting for an on-going CPU hotplug operation which in turn is waiting for
++ * the threadgroup_rwsem to be released to create new tasks. For more details:
++ *
++ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
++ *
++ * Resolve the situation by always acquiring cpus_read_lock() before
++ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
++ * CPU hotplug is disabled on entry.
++ */
++static void cgroup_attach_lock(void)
++{
++ get_online_cpus();
++ percpu_down_write(&cgroup_threadgroup_rwsem);
++}
++
++/**
++ * cgroup_attach_unlock - Undo cgroup_attach_lock()
++ */
++static void cgroup_attach_unlock(void)
++{
++ percpu_up_write(&cgroup_threadgroup_rwsem);
++ put_online_cpus();
++}
++
++/**
+ * cgroup_migrate_add_task - add a migration target task to a migration context
+ * @task: target task
+ * @mgctx: target migration context
+@@ -2694,7 +2732,7 @@ struct task_struct *cgroup_procs_write_s
+ if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+ return ERR_PTR(-EINVAL);
+
+- percpu_down_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_lock();
+
+ rcu_read_lock();
+ if (pid) {
+@@ -2725,7 +2763,7 @@ struct task_struct *cgroup_procs_write_s
+ goto out_unlock_rcu;
+
+ out_unlock_threadgroup:
+- percpu_up_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_unlock();
+ out_unlock_rcu:
+ rcu_read_unlock();
+ return tsk;
+@@ -2740,7 +2778,7 @@ void cgroup_procs_write_finish(struct ta
+ /* release reference from cgroup_procs_write_start() */
+ put_task_struct(task);
+
+- percpu_up_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_unlock();
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
+@@ -2799,7 +2837,7 @@ static int cgroup_update_dfl_csses(struc
+
+ lockdep_assert_held(&cgroup_mutex);
+
+- percpu_down_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_lock();
+
+ /* look up all csses currently attached to @cgrp's subtree */
+ spin_lock_irq(&css_set_lock);
+@@ -2830,7 +2868,7 @@ static int cgroup_update_dfl_csses(struc
+ ret = cgroup_migrate_execute(&mgctx);
+ out_finish:
+ cgroup_migrate_finish(&mgctx);
+- percpu_up_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_unlock();
+ return ret;
+ }
+
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1528,11 +1528,7 @@ static void cpuset_attach(struct cgroup_
+ cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
+
+- /*
+- * It should hold cpus lock because a cpu offline event can
+- * cause set_cpus_allowed_ptr() failed.
+- */
+- get_online_cpus();
++ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
+ mutex_lock(&cpuset_mutex);
+
+ /* prepare for attach */
+@@ -1588,7 +1584,6 @@ static void cpuset_attach(struct cgroup_
+ wake_up(&cpuset_attach_wq);
+
+ mutex_unlock(&cpuset_mutex);
+- put_online_cpus();
+ }
+
+ /* The various types of files and directories in a cpuset file system */
--- /dev/null
+From 5c099c4fdc438014d5893629e70a8ba934433ee8 Mon Sep 17 00:00:00 2001
+From: Ye Bin <yebin10@huawei.com>
+Date: Tue, 6 Dec 2022 22:41:34 +0800
+Subject: ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
+
+From: Ye Bin <yebin10@huawei.com>
+
+commit 5c099c4fdc438014d5893629e70a8ba934433ee8 upstream.
+
+Syzbot reported the following issue:
+------------[ cut here ]------------
+kernel BUG at fs/ext4/inline.c:227!
+invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+CPU: 1 PID: 3629 Comm: syz-executor212 Not tainted 6.1.0-rc5-syzkaller-00018-g59d0d52c30d4 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022
+RIP: 0010:ext4_write_inline_data+0x344/0x3e0 fs/ext4/inline.c:227
+RSP: 0018:ffffc90003b3f368 EFLAGS: 00010293
+RAX: 0000000000000000 RBX: ffff8880704e16c0 RCX: 0000000000000000
+RDX: ffff888021763a80 RSI: ffffffff821e31a4 RDI: 0000000000000006
+RBP: 000000000006818e R08: 0000000000000006 R09: 0000000000068199
+R10: 0000000000000079 R11: 0000000000000000 R12: 000000000000000b
+R13: 0000000000068199 R14: ffffc90003b3f408 R15: ffff8880704e1c82
+FS: 000055555723e3c0(0000) GS:ffff8880b9b00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fffe8ac9080 CR3: 0000000079f81000 CR4: 0000000000350ee0
+Call Trace:
+ <TASK>
+ ext4_write_inline_data_end+0x2a3/0x12f0 fs/ext4/inline.c:768
+ ext4_write_end+0x242/0xdd0 fs/ext4/inode.c:1313
+ ext4_da_write_end+0x3ed/0xa30 fs/ext4/inode.c:3063
+ generic_perform_write+0x316/0x570 mm/filemap.c:3764
+ ext4_buffered_write_iter+0x15b/0x460 fs/ext4/file.c:285
+ ext4_file_write_iter+0x8bc/0x16e0 fs/ext4/file.c:700
+ call_write_iter include/linux/fs.h:2191 [inline]
+ do_iter_readv_writev+0x20b/0x3b0 fs/read_write.c:735
+ do_iter_write+0x182/0x700 fs/read_write.c:861
+ vfs_iter_write+0x74/0xa0 fs/read_write.c:902
+ iter_file_splice_write+0x745/0xc90 fs/splice.c:686
+ do_splice_from fs/splice.c:764 [inline]
+ direct_splice_actor+0x114/0x180 fs/splice.c:931
+ splice_direct_to_actor+0x335/0x8a0 fs/splice.c:886
+ do_splice_direct+0x1ab/0x280 fs/splice.c:974
+ do_sendfile+0xb19/0x1270 fs/read_write.c:1255
+ __do_sys_sendfile64 fs/read_write.c:1323 [inline]
+ __se_sys_sendfile64 fs/read_write.c:1309 [inline]
+ __x64_sys_sendfile64+0x1d0/0x210 fs/read_write.c:1309
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+---[ end trace 0000000000000000 ]---
+
+The above issue may happen as follows:
+ext4_da_write_begin
+ ext4_da_write_inline_data_begin
+ ext4_da_convert_inline_data_to_extent
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ext4_da_write_end
+
+ext4_run_li_request
+ ext4_mb_prefetch
+ ext4_read_block_bitmap_nowait
+ ext4_validate_block_bitmap
+ ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT)
+ percpu_counter_sub(&sbi->s_freeclusters_counter,grp->bb_free);
+ -> sbi->s_freeclusters_counter become zero
+ext4_da_write_begin
+ if (ext4_nonda_switch(inode->i_sb)) -> as freeclusters_counter is zero, this returns true
+ *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
+ ext4_write_begin
+ext4_da_write_end
+ if (write_mode == FALL_BACK_TO_NONDELALLOC)
+ ext4_write_end
+ if (inline_data)
+ ext4_write_inline_data_end
+ ext4_write_inline_data
+ BUG_ON(pos + len > EXT4_I(inode)->i_inline_size);
+ -> As the inode is already converted to extent format, 'pos + len' > inline_size
+ -> so the BUG_ON triggers.
+
+To solve this issue, instead of checking ext4_has_inline_data(), which
+only becomes false after the data has been written back, check the
+EXT4_STATE_MAY_INLINE_DATA flag in ext4_write_end().
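+
+As a rough sketch of the distinction (per the flow above):
+
+  ext4_has_inline_data(inode)
+      /* tracks the on-disk inline layout; only becomes false once
+         writeback has converted the data */
+  ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)
+      /* in-memory state bit; already cleared by
+         ext4_da_convert_inline_data_to_extent() in the race above */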
+
+Fixes: f19d5870cbf7 ("ext4: add normal write support for inline data")
+Reported-by: syzbot+4faa160fa96bfba639f8@syzkaller.appspotmail.com
+Reported-by: Jun Nie <jun.nie@linaro.org>
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Link: https://lore.kernel.org/r/20221206144134.1919987-1-yebin@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ta: Fix conflict in if expression and use the local variable inline_data
+as it is initialized with ext4_has_inline_data(inode) anyway.]
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1428,7 +1428,8 @@ static int ext4_write_end(struct file *f
+ int inline_data = ext4_has_inline_data(inode);
+
+ trace_ext4_write_end(inode, pos, len, copied);
+- if (inline_data) {
++ if (inline_data &&
++ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+ ret = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
+ if (ret < 0) {
--- /dev/null
+From 2ab4f4018cb6b8010ca5002c3bdc37783b5d28c2 Mon Sep 17 00:00:00 2001
+From: Cristian Marussi <cristian.marussi@arm.com>
+Date: Tue, 7 Mar 2023 16:23:24 +0000
+Subject: firmware: arm_scmi: Fix device node validation for mailbox transport
+
+From: Cristian Marussi <cristian.marussi@arm.com>
+
+commit 2ab4f4018cb6b8010ca5002c3bdc37783b5d28c2 upstream.
+
+When mailboxes are used as a transport it is possible to set up the SCMI
+transport layer, depending on the underlying channel configuration, to use
+one or two mailboxes, associated respectively with one or two distinct
+shared memory areas: any other combination should be treated as invalid.
+
+Add stricter checking of SCMI mailbox transport device node descriptors.
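+
+Concretely, the only channel descriptor layouts accepted after this
+change are:
+
+  1 mbox + 1 shmem  ->  a single (Tx) channel
+  2 mbox + 2 shmem  ->  distinct Tx and Rx channels, with the two
+                        shmem phandles pointing at different nodes
+
+Anything else (no mboxes, a mboxes/shmem count mismatch, more than two
+shmem areas, or Tx and Rx sharing one area) is rejected with -EINVAL.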
+
+Fixes: 5c8a47a5a91d ("firmware: arm_scmi: Make scmi core independent of the transport type")
+Cc: <stable@vger.kernel.org> # 4.19
+Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
+Link: https://lore.kernel.org/r/20230307162324.891866-1-cristian.marussi@arm.com
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+[Cristian: backported to v4.19]
+Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/arm_scmi/driver.c | 37 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 37 insertions(+)
+
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -705,6 +705,39 @@ static int scmi_remove(struct platform_d
+ return ret;
+ }
+
++static int scmi_mailbox_chan_validate(struct device *cdev)
++{
++ int num_mb, num_sh, ret = 0;
++ struct device_node *np = cdev->of_node;
++
++ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
++ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
++ /* Bail out if mboxes and shmem descriptors are inconsistent */
++ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
++ dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
++ of_node_full_name(np));
++ return -EINVAL;
++ }
++
++ if (num_sh > 1) {
++ struct device_node *np_tx, *np_rx;
++
++ np_tx = of_parse_phandle(np, "shmem", 0);
++ np_rx = of_parse_phandle(np, "shmem", 1);
++ /* SCMI Tx and Rx shared mem areas have to be distinct */
++ if (!np_tx || !np_rx || np_tx == np_rx) {
++ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
++ of_node_full_name(np));
++ ret = -EINVAL;
++ }
++
++ of_node_put(np_tx);
++ of_node_put(np_rx);
++ }
++
++ return ret;
++}
++
+ static inline int
+ scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
+ {
+@@ -720,6 +753,10 @@ scmi_mbox_chan_setup(struct scmi_info *i
+ goto idr_alloc;
+ }
+
++ ret = scmi_mailbox_chan_validate(dev);
++ if (ret)
++ return ret;
++
+ cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
--- /dev/null
+From 70376c7ff31221f1d21db5611d8209e677781d3a Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Sun, 4 Dec 2022 17:00:04 +0100
+Subject: gfs2: Always check inode size of inline inodes
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit 70376c7ff31221f1d21db5611d8209e677781d3a upstream.
+
+Check if the inode size of stuffed (inline) inodes is within the allowed
+range when reading inodes from disk (gfs2_dinode_in()). This protects
+us from on-disk corruption.
+
+The two checks in stuffed_readpage() and gfs2_unstuffer_page() that just
+truncate inline data to the maximum allowed size don't actually make
+sense, and they can be removed now as well.
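+
+For reference, a stuffed inode keeps its data in the inode block
+directly after the on-disk dinode header, so the limit enforced by
+gfs2_max_stuffed_size() amounts to:
+
+  max stuffed size = sb_bsize - sizeof(struct gfs2_dinode)
+
+and a stuffed inode whose i_size exceeds that is on-disk corruption.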
+
+Reported-by: syzbot+7bb81dfa9cda07d9cd9d@syzkaller.appspotmail.com
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+[pchelkin@ispras.ru: adjust the inode variable inside gfs2_dinode_in with
+the format used before upstream commit 7db354444ad8 ("gfs2: Cosmetic
+gfs2_dinode_{in,out} cleanup")]
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/aops.c | 2 --
+ fs/gfs2/bmap.c | 3 ---
+ fs/gfs2/glops.c | 3 +++
+ 3 files changed, 3 insertions(+), 5 deletions(-)
+
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -480,8 +480,6 @@ int stuffed_readpage(struct gfs2_inode *
+ return error;
+
+ kaddr = kmap_atomic(page);
+- if (dsize > gfs2_max_stuffed_size(ip))
+- dsize = gfs2_max_stuffed_size(ip);
+ memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
+ kunmap_atomic(kaddr);
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -72,9 +72,6 @@ static int gfs2_unstuffer_page(struct gf
+ void *kaddr = kmap(page);
+ u64 dsize = i_size_read(inode);
+
+- if (dsize > gfs2_max_stuffed_size(ip))
+- dsize = gfs2_max_stuffed_size(ip);
+-
+ memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
+ kunmap(page);
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -388,6 +388,9 @@ static int gfs2_dinode_in(struct gfs2_in
+ ip->i_depth = (u8)depth;
+ ip->i_entries = be32_to_cpu(str->di_entries);
+
++ if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip))
++ goto corrupt;
++
+ if (S_ISREG(ip->i_inode.i_mode))
+ gfs2_set_aops(&ip->i_inode);
+
--- /dev/null
+From caa4b35b4317d5147b3ab0fbdc9c075c7d2e9c12 Mon Sep 17 00:00:00 2001
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+Date: Sun, 1 Jan 2023 16:57:44 -0500
+Subject: net: sched: cbq: dont intepret cls results when asked to drop
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+commit caa4b35b4317d5147b3ab0fbdc9c075c7d2e9c12 upstream.
+
+If asked to drop a packet via TC_ACT_SHOT, it is unsafe to assume that
+res.class contains a valid pointer.
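+
+The fix below bails out on TC_ACT_SHOT before ever reading res.class;
+a minimal sketch of the resulting classify flow:
+
+  result = tcf_classify(skb, fl, &res, true);
+  if (!fl || result < 0)
+          goto fallback;
+  if (result == TC_ACT_SHOT)
+          return NULL;         /* res.class may be garbage here */
+  cl = (void *)res.class;      /* safe only for remaining verdicts */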
+
+Sample splat reported by Kyle Zeng:
+
+[ 5.405624] 0: reclassify loop, rule prio 0, protocol 800
+[ 5.406326] ==================================================================
+[ 5.407240] BUG: KASAN: slab-out-of-bounds in cbq_enqueue+0x54b/0xea0
+[ 5.407987] Read of size 1 at addr ffff88800e3122aa by task poc/299
+[ 5.408731]
+[ 5.408897] CPU: 0 PID: 299 Comm: poc Not tainted 5.10.155+ #15
+[ 5.409516] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
+BIOS 1.15.0-1 04/01/2014
+[ 5.410439] Call Trace:
+[ 5.410764] dump_stack+0x87/0xcd
+[ 5.411153] print_address_description+0x7a/0x6b0
+[ 5.411687] ? vprintk_func+0xb9/0xc0
+[ 5.411905] ? printk+0x76/0x96
+[ 5.412110] ? cbq_enqueue+0x54b/0xea0
+[ 5.412323] kasan_report+0x17d/0x220
+[ 5.412591] ? cbq_enqueue+0x54b/0xea0
+[ 5.412803] __asan_report_load1_noabort+0x10/0x20
+[ 5.413119] cbq_enqueue+0x54b/0xea0
+[ 5.413400] ? __kasan_check_write+0x10/0x20
+[ 5.413679] __dev_queue_xmit+0x9c0/0x1db0
+[ 5.413922] dev_queue_xmit+0xc/0x10
+[ 5.414136] ip_finish_output2+0x8bc/0xcd0
+[ 5.414436] __ip_finish_output+0x472/0x7a0
+[ 5.414692] ip_finish_output+0x5c/0x190
+[ 5.414940] ip_output+0x2d8/0x3c0
+[ 5.415150] ? ip_mc_finish_output+0x320/0x320
+[ 5.415429] __ip_queue_xmit+0x753/0x1760
+[ 5.415664] ip_queue_xmit+0x47/0x60
+[ 5.415874] __tcp_transmit_skb+0x1ef9/0x34c0
+[ 5.416129] tcp_connect+0x1f5e/0x4cb0
+[ 5.416347] tcp_v4_connect+0xc8d/0x18c0
+[ 5.416577] __inet_stream_connect+0x1ae/0xb40
+[ 5.416836] ? local_bh_enable+0x11/0x20
+[ 5.417066] ? lock_sock_nested+0x175/0x1d0
+[ 5.417309] inet_stream_connect+0x5d/0x90
+[ 5.417548] ? __inet_stream_connect+0xb40/0xb40
+[ 5.417817] __sys_connect+0x260/0x2b0
+[ 5.418037] __x64_sys_connect+0x76/0x80
+[ 5.418267] do_syscall_64+0x31/0x50
+[ 5.418477] entry_SYSCALL_64_after_hwframe+0x61/0xc6
+[ 5.418770] RIP: 0033:0x473bb7
+[ 5.418952] Code: 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00
+00 00 90 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2a 00 00
+00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 18 89 54 24 0c 48 89 34
+24 89
+[ 5.420046] RSP: 002b:00007fffd20eb0f8 EFLAGS: 00000246 ORIG_RAX:
+000000000000002a
+[ 5.420472] RAX: ffffffffffffffda RBX: 00007fffd20eb578 RCX: 0000000000473bb7
+[ 5.420872] RDX: 0000000000000010 RSI: 00007fffd20eb110 RDI: 0000000000000007
+[ 5.421271] RBP: 00007fffd20eb150 R08: 0000000000000001 R09: 0000000000000004
+[ 5.421671] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001
+[ 5.422071] R13: 00007fffd20eb568 R14: 00000000004fc740 R15: 0000000000000002
+[ 5.422471]
+[ 5.422562] Allocated by task 299:
+[ 5.422782] __kasan_kmalloc+0x12d/0x160
+[ 5.423007] kasan_kmalloc+0x5/0x10
+[ 5.423208] kmem_cache_alloc_trace+0x201/0x2e0
+[ 5.423492] tcf_proto_create+0x65/0x290
+[ 5.423721] tc_new_tfilter+0x137e/0x1830
+[ 5.423957] rtnetlink_rcv_msg+0x730/0x9f0
+[ 5.424197] netlink_rcv_skb+0x166/0x300
+[ 5.424428] rtnetlink_rcv+0x11/0x20
+[ 5.424639] netlink_unicast+0x673/0x860
+[ 5.424870] netlink_sendmsg+0x6af/0x9f0
+[ 5.425100] __sys_sendto+0x58d/0x5a0
+[ 5.425315] __x64_sys_sendto+0xda/0xf0
+[ 5.425539] do_syscall_64+0x31/0x50
+[ 5.425764] entry_SYSCALL_64_after_hwframe+0x61/0xc6
+[ 5.426065]
+[ 5.426157] The buggy address belongs to the object at ffff88800e312200
+[ 5.426157] which belongs to the cache kmalloc-128 of size 128
+[ 5.426955] The buggy address is located 42 bytes to the right of
+[ 5.426955] 128-byte region [ffff88800e312200, ffff88800e312280)
+[ 5.427688] The buggy address belongs to the page:
+[ 5.427992] page:000000009875fabc refcount:1 mapcount:0
+mapping:0000000000000000 index:0x0 pfn:0xe312
+[ 5.428562] flags: 0x100000000000200(slab)
+[ 5.428812] raw: 0100000000000200 dead000000000100 dead000000000122
+ffff888007843680
+[ 5.429325] raw: 0000000000000000 0000000000100010 00000001ffffffff
+ffff88800e312401
+[ 5.429875] page dumped because: kasan: bad access detected
+[ 5.430214] page->mem_cgroup:ffff88800e312401
+[ 5.430471]
+[ 5.430564] Memory state around the buggy address:
+[ 5.430846] ffff88800e312180: fc fc fc fc fc fc fc fc fc fc fc fc
+fc fc fc fc
+[ 5.431267] ffff88800e312200: 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 fc
+[ 5.431705] >ffff88800e312280: fc fc fc fc fc fc fc fc fc fc fc fc
+fc fc fc fc
+[ 5.432123] ^
+[ 5.432391] ffff88800e312300: 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 fc
+[ 5.432810] ffff88800e312380: fc fc fc fc fc fc fc fc fc fc fc fc
+fc fc fc fc
+[ 5.433229] ==================================================================
+[ 5.433648] Disabling lock debugging due to kernel taint
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Kyle Zeng <zengyhkyle@gmail.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_cbq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -236,6 +236,8 @@ cbq_classify(struct sk_buff *skb, struct
+ result = tcf_classify(skb, fl, &res, true);
+ if (!fl || result < 0)
+ goto fallback;
++ if (result == TC_ACT_SHOT)
++ return NULL;
+
+ cl = (void *)res.class;
+ if (!cl) {
+@@ -256,8 +258,6 @@ cbq_classify(struct sk_buff *skb, struct
+ case TC_ACT_TRAP:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ /* fall through */
+- case TC_ACT_SHOT:
+- return NULL;
+ case TC_ACT_RECLASSIFY:
+ return cbq_reclassify(skb, cl);
+ }
drm-etnaviv-fix-reference-leak-when-mmaping-imported-buffer.patch
s390-uaccess-add-missing-earlyclobber-annotations-to-__clear_user.patch
usb-host-ohci-pxa27x-fix-and-vs-typo.patch
+ext4-fix-kernel-bug-in-ext4_write_inline_data_end.patch
+firmware-arm_scmi-fix-device-node-validation-for-mailbox-transport.patch
+gfs2-always-check-inode-size-of-inline-inodes.patch
+net-sched-cbq-dont-intepret-cls-results-when-asked-to-drop.patch
+cgroup-cpuset-change-cpuset_rwsem-and-hotplug-lock-order.patch
+cgroup-fix-threadgroup_rwsem-cpus_read_lock-deadlock.patch
+cgroup-add-missing-cpus_read_lock-to-cgroup_attach_task_all.patch