--- /dev/null
+From f18024ed11fa6ccda4a41279553dc246a4e75071 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 May 2023 19:29:53 +0000
+Subject: cgroup: always put cset in cgroup_css_set_put_fork
+
+From: John Sperbeck <jsperbeck@google.com>
+
+[ Upstream commit 2bd110339288c18823dcace602b63b0d8627e520 ]
+
+A successful call to cgroup_css_set_fork() will always have taken
+a ref on kargs->cset (regardless of CLONE_INTO_CGROUP), so always
+do a corresponding put in cgroup_css_set_put_fork().
+
+Without this, a cset and its contained css structures will be
+leaked for some fork failures. The following script reproduces
+the leak for a fork failure due to exceeding pids.max in the
+pids controller. A similar thing can happen if we jump to the
+bad_fork_cancel_cgroup label in copy_process().
+
+[ -z "$1" ] && echo "Usage $0 pids-root" && exit 1
+PID_ROOT=$1
+CGROUP=$PID_ROOT/foo
+
+[ -e $CGROUP ] && rmdir -f $CGROUP
+mkdir $CGROUP
+echo 5 > $CGROUP/pids.max
+echo $$ > $CGROUP/cgroup.procs
+
+fork_bomb()
+{
+ set -e
+ for i in $(seq 10); do
+ /bin/sleep 3600 &
+ done
+}
+
+(fork_bomb) &
+wait
+echo $$ > $PID_ROOT/cgroup.procs
+kill $(cat $CGROUP/cgroup.procs)
+rmdir $CGROUP
+
+Fixes: ef2c41cf38a7 ("clone3: allow spawning processes into cgroups")
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: John Sperbeck <jsperbeck@google.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 002e563ec2ac8..36c95626afecc 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -6471,19 +6471,18 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
+ __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
+ {
++ struct cgroup *cgrp = kargs->cgrp;
++ struct css_set *cset = kargs->cset;
++
+ cgroup_threadgroup_change_end(current);
+
+- if (kargs->flags & CLONE_INTO_CGROUP) {
+- struct cgroup *cgrp = kargs->cgrp;
+- struct css_set *cset = kargs->cset;
++ if (cset) {
++ put_css_set(cset);
++ kargs->cset = NULL;
++ }
+
++ if (kargs->flags & CLONE_INTO_CGROUP) {
+ cgroup_unlock();
+-
+- if (cset) {
+- put_css_set(cset);
+- kargs->cset = NULL;
+- }
+-
+ if (cgrp) {
+ cgroup_put(cgrp);
+ kargs->cgrp = NULL;
+--
+2.39.2
+
--- /dev/null
+From f5243ab555f82795bc22d3f80d674a6357d8db72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 15:23:10 +0530
+Subject: cgroup: bpf: use cgroup_lock()/cgroup_unlock() wrappers
+
+From: Kamalesh Babulal <kamalesh.babulal@oracle.com>
+
+[ Upstream commit 4cdb91b0dea7d7f59fa84a13c7753cd434fdedcf ]
+
+Replace mutex_[un]lock() with cgroup_[un]lock() wrappers to stay
+consistent across cgroup core and other subsystem code, while
+operating on the cgroup_mutex.
+
+Signed-off-by: Kamalesh Babulal <kamalesh.babulal@oracle.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Stable-dep-of: 2bd110339288 ("cgroup: always put cset in cgroup_css_set_put_fork")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/cgroup.c | 38 ++++++++++++------------
+ kernel/bpf/cgroup_iter.c | 4 +--
+ kernel/bpf/local_storage.c | 4 +--
+ kernel/cgroup/cgroup-v1.c | 16 +++++-----
+ kernel/cgroup/cgroup.c | 60 +++++++++++++++++++-------------------
+ 5 files changed, 61 insertions(+), 61 deletions(-)
+
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index 819f011f0a9cd..b86b907e566ca 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -173,11 +173,11 @@ void bpf_cgroup_atype_put(int cgroup_atype)
+ {
+ int i = cgroup_atype - CGROUP_LSM_START;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ if (--cgroup_lsm_atype[i].refcnt <= 0)
+ cgroup_lsm_atype[i].attach_btf_id = 0;
+ WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ }
+ #else
+ static enum cgroup_bpf_attach_type
+@@ -282,7 +282,7 @@ static void cgroup_bpf_release(struct work_struct *work)
+
+ unsigned int atype;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
+ struct hlist_head *progs = &cgrp->bpf.progs[atype];
+@@ -315,7 +315,7 @@ static void cgroup_bpf_release(struct work_struct *work)
+ bpf_cgroup_storage_free(storage);
+ }
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+ cgroup_bpf_put(p);
+@@ -729,9 +729,9 @@ static int cgroup_bpf_attach(struct cgroup *cgrp,
+ {
+ int ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -831,7 +831,7 @@ static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
+
+ cg_link = container_of(link, struct bpf_cgroup_link, link);
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ /* link might have been auto-released by dying cgroup, so fail */
+ if (!cg_link->cgroup) {
+ ret = -ENOLINK;
+@@ -843,7 +843,7 @@ static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
+ }
+ ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
+ out_unlock:
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -1009,9 +1009,9 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+ {
+ int ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -1120,9 +1120,9 @@ static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+ {
+ int ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ ret = __cgroup_bpf_query(cgrp, attr, uattr);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -1189,11 +1189,11 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
+ if (!cg_link->cgroup)
+ return;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ /* re-check cgroup under lock again */
+ if (!cg_link->cgroup) {
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return;
+ }
+
+@@ -1205,7 +1205,7 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
+ cg = cg_link->cgroup;
+ cg_link->cgroup = NULL;
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ cgroup_put(cg);
+ }
+@@ -1232,10 +1232,10 @@ static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
+ container_of(link, struct bpf_cgroup_link, link);
+ u64 cg_id = 0;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ if (cg_link->cgroup)
+ cg_id = cgroup_id(cg_link->cgroup);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ seq_printf(seq,
+ "cgroup_id:\t%llu\n"
+@@ -1251,10 +1251,10 @@ static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
+ container_of(link, struct bpf_cgroup_link, link);
+ u64 cg_id = 0;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ if (cg_link->cgroup)
+ cg_id = cgroup_id(cg_link->cgroup);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ info->cgroup.cgroup_id = cg_id;
+ info->cgroup.attach_type = cg_link->type;
+diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c
+index c187a9e62bdbb..d57ccb02477f8 100644
+--- a/kernel/bpf/cgroup_iter.c
++++ b/kernel/bpf/cgroup_iter.c
+@@ -58,7 +58,7 @@ static void *cgroup_iter_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ struct cgroup_iter_priv *p = seq->private;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ /* cgroup_iter doesn't support read across multiple sessions. */
+ if (*pos > 0) {
+@@ -89,7 +89,7 @@ static void cgroup_iter_seq_stop(struct seq_file *seq, void *v)
+ {
+ struct cgroup_iter_priv *p = seq->private;
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ /* pass NULL to the prog for post-processing */
+ if (!v) {
+diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
+index 098cf336fae6e..f01ca6f1ee031 100644
+--- a/kernel/bpf/local_storage.c
++++ b/kernel/bpf/local_storage.c
+@@ -333,14 +333,14 @@ static void cgroup_storage_map_free(struct bpf_map *_map)
+ struct list_head *storages = &map->list;
+ struct bpf_cgroup_storage *storage, *stmp;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ list_for_each_entry_safe(storage, stmp, storages, list_map) {
+ bpf_cgroup_storage_unlink(storage);
+ bpf_cgroup_storage_free(storage);
+ }
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ WARN_ON(!RB_EMPTY_ROOT(&map->root));
+ WARN_ON(!list_empty(&map->list));
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 52bb5a74a23b9..aeef06c465ef1 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -58,7 +58,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ struct cgroup_root *root;
+ int retval = 0;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ cgroup_attach_lock(true);
+ for_each_root(root) {
+ struct cgroup *from_cgrp;
+@@ -72,7 +72,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ break;
+ }
+ cgroup_attach_unlock(true);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ return retval;
+ }
+@@ -106,7 +106,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ if (ret)
+ return ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+
+@@ -145,7 +145,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ out_err:
+ cgroup_migrate_finish(&mgctx);
+ percpu_up_write(&cgroup_threadgroup_rwsem);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -847,13 +847,13 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
+ kernfs_break_active_protection(new_parent);
+ kernfs_break_active_protection(kn);
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ ret = kernfs_rename(kn, new_parent, new_name_str);
+ if (!ret)
+ TRACE_CGROUP_PATH(rename, cgrp);
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ kernfs_unbreak_active_protection(kn);
+ kernfs_unbreak_active_protection(new_parent);
+@@ -1119,7 +1119,7 @@ int cgroup1_reconfigure(struct fs_context *fc)
+ trace_cgroup_remount(root);
+
+ out_unlock:
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -1246,7 +1246,7 @@ int cgroup1_get_tree(struct fs_context *fc)
+ if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
+ ret = 1; /* restart */
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ if (!ret)
+ ret = cgroup_do_get_tree(fc);
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 2319946715e0c..002e563ec2ac8 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1385,7 +1385,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
+ cgroup_favor_dynmods(root, false);
+ cgroup_exit_root_id(root);
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ cgroup_rstat_exit(cgrp);
+ kernfs_destroy_root(root->kf_root);
+@@ -1619,7 +1619,7 @@ void cgroup_kn_unlock(struct kernfs_node *kn)
+ else
+ cgrp = kn->parent->priv;
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ kernfs_unbreak_active_protection(kn);
+ cgroup_put(cgrp);
+@@ -1664,7 +1664,7 @@ struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
+ if (drain_offline)
+ cgroup_lock_and_drain_offline(cgrp);
+ else
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ if (!cgroup_is_dead(cgrp))
+ return cgrp;
+@@ -2161,13 +2161,13 @@ int cgroup_do_get_tree(struct fs_context *fc)
+ struct super_block *sb = fc->root->d_sb;
+ struct cgroup *cgrp;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ spin_lock_irq(&css_set_lock);
+
+ cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
+
+ spin_unlock_irq(&css_set_lock);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ nsdentry = kernfs_node_dentry(cgrp->kn, sb);
+ dput(fc->root);
+@@ -2350,13 +2350,13 @@ int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ {
+ int ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ spin_lock_irq(&css_set_lock);
+
+ ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
+
+ spin_unlock_irq(&css_set_lock);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ return ret;
+ }
+@@ -2382,7 +2382,7 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+ int hierarchy_id = 1;
+ int ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ spin_lock_irq(&css_set_lock);
+
+ root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
+@@ -2396,7 +2396,7 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+ }
+
+ spin_unlock_irq(&css_set_lock);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(task_cgroup_path);
+@@ -3107,7 +3107,7 @@ void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
+ int ssid;
+
+ restart:
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
+ for_each_subsys(ss, ssid) {
+@@ -3121,7 +3121,7 @@ void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
+ prepare_to_wait(&dsct->offline_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ schedule();
+ finish_wait(&dsct->offline_waitq, &wait);
+
+@@ -4370,9 +4370,9 @@ int cgroup_rm_cftypes(struct cftype *cfts)
+ if (!(cfts[0].flags & __CFTYPE_ADDED))
+ return -ENOENT;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ ret = cgroup_rm_cftypes_locked(cfts);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -4404,14 +4404,14 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+ if (ret)
+ return ret;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ list_add_tail(&cfts->node, &ss->cfts);
+ ret = cgroup_apply_cftypes(cfts, true);
+ if (ret)
+ cgroup_rm_cftypes_locked(cfts);
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ return ret;
+ }
+
+@@ -5380,7 +5380,7 @@ static void css_release_work_fn(struct work_struct *work)
+ struct cgroup_subsys *ss = css->ss;
+ struct cgroup *cgrp = css->cgroup;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ css->flags |= CSS_RELEASED;
+ list_del_rcu(&css->sibling);
+@@ -5421,7 +5421,7 @@ static void css_release_work_fn(struct work_struct *work)
+ NULL);
+ }
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+ queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
+@@ -5769,7 +5769,7 @@ static void css_killed_work_fn(struct work_struct *work)
+ struct cgroup_subsys_state *css =
+ container_of(work, struct cgroup_subsys_state, destroy_work);
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ do {
+ offline_css(css);
+@@ -5778,7 +5778,7 @@ static void css_killed_work_fn(struct work_struct *work)
+ css = css->parent;
+ } while (css && atomic_dec_and_test(&css->online_cnt));
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ }
+
+ /* css kill confirmation processing requires process context, bounce */
+@@ -5962,7 +5962,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
+
+ pr_debug("Initializing cgroup subsys %s\n", ss->name);
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ idr_init(&ss->css_idr);
+ INIT_LIST_HEAD(&ss->cfts);
+@@ -6006,7 +6006,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
+
+ BUG_ON(online_css(css));
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ }
+
+ /**
+@@ -6066,7 +6066,7 @@ int __init cgroup_init(void)
+
+ get_user_ns(init_cgroup_ns.user_ns);
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ /*
+ * Add init_css_set to the hash table so that dfl_root can link to
+@@ -6077,7 +6077,7 @@ int __init cgroup_init(void)
+
+ BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ for_each_subsys(ss, ssid) {
+ if (ss->early_init) {
+@@ -6129,9 +6129,9 @@ int __init cgroup_init(void)
+ if (ss->bind)
+ ss->bind(init_css_set.subsys[ssid]);
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ css_populate_dir(init_css_set.subsys[ssid]);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ }
+
+ /* init_css_set.subsys[] has been updated, re-hash */
+@@ -6236,7 +6236,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ if (!buf)
+ goto out;
+
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+ spin_lock_irq(&css_set_lock);
+
+ for_each_root(root) {
+@@ -6291,7 +6291,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ retval = 0;
+ out_unlock:
+ spin_unlock_irq(&css_set_lock);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ kfree(buf);
+ out:
+ return retval;
+@@ -6375,7 +6375,7 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ struct file *f;
+
+ if (kargs->flags & CLONE_INTO_CGROUP)
+- mutex_lock(&cgroup_mutex);
++ cgroup_lock();
+
+ cgroup_threadgroup_change_begin(current);
+
+@@ -6450,7 +6450,7 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+
+ err:
+ cgroup_threadgroup_change_end(current);
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+ if (f)
+ fput(f);
+ if (dst_cgrp)
+@@ -6477,7 +6477,7 @@ static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
+ struct cgroup *cgrp = kargs->cgrp;
+ struct css_set *cset = kargs->cset;
+
+- mutex_unlock(&cgroup_mutex);
++ cgroup_unlock();
+
+ if (cset) {
+ put_css_set(cset);
+--
+2.39.2
+
--- /dev/null
+From f4468345ce48cebe2e72d418e477538ec17b5935 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 07:45:45 +0000
+Subject: cgroup: fix missing cpus_read_{lock,unlock}() in
+ cgroup_transfer_tasks()
+
+From: Qi Zheng <zhengqi.arch@bytedance.com>
+
+[ Upstream commit ab1de7ead871ebe6d12a774c3c25de0388cde082 ]
+
+The commit 4f7e7236435c ("cgroup: Fix threadgroup_rwsem <-> cpus_read_lock()
+deadlock") fixed the deadlock between cgroup_threadgroup_rwsem and
+cpus_read_lock() by introducing cgroup_attach_{lock,unlock}() and removing
+cpus_read_{lock,unlock}() from cpuset_attach(). But cgroup_transfer_tasks()
+was missed and not handled, which will cause the following warning:
+
+ WARNING: CPU: 0 PID: 589 at kernel/cpu.c:526 lockdep_assert_cpus_held+0x32/0x40
+ CPU: 0 PID: 589 Comm: kworker/1:4 Not tainted 6.4.0-rc2-next-20230517 #50
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
+ Workqueue: events cpuset_hotplug_workfn
+ RIP: 0010:lockdep_assert_cpus_held+0x32/0x40
+ <...>
+ Call Trace:
+ <TASK>
+ cpuset_attach+0x40/0x240
+ cgroup_migrate_execute+0x452/0x5e0
+ ? _raw_spin_unlock_irq+0x28/0x40
+ cgroup_transfer_tasks+0x1f3/0x360
+ ? find_held_lock+0x32/0x90
+ ? cpuset_hotplug_workfn+0xc81/0xed0
+ cpuset_hotplug_workfn+0xcb1/0xed0
+ ? process_one_work+0x248/0x5b0
+ process_one_work+0x2b9/0x5b0
+ worker_thread+0x56/0x3b0
+ ? process_one_work+0x5b0/0x5b0
+ kthread+0xf1/0x120
+ ? kthread_complete_and_exit+0x20/0x20
+ ret_from_fork+0x1f/0x30
+ </TASK>
+
+So just use the cgroup_attach_{lock,unlock}() helper to fix it.
+
+Reported-by: Zhao Gongyi <zhaogongyi@bytedance.com>
+Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
+Acked-by: Muchun Song <songmuchun@bytedance.com>
+Fixes: 05c7b7a92cc8 ("cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug")
+Cc: stable@vger.kernel.org # v5.17+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup-v1.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index aeef06c465ef1..5407241dbb45f 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -108,7 +108,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+
+ cgroup_lock();
+
+- percpu_down_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_lock(true);
+
+ /* all tasks in @from are being moved, all csets are source */
+ spin_lock_irq(&css_set_lock);
+@@ -144,7 +144,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ } while (task && !ret);
+ out_err:
+ cgroup_migrate_finish(&mgctx);
+- percpu_up_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_unlock(true);
+ cgroup_unlock();
+ return ret;
+ }
+--
+2.39.2
+
--- /dev/null
+From 4bcc403d6595b2b01e4a03bdff3db757ad1e0e34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 17:16:35 +0530
+Subject: EDAC/qcom: Get rid of hardcoded register offsets
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+[ Upstream commit cbd77119b6355872cd308a60e99f9ca678435d15 ]
+
+The LLCC EDAC register offsets vary between SoCs. Hardcoding the
+register offsets won't work and will often result in crash due to
+accessing the wrong locations.
+
+Hence, get the register offsets from the LLCC driver matching the
+individual SoCs.
+
+Cc: <stable@vger.kernel.org> # 6.0: 5365cea199c7 ("soc: qcom: llcc: Rename reg_offset structs to reflect LLCC version")
+Cc: <stable@vger.kernel.org> # 6.0: c13d7d261e36 ("soc: qcom: llcc: Pass LLCC version based register offsets to EDAC driver")
+Cc: <stable@vger.kernel.org> # 6.0
+Fixes: a6e9d7ef252c ("soc: qcom: llcc: Add configuration data for SM8450 SoC")
+Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Link: https://lore.kernel.org/r/20230517114635.76358-3-manivannan.sadhasivam@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/edac/qcom_edac.c | 116 ++++++++++++++---------------
+ include/linux/soc/qcom/llcc-qcom.h | 6 --
+ 2 files changed, 58 insertions(+), 64 deletions(-)
+
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index b1f5b9a02d6dd..518092d7eaf73 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -21,30 +21,9 @@
+ #define TRP_SYN_REG_CNT 6
+ #define DRP_SYN_REG_CNT 8
+
+-#define LLCC_COMMON_STATUS0 0x0003000c
+ #define LLCC_LB_CNT_MASK GENMASK(31, 28)
+ #define LLCC_LB_CNT_SHIFT 28
+
+-/* Single & double bit syndrome register offsets */
+-#define TRP_ECC_SB_ERR_SYN0 0x0002304c
+-#define TRP_ECC_DB_ERR_SYN0 0x00020370
+-#define DRP_ECC_SB_ERR_SYN0 0x0004204c
+-#define DRP_ECC_DB_ERR_SYN0 0x00042070
+-
+-/* Error register offsets */
+-#define TRP_ECC_ERROR_STATUS1 0x00020348
+-#define TRP_ECC_ERROR_STATUS0 0x00020344
+-#define DRP_ECC_ERROR_STATUS1 0x00042048
+-#define DRP_ECC_ERROR_STATUS0 0x00042044
+-
+-/* TRP, DRP interrupt register offsets */
+-#define DRP_INTERRUPT_STATUS 0x00041000
+-#define TRP_INTERRUPT_0_STATUS 0x00020480
+-#define DRP_INTERRUPT_CLEAR 0x00041008
+-#define DRP_ECC_ERROR_CNTR_CLEAR 0x00040004
+-#define TRP_INTERRUPT_0_CLEAR 0x00020484
+-#define TRP_ECC_ERROR_CNTR_CLEAR 0x00020440
+-
+ /* Mask and shift macros */
+ #define ECC_DB_ERR_COUNT_MASK GENMASK(4, 0)
+ #define ECC_DB_ERR_WAYS_MASK GENMASK(31, 16)
+@@ -60,15 +39,6 @@
+ #define DRP_TRP_INT_CLEAR GENMASK(1, 0)
+ #define DRP_TRP_CNT_CLEAR GENMASK(1, 0)
+
+-/* Config registers offsets*/
+-#define DRP_ECC_ERROR_CFG 0x00040000
+-
+-/* Tag RAM, Data RAM interrupt register offsets */
+-#define CMN_INTERRUPT_0_ENABLE 0x0003001c
+-#define CMN_INTERRUPT_2_ENABLE 0x0003003c
+-#define TRP_INTERRUPT_0_ENABLE 0x00020488
+-#define DRP_INTERRUPT_ENABLE 0x0004100c
+-
+ #define SB_ERROR_THRESHOLD 0x1
+ #define SB_ERROR_THRESHOLD_SHIFT 24
+ #define SB_DB_TRP_INTERRUPT_ENABLE 0x3
+@@ -88,9 +58,6 @@ enum {
+ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ [LLCC_DRAM_CE] = {
+ .name = "DRAM Single-bit",
+- .synd_reg = DRP_ECC_SB_ERR_SYN0,
+- .count_status_reg = DRP_ECC_ERROR_STATUS1,
+- .ways_status_reg = DRP_ECC_ERROR_STATUS0,
+ .reg_cnt = DRP_SYN_REG_CNT,
+ .count_mask = ECC_SB_ERR_COUNT_MASK,
+ .ways_mask = ECC_SB_ERR_WAYS_MASK,
+@@ -98,9 +65,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ },
+ [LLCC_DRAM_UE] = {
+ .name = "DRAM Double-bit",
+- .synd_reg = DRP_ECC_DB_ERR_SYN0,
+- .count_status_reg = DRP_ECC_ERROR_STATUS1,
+- .ways_status_reg = DRP_ECC_ERROR_STATUS0,
+ .reg_cnt = DRP_SYN_REG_CNT,
+ .count_mask = ECC_DB_ERR_COUNT_MASK,
+ .ways_mask = ECC_DB_ERR_WAYS_MASK,
+@@ -108,9 +72,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ },
+ [LLCC_TRAM_CE] = {
+ .name = "TRAM Single-bit",
+- .synd_reg = TRP_ECC_SB_ERR_SYN0,
+- .count_status_reg = TRP_ECC_ERROR_STATUS1,
+- .ways_status_reg = TRP_ECC_ERROR_STATUS0,
+ .reg_cnt = TRP_SYN_REG_CNT,
+ .count_mask = ECC_SB_ERR_COUNT_MASK,
+ .ways_mask = ECC_SB_ERR_WAYS_MASK,
+@@ -118,9 +79,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ },
+ [LLCC_TRAM_UE] = {
+ .name = "TRAM Double-bit",
+- .synd_reg = TRP_ECC_DB_ERR_SYN0,
+- .count_status_reg = TRP_ECC_ERROR_STATUS1,
+- .ways_status_reg = TRP_ECC_ERROR_STATUS0,
+ .reg_cnt = TRP_SYN_REG_CNT,
+ .count_mask = ECC_DB_ERR_COUNT_MASK,
+ .ways_mask = ECC_DB_ERR_WAYS_MASK,
+@@ -128,7 +86,7 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ },
+ };
+
+-static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
++static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap)
+ {
+ u32 sb_err_threshold;
+ int ret;
+@@ -137,31 +95,31 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
+ * Configure interrupt enable registers such that Tag, Data RAM related
+ * interrupts are propagated to interrupt controller for servicing
+ */
+- ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
++ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ TRP0_INTERRUPT_ENABLE,
+ TRP0_INTERRUPT_ENABLE);
+ if (ret)
+ return ret;
+
+- ret = regmap_update_bits(llcc_bcast_regmap, TRP_INTERRUPT_0_ENABLE,
++ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->trp_interrupt_0_enable,
+ SB_DB_TRP_INTERRUPT_ENABLE,
+ SB_DB_TRP_INTERRUPT_ENABLE);
+ if (ret)
+ return ret;
+
+ sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
+- ret = regmap_write(llcc_bcast_regmap, DRP_ECC_ERROR_CFG,
++ ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_ecc_error_cfg,
+ sb_err_threshold);
+ if (ret)
+ return ret;
+
+- ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
++ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ DRP0_INTERRUPT_ENABLE,
+ DRP0_INTERRUPT_ENABLE);
+ if (ret)
+ return ret;
+
+- ret = regmap_write(llcc_bcast_regmap, DRP_INTERRUPT_ENABLE,
++ ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_interrupt_enable,
+ SB_DB_DRP_INTERRUPT_ENABLE);
+ return ret;
+ }
+@@ -175,24 +133,28 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
+ switch (err_type) {
+ case LLCC_DRAM_CE:
+ case LLCC_DRAM_UE:
+- ret = regmap_write(drv->bcast_regmap, DRP_INTERRUPT_CLEAR,
++ ret = regmap_write(drv->bcast_regmap,
++ drv->edac_reg_offset->drp_interrupt_clear,
+ DRP_TRP_INT_CLEAR);
+ if (ret)
+ return ret;
+
+- ret = regmap_write(drv->bcast_regmap, DRP_ECC_ERROR_CNTR_CLEAR,
++ ret = regmap_write(drv->bcast_regmap,
++ drv->edac_reg_offset->drp_ecc_error_cntr_clear,
+ DRP_TRP_CNT_CLEAR);
+ if (ret)
+ return ret;
+ break;
+ case LLCC_TRAM_CE:
+ case LLCC_TRAM_UE:
+- ret = regmap_write(drv->bcast_regmap, TRP_INTERRUPT_0_CLEAR,
++ ret = regmap_write(drv->bcast_regmap,
++ drv->edac_reg_offset->trp_interrupt_0_clear,
+ DRP_TRP_INT_CLEAR);
+ if (ret)
+ return ret;
+
+- ret = regmap_write(drv->bcast_regmap, TRP_ECC_ERROR_CNTR_CLEAR,
++ ret = regmap_write(drv->bcast_regmap,
++ drv->edac_reg_offset->trp_ecc_error_cntr_clear,
+ DRP_TRP_CNT_CLEAR);
+ if (ret)
+ return ret;
+@@ -205,16 +167,54 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
+ return ret;
+ }
+
++struct qcom_llcc_syn_regs {
++ u32 synd_reg;
++ u32 count_status_reg;
++ u32 ways_status_reg;
++};
++
++static void get_reg_offsets(struct llcc_drv_data *drv, int err_type,
++ struct qcom_llcc_syn_regs *syn_regs)
++{
++ const struct llcc_edac_reg_offset *edac_reg_offset = drv->edac_reg_offset;
++
++ switch (err_type) {
++ case LLCC_DRAM_CE:
++ syn_regs->synd_reg = edac_reg_offset->drp_ecc_sb_err_syn0;
++ syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
++ syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
++ break;
++ case LLCC_DRAM_UE:
++ syn_regs->synd_reg = edac_reg_offset->drp_ecc_db_err_syn0;
++ syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
++ syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
++ break;
++ case LLCC_TRAM_CE:
++ syn_regs->synd_reg = edac_reg_offset->trp_ecc_sb_err_syn0;
++ syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
++ syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
++ break;
++ case LLCC_TRAM_UE:
++ syn_regs->synd_reg = edac_reg_offset->trp_ecc_db_err_syn0;
++ syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
++ syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
++ break;
++ }
++}
++
+ /* Dump Syndrome registers data for Tag RAM, Data RAM bit errors*/
+ static int
+ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ {
+ struct llcc_edac_reg_data reg_data = edac_reg_data[err_type];
++ struct qcom_llcc_syn_regs regs = { };
+ int err_cnt, err_ways, ret, i;
+ u32 synd_reg, synd_val;
+
++	get_reg_offsets(drv, err_type, &regs);
++
+ for (i = 0; i < reg_data.reg_cnt; i++) {
+- synd_reg = reg_data.synd_reg + (i * 4);
++ synd_reg = regs.synd_reg + (i * 4);
+ ret = regmap_read(drv->regmaps[bank], synd_reg,
+ &synd_val);
+ if (ret)
+@@ -224,7 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ reg_data.name, i, synd_val);
+ }
+
+- ret = regmap_read(drv->regmaps[bank], reg_data.count_status_reg,
++ ret = regmap_read(drv->regmaps[bank], regs.count_status_reg,
+ &err_cnt);
+ if (ret)
+ goto clear;
+@@ -234,7 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
+ reg_data.name, err_cnt);
+
+- ret = regmap_read(drv->regmaps[bank], reg_data.ways_status_reg,
++ ret = regmap_read(drv->regmaps[bank], regs.ways_status_reg,
+ &err_ways);
+ if (ret)
+ goto clear;
+@@ -295,7 +295,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+
+ /* Iterate over the banks and look for Tag RAM or Data RAM errors */
+ for (i = 0; i < drv->num_banks; i++) {
+- ret = regmap_read(drv->regmaps[i], DRP_INTERRUPT_STATUS,
++ ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->drp_interrupt_status,
+ &drp_error);
+
+ if (!ret && (drp_error & SB_ECC_ERROR)) {
+@@ -310,7 +310,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ if (!ret)
+ irq_rc = IRQ_HANDLED;
+
+- ret = regmap_read(drv->regmaps[i], TRP_INTERRUPT_0_STATUS,
++ ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->trp_interrupt_0_status,
+ &trp_error);
+
+ if (!ret && (trp_error & SB_ECC_ERROR)) {
+@@ -342,7 +342,7 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ int ecc_irq;
+ int rc;
+
+- rc = qcom_llcc_core_setup(llcc_driv_data->bcast_regmap);
++ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
+ if (rc)
+ return rc;
+
+diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
+index dfa5706e90a7a..af093281e335c 100644
+--- a/include/linux/soc/qcom/llcc-qcom.h
++++ b/include/linux/soc/qcom/llcc-qcom.h
+@@ -57,9 +57,6 @@ struct llcc_slice_desc {
+ /**
+ * struct llcc_edac_reg_data - llcc edac registers data for each error type
+ * @name: Name of the error
+- * @synd_reg: Syndrome register address
+- * @count_status_reg: Status register address to read the error count
+- * @ways_status_reg: Status register address to read the error ways
+ * @reg_cnt: Number of registers
+ * @count_mask: Mask value to get the error count
+ * @ways_mask: Mask value to get the error ways
+@@ -68,9 +65,6 @@ struct llcc_slice_desc {
+ */
+ struct llcc_edac_reg_data {
+ char *name;
+- u64 synd_reg;
+- u64 count_status_reg;
+- u64 ways_status_reg;
+ u32 reg_cnt;
+ u32 count_mask;
+ u32 ways_mask;
+--
+2.39.2
+
--- /dev/null
+From d4632c0f70196d191fcc4960916667bf159a6e9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 May 2023 17:59:32 +0900
+Subject: ksmbd: validate smb request protocol id
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 1c1bcf2d3ea061613119b534f57507c377df20f9 ]
+
+This patch adds validation for the smb request protocol id.
+If it is not one of the four ids(SMB1_PROTO_NUMBER, SMB2_PROTO_NUMBER,
+SMB2_TRANSFORM_PROTO_NUM, SMB2_COMPRESSION_TRANSFORM_ID), don't allow
+processing the request. This will also fix the following KASAN
+warning.
+
+[ 13.905265] BUG: KASAN: slab-out-of-bounds in init_smb2_rsp_hdr+0x1b9/0x1f0
+[ 13.905900] Read of size 16 at addr ffff888005fd2f34 by task kworker/0:2/44
+...
+[ 13.908553] Call Trace:
+[ 13.908793] <TASK>
+[ 13.908995] dump_stack_lvl+0x33/0x50
+[ 13.909369] print_report+0xcc/0x620
+[ 13.910870] kasan_report+0xae/0xe0
+[ 13.911519] kasan_check_range+0x35/0x1b0
+[ 13.911796] init_smb2_rsp_hdr+0x1b9/0x1f0
+[ 13.912492] handle_ksmbd_work+0xe5/0x820
+
+Cc: stable@vger.kernel.org
+Reported-by: Chih-Yen Chang <cc85nod@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/connection.c | 5 +++--
+ fs/ksmbd/smb_common.c | 14 +++++++++++++-
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index bf8531b80a182..e1d2be19cddfa 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -366,8 +366,6 @@ int ksmbd_conn_handler_loop(void *p)
+ break;
+
+ memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
+- if (!ksmbd_smb_request(conn))
+- break;
+
+ /*
+ * We already read 4 bytes to find out PDU size, now
+@@ -385,6 +383,9 @@ int ksmbd_conn_handler_loop(void *p)
+ continue;
+ }
+
++ if (!ksmbd_smb_request(conn))
++ break;
++
+ if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
+ SMB2_PROTO_NUMBER) {
+ if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index 95afb6b23a91c..05d7f3e910bf4 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -158,7 +158,19 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
+ */
+ bool ksmbd_smb_request(struct ksmbd_conn *conn)
+ {
+- return conn->request_buf[0] == 0;
++ __le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
++
++ if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
++ pr_err_ratelimited("smb2 compression not support yet");
++ return false;
++ }
++
++ if (*proto != SMB1_PROTO_NUMBER &&
++ *proto != SMB2_PROTO_NUMBER &&
++ *proto != SMB2_TRANSFORM_PROTO_NUM)
++ return false;
++
++ return true;
+ }
+
+ static bool supported_protocol(int idx)
+--
+2.39.2
+
--- /dev/null
+From 05f4d671b026be69209726a85408b87c0cfee8d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 13:34:41 +0530
+Subject: qcom: llcc/edac: Fix the base address used for accessing LLCC banks
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+[ Upstream commit ee13b5008707948d3052c1b5aab485c6cd53658e ]
+
+The Qualcomm LLCC/EDAC drivers were using a fixed register stride for
+accessing the Control and Status Registers (CSRs) of each LLCC bank.
+This stride only works for some SoCs like SDM845 for which driver
+support was initially added.
+
+But the later SoCs use different register strides that vary between the
+banks with holes in-between. So it is not possible to use a single register
+stride for accessing the CSRs of each bank. Doing so could result in a
+crash.
+
+For fixing this issue, let's obtain the base address of each LLCC bank from
+devicetree and get rid of the fixed stride. This also means, there is no
+need to rely on reg-names property and the base addresses can be obtained
+using the index.
+
+First index is LLCC bank 0 and last index is LLCC broadcast. If the SoC
+supports more than one bank, then those need to be defined in devicetree
+for index from 1..N-1.
+
+Reported-by: Parikshit Pareek <quic_ppareek@quicinc.com>
+Tested-by: Luca Weiss <luca.weiss@fairphone.com>
+Tested-by: Steev Klimaszewski <steev@kali.org> # Thinkpad X13s
+Tested-by: Andrew Halaney <ahalaney@redhat.com> # sa8540p-ride
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Link: https://lore.kernel.org/r/20230314080443.64635-13-manivannan.sadhasivam@linaro.org
+Stable-dep-of: cbd77119b635 ("EDAC/qcom: Get rid of hardcoded register offsets")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/edac/qcom_edac.c | 14 +++---
+ drivers/soc/qcom/llcc-qcom.c | 72 +++++++++++++++++-------------
+ include/linux/soc/qcom/llcc-qcom.h | 6 +--
+ 3 files changed, 48 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index 2c91ceff8a9ca..b1f5b9a02d6dd 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -215,7 +215,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+
+ for (i = 0; i < reg_data.reg_cnt; i++) {
+ synd_reg = reg_data.synd_reg + (i * 4);
+- ret = regmap_read(drv->regmap, drv->offsets[bank] + synd_reg,
++ ret = regmap_read(drv->regmaps[bank], synd_reg,
+ &synd_val);
+ if (ret)
+ goto clear;
+@@ -224,8 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ reg_data.name, i, synd_val);
+ }
+
+- ret = regmap_read(drv->regmap,
+- drv->offsets[bank] + reg_data.count_status_reg,
++ ret = regmap_read(drv->regmaps[bank], reg_data.count_status_reg,
+ &err_cnt);
+ if (ret)
+ goto clear;
+@@ -235,8 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
+ reg_data.name, err_cnt);
+
+- ret = regmap_read(drv->regmap,
+- drv->offsets[bank] + reg_data.ways_status_reg,
++ ret = regmap_read(drv->regmaps[bank], reg_data.ways_status_reg,
+ &err_ways);
+ if (ret)
+ goto clear;
+@@ -297,8 +295,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+
+ /* Iterate over the banks and look for Tag RAM or Data RAM errors */
+ for (i = 0; i < drv->num_banks; i++) {
+- ret = regmap_read(drv->regmap,
+- drv->offsets[i] + DRP_INTERRUPT_STATUS,
++ ret = regmap_read(drv->regmaps[i], DRP_INTERRUPT_STATUS,
+ &drp_error);
+
+ if (!ret && (drp_error & SB_ECC_ERROR)) {
+@@ -313,8 +310,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ if (!ret)
+ irq_rc = IRQ_HANDLED;
+
+- ret = regmap_read(drv->regmap,
+- drv->offsets[i] + TRP_INTERRUPT_0_STATUS,
++ ret = regmap_read(drv->regmaps[i], TRP_INTERRUPT_0_STATUS,
+ &trp_error);
+
+ if (!ret && (trp_error & SB_ECC_ERROR)) {
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index d4cba3b3c56c4..85219b5e1f416 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -50,8 +50,6 @@
+ #define LLCC_TRP_WRSC_EN 0x21f20
+ #define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c
+
+-#define BANK_OFFSET_STRIDE 0x80000
+-
+ #define LLCC_VERSION_2_0_0_0 0x02000000
+ #define LLCC_VERSION_2_1_0_0 0x02010000
+
+@@ -749,8 +747,8 @@ static int qcom_llcc_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+- const char *name)
++static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
++ const char *name)
+ {
+ void __iomem *base;
+ struct regmap_config llcc_regmap_config = {
+@@ -760,7 +758,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+ .fast_io = true,
+ };
+
+- base = devm_platform_ioremap_resource_byname(pdev, name);
++ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+@@ -778,6 +776,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ const struct llcc_slice_config *llcc_cfg;
+ u32 sz;
+ u32 version;
++ struct regmap *regmap;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+@@ -785,21 +784,51 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ goto err;
+ }
+
+- drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
+- if (IS_ERR(drv_data->regmap)) {
+- ret = PTR_ERR(drv_data->regmap);
++ /* Initialize the first LLCC bank regmap */
++ regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base");
++ if (IS_ERR(regmap)) {
++ ret = PTR_ERR(regmap);
+ goto err;
+ }
+
+- drv_data->bcast_regmap =
+- qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
++ cfg = of_device_get_match_data(&pdev->dev);
++
++ ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
++ if (ret)
++ goto err;
++
++ num_banks &= LLCC_LB_CNT_MASK;
++ num_banks >>= LLCC_LB_CNT_SHIFT;
++ drv_data->num_banks = num_banks;
++
++ drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
++ if (!drv_data->regmaps) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ drv_data->regmaps[0] = regmap;
++
++ /* Initialize rest of LLCC bank regmaps */
++ for (i = 1; i < num_banks; i++) {
++ char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
++
++ drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
++ if (IS_ERR(drv_data->regmaps[i])) {
++ ret = PTR_ERR(drv_data->regmaps[i]);
++ kfree(base);
++ goto err;
++ }
++
++ kfree(base);
++ }
++
++ drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
+ if (IS_ERR(drv_data->bcast_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
+
+- cfg = of_device_get_match_data(&pdev->dev);
+-
+ /* Extract version of the IP */
+ ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
+ &version);
+@@ -808,15 +837,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+
+ drv_data->version = version;
+
+- ret = regmap_read(drv_data->regmap, cfg->reg_offset[LLCC_COMMON_STATUS0],
+- &num_banks);
+- if (ret)
+- goto err;
+-
+- num_banks &= LLCC_LB_CNT_MASK;
+- num_banks >>= LLCC_LB_CNT_SHIFT;
+- drv_data->num_banks = num_banks;
+-
+ llcc_cfg = cfg->sct_data;
+ sz = cfg->size;
+
+@@ -824,16 +844,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ drv_data->max_slices = llcc_cfg[i].slice_id;
+
+- drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
+- GFP_KERNEL);
+- if (!drv_data->offsets) {
+- ret = -ENOMEM;
+- goto err;
+- }
+-
+- for (i = 0; i < num_banks; i++)
+- drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
+-
+ drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
+ GFP_KERNEL);
+ if (!drv_data->bitmap) {
+diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
+index bc2fb8343a944..dfa5706e90a7a 100644
+--- a/include/linux/soc/qcom/llcc-qcom.h
++++ b/include/linux/soc/qcom/llcc-qcom.h
+@@ -108,7 +108,7 @@ struct llcc_edac_reg_offset {
+
+ /**
+ * struct llcc_drv_data - Data associated with the llcc driver
+- * @regmap: regmap associated with the llcc device
++ * @regmaps: regmaps associated with the llcc device
+ * @bcast_regmap: regmap associated with llcc broadcast offset
+ * @cfg: pointer to the data structure for slice configuration
+ * @edac_reg_offset: Offset of the LLCC EDAC registers
+@@ -117,12 +117,11 @@ struct llcc_edac_reg_offset {
+ * @max_slices: max slices as read from device tree
+ * @num_banks: Number of llcc banks
+ * @bitmap: Bit map to track the active slice ids
+- * @offsets: Pointer to the bank offsets array
+ * @ecc_irq: interrupt for llcc cache error detection and reporting
+ * @version: Indicates the LLCC version
+ */
+ struct llcc_drv_data {
+- struct regmap *regmap;
++ struct regmap **regmaps;
+ struct regmap *bcast_regmap;
+ const struct llcc_slice_config *cfg;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
+@@ -131,7 +130,6 @@ struct llcc_drv_data {
+ u32 max_slices;
+ u32 num_banks;
+ unsigned long *bitmap;
+- u32 *offsets;
+ int ecc_irq;
+ u32 version;
+ };
+--
+2.39.2
+
--- /dev/null
+x86-head-64-switch-to-kernel_cs-as-soon-as-new-gdt-i.patch
+test_firmware-use-kstrtobool-instead-of-strtobool.patch
+test_firmware-prevent-race-conditions-by-a-correct-i.patch
+cgroup-bpf-use-cgroup_lock-cgroup_unlock-wrappers.patch
+cgroup-always-put-cset-in-cgroup_css_set_put_fork.patch
+cgroup-fix-missing-cpus_read_-lock-unlock-in-cgroup_.patch
+qcom-llcc-edac-fix-the-base-address-used-for-accessi.patch
+edac-qcom-get-rid-of-hardcoded-register-offsets.patch
+ksmbd-validate-smb-request-protocol-id.patch
--- /dev/null
+From 2b2cd3a2eef14204c72a4279682336c5ea712d6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 May 2023 10:47:45 +0200
+Subject: test_firmware: prevent race conditions by a correct implementation of
+ locking
+
+From: Mirsad Goran Todorovac <mirsad.todorovac@alu.unizg.hr>
+
+[ Upstream commit 4acfe3dfde685a5a9eaec5555351918e2d7266a1 ]
+
+Dan Carpenter spotted a race condition in a couple of situations like
+these in the test_firmware driver:
+
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&test_fw_mutex);
+ *(u8 *)cfg = val;
+ mutex_unlock(&test_fw_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t config_num_requests_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+ mutex_unlock(&test_fw_mutex);
+
+ rc = test_dev_config_update_u8(buf, count,
+ &test_fw_config->num_requests);
+
+out:
+ return rc;
+}
+
+static ssize_t config_read_fw_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_u8(buf, count,
+ &test_fw_config->read_fw_idx);
+}
+
+The function test_dev_config_update_u8() is called from both the locked
+and the unlocked context, function config_num_requests_store() and
+config_read_fw_idx_store() which can both be called asynchronously as
+they are driver's methods, while test_dev_config_update_u8() and siblings
+change their argument pointed to by u8 *cfg or similar pointer.
+
+To avoid deadlock on test_fw_mutex, the lock is dropped before calling
+test_dev_config_update_u8() and re-acquired within test_dev_config_update_u8()
+itself, but alas this creates a race condition.
+
+Having two locks wouldn't assure a race-proof mutual exclusion.
+
+This situation is best avoided by the introduction of a new, unlocked
+function __test_dev_config_update_u8() which can be called from the locked
+context and reducing test_dev_config_update_u8() to:
+
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_u8(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+doing the locking and calling the unlocked primitive, which enables both
+locked and unlocked versions without duplication of code.
+
+The similar approach was applied to all functions called from the locked
+and the unlocked context, which safely mitigates both deadlocks and race
+conditions in the driver.
+
+__test_dev_config_update_bool(), __test_dev_config_update_u8() and
+__test_dev_config_update_size_t() unlocked versions of the functions
+were introduced to be called from the locked contexts as a workaround
+without releasing the main driver's lock and thereof causing a race
+condition.
+
+The test_dev_config_update_bool(), test_dev_config_update_u8() and
+test_dev_config_update_size_t() locked versions of the functions
+are being called from driver methods without the unnecessary multiplying
+of the locking and unlocking code for each method, and complicating
+the code with saving of the return value across lock.
+
+Fixes: 7feebfa487b92 ("test_firmware: add support for request_firmware_into_buf")
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Russ Weight <russell.h.weight@intel.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tianfei Zhang <tianfei.zhang@intel.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Colin Ian King <colin.i.king@gmail.com>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: linux-kselftest@vger.kernel.org
+Cc: stable@vger.kernel.org # v5.4
+Suggested-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Mirsad Goran Todorovac <mirsad.todorovac@alu.unizg.hr>
+Link: https://lore.kernel.org/r/20230509084746.48259-1-mirsad.todorovac@alu.unizg.hr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/test_firmware.c | 52 ++++++++++++++++++++++++++++++---------------
+ 1 file changed, 35 insertions(+), 17 deletions(-)
+
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 6ef3e6926da8a..13d3fa6aa972c 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -360,16 +360,26 @@ static ssize_t config_test_show_str(char *dst,
+ return len;
+ }
+
+-static int test_dev_config_update_bool(const char *buf, size_t size,
++static inline int __test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
+ {
+ int ret;
+
+- mutex_lock(&test_fw_mutex);
+ if (kstrtobool(buf, cfg) < 0)
+ ret = -EINVAL;
+ else
+ ret = size;
++
++ return ret;
++}
++
++static int test_dev_config_update_bool(const char *buf, size_t size,
++ bool *cfg)
++{
++ int ret;
++
++ mutex_lock(&test_fw_mutex);
++ ret = __test_dev_config_update_bool(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+@@ -380,7 +390,8 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val)
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ }
+
+-static int test_dev_config_update_size_t(const char *buf,
++static int __test_dev_config_update_size_t(
++ const char *buf,
+ size_t size,
+ size_t *cfg)
+ {
+@@ -391,9 +402,7 @@ static int test_dev_config_update_size_t(const char *buf,
+ if (ret)
+ return ret;
+
+- mutex_lock(&test_fw_mutex);
+ *(size_t *)cfg = new;
+- mutex_unlock(&test_fw_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+@@ -409,7 +418,7 @@ static ssize_t test_dev_config_show_int(char *buf, int val)
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ }
+
+-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
++static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+ {
+ u8 val;
+ int ret;
+@@ -418,14 +427,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+ if (ret)
+ return ret;
+
+- mutex_lock(&test_fw_mutex);
+ *(u8 *)cfg = val;
+- mutex_unlock(&test_fw_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+ }
+
++static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
++{
++ int ret;
++
++ mutex_lock(&test_fw_mutex);
++ ret = __test_dev_config_update_u8(buf, size, cfg);
++ mutex_unlock(&test_fw_mutex);
++
++ return ret;
++}
++
+ static ssize_t test_dev_config_show_u8(char *buf, u8 val)
+ {
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+@@ -478,10 +496,10 @@ static ssize_t config_num_requests_store(struct device *dev,
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+- mutex_unlock(&test_fw_mutex);
+
+- rc = test_dev_config_update_u8(buf, count,
+- &test_fw_config->num_requests);
++ rc = __test_dev_config_update_u8(buf, count,
++ &test_fw_config->num_requests);
++ mutex_unlock(&test_fw_mutex);
+
+ out:
+ return rc;
+@@ -525,10 +543,10 @@ static ssize_t config_buf_size_store(struct device *dev,
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+- mutex_unlock(&test_fw_mutex);
+
+- rc = test_dev_config_update_size_t(buf, count,
+- &test_fw_config->buf_size);
++ rc = __test_dev_config_update_size_t(buf, count,
++ &test_fw_config->buf_size);
++ mutex_unlock(&test_fw_mutex);
+
+ out:
+ return rc;
+@@ -555,10 +573,10 @@ static ssize_t config_file_offset_store(struct device *dev,
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+- mutex_unlock(&test_fw_mutex);
+
+- rc = test_dev_config_update_size_t(buf, count,
+- &test_fw_config->file_offset);
++ rc = __test_dev_config_update_size_t(buf, count,
++ &test_fw_config->file_offset);
++ mutex_unlock(&test_fw_mutex);
+
+ out:
+ return rc;
+--
+2.39.2
+
--- /dev/null
+From 96bf3215e13aa7eadd69e77cae31cf6e8e1f5936 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Jan 2023 10:22:03 +0100
+Subject: test_firmware: Use kstrtobool() instead of strtobool()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit f7d85515bd21902b218370a1a6301f76e4e636ff ]
+
+strtobool() is the same as kstrtobool().
+However, the latter is more used within the kernel.
+
+In order to remove strtobool() and slightly simplify kstrtox.h, switch to
+the other function name.
+
+While at it, include the corresponding header file (<linux/kstrtox.h>)
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Acked-by: Luis Chamberlain <mcgrof@kernel.org>
+Link: https://lore.kernel.org/r/34f04735d20e0138695dd4070651bd860a36b81c.1673688120.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 4acfe3dfde68 ("test_firmware: prevent race conditions by a correct implementation of locking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/test_firmware.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 7f165c517338a..6ef3e6926da8a 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ #include <linux/delay.h>
++#include <linux/kstrtox.h>
+ #include <linux/kthread.h>
+ #include <linux/vmalloc.h>
+ #include <linux/efi_embedded_fw.h>
+@@ -365,7 +366,7 @@ static int test_dev_config_update_bool(const char *buf, size_t size,
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+- if (strtobool(buf, cfg) < 0)
++ if (kstrtobool(buf, cfg) < 0)
+ ret = -EINVAL;
+ else
+ ret = size;
+--
+2.39.2
+
--- /dev/null
+From 2b4677a9a690bd26a16213f10725d1fa1f099e8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 11:26:41 -0500
+Subject: x86/head/64: Switch to KERNEL_CS as soon as new GDT is installed
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+[ Upstream commit a37f2699c36a7f6606ba3300f243227856c5ad6b ]
+
+The call to startup_64_setup_env() will install a new GDT but does not
+actually switch to using the KERNEL_CS entry until returning from the
+function call.
+
+Commit bcce82908333 ("x86/sev: Detect/setup SEV/SME features earlier in
+boot") moved the call to sme_enable() earlier in the boot process and in
+between the call to startup_64_setup_env() and the switch to KERNEL_CS.
+An SEV-ES or an SEV-SNP guest will trigger #VC exceptions during the call
+to sme_enable() and if the CS pushed on the stack as part of the exception
+and used by IRETQ is not mapped by the new GDT, then problems occur.
+Today, the current CS when entering startup_64 is the kernel CS value
+because it was set up by the decompressor code, so no issue is seen.
+
+However, a recent patchset that looked to avoid using the legacy
+decompressor during an EFI boot exposed this bug. At entry to startup_64,
+the CS value is that of EFI and is not mapped in the new kernel GDT. So
+when a #VC exception occurs, the CS value used by IRETQ is not valid and
+the guest boot crashes.
+
+Fix this issue by moving the block that switches to the KERNEL_CS value to
+be done immediately after returning from startup_64_setup_env().
+
+Fixes: bcce82908333 ("x86/sev: Detect/setup SEV/SME features earlier in boot")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Joerg Roedel <jroedel@suse.de>
+Link: https://lore.kernel.org/all/6ff1f28af2829cc9aea357ebee285825f90a431f.1684340801.git.thomas.lendacky%40amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/head_64.S | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index d860d437631b6..998cdb112b725 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -85,6 +85,15 @@ SYM_CODE_START_NOALIGN(startup_64)
+ call startup_64_setup_env
+ popq %rsi
+
++ /* Now switch to __KERNEL_CS so IRET works reliably */
++ pushq $__KERNEL_CS
++ leaq .Lon_kernel_cs(%rip), %rax
++ pushq %rax
++ lretq
++
++.Lon_kernel_cs:
++ UNWIND_HINT_EMPTY
++
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+ * Activate SEV/SME memory encryption if supported/enabled. This needs to
+@@ -98,15 +107,6 @@ SYM_CODE_START_NOALIGN(startup_64)
+ popq %rsi
+ #endif
+
+- /* Now switch to __KERNEL_CS so IRET works reliably */
+- pushq $__KERNEL_CS
+- leaq .Lon_kernel_cs(%rip), %rax
+- pushq %rax
+- lretq
+-
+-.Lon_kernel_cs:
+- UNWIND_HINT_EMPTY
+-
+ /* Sanitize CPU configuration */
+ call verify_cpu
+
+--
+2.39.2
+