--- /dev/null
+From dfd6200a095440b663099d8d42f1efb0175a1ce3 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 19 Jan 2023 19:03:49 +0800
+Subject: blk-cgroup: support to track if policy is online
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit dfd6200a095440b663099d8d42f1efb0175a1ce3 upstream.
+
+A new field 'online' is added to blkg_policy_data to fix the following
+two problems:
+
+1) In blkcg_activate_policy(), if pd_alloc_fn() with 'GFP_NOWAIT'
+   fails, 'queue_lock' is dropped and pd_alloc_fn() is retried without
+   'GFP_NOWAIT'. In the meantime, cgroup removal can race with it, and
+   pd_offline_fn() can be called without pd_init_fn() and pd_online_fn()
+   ever having run. This can trigger a null pointer dereference.
+
+2) In order to synchronize pd_free_fn() from blkg_free_workfn() and
+   blkcg_deactivate_policy(), 'list_del_init(&blkg->q_node)' is delayed
+   to blkg_free_workfn(), hence pd_offline_fn() can be called first from
+   blkg_destroy() and then again from blkcg_deactivate_policy(); this
+   double call must be prevented.
+
+The new field 'online' is set after pd_online_fn() and cleared after
+pd_offline_fn(), and pd_offline_fn() is only called if 'online' is set.
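+
+As a rough userspace model (illustrative only, not the kernel code),
+the guard makes pd_offline_fn() conditional on pd_online_fn() having
+actually run, and idempotent:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct pd { bool online; };
+
+  static void pd_online_fn(struct pd *pd)
+  {
+          pd->online = true;
+          puts("pd onlined");
+  }
+
+  static void pd_offline_fn(struct pd *pd)
+  {
+          if (!pd->online)        /* never onlined, or already offlined */
+                  return;
+          pd->online = false;
+          puts("pd offlined");
+  }
+
+  int main(void)
+  {
+          struct pd pd = { .online = false };
+
+          pd_offline_fn(&pd);     /* offline before online: no-op */
+          pd_online_fn(&pd);
+          pd_offline_fn(&pd);     /* first offline runs */
+          pd_offline_fn(&pd);     /* second offline: no-op */
+          return 0;
+  }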
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20230119110350.2287325-3-yukuai1@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-cgroup.c | 24 +++++++++++++++++-------
+ block/blk-cgroup.h | 1 +
+ 2 files changed, 18 insertions(+), 7 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -255,6 +255,7 @@ static struct blkcg_gq *blkg_alloc(struc
+ blkg->pd[i] = pd;
+ pd->blkg = blkg;
+ pd->plid = i;
++ pd->online = false;
+ }
+
+ return blkg;
+@@ -326,8 +327,11 @@ static struct blkcg_gq *blkg_create(stru
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+- if (blkg->pd[i] && pol->pd_online_fn)
+- pol->pd_online_fn(blkg->pd[i]);
++ if (blkg->pd[i]) {
++ if (pol->pd_online_fn)
++ pol->pd_online_fn(blkg->pd[i]);
++ blkg->pd[i]->online = true;
++ }
+ }
+ }
+ blkg->online = true;
+@@ -432,8 +436,11 @@ static void blkg_destroy(struct blkcg_gq
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+- if (blkg->pd[i] && pol->pd_offline_fn)
+- pol->pd_offline_fn(blkg->pd[i]);
++ if (blkg->pd[i] && blkg->pd[i]->online) {
++ if (pol->pd_offline_fn)
++ pol->pd_offline_fn(blkg->pd[i]);
++ blkg->pd[i]->online = false;
++ }
+ }
+
+ blkg->online = false;
+@@ -1422,6 +1429,7 @@ retry:
+ blkg->pd[pol->plid] = pd;
+ pd->blkg = blkg;
+ pd->plid = pol->plid;
++ pd->online = false;
+ }
+
+ /* all allocated, init in the same order */
+@@ -1429,9 +1437,11 @@ retry:
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_init_fn(blkg->pd[pol->plid]);
+
+- if (pol->pd_online_fn)
+- list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
++ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
++ if (pol->pd_online_fn)
+ pol->pd_online_fn(blkg->pd[pol->plid]);
++ blkg->pd[pol->plid]->online = true;
++ }
+
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+@@ -1493,7 +1503,7 @@ void blkcg_deactivate_policy(struct requ
+
+ spin_lock(&blkcg->lock);
+ if (blkg->pd[pol->plid]) {
+- if (pol->pd_offline_fn)
++ if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg->pd[pol->plid]);
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -125,6 +125,7 @@ struct blkg_policy_data {
+ /* the blkg and policy id this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+ int plid;
++ bool online;
+ };
+
+ /*
--- /dev/null
+From 01bc4fda9ea0a6b52f12326486f07a4910666cf6 Mon Sep 17 00:00:00 2001
+From: Li Nan <linan122@huawei.com>
+Date: Fri, 19 Apr 2024 17:32:57 +0800
+Subject: blk-iocost: do not WARN if iocg was already offlined
+
+From: Li Nan <linan122@huawei.com>
+
+commit 01bc4fda9ea0a6b52f12326486f07a4910666cf6 upstream.
+
+In iocg_pay_debt(), a warning is triggered if 'active_list' is empty,
+which is intended to confirm that the iocg is active when it has debt.
+However, the warning can also be triggered during blkcg or disk removal,
+if iocg_waitq_timer_fn() runs at that time:
+
+ WARNING: CPU: 0 PID: 2344971 at block/blk-iocost.c:1402 iocg_pay_debt+0x14c/0x190
+ Call trace:
+ iocg_pay_debt+0x14c/0x190
+ iocg_kick_waitq+0x438/0x4c0
+ iocg_waitq_timer_fn+0xd8/0x130
+ __run_hrtimer+0x144/0x45c
+ __hrtimer_run_queues+0x16c/0x244
+ hrtimer_interrupt+0x2cc/0x7b0
+
+The warning is meaningless in this situation. Since this iocg is being
+removed, the state of 'active_list' is irrelevant, and 'waitq_timer' is
+canceled after the iocg is removed from 'active_list' in ioc_pd_free(),
+which ensures the iocg is freed only after iocg_waitq_timer_fn() returns.
+
+Therefore, check whether the iocg has already been offlined to avoid the
+warning when removing a blkcg or disk.
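+
+A minimal userspace sketch of the new condition (a bool stands in for
+list_empty(&iocg->active_list), and WARN_ON_ONCE is a stand-in macro):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  #define WARN_ON_ONCE(cond) \
+          do { if (cond) fprintf(stderr, "WARN: %s\n", #cond); } while (0)
+
+  struct iocg { bool on_active_list; bool online; };
+
+  static void iocg_pay_debt(struct iocg *iocg)
+  {
+          /* only warn while the iocg is still online; an offlined
+           * iocg has legitimately left the active list */
+          WARN_ON_ONCE(!iocg->on_active_list && iocg->online);
+  }
+
+  int main(void)
+  {
+          struct iocg live  = { .on_active_list = false, .online = true };
+          struct iocg dying = { .on_active_list = false, .online = false };
+
+          iocg_pay_debt(&live);   /* warns: inactive but still online */
+          iocg_pay_debt(&dying);  /* silent: being torn down */
+          return 0;
+  }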
+
+Signed-off-by: Li Nan <linan122@huawei.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Link: https://lore.kernel.org/r/20240419093257.3004211-1-linan666@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-iocost.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1430,8 +1430,11 @@ static void iocg_pay_debt(struct ioc_gq
+ lockdep_assert_held(&iocg->ioc->lock);
+ lockdep_assert_held(&iocg->waitq.lock);
+
+- /* make sure that nobody messed with @iocg */
+- WARN_ON_ONCE(list_empty(&iocg->active_list));
++ /*
++ * make sure that nobody messed with @iocg. Check iocg->pd.online
++ * to avoid warn when removing blkcg or disk.
++ */
++ WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
+ WARN_ON_ONCE(iocg->inuse > 1);
+
+ iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
--- /dev/null
+From bc27c52eea189e8f7492d40739b7746d67b65beb Mon Sep 17 00:00:00 2001
+From: Andrii Nakryiko <andrii@kernel.org>
+Date: Tue, 28 Jan 2025 17:22:46 -0800
+Subject: bpf: avoid holding freeze_mutex during mmap operation
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+commit bc27c52eea189e8f7492d40739b7746d67b65beb upstream.
+
+We use map->freeze_mutex to prevent races between map_freeze() and
+memory mapping BPF map contents with writable permissions. The way we
+naively do this means we'll hold freeze_mutex for the entire duration of
+all the mm and VMA manipulations, which is completely unnecessary. This
+can potentially also lead to deadlocks, as reported by syzbot in [0].
+
+So, instead, hold freeze_mutex only during the writeability checks,
+proactively bump the "write active" count for the map, unlock the mutex,
+and proceed with the mmap logic. Only if something goes wrong during the
+mmap logic is that "write active" counter increment undone.
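+
+A rough pthread-based sketch of the new locking shape (simplified,
+hypothetical types; not the actual kernel code):
+
+  #include <pthread.h>
+  #include <stdbool.h>
+
+  struct map {
+          pthread_mutex_t freeze_mutex;
+          int write_active;       /* blocks map_freeze() while > 0 */
+          bool frozen;
+  };
+
+  static int map_mmap(struct map *map, bool writable, int (*do_mmap)(void))
+  {
+          int err = 0;
+
+          /* hold the mutex only for the check + counter bump */
+          pthread_mutex_lock(&map->freeze_mutex);
+          if (writable && map->frozen)
+                  err = -1;               /* -EACCES in the kernel */
+          else if (writable)
+                  map->write_active++;    /* bump before unlocking */
+          pthread_mutex_unlock(&map->freeze_mutex);
+          if (err)
+                  return err;
+
+          err = do_mmap();                /* slow path, lock not held */
+          if (err && writable)
+                  map->write_active--;    /* undo on failure */
+          return err;
+  }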
+
+ [0] https://lore.kernel.org/bpf/678dcbc9.050a0220.303755.0066.GAE@google.com/
+
+Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
+Reported-by: syzbot+4dc041c686b7c816a71e@syzkaller.appspotmail.com
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20250129012246.1515826-2-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: David Sauerwein <dssauerw@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/syscall.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -813,7 +813,7 @@ static const struct vm_operations_struct
+ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct bpf_map *map = filp->private_data;
+- int err;
++ int err = 0;
+
+ if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
+ map_value_has_timer(map) || map_value_has_kptrs(map))
+@@ -838,7 +838,12 @@ static int bpf_map_mmap(struct file *fil
+ err = -EACCES;
+ goto out;
+ }
++ bpf_map_write_active_inc(map);
+ }
++out:
++ mutex_unlock(&map->freeze_mutex);
++ if (err)
++ return err;
+
+ /* set default open/close callbacks */
+ vma->vm_ops = &bpf_map_default_vmops;
+@@ -849,13 +854,11 @@ static int bpf_map_mmap(struct file *fil
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ err = map->ops->map_mmap(map, vma);
+- if (err)
+- goto out;
++ if (err) {
++ if (vma->vm_flags & VM_WRITE)
++ bpf_map_write_active_dec(map);
++ }
+
+- if (vma->vm_flags & VM_MAYWRITE)
+- bpf_map_write_active_inc(map);
+-out:
+- mutex_unlock(&map->freeze_mutex);
+ return err;
+ }
+
--- /dev/null
+From 28ead3eaabc16ecc907cfb71876da028080f6356 Mon Sep 17 00:00:00 2001
+From: Xu Kuohai <xukuohai@huawei.com>
+Date: Fri, 19 Jul 2024 19:00:53 +0800
+Subject: bpf: Prevent tail call between progs attached to different hooks
+
+From: Xu Kuohai <xukuohai@huawei.com>
+
+commit 28ead3eaabc16ecc907cfb71876da028080f6356 upstream.
+
+bpf progs can be attached to kernel functions, and the attached functions
+can take different parameters or return different values. If a prog
+attached to one kernel function tail calls a prog attached to another
+kernel function, the ctx access or return value verification could be
+bypassed.
+
+For example, suppose prog1 is attached to func1, which takes only one
+parameter, and prog2 is attached to func2, which takes two parameters.
+Since the verifier assumes the bpf ctx passed to prog2 is constructed
+based on func2's prototype, it allows prog2 to access the second
+parameter from the bpf ctx passed to it. The problem is that the
+verifier does not prevent prog1 from passing its bpf ctx to prog2 via a
+tail call. In this case, the bpf ctx passed to prog2 is constructed from
+func1 instead of func2; that is, the assumption underlying ctx access
+verification is bypassed.
+
+As another example, suppose BPF LSM prog1 is attached to the hook
+file_alloc_security, and BPF LSM prog2 is attached to the hook
+bpf_lsm_audit_rule_known. The verifier knows the return value rules for
+these two hooks: e.g., it is legal for bpf_lsm_audit_rule_known to
+return the positive number 1, but it is illegal for file_alloc_security
+to return a positive number. So the verifier allows prog2 to return 1,
+but does not allow prog1 to return a positive number. The problem is
+that the verifier does not prevent prog1 from calling prog2 via a tail
+call. In this case, prog2's return value of 1 will be used as the return
+value for prog1's hook file_alloc_security. That is, the return value
+rule is bypassed.
+
+This patch adds a restriction on tail calls to prevent such bypasses.
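+
+The shape of the added check, as a standalone sketch (simplified types;
+in the kernel it lives in bpf_prog_map_compatible()):
+
+  #include <stdbool.h>
+
+  enum prog_type { TRACING, LSM, EXT, STRUCT_OPS, OTHER };
+
+  struct owner { enum prog_type type; const void *attach_func_proto; };
+  struct prog  { enum prog_type type; const void *attach_func_proto; };
+
+  static bool tail_call_compatible(const struct owner *owner,
+                                   const struct prog *fp)
+  {
+          if (owner->type != fp->type)
+                  return false;
+          /* hook-attached types must also target the same prototype */
+          if (owner->attach_func_proto != fp->attach_func_proto) {
+                  switch (fp->type) {
+                  case TRACING:
+                  case LSM:
+                  case EXT:
+                  case STRUCT_OPS:
+                          return false;
+                  default:
+                          break;
+                  }
+          }
+          return true;
+  }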
+
+Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
+Link: https://lore.kernel.org/r/20240719110059.797546-4-xukuohai@huaweicloud.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+[Minor conflict resolved due to code context change.]
+Signed-off-by: Jianqi Ren <jianqi.ren.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/core.c | 19 +++++++++++++++++--
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -250,6 +250,7 @@ struct bpf_map {
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+ struct {
++ const struct btf_type *attach_func_proto;
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2121,6 +2121,7 @@ bool bpf_prog_map_compatible(struct bpf_
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ bool ret;
++ struct bpf_prog_aux *aux = fp->aux;
+
+ if (fp->kprobe_override)
+ return false;
+@@ -2132,12 +2133,26 @@ bool bpf_prog_map_compatible(struct bpf_
+ */
+ map->owner.type = prog_type;
+ map->owner.jited = fp->jited;
+- map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
++ map->owner.xdp_has_frags = aux->xdp_has_frags;
++ map->owner.attach_func_proto = aux->attach_func_proto;
+ ret = true;
+ } else {
+ ret = map->owner.type == prog_type &&
+ map->owner.jited == fp->jited &&
+- map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
++ map->owner.xdp_has_frags == aux->xdp_has_frags;
++ if (ret &&
++ map->owner.attach_func_proto != aux->attach_func_proto) {
++ switch (prog_type) {
++ case BPF_PROG_TYPE_TRACING:
++ case BPF_PROG_TYPE_LSM:
++ case BPF_PROG_TYPE_EXT:
++ case BPF_PROG_TYPE_STRUCT_OPS:
++ ret = false;
++ break;
++ default:
++ break;
++ }
++ }
+ }
+ spin_unlock(&map->owner.lock);
+
--- /dev/null
+From 7ad54b98fc1f141cfb70cfe2a3d6def5a85169ff Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@cjr.nz>
+Date: Sun, 18 Dec 2022 14:37:32 -0300
+Subject: cifs: use origin fullpath for automounts
+
+From: Paulo Alcantara <pc@cjr.nz>
+
+commit 7ad54b98fc1f141cfb70cfe2a3d6def5a85169ff upstream.
+
+Use TCP_Server_Info::origin_fullpath instead of cifs_tcon::tree_name
+when building source paths for automounts, as it will be useful for
+domain-based DFS referrals, where the connections and referrals are
+either re-used from the cache or re-created when chasing the dfs
+link.
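+
+As a rough sketch of the choice being made (hypothetical helper name;
+the actual logic lives in dfs_get_automount_devname() in the patch):
+
+  #include <errno.h>
+  #include <stddef.h>
+
+  /* prefer the DFS origin full path (e.g. \\domain\dfsroot\link)
+   * over the plain tree name (e.g. \\server1\share) */
+  static const char *automount_prefix(const char *origin_fullpath,
+                                      const char *tree_name, int *err)
+  {
+          *err = 0;
+          if (origin_fullpath)
+                  return origin_fullpath;
+          (void)tree_name;        /* no fallback: automount needs DFS */
+          *err = -EREMOTE;        /* no DFS origin recorded */
+          return NULL;
+  }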
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[apanyaki: backport to v6.1-stable]
+Signed-off-by: Andrew Paniakin <apanyaki@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifs_dfs_ref.c | 34 ++++++++++++++++++++++++++++++++--
+ fs/smb/client/cifsproto.h | 21 +++++++++++++++++++++
+ fs/smb/client/dir.c | 21 +++++++++++++++------
+ 3 files changed, 68 insertions(+), 8 deletions(-)
+
+--- a/fs/smb/client/cifs_dfs_ref.c
++++ b/fs/smb/client/cifs_dfs_ref.c
+@@ -258,6 +258,31 @@ compose_mount_options_err:
+ goto compose_mount_options_out;
+ }
+
++static int set_dest_addr(struct smb3_fs_context *ctx, const char *full_path)
++{
++ struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
++ char *str_addr = NULL;
++ int rc;
++
++ rc = dns_resolve_server_name_to_ip(full_path, &str_addr, NULL);
++ if (rc < 0)
++ goto out;
++
++ rc = cifs_convert_address(addr, str_addr, strlen(str_addr));
++ if (!rc) {
++ cifs_dbg(FYI, "%s: failed to convert ip address\n", __func__);
++ rc = -EINVAL;
++ goto out;
++ }
++
++ cifs_set_port(addr, ctx->port);
++ rc = 0;
++
++out:
++ kfree(str_addr);
++ return rc;
++}
++
+ /*
+ * Create a vfsmount that we can automount
+ */
+@@ -295,8 +320,7 @@ static struct vfsmount *cifs_dfs_do_auto
+ ctx = smb3_fc2context(fc);
+
+ page = alloc_dentry_path();
+- /* always use tree name prefix */
+- full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
++ full_path = dfs_get_automount_devname(mntpt, page);
+ if (IS_ERR(full_path)) {
+ mnt = ERR_CAST(full_path);
+ goto out;
+@@ -313,6 +337,12 @@ static struct vfsmount *cifs_dfs_do_auto
+ if (rc) {
+ mnt = ERR_PTR(rc);
+ goto out;
++ }
++
++ rc = set_dest_addr(ctx, full_path);
++ if (rc) {
++ mnt = ERR_PTR(rc);
++ goto out;
+ }
+
+ rc = smb3_parse_devname(full_path, ctx);
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -57,8 +57,29 @@ extern void exit_cifs_idmap(void);
+ extern int init_cifs_spnego(void);
+ extern void exit_cifs_spnego(void);
+ extern const char *build_path_from_dentry(struct dentry *, void *);
++char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++ const char *tree, int tree_len,
++ bool prefix);
+ extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry,
+ void *page, bool prefix);
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
++{
++ struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++ struct TCP_Server_Info *server = tcon->ses->server;
++
++ if (unlikely(!server->origin_fullpath))
++ return ERR_PTR(-EREMOTE);
++
++ return __build_path_from_dentry_optional_prefix(dentry, page,
++ server->origin_fullpath,
++ strlen(server->origin_fullpath),
++ true);
++}
++#endif
++
+ static inline void *alloc_dentry_path(void)
+ {
+ return __getname();
+--- a/fs/smb/client/dir.c
++++ b/fs/smb/client/dir.c
+@@ -78,14 +78,13 @@ build_path_from_dentry(struct dentry *di
+ prefix);
+ }
+
+-char *
+-build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
+- bool prefix)
++char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++ const char *tree, int tree_len,
++ bool prefix)
+ {
+ int dfsplen;
+ int pplen = 0;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ char dirsep = CIFS_DIR_SEP(cifs_sb);
+ char *s;
+
+@@ -93,7 +92,7 @@ build_path_from_dentry_optional_prefix(s
+ return ERR_PTR(-ENOMEM);
+
+ if (prefix)
+- dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1);
++ dfsplen = strnlen(tree, tree_len + 1);
+ else
+ dfsplen = 0;
+
+@@ -123,7 +122,7 @@ build_path_from_dentry_optional_prefix(s
+ }
+ if (dfsplen) {
+ s -= dfsplen;
+- memcpy(s, tcon->tree_name, dfsplen);
++ memcpy(s, tree, dfsplen);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+ int i;
+ for (i = 0; i < dfsplen; i++) {
+@@ -135,6 +134,16 @@ build_path_from_dentry_optional_prefix(s
+ return s;
+ }
+
++char *build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++ bool prefix)
++{
++ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++
++ return __build_path_from_dentry_optional_prefix(direntry, page, tcon->tree_name,
++ MAX_TREE_SIZE, prefix);
++}
++
+ /*
+ * Don't allow path components longer than the server max.
+ * Don't allow the separator character in a path component.
--- /dev/null
+From a995199384347261bb3f21b2e171fa7f988bd2f8 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Wed, 9 Apr 2025 12:40:43 +0300
+Subject: mm: fix apply_to_existing_page_range()
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit a995199384347261bb3f21b2e171fa7f988bd2f8 upstream.
+
+In the case of apply_to_existing_page_range(), apply_to_pte_range() is
+reached with 'create' set to false. When !create, the loop over the PTE
+page table is broken.
+
+apply_to_pte_range() will only move to the next PTE entry if 'create' is
+true or if the current entry is not pte_none().
+
+This means that the user of apply_to_existing_page_range() will not have
+'fn' called for any entries after the first pte_none() in the PTE page
+table.
+
+Fix the loop logic in apply_to_pte_range().
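+
+Reduced to a userspace sketch (an int array stands in for the PTE page
+table, zero for pte_none()), the fixed loop advances the cursor on
+every iteration:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int ptes[] = { 1, 0, 3, 0, 5 };
+          int *pte = ptes, *end = ptes + 5;
+
+          do {
+                  if (*pte)               /* !pte_none(*pte) */
+                          printf("fn(%d)\n", *pte);
+          } while (pte++, pte != end);    /* cursor moves unconditionally */
+          return 0;
+  }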
+
+There are no known runtime issues from this, but the fix is trivial enough
+for stable@ even without a known buggy user.
+
+Link: https://lkml.kernel.org/r/20250409094043.1629234-1-kirill.shutemov@linux.intel.com
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Fixes: be1db4753ee6 ("mm/memory.c: add apply_to_existing_page_range() helper")
+Cc: Daniel Axtens <dja@axtens.net>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2659,11 +2659,11 @@ static int apply_to_pte_range(struct mm_
+ if (fn) {
+ do {
+ if (create || !pte_none(*pte)) {
+- err = fn(pte++, addr, data);
++ err = fn(pte, addr, data);
+ if (err)
+ break;
+ }
+- } while (addr += PAGE_SIZE, addr != end);
++ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+ *mask |= PGTBL_PTE_MODIFIED;
+
--- /dev/null
+From 166c2c8a6a4dc2e4ceba9e10cfe81c3e469e3210 Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Thu, 15 Feb 2024 06:33:46 -0800
+Subject: net/sched: act_mirred: don't override retval if we already lost the skb
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+commit 166c2c8a6a4dc2e4ceba9e10cfe81c3e469e3210 upstream.
+
+If we're redirecting the skb and haven't called tcf_mirred_forward()
+yet, we need to tell the core to drop the skb by setting the retcode
+to SHOT. If we have called tcf_mirred_forward(), however, the skb
+is out of our hands and returning SHOT would lead to a use-after-free.
+
+Move the retval override to the error path which actually needs it.
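+
+The resulting control flow, as a toy model (not the driver code): the
+SHOT override is reachable only from failures that occur before the skb
+is handed off:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum { ACT_OK, ACT_SHOT };
+
+  static int mirred_act(bool dev_gone, bool fwd_fails, bool is_redirect)
+  {
+          int retval = ACT_OK;
+
+          if (dev_gone)
+                  goto err_cant_do;       /* skb still ours */
+
+          if (fwd_fails)                  /* skb already handed off */
+                  fprintf(stderr, "overlimit\n");  /* count, keep retval */
+          return retval;
+
+  err_cant_do:
+          if (is_redirect)
+                  retval = ACT_SHOT;      /* safe: skb not consumed yet */
+          return retval;
+  }
+
+  int main(void)
+  {
+          printf("%d\n", mirred_act(true,  false, true));  /* 1 = SHOT */
+          printf("%d\n", mirred_act(false, true,  true));  /* 0 = skb gone */
+          return 0;
+  }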
+
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Fixes: e5cf1baf92cb ("act_mirred: use TC_ACT_REINSERT when possible")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[Minor conflict resolved due to code context change.]
+Signed-off-by: Jianqi Ren <jianqi.ren.cn@windriver.com>
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/act_mirred.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -259,13 +259,13 @@ static int tcf_mirred_act(struct sk_buff
+ dev = rcu_dereference_bh(m->tcfm_dev);
+ if (unlikely(!dev)) {
+ pr_notice_once("tc mirred: target device is gone\n");
+- goto out;
++ goto err_cant_do;
+ }
+
+ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
+ net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ dev->name);
+- goto out;
++ goto err_cant_do;
+ }
+
+ /* we could easily avoid the clone only if called by ingress and clsact;
+@@ -279,7 +279,7 @@ static int tcf_mirred_act(struct sk_buff
+ if (!use_reinsert) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+- goto out;
++ goto err_cant_do;
+ }
+
+ want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+@@ -321,12 +321,16 @@ static int tcf_mirred_act(struct sk_buff
+ }
+
+ err = tcf_mirred_forward(want_ingress, skb2);
+- if (err) {
+-out:
++ if (err)
+ tcf_action_inc_overlimit_qstats(&m->common);
+- if (tcf_mirred_is_act_redirect(m_eaction))
+- retval = TC_ACT_SHOT;
+- }
++ __this_cpu_dec(mirred_nest_level);
++
++ return retval;
++
++err_cant_do:
++ if (is_redirect)
++ retval = TC_ACT_SHOT;
++ tcf_action_inc_overlimit_qstats(&m->common);
+ __this_cpu_dec(mirred_nest_level);
+
+ return retval;
revert-loongarch-bpf-fix-off-by-one-error-in-build_prologue.patch
nvmet-fc-remove-unused-functions.patch
smb-server-fix-potential-null-ptr-deref-of-lease_ctx_info-in-smb2_open.patch
+cifs-use-origin-fullpath-for-automounts.patch
+net-sched-act_mirred-don-t-override-retval-if-we-already-lost-the-skb.patch
+bpf-avoid-holding-freeze_mutex-during-mmap-operation.patch
+bpf-prevent-tail-call-between-progs-attached-to-different-hooks.patch
+blk-cgroup-support-to-track-if-policy-is-online.patch
+blk-iocost-do-not-warn-if-iocg-was-already-offlined.patch
+mm-fix-apply_to_existing_page_range.patch