--- /dev/null
+From 26c7d51d8300a83cc2e6912a6b4afbe7e56adfc2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:50 +0200
+Subject: drm/amd/display: Don't warn when missing DCE encoder caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 8246147f1fbaed522b8bcc02ca34e4260747dcfb ]
+
+On some GPUs the VBIOS just doesn't have encoder caps,
+or maybe not for every encoder.
+
+This isn't really a problem and it's handled well,
+so let's not litter the logs with it.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 33e0227ee96e62d034781e91f215e32fd0b1d512)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 210466b2d8631..48bc459009547 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -899,13 +899,13 @@ void dce110_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+@@ -1800,13 +1800,13 @@ void dce60_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+--
+2.50.1
+
--- /dev/null
+drm-amd-display-don-t-warn-when-missing-dce-encoder-.patch
--- /dev/null
+From 4664b05c1fe81ce80fb62119cfe7e947d3f3e3ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:30 +0200
+Subject: bpf: Add cookie object to bpf maps
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 12df58ad294253ac1d8df0c9bb9cf726397a671d ]
+
+Add a cookie to BPF maps to uniquely identify BPF maps for the timespan
+when the node is up. This is different to comparing a pointer or BPF map
+id which could get rolled over and reused.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-1-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/syscall.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 4236de05a8e70..dd6a62134e7d1 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -200,6 +200,7 @@ struct bpf_map {
+ struct mutex freeze_mutex;
+ atomic64_t writecnt;
+ bool free_after_mult_rcu_gp;
++ u64 cookie; /* write-once */
+ };
+
+ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 6f309248f13fc..6d4d08f57ad38 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -31,6 +31,7 @@
+ #include <linux/bpf-netns.h>
+ #include <linux/rcupdate_trace.h>
+ #include <linux/memcontrol.h>
++#include <linux/cookie.h>
+
+ #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
+ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
+@@ -43,6 +44,7 @@
+ #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
+
+ DEFINE_PER_CPU(int, bpf_prog_active);
++DEFINE_COOKIE(bpf_map_cookie);
+ static DEFINE_IDR(prog_idr);
+ static DEFINE_SPINLOCK(prog_idr_lock);
+ static DEFINE_IDR(map_idr);
+@@ -886,6 +888,10 @@ static int map_create(union bpf_attr *attr)
+ if (err < 0)
+ goto free_map;
+
++ preempt_disable();
++ map->cookie = gen_cookie_next(&bpf_map_cookie);
++ preempt_enable();
++
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+--
+2.50.1
+
--- /dev/null
+From 93712198b7d903c81db45ba75f9d3e9d474f542c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:33 +0200
+Subject: bpf: Fix oob access in cgroup local storage
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit abad3d0bad72a52137e0c350c59542d75ae4f513 ]
+
+Lonial reported that an out-of-bounds access in cgroup local storage
+can be crafted via tail calls. Given two programs each utilizing a
+cgroup local storage with a different value size, and one program
+doing a tail call into the other. The verifier will validate each of
+the individual programs just fine. However, in the runtime context
+the bpf_cg_run_ctx holds a bpf_prog_array_item which contains the
+BPF program as well as any cgroup local storage flavor the program
+uses. Helpers such as bpf_get_local_storage() pick this up from the
+runtime context:
+
+ ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
+ storage = ctx->prog_item->cgroup_storage[stype];
+
+ if (stype == BPF_CGROUP_STORAGE_SHARED)
+ ptr = &READ_ONCE(storage->buf)->data[0];
+ else
+ ptr = this_cpu_ptr(storage->percpu_buf);
+
+For the second program which was called from the originally attached
+one, this means bpf_get_local_storage() will pick up the former
+program's map, not its own. With mismatching sizes, this can result
+in an unintended out-of-bounds access.
+
+To fix this issue, we need to extend bpf_map_owner with an array of
+storage_cookie[] to match on i) the exact maps from the original
+program if the second program was using bpf_get_local_storage(), or
+ii) allow the tail call combination if the second program was not
+using any of the cgroup local storage maps.
+
+Fixes: 7d9c3427894f ("bpf: Make cgroup storages shared between programs on the same cgroup")
+Reported-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-4-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/core.c | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index ea6728c304fe0..1046f290b4b2b 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -256,6 +256,7 @@ struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
++ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ };
+
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index aa3487e244549..73a1c66e54175 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1837,7 +1837,9 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ {
+ enum bpf_prog_type prog_type = fp->aux->dst_prog ? fp->aux->dst_prog->type : fp->type;
+ struct bpf_prog_aux *aux = fp->aux;
++ enum bpf_cgroup_storage_type i;
+ bool ret = false;
++ u64 cookie;
+
+ if (fp->kprobe_override)
+ return ret;
+@@ -1853,12 +1855,25 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ /* Note: xdp_has_frags doesn't exist in aux yet in our branch */
+ /* map->owner->xdp_has_frags = aux->xdp_has_frags; */
+ map->owner->attach_func_proto = aux->attach_func_proto;
++ for_each_cgroup_storage_type(i) {
++ map->owner->storage_cookie[i] =
++ aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ }
+ ret = true;
+ } else {
+ ret = map->owner->type == prog_type &&
+ map->owner->jited == fp->jited;
+ /* Note: xdp_has_frags check would go here when available */
+ /* && map->owner->xdp_has_frags == aux->xdp_has_frags; */
++ for_each_cgroup_storage_type(i) {
++ if (!ret)
++ break;
++ cookie = aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ ret = map->owner->storage_cookie[i] == cookie ||
++ !cookie;
++ }
+ if (ret &&
+ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+--
+2.50.1
+
--- /dev/null
+From e935b059299be9e861583cd7c3079102fefc77ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Sep 2025 17:19:45 -0400
+Subject: bpf: Move bpf map owner out of common struct
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53 ]
+
+Given this is only relevant for BPF tail call maps, it is adding up space
+and penalizing other map types. We also need to extend this with further
+objects to track / compare to. Therefore, lets move this out into a separate
+structure and dynamically allocate it only for BPF tail call maps.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 23 +++++++++--------
+ kernel/bpf/arraymap.c | 1 -
+ kernel/bpf/core.c | 58 ++++++++++++++++++++++++++++++++-----------
+ kernel/bpf/syscall.c | 16 ++++++------
+ 4 files changed, 64 insertions(+), 34 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 6cf63f4240bdd..ea6728c304fe0 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -258,6 +258,7 @@ struct bpf_map_owner {
+ bool xdp_has_frags;
+ const struct btf_type *attach_func_proto;
+ };
++
+ struct bpf_map {
+ /* The first two cachelines with read-mostly members of which some
+ * are also accessed in fast-path (e.g. ops, max_entries).
+@@ -300,6 +301,8 @@ struct bpf_map {
+ };
+ struct mutex freeze_mutex;
+ atomic64_t writecnt;
++ spinlock_t owner_lock;
++ struct bpf_map_owner *owner;
+ bool free_after_mult_rcu_gp;
+ u64 cookie; /* write-once */
+ };
+@@ -1091,16 +1094,6 @@ struct bpf_prog_aux {
+ };
+
+ struct bpf_array_aux {
+- /* 'Ownership' of prog array is claimed by the first program that
+- * is going to use this map or by the first program which FD is
+- * stored in the map to make sure that all callers and callees have
+- * the same prog type and JITed flag.
+- */
+- struct {
+- spinlock_t lock;
+- enum bpf_prog_type type;
+- bool jited;
+- } owner;
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+@@ -1248,6 +1241,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+ }
+
++static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
++{
++ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
++}
++
++static inline void bpf_map_owner_free(struct bpf_map *map)
++{
++ kfree(map->owner);
++}
++
+ struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 2788da290c216..dc42970dda975 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -1044,7 +1044,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+ INIT_WORK(&aux->work, prog_array_map_clear_deferred);
+ INIT_LIST_HEAD(&aux->poke_progs);
+ mutex_init(&aux->poke_mutex);
+- spin_lock_init(&aux->owner.lock);
+
+ map = array_map_alloc(attr);
+ if (IS_ERR(map)) {
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 1ded3eb492b8e..aa3487e244549 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1832,31 +1832,59 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ }
+ #endif
+
+-bool bpf_prog_array_compatible(struct bpf_array *array,
+- const struct bpf_prog *fp)
++static bool __bpf_prog_map_compatible(struct bpf_map *map,
++ const struct bpf_prog *fp)
+ {
+- bool ret;
++ enum bpf_prog_type prog_type = fp->aux->dst_prog ? fp->aux->dst_prog->type : fp->type;
++ struct bpf_prog_aux *aux = fp->aux;
++ bool ret = false;
+
+ if (fp->kprobe_override)
+- return false;
+-
+- spin_lock(&array->aux->owner.lock);
++ return ret;
+
+- if (!array->aux->owner.type) {
+- /* There's no owner yet where we could check for
+- * compatibility.
+- */
+- array->aux->owner.type = fp->type;
+- array->aux->owner.jited = fp->jited;
++ spin_lock(&map->owner_lock);
++ /* There's no owner yet where we could check for compatibility. */
++ if (!map->owner) {
++ map->owner = bpf_map_owner_alloc(map);
++ if (!map->owner)
++ goto err;
++ map->owner->type = prog_type;
++ map->owner->jited = fp->jited;
++ /* Note: xdp_has_frags doesn't exist in aux yet in our branch */
++ /* map->owner->xdp_has_frags = aux->xdp_has_frags; */
++ map->owner->attach_func_proto = aux->attach_func_proto;
+ ret = true;
+ } else {
+- ret = array->aux->owner.type == fp->type &&
+- array->aux->owner.jited == fp->jited;
++ ret = map->owner->type == prog_type &&
++ map->owner->jited == fp->jited;
++ /* Note: xdp_has_frags check would go here when available */
++ /* && map->owner->xdp_has_frags == aux->xdp_has_frags; */
++ if (ret &&
++ map->owner->attach_func_proto != aux->attach_func_proto) {
++ switch (prog_type) {
++ case BPF_PROG_TYPE_TRACING:
++ case BPF_PROG_TYPE_LSM:
++ case BPF_PROG_TYPE_EXT:
++ case BPF_PROG_TYPE_STRUCT_OPS:
++ ret = false;
++ break;
++ default:
++ break;
++ }
++ }
+ }
+- spin_unlock(&array->aux->owner.lock);
++err:
++ spin_unlock(&map->owner_lock);
+ return ret;
+ }
+
++bool bpf_prog_array_compatible(struct bpf_array *array,
++ const struct bpf_prog *fp)
++{
++ struct bpf_map *map = &array->map;
++ return __bpf_prog_map_compatible(map, fp);
++}
++
+ static int bpf_check_tail_call(const struct bpf_prog *fp)
+ {
+ struct bpf_prog_aux *aux = fp->aux;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 6d4d08f57ad38..b80d125dcea97 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -477,6 +477,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
+
+ security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
++ bpf_map_owner_free(map);
+ /* implementation dependent freeing */
+ map->ops->map_free(map);
+ }
+@@ -576,17 +577,15 @@ static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
+
+ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
+ {
+- const struct bpf_map *map = filp->private_data;
+- const struct bpf_array *array;
++ struct bpf_map *map = filp->private_data;
+ u32 type = 0, jited = 0;
+
+- if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+- array = container_of(map, struct bpf_array, map);
+- spin_lock(&array->aux->owner.lock);
+- type = array->aux->owner.type;
+- jited = array->aux->owner.jited;
+- spin_unlock(&array->aux->owner.lock);
++ spin_lock(&map->owner_lock);
++ if (map->owner) {
++ type = map->owner->type;
++ jited = map->owner->jited;
+ }
++ spin_unlock(&map->owner_lock);
+
+ seq_printf(m,
+ "map_type:\t%u\n"
+@@ -895,6 +894,7 @@ static int map_create(union bpf_attr *attr)
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
++ spin_lock_init(&map->owner_lock);
+
+ map->spin_lock_off = -EINVAL;
+ map->timer_off = -EINVAL;
+--
+2.50.1
+
--- /dev/null
+From 3ef7fabfb77e4bf10d6ceb0b5f0c2bd0a078243a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:32 +0200
+Subject: bpf: Move cgroup iterator helpers to bpf.h
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 9621e60f59eae87eb9ffe88d90f24f391a1ef0f0 ]
+
+Move them into bpf.h given we also need them in core code.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-3-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf-cgroup.h | 5 --
+ include/linux/bpf.h | 109 ++++++++++++++++++++++++++++++++++---
+ 2 files changed, 101 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index 3536ab432b30c..79c9d3d412cb6 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -91,9 +91,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+ extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+ #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+
+-#define for_each_cgroup_storage_type(stype) \
+- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+-
+ struct bpf_cgroup_storage_map;
+
+ struct bpf_storage_buffer {
+@@ -545,8 +542,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) ({ 0; })
+
+-#define for_each_cgroup_storage_type(stype) for (; false; )
+-
+ #endif /* CONFIG_CGROUP_BPF */
+
+ #endif /* _BPF_CGROUP_H */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index dd6a62134e7d1..6cf63f4240bdd 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -157,6 +157,107 @@ struct bpf_map_ops {
+ const struct bpf_iter_seq_info *iter_seq_info;
+ };
+
++enum {
++ /* Support at most 11 fields in a BTF type */
++ BTF_FIELDS_MAX = 11,
++};
++
++enum btf_field_type {
++ BPF_SPIN_LOCK = (1 << 0),
++ BPF_TIMER = (1 << 1),
++ BPF_KPTR_UNREF = (1 << 2),
++ BPF_KPTR_REF = (1 << 3),
++ BPF_KPTR_PERCPU = (1 << 4),
++ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
++ BPF_LIST_HEAD = (1 << 5),
++ BPF_LIST_NODE = (1 << 6),
++ BPF_RB_ROOT = (1 << 7),
++ BPF_RB_NODE = (1 << 8),
++ BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
++ BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
++ BPF_REFCOUNT = (1 << 9),
++ BPF_WORKQUEUE = (1 << 10),
++ BPF_UPTR = (1 << 11),
++ BPF_RES_SPIN_LOCK = (1 << 12),
++};
++
++enum bpf_cgroup_storage_type {
++ BPF_CGROUP_STORAGE_SHARED,
++ BPF_CGROUP_STORAGE_PERCPU,
++ __BPF_CGROUP_STORAGE_MAX
++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
++};
++
++#ifdef CONFIG_CGROUP_BPF
++# define for_each_cgroup_storage_type(stype) \
++ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
++#else
++# define for_each_cgroup_storage_type(stype) for (; false; )
++#endif /* CONFIG_CGROUP_BPF */
++
++typedef void (*btf_dtor_kfunc_t)(void *);
++
++struct btf_field_kptr {
++ struct btf *btf;
++ struct module *module;
++ /* dtor used if btf_is_kernel(btf), otherwise the type is
++ * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
++ */
++ btf_dtor_kfunc_t dtor;
++ u32 btf_id;
++};
++
++struct btf_field_graph_root {
++ struct btf *btf;
++ u32 value_btf_id;
++ u32 node_offset;
++ struct btf_record *value_rec;
++};
++
++struct btf_field {
++ u32 offset;
++ u32 size;
++ enum btf_field_type type;
++ union {
++ struct btf_field_kptr kptr;
++ struct btf_field_graph_root graph_root;
++ };
++};
++
++struct btf_record {
++ u32 cnt;
++ u32 field_mask;
++ int spin_lock_off;
++ int res_spin_lock_off;
++ int timer_off;
++ int wq_off;
++ int refcount_off;
++ struct btf_field fields[];
++};
++
++/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
++struct bpf_rb_node_kern {
++ struct rb_node rb_node;
++ void *owner;
++} __attribute__((aligned(8)));
++
++/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
++struct bpf_list_node_kern {
++ struct list_head list_head;
++ void *owner;
++} __attribute__((aligned(8)));
++
++/* 'Ownership' of program-containing map is claimed by the first program
++ * that is going to use this map or by the first program which FD is
++ * stored in the map to make sure that all callers and callees have the
++ * same prog type, JITed flag and xdp_has_frags flag.
++ */
++struct bpf_map_owner {
++ enum bpf_prog_type type;
++ bool jited;
++ bool xdp_has_frags;
++ const struct btf_type *attach_func_proto;
++};
+ struct bpf_map {
+ /* The first two cachelines with read-mostly members of which some
+ * are also accessed in fast-path (e.g. ops, max_entries).
+@@ -614,14 +715,6 @@ struct bpf_prog_offload {
+ u32 jited_len;
+ };
+
+-enum bpf_cgroup_storage_type {
+- BPF_CGROUP_STORAGE_SHARED,
+- BPF_CGROUP_STORAGE_PERCPU,
+- __BPF_CGROUP_STORAGE_MAX
+-};
+-
+-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+-
+ /* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+--
+2.50.1
+
--- /dev/null
+From 40341b698a9202499d02c9c6f2da716fea533434 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:50 +0200
+Subject: drm/amd/display: Don't warn when missing DCE encoder caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 8246147f1fbaed522b8bcc02ca34e4260747dcfb ]
+
+On some GPUs the VBIOS just doesn't have encoder caps,
+or maybe not for every encoder.
+
+This isn't really a problem and it's handled well,
+so let's not litter the logs with it.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 33e0227ee96e62d034781e91f215e32fd0b1d512)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 1e77ffee71b30..fce0c5d72c1a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -899,13 +899,13 @@ void dce110_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+@@ -1799,13 +1799,13 @@ void dce60_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+--
+2.50.1
+
--- /dev/null
+From f96dd685c7941b5ef1cc4dd2b1640cf021b4c369 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 18:07:15 +0800
+Subject: fs: writeback: fix use-after-free in __mark_inode_dirty()
+
+From: Jiufei Xue <jiufei.xue@samsung.com>
+
+[ Upstream commit d02d2c98d25793902f65803ab853b592c7a96b29 ]
+
+A use-after-free issue occurred when __mark_inode_dirty() gets the
+bdi_writeback that was in the progress of switching.
+
+CPU: 1 PID: 562 Comm: systemd-random- Not tainted 6.6.56-gb4403bd46a8e #1
+......
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __mark_inode_dirty+0x124/0x418
+lr : __mark_inode_dirty+0x118/0x418
+sp : ffffffc08c9dbbc0
+........
+Call trace:
+ __mark_inode_dirty+0x124/0x418
+ generic_update_time+0x4c/0x60
+ file_modified+0xcc/0xd0
+ ext4_buffered_write_iter+0x58/0x124
+ ext4_file_write_iter+0x54/0x704
+ vfs_write+0x1c0/0x308
+ ksys_write+0x74/0x10c
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x114
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x40/0xe4
+ el0t_64_sync_handler+0x120/0x12c
+ el0t_64_sync+0x194/0x198
+
+Root cause is:
+
+systemd-random-seed kworker
+----------------------------------------------------------------------
+__mark_inode_dirty inode_switch_wbs_work_fn
+
+ spin_lock(&inode->i_lock);
+ inode_attach_wb
+ locked_inode_to_wb_and_lock_list
+ get inode->i_wb
+ spin_unlock(&inode->i_lock);
+ spin_lock(&wb->list_lock)
+ spin_lock(&inode->i_lock)
+ inode_io_list_move_locked
+ spin_unlock(&wb->list_lock)
+ spin_unlock(&inode->i_lock)
+ spin_lock(&old_wb->list_lock)
+ inode_do_switch_wbs
+ spin_lock(&inode->i_lock)
+ inode->i_wb = new_wb
+ spin_unlock(&inode->i_lock)
+ spin_unlock(&old_wb->list_lock)
+ wb_put_many(old_wb, nr_switched)
+ cgwb_release
+ old wb released
+ wb_wakeup_delayed() accesses wb,
+ then trigger the use-after-free
+ issue
+
+Fix this race condition by holding inode spinlock until
+wb_wakeup_delayed() finished.
+
+Signed-off-by: Jiufei Xue <jiufei.xue@samsung.com>
+Link: https://lore.kernel.org/20250728100715.3863241-1-jiufei.xue@samsung.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fs-writeback.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 672d176524f5c..cb3f1790a296e 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2545,10 +2545,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ wakeup_bdi = inode_io_list_move_locked(inode, wb,
+ dirty_list);
+
+- spin_unlock(&wb->list_lock);
+- spin_unlock(&inode->i_lock);
+- trace_writeback_dirty_inode_enqueue(inode);
+-
+ /*
+ * If this is the first dirty inode for this bdi,
+ * we have to wake-up the corresponding bdi thread
+@@ -2558,6 +2554,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ if (wakeup_bdi &&
+ (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
+ wb_wakeup_delayed(wb);
++
++ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
++ trace_writeback_dirty_inode_enqueue(inode);
++
+ return;
+ }
+ }
+--
+2.50.1
+
--- /dev/null
+bpf-add-cookie-object-to-bpf-maps.patch
+bpf-move-cgroup-iterator-helpers-to-bpf.h.patch
+bpf-move-bpf-map-owner-out-of-common-struct.patch
+bpf-fix-oob-access-in-cgroup-local-storage.patch
+drm-amd-display-don-t-warn-when-missing-dce-encoder-.patch
+fs-writeback-fix-use-after-free-in-__mark_inode_dirt.patch
--- /dev/null
+From 4e95322f43847989e5388809765bbc3c0ddc2cc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 17:08:44 +0800
+Subject: Bluetooth: hci_sync: Avoid adding default advertising on startup
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit de5d7d3f27ddd4046736f558a40e252ddda82013 ]
+
+list_empty(&hdev->adv_instances) is always true during startup,
+so an advertising instance is added by default.
+
+Call trace:
+ dump_backtrace+0x94/0xec
+ show_stack+0x18/0x24
+ dump_stack_lvl+0x48/0x60
+ dump_stack+0x18/0x24
+ hci_setup_ext_adv_instance_sync+0x17c/0x328
+ hci_powered_update_adv_sync+0xb4/0x12c
+ hci_powered_update_sync+0x54/0x70
+ hci_power_on_sync+0xe4/0x278
+ hci_set_powered_sync+0x28/0x34
+ set_powered_sync+0x40/0x58
+ hci_cmd_sync_work+0x94/0x100
+ process_one_work+0x168/0x444
+ worker_thread+0x378/0x3f4
+ kthread+0x108/0x10c
+ ret_from_fork+0x10/0x20
+
+Link: https://github.com/bluez/bluez/issues/1442
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 965b0f2b43a72..a2c3b58db54c2 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3287,7 +3287,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
++ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev)) {
+ err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+--
+2.50.1
+
--- /dev/null
+From 47200630b66375fa3d104636db4420fd5e3dd5d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:30 +0200
+Subject: bpf: Add cookie object to bpf maps
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 12df58ad294253ac1d8df0c9bb9cf726397a671d ]
+
+Add a cookie to BPF maps to uniquely identify BPF maps for the timespan
+when the node is up. This is different to comparing a pointer or BPF map
+id which could get rolled over and reused.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-1-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/syscall.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index e9c1338851e34..2aaa1ed738303 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -260,6 +260,7 @@ struct bpf_map {
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+ s64 __percpu *elem_count;
++ u64 cookie; /* write-once */
+ };
+
+ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index b145f3ef3695e..377bb60b79164 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -35,6 +35,7 @@
+ #include <linux/rcupdate_trace.h>
+ #include <linux/memcontrol.h>
+ #include <linux/trace_events.h>
++#include <linux/cookie.h>
+
+ #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
+ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
+@@ -47,6 +48,7 @@
+ #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
+
+ DEFINE_PER_CPU(int, bpf_prog_active);
++DEFINE_COOKIE(bpf_map_cookie);
+ static DEFINE_IDR(prog_idr);
+ static DEFINE_SPINLOCK(prog_idr_lock);
+ static DEFINE_IDR(map_idr);
+@@ -1152,6 +1154,10 @@ static int map_create(union bpf_attr *attr)
+ if (err < 0)
+ goto free_map;
+
++ preempt_disable();
++ map->cookie = gen_cookie_next(&bpf_map_cookie);
++ preempt_enable();
++
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+--
+2.50.1
+
--- /dev/null
+From 5d65be96c294a92dcb1f3ff3de6d18627f0bb95d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:33 +0200
+Subject: bpf: Fix oob access in cgroup local storage
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit abad3d0bad72a52137e0c350c59542d75ae4f513 ]
+
+Lonial reported that an out-of-bounds access in cgroup local storage
+can be crafted via tail calls. Given two programs each utilizing a
+cgroup local storage with a different value size, and one program
+doing a tail call into the other. The verifier will validate each of
+the individual programs just fine. However, in the runtime context
+the bpf_cg_run_ctx holds a bpf_prog_array_item which contains the
+BPF program as well as any cgroup local storage flavor the program
+uses. Helpers such as bpf_get_local_storage() pick this up from the
+runtime context:
+
+ ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
+ storage = ctx->prog_item->cgroup_storage[stype];
+
+ if (stype == BPF_CGROUP_STORAGE_SHARED)
+ ptr = &READ_ONCE(storage->buf)->data[0];
+ else
+ ptr = this_cpu_ptr(storage->percpu_buf);
+
+For the second program which was called from the originally attached
+one, this means bpf_get_local_storage() will pick up the former
+program's map, not its own. With mismatching sizes, this can result
+in an unintended out-of-bounds access.
+
+To fix this issue, we need to extend bpf_map_owner with an array of
+storage_cookie[] to match on i) the exact maps from the original
+program if the second program was using bpf_get_local_storage(), or
+ii) allow the tail call combination if the second program was not
+using any of the cgroup local storage maps.
+
+Fixes: 7d9c3427894f ("bpf: Make cgroup storages shared between programs on the same cgroup")
+Reported-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-4-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/core.c | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 8f11c61606839..5f01845627d49 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -226,6 +226,7 @@ struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
++ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ };
+
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index d4eb6d9f276a5..3136af6559a82 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2121,7 +2121,9 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ struct bpf_prog_aux *aux = fp->aux;
++ enum bpf_cgroup_storage_type i;
+ bool ret = false;
++ u64 cookie;
+
+ if (fp->kprobe_override)
+ return ret;
+@@ -2136,11 +2138,24 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ map->owner->jited = fp->jited;
+ map->owner->xdp_has_frags = aux->xdp_has_frags;
+ map->owner->attach_func_proto = aux->attach_func_proto;
++ for_each_cgroup_storage_type(i) {
++ map->owner->storage_cookie[i] =
++ aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ }
+ ret = true;
+ } else {
+ ret = map->owner->type == prog_type &&
+ map->owner->jited == fp->jited &&
+ map->owner->xdp_has_frags == aux->xdp_has_frags;
++ for_each_cgroup_storage_type(i) {
++ if (!ret)
++ break;
++ cookie = aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ ret = map->owner->storage_cookie[i] == cookie ||
++ !cookie;
++ }
+ if (ret &&
+ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+--
+2.50.1
+
--- /dev/null
+From 0e6cd097148a22ac01adaa6cbd1ea985d548dc10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Sep 2025 13:34:50 -0400
+Subject: bpf: Move bpf map owner out of common struct
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53 ]
+
+Given this is only relevant for BPF tail call maps, it is adding up space
+and penalizing other map types. We also need to extend this with further
+objects to track / compare to. Therefore, lets move this out into a separate
+structure and dynamically allocate it only for BPF tail call maps.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 36 ++++++++++++++++++++++++------------
+ kernel/bpf/core.c | 35 ++++++++++++++++++-----------------
+ kernel/bpf/syscall.c | 13 +++++++------
+ 3 files changed, 49 insertions(+), 35 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 9fac355afde7a..8f11c61606839 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -217,6 +217,18 @@ struct bpf_map_off_arr {
+ u8 field_sz[BPF_MAP_OFF_ARR_MAX];
+ };
+
++/* 'Ownership' of program-containing map is claimed by the first program
++ * that is going to use this map or by the first program which FD is
++ * stored in the map to make sure that all callers and callees have the
++ * same prog type, JITed flag and xdp_has_frags flag.
++ */
++struct bpf_map_owner {
++ enum bpf_prog_type type;
++ bool jited;
++ bool xdp_has_frags;
++ const struct btf_type *attach_func_proto;
++};
++
+ struct bpf_map {
+ /* The first two cachelines with read-mostly members of which some
+ * are also accessed in fast-path (e.g. ops, max_entries).
+@@ -258,18 +270,8 @@ struct bpf_map {
+ };
+ struct mutex freeze_mutex;
+ atomic64_t writecnt;
+- /* 'Ownership' of program-containing map is claimed by the first program
+- * that is going to use this map or by the first program which FD is
+- * stored in the map to make sure that all callers and callees have the
+- * same prog type, JITed flag and xdp_has_frags flag.
+- */
+- struct {
+- const struct btf_type *attach_func_proto;
+- spinlock_t lock;
+- enum bpf_prog_type type;
+- bool jited;
+- bool xdp_has_frags;
+- } owner;
++ spinlock_t owner_lock;
++ struct bpf_map_owner *owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+@@ -1495,6 +1497,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+ }
+
++static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
++{
++ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
++}
++
++static inline void bpf_map_owner_free(struct bpf_map *map)
++{
++ kfree(map->owner);
++}
++
+ struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 2ed1d00bede0b..d4eb6d9f276a5 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2120,28 +2120,29 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ const struct bpf_prog *fp)
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+- bool ret;
+ struct bpf_prog_aux *aux = fp->aux;
++ bool ret = false;
+
+ if (fp->kprobe_override)
+- return false;
++ return ret;
+
+- spin_lock(&map->owner.lock);
+- if (!map->owner.type) {
+- /* There's no owner yet where we could check for
+- * compatibility.
+- */
+- map->owner.type = prog_type;
+- map->owner.jited = fp->jited;
+- map->owner.xdp_has_frags = aux->xdp_has_frags;
+- map->owner.attach_func_proto = aux->attach_func_proto;
++ spin_lock(&map->owner_lock);
++ /* There's no owner yet where we could check for compatibility. */
++ if (!map->owner) {
++ map->owner = bpf_map_owner_alloc(map);
++ if (!map->owner)
++ goto err;
++ map->owner->type = prog_type;
++ map->owner->jited = fp->jited;
++ map->owner->xdp_has_frags = aux->xdp_has_frags;
++ map->owner->attach_func_proto = aux->attach_func_proto;
+ ret = true;
+ } else {
+- ret = map->owner.type == prog_type &&
+- map->owner.jited == fp->jited &&
+- map->owner.xdp_has_frags == aux->xdp_has_frags;
++ ret = map->owner->type == prog_type &&
++ map->owner->jited == fp->jited &&
++ map->owner->xdp_has_frags == aux->xdp_has_frags;
+ if (ret &&
+- map->owner.attach_func_proto != aux->attach_func_proto) {
++ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+@@ -2154,8 +2155,8 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ }
+ }
+ }
+- spin_unlock(&map->owner.lock);
+-
++err:
++ spin_unlock(&map->owner_lock);
+ return ret;
+ }
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 377bb60b79164..c15d243bfe382 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -631,6 +631,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
+ security_bpf_map_free(map);
+ kfree(map->off_arr);
+ bpf_map_release_memcg(map);
++ bpf_map_owner_free(map);
+ /* implementation dependent freeing, map_free callback also does
+ * bpf_map_free_kptr_off_tab, if needed.
+ */
+@@ -738,12 +739,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
+ struct bpf_map *map = filp->private_data;
+ u32 type = 0, jited = 0;
+
+- if (map_type_contains_progs(map)) {
+- spin_lock(&map->owner.lock);
+- type = map->owner.type;
+- jited = map->owner.jited;
+- spin_unlock(&map->owner.lock);
++ spin_lock(&map->owner_lock);
++ if (map->owner) {
++ type = map->owner->type;
++ jited = map->owner->jited;
+ }
++ spin_unlock(&map->owner_lock);
+
+ seq_printf(m,
+ "map_type:\t%u\n"
+@@ -1161,7 +1162,7 @@ static int map_create(union bpf_attr *attr)
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+- spin_lock_init(&map->owner.lock);
++ spin_lock_init(&map->owner_lock);
+
+ map->spin_lock_off = -EINVAL;
+ map->timer_off = -EINVAL;
+--
+2.50.1
+
--- /dev/null
+From 0e3732ce8b91370937a6897e16bdd9289bf58231 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:32 +0200
+Subject: bpf: Move cgroup iterator helpers to bpf.h
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 9621e60f59eae87eb9ffe88d90f24f391a1ef0f0 ]
+
+Move them into bpf.h given we also need them in core code.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-3-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf-cgroup.h | 5 -----
+ include/linux/bpf.h | 22 ++++++++++++++--------
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index 57e9e109257e5..429c766310c07 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -72,9 +72,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+ extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+ #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+
+-#define for_each_cgroup_storage_type(stype) \
+- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+-
+ struct bpf_cgroup_storage_map;
+
+ struct bpf_storage_buffer {
+@@ -506,8 +503,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) ({ 0; })
+
+-#define for_each_cgroup_storage_type(stype) for (; false; )
+-
+ #endif /* CONFIG_CGROUP_BPF */
+
+ #endif /* _BPF_CGROUP_H */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 2aaa1ed738303..9fac355afde7a 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -181,6 +181,20 @@ enum bpf_kptr_type {
+ BPF_KPTR_REF,
+ };
+
++enum bpf_cgroup_storage_type {
++ BPF_CGROUP_STORAGE_SHARED,
++ BPF_CGROUP_STORAGE_PERCPU,
++ __BPF_CGROUP_STORAGE_MAX
++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
++};
++
++#ifdef CONFIG_CGROUP_BPF
++# define for_each_cgroup_storage_type(stype) \
++ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
++#else
++# define for_each_cgroup_storage_type(stype) for (; false; )
++#endif /* CONFIG_CGROUP_BPF */
++
+ struct bpf_map_value_off_desc {
+ u32 offset;
+ enum bpf_kptr_type type;
+@@ -794,14 +808,6 @@ struct bpf_prog_offload {
+ u32 jited_len;
+ };
+
+-enum bpf_cgroup_storage_type {
+- BPF_CGROUP_STORAGE_SHARED,
+- BPF_CGROUP_STORAGE_PERCPU,
+- __BPF_CGROUP_STORAGE_MAX
+-};
+-
+-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+-
+ /* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+--
+2.50.1
+
--- /dev/null
+From d8ab3c4d6d078addcb6a5f9a9d2446a1318c09fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:32 +0100
+Subject: btrfs: avoid load/store tearing races when checking if an inode was
+ logged
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 986bf6ed44dff7fbae7b43a0882757ee7f5ba21b ]
+
+At inode_logged() we do a couple lockless checks for ->logged_trans, and
+these are generally safe except the second one in case we get a load or
+store tearing due to a concurrent call updating ->logged_trans (either at
+btrfs_log_inode() or later at inode_logged()).
+
+In the first case it's safe to compare to the current transaction ID since
+once ->logged_trans is set the current transaction, we never set it to a
+lower value.
+
+In the second case, where we check if it's greater than zero, we are prone
+to load/store tearing races, since we can have a concurrent task updating
+to the current transaction ID with store tearing for example, instead of
+updating with a single 64 bits write, to update with two 32 bits writes or
+four 16 bits writes. In that case the reading side at inode_logged() could
+see a positive value that does not match the current transaction and then
+return a false negative.
+
+Fix this by doing the second check while holding the inode's spinlock, add
+some comments about it too. Also add the data_race() annotation to the
+first check to avoid any reports from KCSAN (or similar tools) and comment
+about it.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f04eff44e8645..6e8e90bce0467 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3344,15 +3344,32 @@ static int inode_logged(struct btrfs_trans_handle *trans,
+ struct btrfs_key key;
+ int ret;
+
+- if (inode->logged_trans == trans->transid)
++ /*
++ * Quick lockless call, since once ->logged_trans is set to the current
++ * transaction, we never set it to a lower value anywhere else.
++ */
++ if (data_race(inode->logged_trans) == trans->transid)
+ return 1;
+
+ /*
+- * If logged_trans is not 0, then we know the inode logged was not logged
+- * in this transaction, so we can return false right away.
++ * If logged_trans is not 0 and not trans->transid, then we know the
++ * inode was not logged in this transaction, so we can return false
++ * right away. We take the lock to avoid a race caused by load/store
++ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
++ * in this function further below - an update to trans->transid can be
++ * teared into two 32 bits updates for example, in which case we could
++ * see a positive value that is not trans->transid and assume the inode
++ * was not logged when it was.
+ */
+- if (inode->logged_trans > 0)
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == trans->transid) {
++ spin_unlock(&inode->lock);
++ return 1;
++ } else if (inode->logged_trans > 0) {
++ spin_unlock(&inode->lock);
+ return 0;
++ }
++ spin_unlock(&inode->lock);
+
+ /*
+ * If no log tree was created for this root in this transaction, then
+--
+2.50.1
+
--- /dev/null
+From d74e0681b3b79f5eb2b1e7d79a29d65e43646835 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:30 +0100
+Subject: btrfs: fix race between logging inode and checking if it was logged
+ before
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ef07b74e1be56f9eafda6aadebb9ebba0743c9f0 ]
+
+There's a race between checking if an inode was logged before and logging
+an inode that can cause us to mark an inode as not logged just after it
+was logged by a concurrent task:
+
+1) We have inode X which was not logged before, neither in the current
+   transaction nor in a past transaction since the inode was loaded into
+   memory, so its ->logged_trans value is 0;
+
+2) We are at transaction N;
+
+3) Task A calls inode_logged() against inode X, sees that ->logged_trans
+ is 0 and there is a log tree and so it proceeds to search in the log
+ tree for an inode item for inode X. It doesn't see any, but before
+ it sets ->logged_trans to N - 1...
+
+4) Task B calls btrfs_log_inode() against inode X, logs the inode and
+   sets ->logged_trans to N;
+
+5) Task A now sets ->logged_trans to N - 1;
+
+6) At this point anyone calling inode_logged() gets 0 (inode not logged)
+   since ->logged_trans is greater than 0 and less than N, but our inode
+   was really logged. As a consequence operations like rename, unlink and
+   link that happen afterwards in the current transaction end up not
+   updating the log when they should.
+
+Fix this by ensuring inode_logged() only updates ->logged_trans in case
+the inode item is not found in the log tree if after taking the inode's
+lock (spinlock struct btrfs_inode::lock) the ->logged_trans value is still
+zero, since the inode lock is what protects setting ->logged_trans at
+btrfs_log_inode().
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 36 ++++++++++++++++++++++++++++++------
+ 1 file changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index de2b22a56c065..48274bfdeeeb8 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3302,6 +3302,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode)
++{
++ bool ret = false;
++
++ /*
++ * Do this only if ->logged_trans is still 0 to prevent races with
++ * concurrent logging as we may see the inode not logged when
++ * inode_logged() is called but it gets logged after inode_logged() did
++ * not find it in the log tree and we end up setting ->logged_trans to a
++ * value less than trans->transid after the concurrent logging task has
++ * set it to trans->transid. As a consequence, subsequent rename, unlink
++ * and link operations may end up not logging new names and removing old
++ * names from the log.
++ */
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == 0)
++ inode->logged_trans = trans->transid - 1;
++ else if (inode->logged_trans == trans->transid)
++ ret = true;
++ spin_unlock(&inode->lock);
++
++ return ret;
++}
++
+ /*
+ * Check if an inode was logged in the current transaction. This correctly deals
+ * with the case where the inode was logged but has a logged_trans of 0, which
+@@ -3336,10 +3361,8 @@ static int inode_logged(struct btrfs_trans_handle *trans,
+ * transaction's ID, to avoid the search below in a future call in case
+ * a log tree gets created after this.
+ */
+- if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
+- inode->logged_trans = trans->transid - 1;
+- return 0;
+- }
++ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
++ return mark_inode_as_not_logged(trans, inode);
+
+ /*
+ * We have a log tree and the inode's logged_trans is 0. We can't tell
+@@ -3393,8 +3416,7 @@ static int inode_logged(struct btrfs_trans_handle *trans,
+ * Set logged_trans to a value greater than 0 and less then the
+ * current transaction to avoid doing the search in future calls.
+ */
+- inode->logged_trans = trans->transid - 1;
+- return 0;
++ return mark_inode_as_not_logged(trans, inode);
+ }
+
+ /*
+@@ -3402,7 +3424,9 @@ static int inode_logged(struct btrfs_trans_handle *trans,
+ * the current transacion's ID, to avoid future tree searches as long as
+ * the inode is not evicted again.
+ */
++ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
++ spin_unlock(&inode->lock);
+
+ /*
+ * If it's a directory, then we must set last_dir_index_offset to the
+--
+2.50.1
+
--- /dev/null
+From 615ca7294805859513057f7bcc4ef4456ffbbd9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:31 +0100
+Subject: btrfs: fix race between setting last_dir_index_offset and inode
+ logging
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 59a0dd4ab98970086fd096281b1606c506ff2698 ]
+
+At inode_logged() if we find that the inode was not logged before we
+update its ->last_dir_index_offset to (u64)-1 with the goal that the
+next directory log operation will see the (u64)-1 and then figure out
+it must check what was the index of the last logged dir index key and
+update ->last_dir_index_offset to that key's offset (this is done in
+update_last_dir_index_offset()).
+
+This however has a possibility for a time window where a race can happen
+and lead to directory logging skipping dir index keys that should be
+logged. The race happens like this:
+
+1) Task A calls inode_logged(), sees ->logged_trans as 0 and then checks
+ that the inode item was logged before, but before it sets the inode's
+ ->last_dir_index_offset to (u64)-1...
+
+2) Task B is at btrfs_log_inode() which calls inode_logged() early, and
+ that has set ->last_dir_index_offset to (u64)-1;
+
+3) Task B then enters log_directory_changes() which calls
+ update_last_dir_index_offset(). There it sees ->last_dir_index_offset
+ is (u64)-1 and that the inode was logged before (ctx->logged_before is
+ true), and so it searches for the last logged dir index key in the log
+ tree and it finds that it has an offset (index) value of N, so it sets
+ ->last_dir_index_offset to N, so that we can skip index keys that are
+ less than or equal to N (later at process_dir_items_leaf());
+
+4) Task A now sets ->last_dir_index_offset to (u64)-1, undoing the update
+ that task B just did;
+
+5) Task B will now skip every index key when it enters
+ process_dir_items_leaf(), since ->last_dir_index_offset is (u64)-1.
+
+Fix this by making inode_logged() not touch ->last_dir_index_offset and
+initializing it to 0 when an inode is loaded (at btrfs_alloc_inode()) and
+then having update_last_dir_index_offset() treat a value of 0 as meaning
+we must check the log tree and update with the index of the last logged
+index key. This is fine since the minimum possible value for
+->last_dir_index_offset is 1 (BTRFS_DIR_START_INDEX - 1 = 2 - 1 = 1).
+This also simplifies the management of ->last_dir_index_offset and now
+all accesses to it are done under the inode's log_mutex.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/btrfs_inode.h | 2 +-
+ fs/btrfs/inode.c | 1 +
+ fs/btrfs/tree-log.c | 17 ++---------------
+ 3 files changed, 4 insertions(+), 16 deletions(-)
+
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 54c2ccb36b612..f3a3a31477a5d 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -164,7 +164,7 @@ struct btrfs_inode {
+ u64 new_delalloc_bytes;
+ /*
+ * The offset of the last dir index key that was logged.
+- * This is used only for directories.
++ * This is used only for directories. Protected by 'log_mutex'.
+ */
+ u64 last_dir_index_offset;
+ };
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 469a622b440b6..78cd7b8ccfc85 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8926,6 +8926,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ ei->last_sub_trans = 0;
+ ei->logged_trans = 0;
+ ei->delalloc_bytes = 0;
++ /* new_delalloc_bytes and last_dir_index_offset are in a union. */
+ ei->new_delalloc_bytes = 0;
+ ei->defrag_bytes = 0;
+ ei->disk_i_size = 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 48274bfdeeeb8..f04eff44e8645 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3428,19 +3428,6 @@ static int inode_logged(struct btrfs_trans_handle *trans,
+ inode->logged_trans = trans->transid;
+ spin_unlock(&inode->lock);
+
+- /*
+- * If it's a directory, then we must set last_dir_index_offset to the
+- * maximum possible value, so that the next attempt to log the inode does
+- * not skip checking if dir index keys found in modified subvolume tree
+- * leaves have been logged before, otherwise it would result in attempts
+- * to insert duplicate dir index keys in the log tree. This must be done
+- * because last_dir_index_offset is an in-memory only field, not persisted
+- * in the inode item or any other on-disk structure, so its value is lost
+- * once the inode is evicted.
+- */
+- if (S_ISDIR(inode->vfs_inode.i_mode))
+- inode->last_dir_index_offset = (u64)-1;
+-
+ return 1;
+ }
+
+@@ -4010,7 +3997,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+
+ /*
+ * If the inode was logged before and it was evicted, then its
+- * last_dir_index_offset is (u64)-1, so we don't the value of the last index
++ * last_dir_index_offset is 0, so we don't know the value of the last index
+ * key offset. If that's the case, search for it and update the inode. This
+ * is to avoid lookups in the log tree every time we try to insert a dir index
+ * key from a leaf changed in the current transaction, and to allow us to always
+@@ -4026,7 +4013,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode,
+
+ lockdep_assert_held(&inode->log_mutex);
+
+- if (inode->last_dir_index_offset != (u64)-1)
++ if (inode->last_dir_index_offset != 0)
+ return 0;
+
+ if (!ctx->logged_before) {
+--
+2.50.1
+
--- /dev/null
+From 7a952475e02471876d2bf06aa2366228dbe226a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 17:42:14 +0200
+Subject: cdc_ncm: Flag Intel OEM version of Fibocom L850-GL as WWAN
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+[ Upstream commit 4a73a36cb704813f588af13d9842d0ba5a185758 ]
+
+This lets NetworkManager/ModemManager know that this is a modem and
+needs to be connected first.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Link: https://patch.msgid.link/20250814154214.250103-1-lkundrak@v3.sk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/cdc_ncm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 9eb3c6b66a38b..c3b1e9af922b1 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -2042,6 +2042,13 @@ static const struct usb_device_id cdc_devs[] = {
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
++ /* Intel modem (label from OEM reads Fibocom L850-GL) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&wwan_info,
++ },
++
+ /* DisplayLink docking stations */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+--
+2.50.1
+
--- /dev/null
+From 78b1ef8d7a1ede447703eb33902f5a0280b13a66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:50 +0200
+Subject: drm/amd/display: Don't warn when missing DCE encoder caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 8246147f1fbaed522b8bcc02ca34e4260747dcfb ]
+
+On some GPUs the VBIOS just doesn't have encoder caps,
+or maybe not for every encoder.
+
+This isn't really a problem and it's handled well,
+so let's not litter the logs with it.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 33e0227ee96e62d034781e91f215e32fd0b1d512)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 09260c23c3bde..85926d2300444 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -897,13 +897,13 @@ void dce110_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+--
+2.50.1
+
--- /dev/null
+From f06e5dd1ad8818f6b3665c5d2fc63a7978366cce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 18:07:15 +0800
+Subject: fs: writeback: fix use-after-free in __mark_inode_dirty()
+
+From: Jiufei Xue <jiufei.xue@samsung.com>
+
+[ Upstream commit d02d2c98d25793902f65803ab853b592c7a96b29 ]
+
+A use-after-free issue occurred when __mark_inode_dirty() gets the
+bdi_writeback that was in the progress of switching.
+
+CPU: 1 PID: 562 Comm: systemd-random- Not tainted 6.6.56-gb4403bd46a8e #1
+......
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __mark_inode_dirty+0x124/0x418
+lr : __mark_inode_dirty+0x118/0x418
+sp : ffffffc08c9dbbc0
+........
+Call trace:
+ __mark_inode_dirty+0x124/0x418
+ generic_update_time+0x4c/0x60
+ file_modified+0xcc/0xd0
+ ext4_buffered_write_iter+0x58/0x124
+ ext4_file_write_iter+0x54/0x704
+ vfs_write+0x1c0/0x308
+ ksys_write+0x74/0x10c
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x114
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x40/0xe4
+ el0t_64_sync_handler+0x120/0x12c
+ el0t_64_sync+0x194/0x198
+
+Root cause is:
+
+systemd-random-seed kworker
+----------------------------------------------------------------------
+___mark_inode_dirty inode_switch_wbs_work_fn
+
+ spin_lock(&inode->i_lock);
+ inode_attach_wb
+ locked_inode_to_wb_and_lock_list
+ get inode->i_wb
+ spin_unlock(&inode->i_lock);
+ spin_lock(&wb->list_lock)
+ spin_lock(&inode->i_lock)
+ inode_io_list_move_locked
+ spin_unlock(&wb->list_lock)
+ spin_unlock(&inode->i_lock)
+ spin_lock(&old_wb->list_lock)
+ inode_do_switch_wbs
+ spin_lock(&inode->i_lock)
+ inode->i_wb = new_wb
+ spin_unlock(&inode->i_lock)
+ spin_unlock(&old_wb->list_lock)
+ wb_put_many(old_wb, nr_switched)
+ cgwb_release
+ old wb released
+ wb_wakeup_delayed() accesses wb,
+ then trigger the use-after-free
+ issue
+
+Fix this race condition by holding the inode spinlock until
+wb_wakeup_delayed() has finished.
+
+Signed-off-by: Jiufei Xue <jiufei.xue@samsung.com>
+Link: https://lore.kernel.org/20250728100715.3863241-1-jiufei.xue@samsung.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fs-writeback.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index be2d329843d44..41f8ae8a416fb 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2514,10 +2514,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ wakeup_bdi = inode_io_list_move_locked(inode, wb,
+ dirty_list);
+
+- spin_unlock(&wb->list_lock);
+- spin_unlock(&inode->i_lock);
+- trace_writeback_dirty_inode_enqueue(inode);
+-
+ /*
+ * If this is the first dirty inode for this bdi,
+ * we have to wake-up the corresponding bdi thread
+@@ -2527,6 +2523,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ if (wakeup_bdi &&
+ (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
+ wb_wakeup_delayed(wb);
++
++ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
++ trace_writeback_dirty_inode_enqueue(inode);
++
+ return;
+ }
+ }
+--
+2.50.1
+
--- /dev/null
+bpf-add-cookie-object-to-bpf-maps.patch
+bpf-move-cgroup-iterator-helpers-to-bpf.h.patch
+bpf-move-bpf-map-owner-out-of-common-struct.patch
+bpf-fix-oob-access-in-cgroup-local-storage.patch
+btrfs-fix-race-between-logging-inode-and-checking-if.patch
+btrfs-fix-race-between-setting-last_dir_index_offset.patch
+btrfs-avoid-load-store-tearing-races-when-checking-i.patch
+cdc_ncm-flag-intel-oem-version-of-fibocom-l850-gl-as.patch
+drm-amd-display-don-t-warn-when-missing-dce-encoder-.patch
+bluetooth-hci_sync-avoid-adding-default-advertising-.patch
+fs-writeback-fix-use-after-free-in-__mark_inode_dirt.patch
--- /dev/null
+From 5d147e7c4c1404dc1a21fead7ecf0d463b147349 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 17:08:44 +0800
+Subject: Bluetooth: hci_sync: Avoid adding default advertising on startup
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit de5d7d3f27ddd4046736f558a40e252ddda82013 ]
+
+list_empty(&hdev->adv_instances) is always true during startup,
+so an advertising instance is added by default.
+
+Call trace:
+ dump_backtrace+0x94/0xec
+ show_stack+0x18/0x24
+ dump_stack_lvl+0x48/0x60
+ dump_stack+0x18/0x24
+ hci_setup_ext_adv_instance_sync+0x17c/0x328
+ hci_powered_update_adv_sync+0xb4/0x12c
+ hci_powered_update_sync+0x54/0x70
+ hci_power_on_sync+0xe4/0x278
+ hci_set_powered_sync+0x28/0x34
+ set_powered_sync+0x40/0x58
+ hci_cmd_sync_work+0x94/0x100
+ process_one_work+0x168/0x444
+ worker_thread+0x378/0x3f4
+ kthread+0x108/0x10c
+ ret_from_fork+0x10/0x20
+
+Link: https://github.com/bluez/bluez/issues/1442
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index bc2aa514b8c5d..5f5137764b80a 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3354,7 +3354,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
++ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev)) {
+ err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+--
+2.50.1
+
--- /dev/null
+From 307aeae8c3f819392ab0023c26c797a41031caee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:30 +0200
+Subject: bpf: Add cookie object to bpf maps
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 12df58ad294253ac1d8df0c9bb9cf726397a671d ]
+
+Add a cookie to BPF maps to uniquely identify BPF maps for the timespan
+when the node is up. This is different to comparing a pointer or BPF map
+id which could get rolled over and reused.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-1-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/syscall.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1150a595aa54c..fcf48bd746001 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -306,6 +306,7 @@ struct bpf_map {
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
++ u64 cookie; /* write-once */
+ };
+
+ static inline const char *btf_field_type_name(enum btf_field_type type)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index ab74a226e3d6d..27cacdde359e8 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -35,6 +35,7 @@
+ #include <linux/rcupdate_trace.h>
+ #include <linux/memcontrol.h>
+ #include <linux/trace_events.h>
++#include <linux/cookie.h>
+
+ #include <net/netfilter/nf_bpf_link.h>
+ #include <net/netkit.h>
+@@ -51,6 +52,7 @@
+ #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
+
+ DEFINE_PER_CPU(int, bpf_prog_active);
++DEFINE_COOKIE(bpf_map_cookie);
+ static DEFINE_IDR(prog_idr);
+ static DEFINE_SPINLOCK(prog_idr_lock);
+ static DEFINE_IDR(map_idr);
+@@ -1360,6 +1362,10 @@ static int map_create(union bpf_attr *attr)
+ if (err < 0)
+ goto free_map;
+
++ preempt_disable();
++ map->cookie = gen_cookie_next(&bpf_map_cookie);
++ preempt_enable();
++
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+--
+2.50.1
+
--- /dev/null
+From 270d384c687791d0088ec16d96335b6e46381aa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:33 +0200
+Subject: bpf: Fix oob access in cgroup local storage
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit abad3d0bad72a52137e0c350c59542d75ae4f513 ]
+
+Lonial reported that an out-of-bounds access in cgroup local storage
+can be crafted via tail calls. Given two programs each utilizing a
+cgroup local storage with a different value size, and one program
+doing a tail call into the other. The verifier will validate each of
+the individual programs just fine. However, in the runtime context
+the bpf_cg_run_ctx holds a bpf_prog_array_item which contains the
+BPF program as well as any cgroup local storage flavor the program
+uses. Helpers such as bpf_get_local_storage() pick this up from the
+runtime context:
+
+ ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
+ storage = ctx->prog_item->cgroup_storage[stype];
+
+ if (stype == BPF_CGROUP_STORAGE_SHARED)
+ ptr = &READ_ONCE(storage->buf)->data[0];
+ else
+ ptr = this_cpu_ptr(storage->percpu_buf);
+
+For the second program which was called from the originally attached
+one, this means bpf_get_local_storage() will pick up the former
+program's map, not its own. With mismatching sizes, this can result
+in an unintended out-of-bounds access.
+
+To fix this issue, we need to extend bpf_map_owner with an array of
+storage_cookie[] to match on i) the exact maps from the original
+program if the second program was using bpf_get_local_storage(), or
+ii) allow the tail call combination if the second program was not
+using any of the cgroup local storage maps.
+
+Fixes: 7d9c3427894f ("bpf: Make cgroup storages shared between programs on the same cgroup")
+Reported-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-4-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/core.c | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index fa79393e41f82..6db72c66de91d 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -279,6 +279,7 @@ struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
++ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ };
+
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 0e2daea7e1efc..6f91e3a123e55 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2311,7 +2311,9 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ struct bpf_prog_aux *aux = fp->aux;
++ enum bpf_cgroup_storage_type i;
+ bool ret = false;
++ u64 cookie;
+
+ if (fp->kprobe_override)
+ return ret;
+@@ -2326,11 +2328,24 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ map->owner->jited = fp->jited;
+ map->owner->xdp_has_frags = aux->xdp_has_frags;
+ map->owner->attach_func_proto = aux->attach_func_proto;
++ for_each_cgroup_storage_type(i) {
++ map->owner->storage_cookie[i] =
++ aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ }
+ ret = true;
+ } else {
+ ret = map->owner->type == prog_type &&
+ map->owner->jited == fp->jited &&
+ map->owner->xdp_has_frags == aux->xdp_has_frags;
++ for_each_cgroup_storage_type(i) {
++ if (!ret)
++ break;
++ cookie = aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ ret = map->owner->storage_cookie[i] == cookie ||
++ !cookie;
++ }
+ if (ret &&
+ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+--
+2.50.1
+
--- /dev/null
+From 3d3e7783c84350e2f1bab415f97a7c176d2d1b5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:31 +0200
+Subject: bpf: Move bpf map owner out of common struct
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53 ]
+
+Given this is only relevant for BPF tail call maps, it is adding up space
+and penalizing other map types. We also need to extend this with further
+objects to track / compare to. Therefore, lets move this out into a separate
+structure and dynamically allocate it only for BPF tail call maps.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: abad3d0bad72 ("bpf: Fix oob access in cgroup local storage")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 36 ++++++++++++++++++++++++------------
+ kernel/bpf/core.c | 35 ++++++++++++++++++-----------------
+ kernel/bpf/syscall.c | 13 +++++++------
+ 3 files changed, 49 insertions(+), 35 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index fcf48bd746001..cf3ca7b7f4487 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -256,6 +256,18 @@ struct bpf_list_node_kern {
+ void *owner;
+ } __attribute__((aligned(8)));
+
++/* 'Ownership' of program-containing map is claimed by the first program
++ * that is going to use this map or by the first program which FD is
++ * stored in the map to make sure that all callers and callees have the
++ * same prog type, JITed flag and xdp_has_frags flag.
++ */
++struct bpf_map_owner {
++ enum bpf_prog_type type;
++ bool jited;
++ bool xdp_has_frags;
++ const struct btf_type *attach_func_proto;
++};
++
+ struct bpf_map {
+ const struct bpf_map_ops *ops;
+ struct bpf_map *inner_map_meta;
+@@ -288,18 +300,8 @@ struct bpf_map {
+ struct rcu_head rcu;
+ };
+ atomic64_t writecnt;
+- /* 'Ownership' of program-containing map is claimed by the first program
+- * that is going to use this map or by the first program which FD is
+- * stored in the map to make sure that all callers and callees have the
+- * same prog type, JITed flag and xdp_has_frags flag.
+- */
+- struct {
+- const struct btf_type *attach_func_proto;
+- spinlock_t lock;
+- enum bpf_prog_type type;
+- bool jited;
+- bool xdp_has_frags;
+- } owner;
++ spinlock_t owner_lock;
++ struct bpf_map_owner *owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+@@ -1981,6 +1983,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+ }
+
++static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
++{
++ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
++}
++
++static inline void bpf_map_owner_free(struct bpf_map *map)
++{
++ kfree(map->owner);
++}
++
+ struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 767dcb8471f63..0e2daea7e1efc 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2310,28 +2310,29 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ const struct bpf_prog *fp)
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+- bool ret;
+ struct bpf_prog_aux *aux = fp->aux;
++ bool ret = false;
+
+ if (fp->kprobe_override)
+- return false;
++ return ret;
+
+- spin_lock(&map->owner.lock);
+- if (!map->owner.type) {
+- /* There's no owner yet where we could check for
+- * compatibility.
+- */
+- map->owner.type = prog_type;
+- map->owner.jited = fp->jited;
+- map->owner.xdp_has_frags = aux->xdp_has_frags;
+- map->owner.attach_func_proto = aux->attach_func_proto;
++ spin_lock(&map->owner_lock);
++ /* There's no owner yet where we could check for compatibility. */
++ if (!map->owner) {
++ map->owner = bpf_map_owner_alloc(map);
++ if (!map->owner)
++ goto err;
++ map->owner->type = prog_type;
++ map->owner->jited = fp->jited;
++ map->owner->xdp_has_frags = aux->xdp_has_frags;
++ map->owner->attach_func_proto = aux->attach_func_proto;
+ ret = true;
+ } else {
+- ret = map->owner.type == prog_type &&
+- map->owner.jited == fp->jited &&
+- map->owner.xdp_has_frags == aux->xdp_has_frags;
++ ret = map->owner->type == prog_type &&
++ map->owner->jited == fp->jited &&
++ map->owner->xdp_has_frags == aux->xdp_has_frags;
+ if (ret &&
+- map->owner.attach_func_proto != aux->attach_func_proto) {
++ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+@@ -2344,8 +2345,8 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ }
+ }
+ }
+- spin_unlock(&map->owner.lock);
+-
++err:
++ spin_unlock(&map->owner_lock);
+ return ret;
+ }
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 27cacdde359e8..ba4543e771a6e 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -767,6 +767,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
+
+ security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
++ bpf_map_owner_free(map);
+ bpf_map_free(map);
+ }
+
+@@ -861,12 +862,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
+ struct bpf_map *map = filp->private_data;
+ u32 type = 0, jited = 0;
+
+- if (map_type_contains_progs(map)) {
+- spin_lock(&map->owner.lock);
+- type = map->owner.type;
+- jited = map->owner.jited;
+- spin_unlock(&map->owner.lock);
++ spin_lock(&map->owner_lock);
++ if (map->owner) {
++ type = map->owner->type;
++ jited = map->owner->jited;
+ }
++ spin_unlock(&map->owner_lock);
+
+ seq_printf(m,
+ "map_type:\t%u\n"
+@@ -1369,7 +1370,7 @@ static int map_create(union bpf_attr *attr)
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+- spin_lock_init(&map->owner.lock);
++ spin_lock_init(&map->owner_lock);
+
+ if (attr->btf_key_type_id || attr->btf_value_type_id ||
+ /* Even the map's value is a kernel's struct,
+--
+2.50.1
+
--- /dev/null
+From 93c0aca9d39eabaaf88dedc70e45965d82bb4f37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:32 +0200
+Subject: bpf: Move cgroup iterator helpers to bpf.h
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 9621e60f59eae87eb9ffe88d90f24f391a1ef0f0 ]
+
+Move them into bpf.h given we also need them in core code.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-3-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: abad3d0bad72 ("bpf: Fix oob access in cgroup local storage")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf-cgroup.h | 5 -----
+ include/linux/bpf.h | 22 ++++++++++++++--------
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index 7e029c82ae45f..26ee360c345fa 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -77,9 +77,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+ extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+ #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+
+-#define for_each_cgroup_storage_type(stype) \
+- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+-
+ struct bpf_cgroup_storage_map;
+
+ struct bpf_storage_buffer {
+@@ -518,8 +515,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) ({ 0; })
+
+-#define for_each_cgroup_storage_type(stype) for (; false; )
+-
+ #endif /* CONFIG_CGROUP_BPF */
+
+ #endif /* _BPF_CGROUP_H */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index cf3ca7b7f4487..fa79393e41f82 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -205,6 +205,20 @@ enum btf_field_type {
+ BPF_WORKQUEUE = (1 << 10),
+ };
+
++enum bpf_cgroup_storage_type {
++ BPF_CGROUP_STORAGE_SHARED,
++ BPF_CGROUP_STORAGE_PERCPU,
++ __BPF_CGROUP_STORAGE_MAX
++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
++};
++
++#ifdef CONFIG_CGROUP_BPF
++# define for_each_cgroup_storage_type(stype) \
++ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
++#else
++# define for_each_cgroup_storage_type(stype) for (; false; )
++#endif /* CONFIG_CGROUP_BPF */
++
+ typedef void (*btf_dtor_kfunc_t)(void *);
+
+ struct btf_field_kptr {
+@@ -1028,14 +1042,6 @@ struct bpf_prog_offload {
+ u32 jited_len;
+ };
+
+-enum bpf_cgroup_storage_type {
+- BPF_CGROUP_STORAGE_SHARED,
+- BPF_CGROUP_STORAGE_PERCPU,
+- __BPF_CGROUP_STORAGE_MAX
+-};
+-
+-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+-
+ /* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+--
+2.50.1
+
--- /dev/null
+From b12170ba9843a31f6edf12d12539308578e9cbac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:32 +0100
+Subject: btrfs: avoid load/store tearing races when checking if an inode was
+ logged
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 986bf6ed44dff7fbae7b43a0882757ee7f5ba21b ]
+
+At inode_logged() we do a couple lockless checks for ->logged_trans, and
+these are generally safe except the second one in case we get a load or
+store tearing due to a concurrent call updating ->logged_trans (either at
+btrfs_log_inode() or later at inode_logged()).
+
+In the first case it's safe to compare to the current transaction ID since
+once ->logged_trans is set the current transaction, we never set it to a
+lower value.
+
+In the second case, where we check if it's greater than zero, we are prone
+to load/store tearing races, since we can have a concurrent task updating
+it to the current transaction ID with store tearing - for example, instead
+of a single 64-bit write, the update may be done as two 32-bit writes or
+four 16-bit writes. In that case the reading side at inode_logged() could
+see a positive value that does not match the current transaction and then
+return a false negative.
+
+Fix this by doing the second check while holding the inode's spinlock, add
+some comments about it too. Also add the data_race() annotation to the
+first check to avoid any reports from KCSAN (or similar tools) and comment
+about it.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index dc4c9fb0c0113..f917fdae7e672 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3364,15 +3364,32 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_key key;
+ int ret;
+
+- if (inode->logged_trans == trans->transid)
++ /*
++ * Quick lockless call, since once ->logged_trans is set to the current
++ * transaction, we never set it to a lower value anywhere else.
++ */
++ if (data_race(inode->logged_trans) == trans->transid)
+ return 1;
+
+ /*
+- * If logged_trans is not 0, then we know the inode logged was not logged
+- * in this transaction, so we can return false right away.
++ * If logged_trans is not 0 and not trans->transid, then we know the
++ * inode was not logged in this transaction, so we can return false
++ * right away. We take the lock to avoid a race caused by load/store
++ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
++ * in this function further below - an update to trans->transid can be
++ * teared into two 32 bits updates for example, in which case we could
++ * see a positive value that is not trans->transid and assume the inode
++ * was not logged when it was.
+ */
+- if (inode->logged_trans > 0)
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == trans->transid) {
++ spin_unlock(&inode->lock);
++ return 1;
++ } else if (inode->logged_trans > 0) {
++ spin_unlock(&inode->lock);
+ return 0;
++ }
++ spin_unlock(&inode->lock);
+
+ /*
+ * If no log tree was created for this root in this transaction, then
+--
+2.50.1
+
--- /dev/null
+From 8e630e7b8f48b35004fab39cbdf8e6e5b5f85f47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:30 +0100
+Subject: btrfs: fix race between logging inode and checking if it was logged
+ before
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ef07b74e1be56f9eafda6aadebb9ebba0743c9f0 ]
+
+There's a race between checking if an inode was logged before and logging
+an inode that can cause us to mark an inode as not logged just after it
+was logged by a concurrent task:
+
+1) We have inode X which was not logged before neither in the current
+ transaction not in past transaction since the inode was loaded into
+ memory, so it's ->logged_trans value is 0;
+
+2) We are at transaction N;
+
+3) Task A calls inode_logged() against inode X, sees that ->logged_trans
+ is 0 and there is a log tree and so it proceeds to search in the log
+ tree for an inode item for inode X. It doesn't see any, but before
+ it sets ->logged_trans to N - 1...
+
+4) Task B calls btrfs_log_inode() against inode X, logs the inode and
+ sets ->logged_trans to N;
+
+5) Task A now sets ->logged_trans to N - 1;
+
+6) At this point anyone calling inode_logged() gets 0 (inode not logged)
+ since ->logged_trans is greater than 0 and less than N, but our inode
+ was really logged. As a consequence operations like rename, unlink and
+ link that happen afterwards in the current transaction end up not
+ updating the log when they should.
+
+Fix this by ensuring inode_logged() only updates ->logged_trans in case
+the inode item is not found in the log tree if after taking the inode's
+lock (spinlock struct btrfs_inode::lock) the ->logged_trans value is still
+zero, since the inode lock is what protects setting ->logged_trans at
+btrfs_log_inode().
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 36 ++++++++++++++++++++++++++++++------
+ 1 file changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 31adea5b0b96a..bf5d7e52467ac 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3322,6 +3322,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode)
++{
++ bool ret = false;
++
++ /*
++ * Do this only if ->logged_trans is still 0 to prevent races with
++ * concurrent logging as we may see the inode not logged when
++ * inode_logged() is called but it gets logged after inode_logged() did
++ * not find it in the log tree and we end up setting ->logged_trans to a
++ * value less than trans->transid after the concurrent logging task has
++ * set it to trans->transid. As a consequence, subsequent rename, unlink
++ * and link operations may end up not logging new names and removing old
++ * names from the log.
++ */
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == 0)
++ inode->logged_trans = trans->transid - 1;
++ else if (inode->logged_trans == trans->transid)
++ ret = true;
++ spin_unlock(&inode->lock);
++
++ return ret;
++}
++
+ /*
+ * Check if an inode was logged in the current transaction. This correctly deals
+ * with the case where the inode was logged but has a logged_trans of 0, which
+@@ -3356,10 +3381,8 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * transaction's ID, to avoid the search below in a future call in case
+ * a log tree gets created after this.
+ */
+- if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
+- inode->logged_trans = trans->transid - 1;
+- return 0;
+- }
++ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
++ return mark_inode_as_not_logged(trans, inode);
+
+ /*
+ * We have a log tree and the inode's logged_trans is 0. We can't tell
+@@ -3413,8 +3436,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * Set logged_trans to a value greater than 0 and less then the
+ * current transaction to avoid doing the search in future calls.
+ */
+- inode->logged_trans = trans->transid - 1;
+- return 0;
++ return mark_inode_as_not_logged(trans, inode);
+ }
+
+ /*
+@@ -3422,7 +3444,9 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * the current transacion's ID, to avoid future tree searches as long as
+ * the inode is not evicted again.
+ */
++ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
++ spin_unlock(&inode->lock);
+
+ /*
+ * If it's a directory, then we must set last_dir_index_offset to the
+--
+2.50.1
+
--- /dev/null
+From ae571216edcc31567b43ab5d48b083d91a509a0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:31 +0100
+Subject: btrfs: fix race between setting last_dir_index_offset and inode
+ logging
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 59a0dd4ab98970086fd096281b1606c506ff2698 ]
+
+At inode_logged() if we find that the inode was not logged before we
+update its ->last_dir_index_offset to (u64)-1 with the goal that the
+next directory log operation will see the (u64)-1 and then figure out
+it must check what was the index of the last logged dir index key and
+update ->last_dir_index_offset to that key's offset (this is done in
+update_last_dir_index_offset()).
+
+This however has a possibility for a time window where a race can happen
+and lead to directory logging skipping dir index keys that should be
+logged. The race happens like this:
+
+1) Task A calls inode_logged(), sees ->logged_trans as 0 and then checks
+ that the inode item was logged before, but before it sets the inode's
+ ->last_dir_index_offset to (u64)-1...
+
+2) Task B is at btrfs_log_inode() which calls inode_logged() early, and
+ that has set ->last_dir_index_offset to (u64)-1;
+
+3) Task B then enters log_directory_changes() which calls
+ update_last_dir_index_offset(). There it sees ->last_dir_index_offset
+ is (u64)-1 and that the inode was logged before (ctx->logged_before is
+ true), and so it searches for the last logged dir index key in the log
+ tree and it finds that it has an offset (index) value of N, so it sets
+ ->last_dir_index_offset to N, so that we can skip index keys that are
+ less than or equal to N (later at process_dir_items_leaf());
+
+4) Task A now sets ->last_dir_index_offset to (u64)-1, undoing the update
+ that task B just did;
+
+5) Task B will now skip every index key when it enters
+ process_dir_items_leaf(), since ->last_dir_index_offset is (u64)-1.
+
+Fix this by making inode_logged() not touch ->last_dir_index_offset and
+initializing it to 0 when an inode is loaded (at btrfs_alloc_inode()) and
+then having update_last_dir_index_offset() treat a value of 0 as meaning
+we must check the log tree and update with the index of the last logged
+index key. This is fine since the minimum possible value for
+->last_dir_index_offset is 1 (BTRFS_DIR_START_INDEX - 1 = 2 - 1 = 1).
+This also simplifies the management of ->last_dir_index_offset and now
+all accesses to it are done under the inode's log_mutex.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/btrfs_inode.h | 2 +-
+ fs/btrfs/inode.c | 1 +
+ fs/btrfs/tree-log.c | 17 ++---------------
+ 3 files changed, 4 insertions(+), 16 deletions(-)
+
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index db53a3263fbd0..89f582612fb99 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -247,7 +247,7 @@ struct btrfs_inode {
+ u64 new_delalloc_bytes;
+ /*
+ * The offset of the last dir index key that was logged.
+- * This is used only for directories.
++ * This is used only for directories. Protected by 'log_mutex'.
+ */
+ u64 last_dir_index_offset;
+ };
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f84e3f9fad84a..98d087a14be5e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7767,6 +7767,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ ei->last_sub_trans = 0;
+ ei->logged_trans = 0;
+ ei->delalloc_bytes = 0;
++ /* new_delalloc_bytes and last_dir_index_offset are in a union. */
+ ei->new_delalloc_bytes = 0;
+ ei->defrag_bytes = 0;
+ ei->disk_i_size = 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index bf5d7e52467ac..dc4c9fb0c0113 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3448,19 +3448,6 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ inode->logged_trans = trans->transid;
+ spin_unlock(&inode->lock);
+
+- /*
+- * If it's a directory, then we must set last_dir_index_offset to the
+- * maximum possible value, so that the next attempt to log the inode does
+- * not skip checking if dir index keys found in modified subvolume tree
+- * leaves have been logged before, otherwise it would result in attempts
+- * to insert duplicate dir index keys in the log tree. This must be done
+- * because last_dir_index_offset is an in-memory only field, not persisted
+- * in the inode item or any other on-disk structure, so its value is lost
+- * once the inode is evicted.
+- */
+- if (S_ISDIR(inode->vfs_inode.i_mode))
+- inode->last_dir_index_offset = (u64)-1;
+-
+ return 1;
+ }
+
+@@ -4052,7 +4039,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+
+ /*
+ * If the inode was logged before and it was evicted, then its
+- * last_dir_index_offset is (u64)-1, so we don't the value of the last index
++ * last_dir_index_offset is 0, so we don't know the value of the last index
+ * key offset. If that's the case, search for it and update the inode. This
+ * is to avoid lookups in the log tree every time we try to insert a dir index
+ * key from a leaf changed in the current transaction, and to allow us to always
+@@ -4068,7 +4055,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode,
+
+ lockdep_assert_held(&inode->log_mutex);
+
+- if (inode->last_dir_index_offset != (u64)-1)
++ if (inode->last_dir_index_offset != 0)
+ return 0;
+
+ if (!ctx->logged_before) {
+--
+2.50.1
+
--- /dev/null
+From 791f625eb52e79285b22666605e8f2918597c0cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 15:38:10 +0200
+Subject: btrfs: zoned: skip ZONE FINISH of conventional zones
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit f0ba0e7172a222ea6043b61ecd86723c46d7bcf2 ]
+
+Don't call ZONE FINISH for conventional zones as this will result in I/O
+errors. Instead check if the zone that needs finishing is a conventional
+zone and if yes skip it.
+
+Also factor out the actual handling of finishing a single zone into a
+helper function, as do_zone_finish() is growing ever bigger and the
+indentations levels are getting higher.
+
+Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 55 ++++++++++++++++++++++++++++++------------------
+ 1 file changed, 35 insertions(+), 20 deletions(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 2fdb2987c83ac..8e8edfe0c6190 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2186,6 +2186,40 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group)
+ rcu_read_unlock();
+ }
+
++static int call_zone_finish(struct btrfs_block_group *block_group,
++ struct btrfs_io_stripe *stripe)
++{
++ struct btrfs_device *device = stripe->dev;
++ const u64 physical = stripe->physical;
++ struct btrfs_zoned_device_info *zinfo = device->zone_info;
++ int ret;
++
++ if (!device->bdev)
++ return 0;
++
++ if (zinfo->max_active_zones == 0)
++ return 0;
++
++ if (btrfs_dev_is_sequential(device, physical)) {
++ unsigned int nofs_flags;
++
++ nofs_flags = memalloc_nofs_save();
++ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
++ physical >> SECTOR_SHIFT,
++ zinfo->zone_size >> SECTOR_SHIFT);
++ memalloc_nofs_restore(nofs_flags);
++
++ if (ret)
++ return ret;
++ }
++
++ if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
++ zinfo->reserved_active_zones++;
++ btrfs_dev_clear_active_zone(device, physical);
++
++ return 0;
++}
++
+ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
+ {
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+@@ -2270,31 +2304,12 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ down_read(&dev_replace->rwsem);
+ map = block_group->physical_map;
+ for (i = 0; i < map->num_stripes; i++) {
+- struct btrfs_device *device = map->stripes[i].dev;
+- const u64 physical = map->stripes[i].physical;
+- struct btrfs_zoned_device_info *zinfo = device->zone_info;
+- unsigned int nofs_flags;
+-
+- if (!device->bdev)
+- continue;
+-
+- if (zinfo->max_active_zones == 0)
+- continue;
+-
+- nofs_flags = memalloc_nofs_save();
+- ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+- physical >> SECTOR_SHIFT,
+- zinfo->zone_size >> SECTOR_SHIFT);
+- memalloc_nofs_restore(nofs_flags);
+
++ ret = call_zone_finish(block_group, &map->stripes[i]);
+ if (ret) {
+ up_read(&dev_replace->rwsem);
+ return ret;
+ }
+-
+- if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+- zinfo->reserved_active_zones++;
+- btrfs_dev_clear_active_zone(device, physical);
+ }
+ up_read(&dev_replace->rwsem);
+
+--
+2.50.1
+
--- /dev/null
+From 78326585a5b7efaf16187019e9636e1e00c7e381 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 17:42:14 +0200
+Subject: cdc_ncm: Flag Intel OEM version of Fibocom L850-GL as WWAN
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+[ Upstream commit 4a73a36cb704813f588af13d9842d0ba5a185758 ]
+
+This lets NetworkManager/ModemManager know that this is a modem and
+needs to be connected first.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Link: https://patch.msgid.link/20250814154214.250103-1-lkundrak@v3.sk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/cdc_ncm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 4abfdfcf0e289..5c89e03f93d61 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -2088,6 +2088,13 @@ static const struct usb_device_id cdc_devs[] = {
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
++ /* Intel modem (label from OEM reads Fibocom L850-GL) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&wwan_info,
++ },
++
+ /* DisplayLink docking stations */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+--
+2.50.1
+
--- /dev/null
+From 130c281c64e13934bd0530d3601b0d82eb06863c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 May 2025 15:10:58 +0900
+Subject: cpupower: Fix a bug where the -t option of the set subcommand was not
+ working.
+
+From: Shinji Nomoto <fj5851bi@fujitsu.com>
+
+[ Upstream commit b3eaf14f4c63fd6abc7b68c6d7a07c5680a6d8e5 ]
+
+The set subcommand's -t option is documented as being available for boost
+configuration, but it was not actually functioning due to a bug
+in the option handling.
+
+Link: https://lore.kernel.org/r/20250522061122.2149188-2-fj5851bi@fujitsu.com
+Signed-off-by: Shinji Nomoto <fj5851bi@fujitsu.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/power/cpupower/utils/cpupower-set.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index 0677b58374abf..59ace394cf3ef 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -62,8 +62,8 @@ int cmd_set(int argc, char **argv)
+
+ params.params = 0;
+ /* parameter parsing */
+- while ((ret = getopt_long(argc, argv, "b:e:m:",
+- set_opts, NULL)) != -1) {
++ while ((ret = getopt_long(argc, argv, "b:e:m:t:",
++ set_opts, NULL)) != -1) {
+ switch (ret) {
+ case 'b':
+ if (params.perf_bias)
+--
+2.50.1
+
--- /dev/null
+From 7b1976978b9e5d621672fd987e7c56aa9f08f2bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:50 +0200
+Subject: drm/amd/display: Don't warn when missing DCE encoder caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 8246147f1fbaed522b8bcc02ca34e4260747dcfb ]
+
+On some GPUs the VBIOS just doesn't have encoder caps,
+or maybe not for every encoder.
+
+This isn't really a problem and it's handled well,
+so let's not litter the logs with it.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 33e0227ee96e62d034781e91f215e32fd0b1d512)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 4a9d07c31bc5b..0c50fe266c8a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+--
+2.50.1
+
--- /dev/null
+From cb1c250e64e93dbb297b2d28d65a37eb60928eb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Jul 2025 08:36:58 +0000
+Subject: drm/rockchip: vop2: make vp registers nonvolatile
+
+From: Piotr Zalewski <pZ010001011111@proton.me>
+
+[ Upstream commit a52dffaa46c2c5ff0b311c4dc1288581f7b9109e ]
+
+Make video port registers nonvolatile. As DSP_CTRL register is written
+to twice due to gamma LUT enable bit which is set outside of the main
+DSP_CTRL initialization within atomic_enable (for rk356x case it is also
+necessary to always disable gamma LUT before writing a new LUT) there is
+a chance that DSP_CTRL value read-out in gamma LUT init/update code is
+not the one which was written by the preceding DSP_CTRL initialization
+code within atomic_enable. This might result in misconfigured DSP_CTRL
+which leads to no visual output[1]. Since DSP_CTRL write takes effect
+after VSYNC[1] the issue is not always present. When tested on Pinetab2
+with kernel 6.14 it happens only when DRM is compiled as a module[1].
+In order to confirm that it is a timing issue I inserted 18ms udelay
+before vop2_crtc_atomic_try_set_gamma in atomic enable and compiled DRM
+as module - this has also fixed the issue.
+
+[1] https://lore.kernel.org/linux-rockchip/562b38e5.a496.1975f09f983.Coremail.andyshrk@163.com/
+
+Reported-by: Diederik de Haas <didi.debian@cknow.org>
+Closes: https://lore.kernel.org/linux-rockchip/DAEVDSTMWI1E.J454VZN0R9MA@cknow.org/
+Suggested-by: Andy Yan <andy.yan@rock-chips.com>
+Signed-off-by: Piotr Zalewski <pZ010001011111@proton.me>
+Tested-by: Diederik de Haas <didi.debian@cknow.org>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/20250706083629.140332-2-pZ010001011111@proton.me
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 5d7df4c3b08c4..bb936964d9211 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -3153,12 +3153,13 @@ static int vop2_win_init(struct vop2 *vop2)
+ }
+
+ /*
+- * The window registers are only updated when config done is written.
+- * Until that they read back the old value. As we read-modify-write
+- * these registers mark them as non-volatile. This makes sure we read
+- * the new values from the regmap register cache.
++ * The window and video port registers are only updated when config
++ * done is written. Until that they read back the old value. As we
++ * read-modify-write these registers mark them as non-volatile. This
++ * makes sure we read the new values from the regmap register cache.
+ */
+ static const struct regmap_range vop2_nonvolatile_range[] = {
++ regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255),
+ regmap_reg_range(0x1000, 0x23ff),
+ };
+
+--
+2.50.1
+
--- /dev/null
+From ed481f39ef26e9d297c3c60cd8b19d87c33c7e1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 18:07:15 +0800
+Subject: fs: writeback: fix use-after-free in __mark_inode_dirty()
+
+From: Jiufei Xue <jiufei.xue@samsung.com>
+
+[ Upstream commit d02d2c98d25793902f65803ab853b592c7a96b29 ]
+
+A use-after-free issue occurred when __mark_inode_dirty() gets the
+bdi_writeback that was in the progress of switching.
+
+CPU: 1 PID: 562 Comm: systemd-random- Not tainted 6.6.56-gb4403bd46a8e #1
+......
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __mark_inode_dirty+0x124/0x418
+lr : __mark_inode_dirty+0x118/0x418
+sp : ffffffc08c9dbbc0
+........
+Call trace:
+ __mark_inode_dirty+0x124/0x418
+ generic_update_time+0x4c/0x60
+ file_modified+0xcc/0xd0
+ ext4_buffered_write_iter+0x58/0x124
+ ext4_file_write_iter+0x54/0x704
+ vfs_write+0x1c0/0x308
+ ksys_write+0x74/0x10c
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x114
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x40/0xe4
+ el0t_64_sync_handler+0x120/0x12c
+ el0t_64_sync+0x194/0x198
+
+Root cause is:
+
+systemd-random-seed kworker
+----------------------------------------------------------------------
+__mark_inode_dirty inode_switch_wbs_work_fn
+
+ spin_lock(&inode->i_lock);
+ inode_attach_wb
+ locked_inode_to_wb_and_lock_list
+ get inode->i_wb
+ spin_unlock(&inode->i_lock);
+ spin_lock(&wb->list_lock)
+ spin_lock(&inode->i_lock)
+ inode_io_list_move_locked
+ spin_unlock(&wb->list_lock)
+ spin_unlock(&inode->i_lock)
+ spin_lock(&old_wb->list_lock)
+ inode_do_switch_wbs
+ spin_lock(&inode->i_lock)
+ inode->i_wb = new_wb
+ spin_unlock(&inode->i_lock)
+ spin_unlock(&old_wb->list_lock)
+ wb_put_many(old_wb, nr_switched)
+ cgwb_release
+ old wb released
+ wb_wakeup_delayed() accesses wb,
+ then trigger the use-after-free
+ issue
+
+Fix this race condition by holding inode spinlock until
+wb_wakeup_delayed() finished.
+
+Signed-off-by: Jiufei Xue <jiufei.xue@samsung.com>
+Link: https://lore.kernel.org/20250728100715.3863241-1-jiufei.xue@samsung.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fs-writeback.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 2391b09f4cede..4ae226778d646 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2572,10 +2572,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ wakeup_bdi = inode_io_list_move_locked(inode, wb,
+ dirty_list);
+
+- spin_unlock(&wb->list_lock);
+- spin_unlock(&inode->i_lock);
+- trace_writeback_dirty_inode_enqueue(inode);
+-
+ /*
+ * If this is the first dirty inode for this bdi,
+ * we have to wake-up the corresponding bdi thread
+@@ -2585,6 +2581,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ if (wakeup_bdi &&
+ (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
+ wb_wakeup_delayed(wb);
++
++ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
++ trace_writeback_dirty_inode_enqueue(inode);
++
+ return;
+ }
+ }
+--
+2.50.1
+
--- /dev/null
+From 03c7a9e85396049687055a96723741996e419af4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Save LBT before FPU in setup_sigcontext()
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 112ca94f6c3b3e0b2002a240de43c487a33e0234 ]
+
+Now if preemption happens between protected_save_fpu_context() and
+protected_save_lbt_context(), FTOP context is lost. Because FTOP is
+saved by protected_save_lbt_context() but protected_save_fpu_context()
+disables TM before that. So save LBT before FPU in setup_sigcontext()
+to avoid this potential risk.
+
+Signed-off-by: Hanlu Li <lihanlu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/signal.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
+index 4740cb5b23889..c9f7ca778364e 100644
+--- a/arch/loongarch/kernel/signal.c
++++ b/arch/loongarch/kernel/signal.c
+@@ -677,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
++#ifdef CONFIG_CPU_HAS_LBT
++ if (extctx->lbt.addr)
++ err |= protected_save_lbt_context(extctx);
++#endif
++
+ if (extctx->lasx.addr)
+ err |= protected_save_lasx_context(extctx);
+ else if (extctx->lsx.addr)
+@@ -684,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ else if (extctx->fpu.addr)
+ err |= protected_save_fpu_context(extctx);
+
+-#ifdef CONFIG_CPU_HAS_LBT
+- if (extctx->lbt.addr)
+- err |= protected_save_lbt_context(extctx);
+-#endif
+-
+ /* Set the "end" magic */
+ info = (struct sctx_info *)extctx->end.addr;
+ err |= __put_user(0, &info->magic);
+--
+2.50.1
+
--- /dev/null
+bpf-add-cookie-object-to-bpf-maps.patch
+bpf-move-bpf-map-owner-out-of-common-struct.patch
+bpf-move-cgroup-iterator-helpers-to-bpf.h.patch
+bpf-fix-oob-access-in-cgroup-local-storage.patch
+btrfs-fix-race-between-logging-inode-and-checking-if.patch
+btrfs-fix-race-between-setting-last_dir_index_offset.patch
+btrfs-avoid-load-store-tearing-races-when-checking-i.patch
+loongarch-save-lbt-before-fpu-in-setup_sigcontext.patch
+cdc_ncm-flag-intel-oem-version-of-fibocom-l850-gl-as.patch
+drm-amd-display-don-t-warn-when-missing-dce-encoder-.patch
+cpupower-fix-a-bug-where-the-t-option-of-the-set-sub.patch
+bluetooth-hci_sync-avoid-adding-default-advertising-.patch
+drm-rockchip-vop2-make-vp-registers-nonvolatile.patch
+btrfs-zoned-skip-zone-finish-of-conventional-zones.patch
+fs-writeback-fix-use-after-free-in-__mark_inode_dirt.patch
--- /dev/null
+From d50e539b058ed4b17ebdd7b4b7a895bdea5a549d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Sep 2025 12:20:24 +0300
+Subject: ALSA: usb-audio: Allow Focusrite devices to use low samplerates
+
+From: Tina Wuest <tina@wuest.me>
+
+[ Upstream commit cc8e91054c0a778074ecffaf12bd0944e884d71c ]
+
+Commit 05f254a6369ac020fc0382a7cbd3ef64ad997c92 ("ALSA: usb-audio:
+Improve filtering of sample rates on Focusrite devices") changed the
+check for max_rate in a way which was overly restrictive, forcing
+devices to use very high samplerates if they support them, despite
+support existing for lower rates as well.
+
+This maintains the intended outcome (ensuring samplerates selected are
+supported) while allowing devices with higher maximum samplerates to be
+opened at all supported samplerates.
+
+This patch was tested with a Clarett+ 8Pre USB
+
+Fixes: 05f254a6369a ("ALSA: usb-audio: Improve filtering of sample rates on Focusrite devices")
+Signed-off-by: Tina Wuest <tina@wuest.me>
+Link: https://patch.msgid.link/20250901092024.140993-1-tina@wuest.me
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/format.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 0ee532acbb603..ec95a063beb10 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -327,12 +327,16 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ max_rate = combine_quad(&fmt[6]);
+
+ switch (max_rate) {
++ case 192000:
++ if (rate == 176400 || rate == 192000)
++ return true;
++ fallthrough;
++ case 96000:
++ if (rate == 88200 || rate == 96000)
++ return true;
++ fallthrough;
+ case 48000:
+ return (rate == 44100 || rate == 48000);
+- case 96000:
+- return (rate == 88200 || rate == 96000);
+- case 192000:
+- return (rate == 176400 || rate == 192000);
+ default:
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+--
+2.50.1
+
--- /dev/null
+From 64006c68ee56975019eb8a3e365b8f403ed754cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Aug 2025 06:30:01 +0000
+Subject: ASoC: rsnd: tidyup direction name on rsnd_dai_connect()
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit 8022629548949eb4d2e2207b893bfb6d486700cb ]
+
+commit 2c6b6a3e8b93 ("ASoC: rsnd: use snd_pcm_direction_name()") uses
+snd_pcm_direction_name() instead of original method to get string
+"Playback" or "Capture". But io->substream might be NULL in this timing.
+Let's re-use original method.
+
+Fixes: 2c6b6a3e8b93 ("ASoC: rsnd: use snd_pcm_direction_name()")
+Reported-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Tested-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Message-ID: <87zfbmwq6v.wl-kuninori.morimoto.gx@renesas.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/renesas/rcar/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/soc/renesas/rcar/core.c b/sound/soc/renesas/rcar/core.c
+index a72f36d3ca2cd..4f4ed24cb3616 100644
+--- a/sound/soc/renesas/rcar/core.c
++++ b/sound/soc/renesas/rcar/core.c
+@@ -597,7 +597,7 @@ int rsnd_dai_connect(struct rsnd_mod *mod,
+
+ dev_dbg(dev, "%s is connected to io (%s)\n",
+ rsnd_mod_name(mod),
+- snd_pcm_direction_name(io->substream->stream));
++ rsnd_io_is_play(io) ? "Playback" : "Capture");
+
+ return 0;
+ }
+--
+2.50.1
+
--- /dev/null
+From ccd36d4ac2b293dbb23dad01dc7e27555654cb8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:58:51 +0000
+Subject: ASoC: soc-core: care NULL dirver name on
+ snd_soc_lookup_component_nolocked()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit 168873ca1799d3f23442b9e79eae55f907b9b126 ]
+
+soc-generic-dmaengine-pcm.c uses same dev for both CPU and Platform.
+In such case, CPU component driver might not have driver->name, then
+snd_soc_lookup_component_nolocked() will be NULL pointer access error.
+Care NULL driver name.
+
+ Call trace:
+ strcmp from snd_soc_lookup_component_nolocked+0x64/0xa4
+ snd_soc_lookup_component_nolocked from snd_soc_unregister_component_by_driver+0x2c/0x44
+ snd_soc_unregister_component_by_driver from snd_dmaengine_pcm_unregister+0x28/0x64
+ snd_dmaengine_pcm_unregister from devres_release_all+0x98/0xfc
+ devres_release_all from device_unbind_cleanup+0xc/0x60
+ device_unbind_cleanup from really_probe+0x220/0x2c8
+ really_probe from __driver_probe_device+0x88/0x1a0
+ __driver_probe_device from driver_probe_device+0x30/0x110
+ driver_probe_device from __driver_attach+0x90/0x178
+ __driver_attach from bus_for_each_dev+0x7c/0xcc
+ bus_for_each_dev from bus_add_driver+0xcc/0x1ec
+ bus_add_driver from driver_register+0x80/0x11c
+ driver_register from do_one_initcall+0x58/0x23c
+ do_one_initcall from kernel_init_freeable+0x198/0x1f4
+ kernel_init_freeable from kernel_init+0x1c/0x12c
+ kernel_init from ret_from_fork+0x14/0x28
+
+Fixes: 144d6dfc7482 ("ASoC: soc-core: merge snd_soc_unregister_component() and snd_soc_unregister_component_by_driver()")
+Reported-by: J. Neuschäfer <j.ne@posteo.net>
+Closes: https://lore.kernel.org/r/aJb311bMDc9x-dpW@probook
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Reported-by: Ondřej Jirman <megi@xff.cz>
+Closes: https://lore.kernel.org/r/arxpwzu6nzgjxvsndct65ww2wz4aezb5gjdzlgr24gfx7xvyih@natjg6dg2pj6
+Tested-by: J. Neuschäfer <j.ne@posteo.net>
+Message-ID: <87ect8ysv8.wl-kuninori.morimoto.gx@renesas.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/soc-core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 16bbc074dc5f6..d31ee6e9abefc 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -375,8 +375,9 @@ struct snd_soc_component
+ for_each_component(component) {
+ if ((dev == component->dev) &&
+ (!driver_name ||
+- (driver_name == component->driver->name) ||
+- (strcmp(component->driver->name, driver_name) == 0))) {
++ (component->driver->name &&
++ ((component->driver->name == driver_name) ||
++ (strcmp(component->driver->name, driver_name) == 0))))) {
+ found_component = component;
+ break;
+ }
+--
+2.50.1
+
--- /dev/null
+From ce2339e7d640358ecb172b87d250148a66576ef8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Aug 2025 23:40:40 +0800
+Subject: ASoC: SOF: Intel: WCL: Add the sdw_process_wakeen op
+
+From: Ajye Huang <ajye_huang@compal.corp-partner.google.com>
+
+[ Upstream commit 3e7fd1febc3156d3d98fba229399a13b12d69707 ]
+
+Add the missing op in the device description to avoid issues with jack
+detection.
+
+Fixes: 6b04629ae97a ("ASoC: SOF: Intel: add initial support for WCL")
+Acked-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Signed-off-by: Ajye Huang <ajye_huang@compal.corp-partner.google.com>
+Message-ID: <20250826154040.2723998-1-ajye_huang@compal.corp-partner.google.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/intel/ptl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/sof/intel/ptl.c b/sound/soc/sof/intel/ptl.c
+index 1bc1f54c470df..4633cd01e7dd4 100644
+--- a/sound/soc/sof/intel/ptl.c
++++ b/sound/soc/sof/intel/ptl.c
+@@ -143,6 +143,7 @@ const struct sof_intel_dsp_desc wcl_chip_info = {
+ .read_sdw_lcount = hda_sdw_check_lcount_ext,
+ .check_sdw_irq = lnl_dsp_check_sdw_irq,
+ .check_sdw_wakeen_irq = lnl_sdw_check_wakeen_irq,
++ .sdw_process_wakeen = hda_sdw_process_wakeen_common,
+ .check_ipc_irq = mtl_dsp_check_ipc_irq,
+ .cl_init = mtl_dsp_cl_init,
+ .power_down_dsp = mtl_power_down_dsp,
+--
+2.50.1
+
--- /dev/null
+From f3e9e4bf7a92397efb6a1808aef9c4c508a8ac70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 17:08:44 +0800
+Subject: Bluetooth: hci_sync: Avoid adding default advertising on startup
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit de5d7d3f27ddd4046736f558a40e252ddda82013 ]
+
+list_empty(&hdev->adv_instances) is always true during startup,
+so an advertising instance is added by default.
+
+Call trace:
+ dump_backtrace+0x94/0xec
+ show_stack+0x18/0x24
+ dump_stack_lvl+0x48/0x60
+ dump_stack+0x18/0x24
+ hci_setup_ext_adv_instance_sync+0x17c/0x328
+ hci_powered_update_adv_sync+0xb4/0x12c
+ hci_powered_update_sync+0x54/0x70
+ hci_power_on_sync+0xe4/0x278
+ hci_set_powered_sync+0x28/0x34
+ set_powered_sync+0x40/0x58
+ hci_cmd_sync_work+0x94/0x100
+ process_one_work+0x168/0x444
+ worker_thread+0x378/0x3f4
+ kthread+0x108/0x10c
+ ret_from_fork+0x10/0x20
+
+Link: https://github.com/bluez/bluez/issues/1442
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 749bba1512eb1..a25439f1eeac2 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3344,7 +3344,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
++ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev)) {
+ err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+--
+2.50.1
+
--- /dev/null
+From ba423d927f07100f3bf604f2ed54cd6aab2e0dec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:32 +0100
+Subject: btrfs: avoid load/store tearing races when checking if an inode was
+ logged
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 986bf6ed44dff7fbae7b43a0882757ee7f5ba21b ]
+
+At inode_logged() we do a couple lockless checks for ->logged_trans, and
+these are generally safe except the second one in case we get a load or
+store tearing due to a concurrent call updating ->logged_trans (either at
+btrfs_log_inode() or later at inode_logged()).
+
+In the first case it's safe to compare to the current transaction ID since
+once ->logged_trans is set the current transaction, we never set it to a
+lower value.
+
+In the second case, where we check if it's greater than zero, we are prone
+to load/store tearing races, since we can have a concurrent task updating
+to the current transaction ID with store tearing for example, instead of
+updating with a single 64 bits write, to update with two 32 bits writes or
+four 16 bits writes. In that case the reading side at inode_logged() could
+see a positive value that does not match the current transaction and then
+return a false negative.
+
+Fix this by doing the second check while holding the inode's spinlock, add
+some comments about it too. Also add the data_race() annotation to the
+first check to avoid any reports from KCSAN (or similar tools) and comment
+about it.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 5f82e8c59cd17..56d30ec0f52fc 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3351,15 +3351,32 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_key key;
+ int ret;
+
+- if (inode->logged_trans == trans->transid)
++ /*
++ * Quick lockless call, since once ->logged_trans is set to the current
++ * transaction, we never set it to a lower value anywhere else.
++ */
++ if (data_race(inode->logged_trans) == trans->transid)
+ return 1;
+
+ /*
+- * If logged_trans is not 0, then we know the inode logged was not logged
+- * in this transaction, so we can return false right away.
++ * If logged_trans is not 0 and not trans->transid, then we know the
++ * inode was not logged in this transaction, so we can return false
++ * right away. We take the lock to avoid a race caused by load/store
++ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
++ * in this function further below - an update to trans->transid can be
++ * teared into two 32 bits updates for example, in which case we could
++ * see a positive value that is not trans->transid and assume the inode
++ * was not logged when it was.
+ */
+- if (inode->logged_trans > 0)
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == trans->transid) {
++ spin_unlock(&inode->lock);
++ return 1;
++ } else if (inode->logged_trans > 0) {
++ spin_unlock(&inode->lock);
+ return 0;
++ }
++ spin_unlock(&inode->lock);
+
+ /*
+ * If no log tree was created for this root in this transaction, then
+--
+2.50.1
+
--- /dev/null
+From 10ff94f27d14a4b62b31411f25ff2b733c62e510 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 19:01:45 +0930
+Subject: btrfs: clear block dirty if submit_one_sector() failed
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit 4bcd3061e8154606af7f721cb75ca04ffe191a12 ]
+
+[BUG]
+If submit_one_sector() failed, the block will be kept dirty, but with
+their corresponding range finished in the ordered extent.
+
+This means if a writeback happens later again, we can hit the following
+problems:
+
+- ASSERT(block_start != EXTENT_MAP_HOLE) in submit_one_sector()
+ If the original extent map is a hole, then we can hit this case, as
+ the new ordered extent failed, we will drop the new extent map and
+ re-read one from the disk.
+
+- DEBUG_WARN() in btrfs_writepage_cow_fixup()
+ This is because we no longer have an ordered extent for those dirty
+ blocks. The original for them is already finished with error.
+
+[CAUSE]
+The function submit_one_sector() is not following the regular error
+handling of writeback. The common practice is to clear the folio dirty,
+start and finish the writeback for the block.
+
+This is normally done by extent_clear_unlock_delalloc() with
+PAGE_START_WRITEBACK | PAGE_END_WRITEBACK flags during
+run_delalloc_range().
+
+So if we keep those failed blocks dirty, they will stay in the page
+cache and wait for the next writeback.
+
+And since the original ordered extent is already finished and removed,
+depending on the original extent map, we either hit the ASSERT() inside
+submit_one_sector(), or hit the DEBUG_WARN() in
+btrfs_writepage_cow_fixup().
+
+[FIX]
+Follow the regular error handling to clear the dirty flag for the block,
+start and finish writeback for that block instead.
+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 3711a5d073423..fac4000a5bcae 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1483,7 +1483,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+
+ /*
+ * Return 0 if we have submitted or queued the sector for submission.
+- * Return <0 for critical errors.
++ * Return <0 for critical errors, and the sector will have its dirty flag cleared.
+ *
+ * Caller should make sure filepos < i_size and handle filepos >= i_size case.
+ */
+@@ -1506,8 +1506,17 @@ static int submit_one_sector(struct btrfs_inode *inode,
+ ASSERT(filepos < i_size);
+
+ em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
+- if (IS_ERR(em))
++ if (IS_ERR(em)) {
++ /*
++ * When submission failed, we should still clear the folio dirty.
++ * Or the folio will be written back again but without any
++ * ordered extent.
++ */
++ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
++ btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
++ btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize);
+ return PTR_ERR(em);
++ }
+
+ extent_offset = filepos - em->start;
+ em_end = btrfs_extent_map_end(em);
+@@ -1637,8 +1646,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ * Here we set writeback and clear for the range. If the full folio
+ * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
+ *
+- * If we hit any error, the corresponding sector will still be dirty
+- * thus no need to clear PAGECACHE_TAG_DIRTY.
++ * If we hit any error, the corresponding sector will have its dirty
++ * flag cleared and writeback finished, thus no need to handle the error case.
+ */
+ if (!submitted_io && !error) {
+ btrfs_folio_set_writeback(fs_info, folio, start, len);
+--
+2.50.1
+
--- /dev/null
+From e8f6f708771176c9a0f58307c253b32124ebc340 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:30 +0100
+Subject: btrfs: fix race between logging inode and checking if it was logged
+ before
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ef07b74e1be56f9eafda6aadebb9ebba0743c9f0 ]
+
+There's a race between checking if an inode was logged before and logging
+an inode that can cause us to mark an inode as not logged just after it
+was logged by a concurrent task:
+
+1) We have inode X which was not logged before, neither in the current
+   transaction nor in a past transaction since the inode was loaded into
+   memory, so its ->logged_trans value is 0;
+
+2) We are at transaction N;
+
+3) Task A calls inode_logged() against inode X, sees that ->logged_trans
+ is 0 and there is a log tree and so it proceeds to search in the log
+ tree for an inode item for inode X. It doesn't see any, but before
+ it sets ->logged_trans to N - 1...
+
+4) Task B calls btrfs_log_inode() against inode X, logs the inode and
+   sets ->logged_trans to N;
+
+5) Task A now sets ->logged_trans to N - 1;
+
+6) At this point anyone calling inode_logged() gets 0 (inode not logged)
+   since ->logged_trans is greater than 0 and less than N, but our inode
+   was really logged. As a consequence operations like rename, unlink and
+   link that happen afterwards in the current transaction end up not
+   updating the log when they should.
+
+Fix this by ensuring inode_logged() only updates ->logged_trans in case
+the inode item is not found in the log tree if after taking the inode's
+lock (spinlock struct btrfs_inode::lock) the ->logged_trans value is still
+zero, since the inode lock is what protects setting ->logged_trans at
+btrfs_log_inode().
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 36 ++++++++++++++++++++++++++++++------
+ 1 file changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index afc05e406689a..17003f3d9dd1c 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3309,6 +3309,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode)
++{
++ bool ret = false;
++
++ /*
++ * Do this only if ->logged_trans is still 0 to prevent races with
++ * concurrent logging as we may see the inode not logged when
++ * inode_logged() is called but it gets logged after inode_logged() did
++ * not find it in the log tree and we end up setting ->logged_trans to a
++ * value less than trans->transid after the concurrent logging task has
++ * set it to trans->transid. As a consequence, subsequent rename, unlink
++ * and link operations may end up not logging new names and removing old
++ * names from the log.
++ */
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == 0)
++ inode->logged_trans = trans->transid - 1;
++ else if (inode->logged_trans == trans->transid)
++ ret = true;
++ spin_unlock(&inode->lock);
++
++ return ret;
++}
++
+ /*
+ * Check if an inode was logged in the current transaction. This correctly deals
+ * with the case where the inode was logged but has a logged_trans of 0, which
+@@ -3343,10 +3368,8 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * transaction's ID, to avoid the search below in a future call in case
+ * a log tree gets created after this.
+ */
+- if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
+- inode->logged_trans = trans->transid - 1;
+- return 0;
+- }
++ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
++ return mark_inode_as_not_logged(trans, inode);
+
+ /*
+ * We have a log tree and the inode's logged_trans is 0. We can't tell
+@@ -3400,8 +3423,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * Set logged_trans to a value greater than 0 and less then the
+ * current transaction to avoid doing the search in future calls.
+ */
+- inode->logged_trans = trans->transid - 1;
+- return 0;
++ return mark_inode_as_not_logged(trans, inode);
+ }
+
+ /*
+@@ -3409,7 +3431,9 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * the current transacion's ID, to avoid future tree searches as long as
+ * the inode is not evicted again.
+ */
++ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
++ spin_unlock(&inode->lock);
+
+ /*
+ * If it's a directory, then we must set last_dir_index_offset to the
+--
+2.50.1
+
--- /dev/null
+From 7ead2114345a6c2ebb0ace2dca82e3e93b388425 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:31 +0100
+Subject: btrfs: fix race between setting last_dir_index_offset and inode
+ logging
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 59a0dd4ab98970086fd096281b1606c506ff2698 ]
+
+At inode_logged() if we find that the inode was not logged before we
+update its ->last_dir_index_offset to (u64)-1 with the goal that the
+next directory log operation will see the (u64)-1 and then figure out
+it must check what was the index of the last logged dir index key and
+update ->last_dir_index_offset to that key's offset (this is done in
+update_last_dir_index_offset()).
+
+This however has a possibility for a time window where a race can happen
+and lead to directory logging skipping dir index keys that should be
+logged. The race happens like this:
+
+1) Task A calls inode_logged(), sees ->logged_trans as 0 and then checks
+ that the inode item was logged before, but before it sets the inode's
+ ->last_dir_index_offset to (u64)-1...
+
+2) Task B is at btrfs_log_inode() which calls inode_logged() early, and
+ that has set ->last_dir_index_offset to (u64)-1;
+
+3) Task B then enters log_directory_changes() which calls
+ update_last_dir_index_offset(). There it sees ->last_dir_index_offset
+ is (u64)-1 and that the inode was logged before (ctx->logged_before is
+ true), and so it searches for the last logged dir index key in the log
+ tree and it finds that it has an offset (index) value of N, so it sets
+ ->last_dir_index_offset to N, so that we can skip index keys that are
+ less than or equal to N (later at process_dir_items_leaf());
+
+4) Task A now sets ->last_dir_index_offset to (u64)-1, undoing the update
+ that task B just did;
+
+5) Task B will now skip every index key when it enters
+ process_dir_items_leaf(), since ->last_dir_index_offset is (u64)-1.
+
+Fix this by making inode_logged() not touch ->last_dir_index_offset and
+initializing it to 0 when an inode is loaded (at btrfs_alloc_inode()) and
+then having update_last_dir_index_offset() treat a value of 0 as meaning
+we must check the log tree and update with the index of the last logged
+index key. This is fine since the minimum possible value for
+->last_dir_index_offset is 1 (BTRFS_DIR_START_INDEX - 1 = 2 - 1 = 1).
+This also simplifies the management of ->last_dir_index_offset and now
+all accesses to it are done under the inode's log_mutex.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/btrfs_inode.h | 2 +-
+ fs/btrfs/inode.c | 1 +
+ fs/btrfs/tree-log.c | 17 ++---------------
+ 3 files changed, 4 insertions(+), 16 deletions(-)
+
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index a79fa0726f1d9..216eff293ffec 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -248,7 +248,7 @@ struct btrfs_inode {
+ u64 new_delalloc_bytes;
+ /*
+ * The offset of the last dir index key that was logged.
+- * This is used only for directories.
++ * This is used only for directories. Protected by 'log_mutex'.
+ */
+ u64 last_dir_index_offset;
+ };
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index df4c8312aae39..ffa5d6c159405 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7827,6 +7827,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ ei->last_sub_trans = 0;
+ ei->logged_trans = 0;
+ ei->delalloc_bytes = 0;
++ /* new_delalloc_bytes and last_dir_index_offset are in a union. */
+ ei->new_delalloc_bytes = 0;
+ ei->defrag_bytes = 0;
+ ei->disk_i_size = 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 17003f3d9dd1c..5f82e8c59cd17 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3435,19 +3435,6 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ inode->logged_trans = trans->transid;
+ spin_unlock(&inode->lock);
+
+- /*
+- * If it's a directory, then we must set last_dir_index_offset to the
+- * maximum possible value, so that the next attempt to log the inode does
+- * not skip checking if dir index keys found in modified subvolume tree
+- * leaves have been logged before, otherwise it would result in attempts
+- * to insert duplicate dir index keys in the log tree. This must be done
+- * because last_dir_index_offset is an in-memory only field, not persisted
+- * in the inode item or any other on-disk structure, so its value is lost
+- * once the inode is evicted.
+- */
+- if (S_ISDIR(inode->vfs_inode.i_mode))
+- inode->last_dir_index_offset = (u64)-1;
+-
+ return 1;
+ }
+
+@@ -4038,7 +4025,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+
+ /*
+ * If the inode was logged before and it was evicted, then its
+- * last_dir_index_offset is (u64)-1, so we don't the value of the last index
++ * last_dir_index_offset is 0, so we don't know the value of the last index
+ * key offset. If that's the case, search for it and update the inode. This
+ * is to avoid lookups in the log tree every time we try to insert a dir index
+ * key from a leaf changed in the current transaction, and to allow us to always
+@@ -4054,7 +4041,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode,
+
+ lockdep_assert_held(&inode->log_mutex);
+
+- if (inode->last_dir_index_offset != (u64)-1)
++ if (inode->last_dir_index_offset != 0)
+ return 0;
+
+ if (!ctx->logged_before) {
+--
+2.50.1
+
--- /dev/null
+From fd8c7bd43ad6a04f957725be87103804989162a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 15:38:10 +0200
+Subject: btrfs: zoned: skip ZONE FINISH of conventional zones
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit f0ba0e7172a222ea6043b61ecd86723c46d7bcf2 ]
+
+Don't call ZONE FINISH for conventional zones as this will result in I/O
+errors. Instead check if the zone that needs finishing is a conventional
+zone and if yes skip it.
+
+Also factor out the actual handling of finishing a single zone into a
+helper function, as do_zone_finish() is growing ever bigger and the
+indentations levels are getting higher.
+
+Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 55 ++++++++++++++++++++++++++++++------------------
+ 1 file changed, 35 insertions(+), 20 deletions(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index af5ba3ad2eb83..d7a1193332d94 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2252,6 +2252,40 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group)
+ rcu_read_unlock();
+ }
+
++static int call_zone_finish(struct btrfs_block_group *block_group,
++ struct btrfs_io_stripe *stripe)
++{
++ struct btrfs_device *device = stripe->dev;
++ const u64 physical = stripe->physical;
++ struct btrfs_zoned_device_info *zinfo = device->zone_info;
++ int ret;
++
++ if (!device->bdev)
++ return 0;
++
++ if (zinfo->max_active_zones == 0)
++ return 0;
++
++ if (btrfs_dev_is_sequential(device, physical)) {
++ unsigned int nofs_flags;
++
++ nofs_flags = memalloc_nofs_save();
++ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
++ physical >> SECTOR_SHIFT,
++ zinfo->zone_size >> SECTOR_SHIFT);
++ memalloc_nofs_restore(nofs_flags);
++
++ if (ret)
++ return ret;
++ }
++
++ if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
++ zinfo->reserved_active_zones++;
++ btrfs_dev_clear_active_zone(device, physical);
++
++ return 0;
++}
++
+ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
+ {
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+@@ -2336,31 +2370,12 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ down_read(&dev_replace->rwsem);
+ map = block_group->physical_map;
+ for (i = 0; i < map->num_stripes; i++) {
+- struct btrfs_device *device = map->stripes[i].dev;
+- const u64 physical = map->stripes[i].physical;
+- struct btrfs_zoned_device_info *zinfo = device->zone_info;
+- unsigned int nofs_flags;
+-
+- if (!device->bdev)
+- continue;
+-
+- if (zinfo->max_active_zones == 0)
+- continue;
+-
+- nofs_flags = memalloc_nofs_save();
+- ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+- physical >> SECTOR_SHIFT,
+- zinfo->zone_size >> SECTOR_SHIFT);
+- memalloc_nofs_restore(nofs_flags);
+
++ ret = call_zone_finish(block_group, &map->stripes[i]);
+ if (ret) {
+ up_read(&dev_replace->rwsem);
+ return ret;
+ }
+-
+- if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+- zinfo->reserved_active_zones++;
+- btrfs_dev_clear_active_zone(device, physical);
+ }
+ up_read(&dev_replace->rwsem);
+
+--
+2.50.1
+
--- /dev/null
+From 52017ad3f74304f5d2f6455b9766e47107869e3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 17:42:14 +0200
+Subject: cdc_ncm: Flag Intel OEM version of Fibocom L850-GL as WWAN
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+[ Upstream commit 4a73a36cb704813f588af13d9842d0ba5a185758 ]
+
+This lets NetworkManager/ModemManager know that this is a modem and
+needs to be connected first.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Link: https://patch.msgid.link/20250814154214.250103-1-lkundrak@v3.sk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/cdc_ncm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index ea0e5e276cd6d..5d123df0a866b 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -2087,6 +2087,13 @@ static const struct usb_device_id cdc_devs[] = {
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
++ /* Intel modem (label from OEM reads Fibocom L850-GL) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&wwan_info,
++ },
++
+ /* DisplayLink docking stations */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+--
+2.50.1
+
--- /dev/null
+From 14e409cf5a2286847861c3ee48980f06bca6bddb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 May 2025 15:10:58 +0900
+Subject: cpupower: Fix a bug where the -t option of the set subcommand was not
+ working.
+
+From: Shinji Nomoto <fj5851bi@fujitsu.com>
+
+[ Upstream commit b3eaf14f4c63fd6abc7b68c6d7a07c5680a6d8e5 ]
+
+The set subcommand's -t option is documented as being available for boost
+configuration, but it was not actually functioning due to a bug
+in the option handling.
+
+Link: https://lore.kernel.org/r/20250522061122.2149188-2-fj5851bi@fujitsu.com
+Signed-off-by: Shinji Nomoto <fj5851bi@fujitsu.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/power/cpupower/utils/cpupower-set.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index 0677b58374abf..59ace394cf3ef 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -62,8 +62,8 @@ int cmd_set(int argc, char **argv)
+
+ params.params = 0;
+ /* parameter parsing */
+- while ((ret = getopt_long(argc, argv, "b:e:m:",
+- set_opts, NULL)) != -1) {
++ while ((ret = getopt_long(argc, argv, "b:e:m:t:",
++ set_opts, NULL)) != -1) {
+ switch (ret) {
+ case 'b':
+ if (params.perf_bias)
+--
+2.50.1
+
--- /dev/null
+From 2604e9e64fa2ba9d4676e1c21746460e9626625b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:50 +0200
+Subject: drm/amd/display: Don't warn when missing DCE encoder caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 8246147f1fbaed522b8bcc02ca34e4260747dcfb ]
+
+On some GPUs the VBIOS just doesn't have encoder caps,
+or maybe not for every encoder.
+
+This isn't really a problem and it's handled well,
+so let's not litter the logs with it.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 33e0227ee96e62d034781e91f215e32fd0b1d512)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 4a9d07c31bc5b..0c50fe266c8a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+--
+2.50.1
+
--- /dev/null
+From c29535d480d6a49dc11f0637b0b9d5d38a1eb726 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Jul 2025 08:36:58 +0000
+Subject: drm/rockchip: vop2: make vp registers nonvolatile
+
+From: Piotr Zalewski <pZ010001011111@proton.me>
+
+[ Upstream commit a52dffaa46c2c5ff0b311c4dc1288581f7b9109e ]
+
+Make video port registers nonvolatile. As DSP_CTRL register is written
+to twice due to gamma LUT enable bit which is set outside of the main
+DSP_CTRL initialization within atomic_enable (for rk356x case it is also
+necessary to always disable gamma LUT before writing a new LUT) there is
+a chance that DSP_CTRL value read-out in gamma LUT init/update code is
+not the one which was written by the preceding DSP_CTRL initialization
+code within atomic_enable. This might result in misconfigured DSP_CTRL
+which leads to no visual output[1]. Since DSP_CTRL write takes effect
+after VSYNC[1] the issue is not always present. When tested on Pinetab2
+with kernel 6.14 it happens only when DRM is compiled as a module[1].
+In order to confirm that it is a timing issue I inserted 18ms udelay
+before vop2_crtc_atomic_try_set_gamma in atomic enable and compiled DRM
+as module - this has also fixed the issue.
+
+[1] https://lore.kernel.org/linux-rockchip/562b38e5.a496.1975f09f983.Coremail.andyshrk@163.com/
+
+Reported-by: Diederik de Haas <didi.debian@cknow.org>
+Closes: https://lore.kernel.org/linux-rockchip/DAEVDSTMWI1E.J454VZN0R9MA@cknow.org/
+Suggested-by: Andy Yan <andy.yan@rock-chips.com>
+Signed-off-by: Piotr Zalewski <pZ010001011111@proton.me>
+Tested-by: Diederik de Haas <didi.debian@cknow.org>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/20250706083629.140332-2-pZ010001011111@proton.me
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 186f6452a7d35..b50927a824b40 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2)
+ }
+
+ /*
+- * The window registers are only updated when config done is written.
+- * Until that they read back the old value. As we read-modify-write
+- * these registers mark them as non-volatile. This makes sure we read
+- * the new values from the regmap register cache.
++ * The window and video port registers are only updated when config
++ * done is written. Until that they read back the old value. As we
++ * read-modify-write these registers mark them as non-volatile. This
++ * makes sure we read the new values from the regmap register cache.
+ */
+ static const struct regmap_range vop2_nonvolatile_range[] = {
++ regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255),
+ regmap_reg_range(0x1000, 0x23ff),
+ };
+
+--
+2.50.1
+
--- /dev/null
+From e1920cf8bc6ba7a3291a9b196ec194dca7ac89ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 18:07:15 +0800
+Subject: fs: writeback: fix use-after-free in __mark_inode_dirty()
+
+From: Jiufei Xue <jiufei.xue@samsung.com>
+
+[ Upstream commit d02d2c98d25793902f65803ab853b592c7a96b29 ]
+
+An use-after-free issue occurred when __mark_inode_dirty() get the
+bdi_writeback that was in the progress of switching.
+
+CPU: 1 PID: 562 Comm: systemd-random- Not tainted 6.6.56-gb4403bd46a8e #1
+......
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __mark_inode_dirty+0x124/0x418
+lr : __mark_inode_dirty+0x118/0x418
+sp : ffffffc08c9dbbc0
+........
+Call trace:
+ __mark_inode_dirty+0x124/0x418
+ generic_update_time+0x4c/0x60
+ file_modified+0xcc/0xd0
+ ext4_buffered_write_iter+0x58/0x124
+ ext4_file_write_iter+0x54/0x704
+ vfs_write+0x1c0/0x308
+ ksys_write+0x74/0x10c
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x114
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x40/0xe4
+ el0t_64_sync_handler+0x120/0x12c
+ el0t_64_sync+0x194/0x198
+
+Root cause is:
+
+systemd-random-seed kworker
+----------------------------------------------------------------------
+___mark_inode_dirty inode_switch_wbs_work_fn
+
+ spin_lock(&inode->i_lock);
+ inode_attach_wb
+ locked_inode_to_wb_and_lock_list
+ get inode->i_wb
+ spin_unlock(&inode->i_lock);
+ spin_lock(&wb->list_lock)
+ spin_lock(&inode->i_lock)
+ inode_io_list_move_locked
+ spin_unlock(&wb->list_lock)
+ spin_unlock(&inode->i_lock)
+ spin_lock(&old_wb->list_lock)
+ inode_do_switch_wbs
+ spin_lock(&inode->i_lock)
+ inode->i_wb = new_wb
+ spin_unlock(&inode->i_lock)
+ spin_unlock(&old_wb->list_lock)
+ wb_put_many(old_wb, nr_switched)
+ cgwb_release
+ old wb released
+ wb_wakeup_delayed() accesses wb,
+ then trigger the use-after-free
+ issue
+
+Fix this race condition by holding inode spinlock until
+wb_wakeup_delayed() finished.
+
+Signed-off-by: Jiufei Xue <jiufei.xue@samsung.com>
+Link: https://lore.kernel.org/20250728100715.3863241-1-jiufei.xue@samsung.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fs-writeback.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index cc57367fb641d..a07b8cf73ae27 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2608,10 +2608,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ wakeup_bdi = inode_io_list_move_locked(inode, wb,
+ dirty_list);
+
+- spin_unlock(&wb->list_lock);
+- spin_unlock(&inode->i_lock);
+- trace_writeback_dirty_inode_enqueue(inode);
+-
+ /*
+ * If this is the first dirty inode for this bdi,
+ * we have to wake-up the corresponding bdi thread
+@@ -2621,6 +2617,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ if (wakeup_bdi &&
+ (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
+ wb_wakeup_delayed(wb);
++
++ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
++ trace_writeback_dirty_inode_enqueue(inode);
++
+ return;
+ }
+ }
+--
+2.50.1
+
--- /dev/null
+From 4c85494437c72b022a2d5c810dc0965698f7cd04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Add cpuhotplug hooks to fix high cpu usage of vCPU threads
+
+From: Xianglai Li <lixianglai@loongson.cn>
+
+[ Upstream commit 8ef7f3132e4005a103b382e71abea7ad01fbeb86 ]
+
+When the CPU is offline, the timer of LoongArch is not correctly closed.
+This is harmless for real machines, but results in an excessively high
+cpu usage rate of the offline vCPU thread in the virtual machines.
+
+To correctly close the timer, we have made the following modifications:
+
+Register the cpu hotplug event (CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING)
+for LoongArch. This event's hooks will be called to close the timer when
+the CPU is offline.
+
+Clear the timer interrupt when the timer is turned off. Since before the
+timer is turned off, a timer interrupt may already have become pending
+while interrupts were disabled, which also affects the halt state of the
+offline vCPU.
+
+Signed-off-by: Xianglai Li <lixianglai@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/time.c | 22 ++++++++++++++++++++++
+ include/linux/cpuhotplug.h | 1 +
+ 2 files changed, 23 insertions(+)
+
+diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
+index 367906b10f810..f3092f2de8b50 100644
+--- a/arch/loongarch/kernel/time.c
++++ b/arch/loongarch/kernel/time.c
+@@ -5,6 +5,7 @@
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+ #include <linux/clockchips.h>
++#include <linux/cpuhotplug.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
+ #include <linux/init.h>
+@@ -102,6 +103,23 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
+ return 0;
+ }
+
++static int arch_timer_starting(unsigned int cpu)
++{
++ set_csr_ecfg(ECFGF_TIMER);
++
++ return 0;
++}
++
++static int arch_timer_dying(unsigned int cpu)
++{
++ constant_set_state_shutdown(this_cpu_ptr(&constant_clockevent_device));
++
++ /* Clear Timer Interrupt */
++ write_csr_tintclear(CSR_TINTCLR_TI);
++
++ return 0;
++}
++
+ static unsigned long get_loops_per_jiffy(void)
+ {
+ unsigned long lpj = (unsigned long)const_clock_freq;
+@@ -172,6 +190,10 @@ int constant_clockevent_init(void)
+ lpj_fine = get_loops_per_jiffy();
+ pr_info("Constant clock event device register\n");
+
++ cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
++ "clockevents/loongarch/timer:starting",
++ arch_timer_starting, arch_timer_dying);
++
+ return 0;
+ }
+
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index df366ee15456b..e62064cb9e08a 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -169,6 +169,7 @@ enum cpuhp_state {
+ CPUHP_AP_QCOM_TIMER_STARTING,
+ CPUHP_AP_TEGRA_TIMER_STARTING,
+ CPUHP_AP_ARMADA_TIMER_STARTING,
++ CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
+ CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+ CPUHP_AP_ARC_TIMER_STARTING,
+ CPUHP_AP_REALTEK_TIMER_STARTING,
+--
+2.50.1
+
--- /dev/null
+From eac43d8b10a35bf1f7c0c9c688c390c63b409486 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Save LBT before FPU in setup_sigcontext()
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 112ca94f6c3b3e0b2002a240de43c487a33e0234 ]
+
+Now if preemption happens between protected_save_fpu_context() and
+protected_save_lbt_context(), FTOP context is lost. Because FTOP is
+saved by protected_save_lbt_context() but protected_save_fpu_context()
+disables TM before that. So save LBT before FPU in setup_sigcontext()
+to avoid this potential risk.
+
+Signed-off-by: Hanlu Li <lihanlu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/signal.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
+index 4740cb5b23889..c9f7ca778364e 100644
+--- a/arch/loongarch/kernel/signal.c
++++ b/arch/loongarch/kernel/signal.c
+@@ -677,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
++#ifdef CONFIG_CPU_HAS_LBT
++ if (extctx->lbt.addr)
++ err |= protected_save_lbt_context(extctx);
++#endif
++
+ if (extctx->lasx.addr)
+ err |= protected_save_lasx_context(extctx);
+ else if (extctx->lsx.addr)
+@@ -684,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ else if (extctx->fpu.addr)
+ err |= protected_save_fpu_context(extctx);
+
+-#ifdef CONFIG_CPU_HAS_LBT
+- if (extctx->lbt.addr)
+- err |= protected_save_lbt_context(extctx);
+-#endif
+-
+ /* Set the "end" magic */
+ info = (struct sctx_info *)extctx->end.addr;
+ err |= __put_user(0, &info->magic);
+--
+2.50.1
+
--- /dev/null
+From 093cf6aee758950b2797b23737f84497b2e7aac6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 13:51:08 -0500
+Subject: platform/x86/amd: pmc: Drop SMU F/W match for Cezanne
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 5b9e07551faa7bb2f26cb039cc6e8d00bc4d0831 ]
+
+Chris reported that even on a BIOS that has a new enough SMU F/W
+version there is still a spurious IRQ1. Although the solution was
+added to SMU F/W 64.66.0 it turns out there needs to be a matching
+SBIOS change to activate it. Thus Linux shouldn't be avoiding the
+IRQ1 workaround on newer SMU F/W because there is no indication the
+BIOS change is in place.
+
+Drop the match for 64.66.0+ and instead match all RN/CZN/BRC (they
+all share same SMU F/W). Adjust the quirk infrastructure to allow
+quirking the workaround on or off and also adjust existing quirks
+to match properly.
+
+Unfortunately this may cause some systems that did have the SBIOS
+change in place to regress in keyboard wakeup but we don't have a
+way to know. If a user reports a keyboard wakeup regression they can
+run with amd_pmc.disable_workarounds=1 to deactivate the workaround
+and share DMI data so that their system can be quirked not to use
+the workaround in the upstream kernel.
+
+Reported-by: Chris Bainbridge <chris.bainbridge@gmail.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4449
+Tested-by: Chris Bainbridge <chris.bainbridge@gmail.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Link: https://lore.kernel.org/r/20250724185156.1827592-1-superm1@kernel.org
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/amd/pmc/pmc-quirks.c | 54 ++++++++++++++---------
+ drivers/platform/x86/amd/pmc/pmc.c | 13 ------
+ 2 files changed, 34 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index ded4c84f5ed14..7ffc659b27944 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -28,10 +28,15 @@ static struct quirk_entry quirk_spurious_8042 = {
+ .spurious_8042 = true,
+ };
+
++static struct quirk_entry quirk_s2idle_spurious_8042 = {
++ .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH,
++ .spurious_8042 = true,
++};
++
+ static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "L14 Gen2 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+@@ -39,7 +44,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "T14s Gen2 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+@@ -47,7 +52,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "X13 Gen2 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+@@ -55,7 +60,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "T14 Gen2 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+@@ -63,7 +68,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+@@ -71,7 +76,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+@@ -79,7 +84,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+@@ -87,7 +92,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+@@ -95,7 +100,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "P14s Gen1 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+@@ -103,7 +108,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+@@ -111,7 +116,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+@@ -152,7 +157,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "IdeaPad 1 14AMN7",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
+@@ -160,7 +165,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
+@@ -168,7 +173,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
+@@ -176,7 +181,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "IdeaPad Slim 3 14AMN8",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
+@@ -184,7 +189,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ },
+ {
+ .ident = "IdeaPad Slim 3 15AMN8",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
+@@ -193,7 +198,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */
+ {
+ .ident = "Lenovo Yoga 6 13ALC6",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82ND"),
+@@ -202,7 +207,7 @@ static const struct dmi_system_id fwbug_list[] = {
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
+ {
+ .ident = "HP Laptop 15s-eq2xxx",
+- .driver_data = &quirk_s2idle_bug,
++ .driver_data = &quirk_s2idle_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
+@@ -285,6 +290,16 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
+ {
+ const struct dmi_system_id *dmi_id;
+
++ /*
++ * IRQ1 may cause an interrupt during resume even without a keyboard
++ * press.
++ *
++ * Affects Renoir, Cezanne and Barcelo SoCs
++ *
++ * A solution is available in PMFW 64.66.0, but it must be activated by
++ * SBIOS. If SBIOS is known to have the fix a quirk can be added for
++ * a given system to avoid workaround.
++ */
+ if (dev->cpu_id == AMD_CPU_ID_CZN)
+ dev->disable_8042_wakeup = true;
+
+@@ -295,6 +310,5 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
+ if (dev->quirks->s2idle_bug_mmio)
+ pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+- if (dev->quirks->spurious_8042)
+- dev->disable_8042_wakeup = true;
++ dev->disable_8042_wakeup = dev->quirks->spurious_8042;
+ }
+diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
+index 0b9b23eb7c2c3..bd318fd02ccf4 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -530,19 +530,6 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+ static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev)
+ {
+ struct device *d;
+- int rc;
+-
+- /* cezanne platform firmware has a fix in 64.66.0 */
+- if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+- if (!pdev->major) {
+- rc = amd_pmc_get_smu_version(pdev);
+- if (rc)
+- return rc;
+- }
+-
+- if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+- return 0;
+- }
+
+ d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
+ if (!d)
+--
+2.50.1
+
--- /dev/null
+btrfs-fix-race-between-logging-inode-and-checking-if.patch
+btrfs-fix-race-between-setting-last_dir_index_offset.patch
+btrfs-avoid-load-store-tearing-races-when-checking-i.patch
+asoc-soc-core-care-null-dirver-name-on-snd_soc_looku.patch
+asoc-rsnd-tidyup-direction-name-on-rsnd_dai_connect.patch
+asoc-sof-intel-wcl-add-the-sdw_process_wakeen-op.patch
+alsa-usb-audio-allow-focusrite-devices-to-use-low-sa.patch
+loongarch-save-lbt-before-fpu-in-setup_sigcontext.patch
+loongarch-add-cpuhotplug-hooks-to-fix-high-cpu-usage.patch
+cdc_ncm-flag-intel-oem-version-of-fibocom-l850-gl-as.patch
+drm-amd-display-don-t-warn-when-missing-dce-encoder-.patch
+cpupower-fix-a-bug-where-the-t-option-of-the-set-sub.patch
+bluetooth-hci_sync-avoid-adding-default-advertising-.patch
+drm-rockchip-vop2-make-vp-registers-nonvolatile.patch
+btrfs-clear-block-dirty-if-submit_one_sector-failed.patch
+btrfs-zoned-skip-zone-finish-of-conventional-zones.patch
+platform-x86-amd-pmc-drop-smu-f-w-match-for-cezanne.patch
+fs-writeback-fix-use-after-free-in-__mark_inode_dirt.patch
--- /dev/null
+From 26b677f9cb4aecf10175f6c4fad38e8b4bd6d4a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 17:08:44 +0800
+Subject: Bluetooth: hci_sync: Avoid adding default advertising on startup
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit de5d7d3f27ddd4046736f558a40e252ddda82013 ]
+
+list_empty(&hdev->adv_instances) is always true during startup,
+so an advertising instance is added by default.
+
+Call trace:
+ dump_backtrace+0x94/0xec
+ show_stack+0x18/0x24
+ dump_stack_lvl+0x48/0x60
+ dump_stack+0x18/0x24
+ hci_setup_ext_adv_instance_sync+0x17c/0x328
+ hci_powered_update_adv_sync+0xb4/0x12c
+ hci_powered_update_sync+0x54/0x70
+ hci_power_on_sync+0xe4/0x278
+ hci_set_powered_sync+0x28/0x34
+ set_powered_sync+0x40/0x58
+ hci_cmd_sync_work+0x94/0x100
+ process_one_work+0x168/0x444
+ worker_thread+0x378/0x3f4
+ kthread+0x108/0x10c
+ ret_from_fork+0x10/0x20
+
+Link: https://github.com/bluez/bluez/issues/1442
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 020f1809fc994..7f3f700faebc2 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3354,7 +3354,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
++ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev)) {
+ err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+--
+2.50.1
+
--- /dev/null
+From 4b9f10dd5e8b923a5b52a205084928489a5e16ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:30 +0200
+Subject: bpf: Add cookie object to bpf maps
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 12df58ad294253ac1d8df0c9bb9cf726397a671d ]
+
+Add a cookie to BPF maps to uniquely identify BPF maps for the timespan
+when the node is up. This is different to comparing a pointer or BPF map
+id which could get rolled over and reused.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-1-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/syscall.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 17de12a98f858..0a097087f0a7c 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -300,6 +300,7 @@ struct bpf_map {
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
++ u64 cookie; /* write-once */
+ };
+
+ static inline const char *btf_field_type_name(enum btf_field_type type)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index b66349f892f25..1d6bd012de9e6 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -35,6 +35,7 @@
+ #include <linux/rcupdate_trace.h>
+ #include <linux/memcontrol.h>
+ #include <linux/trace_events.h>
++#include <linux/cookie.h>
+ #include <net/netfilter/nf_bpf_link.h>
+
+ #include <net/tcx.h>
+@@ -50,6 +51,7 @@
+ #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
+
+ DEFINE_PER_CPU(int, bpf_prog_active);
++DEFINE_COOKIE(bpf_map_cookie);
+ static DEFINE_IDR(prog_idr);
+ static DEFINE_SPINLOCK(prog_idr_lock);
+ static DEFINE_IDR(map_idr);
+@@ -1253,6 +1255,10 @@ static int map_create(union bpf_attr *attr)
+ if (err < 0)
+ goto free_map;
+
++ preempt_disable();
++ map->cookie = gen_cookie_next(&bpf_map_cookie);
++ preempt_enable();
++
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+--
+2.50.1
+
--- /dev/null
+From a00d1878212950a4842193e278671f0303daca35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:33 +0200
+Subject: bpf: Fix oob access in cgroup local storage
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit abad3d0bad72a52137e0c350c59542d75ae4f513 ]
+
+Lonial reported that an out-of-bounds access in cgroup local storage
+can be crafted via tail calls: given two programs, each utilizing a
+cgroup local storage with a different value size, and one program
+doing a tail call into the other, the verifier will validate each of
+the individual programs just fine. However, in the runtime context
+the bpf_cg_run_ctx holds a bpf_prog_array_item which contains the
+BPF program as well as any cgroup local storage flavor the program
+uses. Helpers such as bpf_get_local_storage() pick this up from the
+runtime context:
+
+ ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
+ storage = ctx->prog_item->cgroup_storage[stype];
+
+ if (stype == BPF_CGROUP_STORAGE_SHARED)
+ ptr = &READ_ONCE(storage->buf)->data[0];
+ else
+ ptr = this_cpu_ptr(storage->percpu_buf);
+
+For the second program which was called from the originally attached
+one, this means bpf_get_local_storage() will pick up the former
+program's map, not its own. With mismatching sizes, this can result
+in an unintended out-of-bounds access.
+
+To fix this issue, we need to extend bpf_map_owner with an array of
+storage_cookie[] to match on i) the exact maps from the original
+program if the second program was using bpf_get_local_storage(), or
+ii) allow the tail call combination if the second program was not
+using any of the cgroup local storage maps.
+
+Fixes: 7d9c3427894f ("bpf: Make cgroup storages shared between programs on the same cgroup")
+Reported-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-4-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 1 +
+ kernel/bpf/core.c | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index b8e0204992857..83da9c81fa86a 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -267,6 +267,7 @@ struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
++ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ };
+
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index c3369c66eae8f..3618be05fc352 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2263,7 +2263,9 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ struct bpf_prog_aux *aux = fp->aux;
++ enum bpf_cgroup_storage_type i;
+ bool ret = false;
++ u64 cookie;
+
+ if (fp->kprobe_override)
+ return ret;
+@@ -2278,11 +2280,24 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ map->owner->jited = fp->jited;
+ map->owner->xdp_has_frags = aux->xdp_has_frags;
+ map->owner->attach_func_proto = aux->attach_func_proto;
++ for_each_cgroup_storage_type(i) {
++ map->owner->storage_cookie[i] =
++ aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ }
+ ret = true;
+ } else {
+ ret = map->owner->type == prog_type &&
+ map->owner->jited == fp->jited &&
+ map->owner->xdp_has_frags == aux->xdp_has_frags;
++ for_each_cgroup_storage_type(i) {
++ if (!ret)
++ break;
++ cookie = aux->cgroup_storage[i] ?
++ aux->cgroup_storage[i]->cookie : 0;
++ ret = map->owner->storage_cookie[i] == cookie ||
++ !cookie;
++ }
+ if (ret &&
+ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+--
+2.50.1
+
--- /dev/null
+From 93e8841f794d5379b838e9328bfae77b318b1367 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:31 +0200
+Subject: bpf: Move bpf map owner out of common struct
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53 ]
+
+Given this is only relevant for BPF tail call maps, it is adding up space
+and penalizing other map types. We also need to extend this with further
+objects to track / compare to. Therefore, lets move this out into a separate
+structure and dynamically allocate it only for BPF tail call maps.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 36 ++++++++++++++++++++++++------------
+ kernel/bpf/core.c | 35 ++++++++++++++++++-----------------
+ kernel/bpf/syscall.c | 14 +++++++-------
+ 3 files changed, 49 insertions(+), 36 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 730e692e002ff..b8e0204992857 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -258,6 +258,18 @@ struct bpf_list_node_kern {
+ void *owner;
+ } __attribute__((aligned(8)));
+
++/* 'Ownership' of program-containing map is claimed by the first program
++ * that is going to use this map or by the first program which FD is
++ * stored in the map to make sure that all callers and callees have the
++ * same prog type, JITed flag and xdp_has_frags flag.
++ */
++struct bpf_map_owner {
++ enum bpf_prog_type type;
++ bool jited;
++ bool xdp_has_frags;
++ const struct btf_type *attach_func_proto;
++};
++
+ struct bpf_map {
+ /* The first two cachelines with read-mostly members of which some
+ * are also accessed in fast-path (e.g. ops, max_entries).
+@@ -296,18 +308,8 @@ struct bpf_map {
+ };
+ struct mutex freeze_mutex;
+ atomic64_t writecnt;
+- /* 'Ownership' of program-containing map is claimed by the first program
+- * that is going to use this map or by the first program which FD is
+- * stored in the map to make sure that all callers and callees have the
+- * same prog type, JITed flag and xdp_has_frags flag.
+- */
+- struct {
+- const struct btf_type *attach_func_proto;
+- spinlock_t lock;
+- enum bpf_prog_type type;
+- bool jited;
+- bool xdp_has_frags;
+- } owner;
++ spinlock_t owner_lock;
++ struct bpf_map_owner *owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+@@ -1818,6 +1820,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+ }
+
++static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
++{
++ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
++}
++
++static inline void bpf_map_owner_free(struct bpf_map *map)
++{
++ kfree(map->owner);
++}
++
+ struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 5eaaf95048abc..c3369c66eae8f 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2262,28 +2262,29 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ const struct bpf_prog *fp)
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+- bool ret;
+ struct bpf_prog_aux *aux = fp->aux;
++ bool ret = false;
+
+ if (fp->kprobe_override)
+- return false;
++ return ret;
+
+- spin_lock(&map->owner.lock);
+- if (!map->owner.type) {
+- /* There's no owner yet where we could check for
+- * compatibility.
+- */
+- map->owner.type = prog_type;
+- map->owner.jited = fp->jited;
+- map->owner.xdp_has_frags = aux->xdp_has_frags;
+- map->owner.attach_func_proto = aux->attach_func_proto;
++ spin_lock(&map->owner_lock);
++ /* There's no owner yet where we could check for compatibility. */
++ if (!map->owner) {
++ map->owner = bpf_map_owner_alloc(map);
++ if (!map->owner)
++ goto err;
++ map->owner->type = prog_type;
++ map->owner->jited = fp->jited;
++ map->owner->xdp_has_frags = aux->xdp_has_frags;
++ map->owner->attach_func_proto = aux->attach_func_proto;
+ ret = true;
+ } else {
+- ret = map->owner.type == prog_type &&
+- map->owner.jited == fp->jited &&
+- map->owner.xdp_has_frags == aux->xdp_has_frags;
++ ret = map->owner->type == prog_type &&
++ map->owner->jited == fp->jited &&
++ map->owner->xdp_has_frags == aux->xdp_has_frags;
+ if (ret &&
+- map->owner.attach_func_proto != aux->attach_func_proto) {
++ map->owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+@@ -2296,8 +2297,8 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ }
+ }
+ }
+- spin_unlock(&map->owner.lock);
+-
++err:
++ spin_unlock(&map->owner_lock);
+ return ret;
+ }
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 1d6bd012de9e6..98f3f206d112e 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -698,6 +698,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
+
+ security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
++ bpf_map_owner_free(map);
+ /* implementation dependent freeing */
+ map->ops->map_free(map);
+ /* Delay freeing of btf_record for maps, as map_free
+@@ -715,7 +716,6 @@ static void bpf_map_free_deferred(struct work_struct *work)
+ */
+ btf_put(btf);
+ }
+-
+ static void bpf_map_put_uref(struct bpf_map *map)
+ {
+ if (atomic64_dec_and_test(&map->usercnt)) {
+@@ -807,12 +807,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
+ struct bpf_map *map = filp->private_data;
+ u32 type = 0, jited = 0;
+
+- if (map_type_contains_progs(map)) {
+- spin_lock(&map->owner.lock);
+- type = map->owner.type;
+- jited = map->owner.jited;
+- spin_unlock(&map->owner.lock);
++ spin_lock(&map->owner_lock);
++ if (map->owner) {
++ type = map->owner->type;
++ jited = map->owner->jited;
+ }
++ spin_unlock(&map->owner_lock);
+
+ seq_printf(m,
+ "map_type:\t%u\n"
+@@ -1262,7 +1262,7 @@ static int map_create(union bpf_attr *attr)
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
+- spin_lock_init(&map->owner.lock);
++ spin_lock_init(&map->owner_lock);
+
+ if (attr->btf_key_type_id || attr->btf_value_type_id ||
+ /* Even the map's value is a kernel's struct,
+--
+2.50.1
+
--- /dev/null
+From 499bfd97b16a5f31962be0a87dea3e7930cec26f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 01:47:32 +0200
+Subject: bpf: Move cgroup iterator helpers to bpf.h
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 9621e60f59eae87eb9ffe88d90f24f391a1ef0f0 ]
+
+Move them into bpf.h given we also need them in core code.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20250730234733.530041-3-daniel@iogearbox.net
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Stable-dep-of: abad3d0bad72 ("bpf: Fix oob access in cgroup local storage")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf-cgroup.h | 5 -----
+ include/linux/bpf.h | 22 ++++++++++++++--------
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index 2331cd8174fe3..684c4822f76a3 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -72,9 +72,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+ extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+ #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+
+-#define for_each_cgroup_storage_type(stype) \
+- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+-
+ struct bpf_cgroup_storage_map;
+
+ struct bpf_storage_buffer {
+@@ -500,8 +497,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) ({ 0; })
+
+-#define for_each_cgroup_storage_type(stype) for (; false; )
+-
+ #endif /* CONFIG_CGROUP_BPF */
+
+ #endif /* _BPF_CGROUP_H */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 0a097087f0a7c..730e692e002ff 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -194,6 +194,20 @@ enum btf_field_type {
+ BPF_REFCOUNT = (1 << 8),
+ };
+
++enum bpf_cgroup_storage_type {
++ BPF_CGROUP_STORAGE_SHARED,
++ BPF_CGROUP_STORAGE_PERCPU,
++ __BPF_CGROUP_STORAGE_MAX
++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
++};
++
++#ifdef CONFIG_CGROUP_BPF
++# define for_each_cgroup_storage_type(stype) \
++ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
++#else
++# define for_each_cgroup_storage_type(stype) for (; false; )
++#endif /* CONFIG_CGROUP_BPF */
++
+ typedef void (*btf_dtor_kfunc_t)(void *);
+
+ struct btf_field_kptr {
+@@ -995,14 +1009,6 @@ struct bpf_prog_offload {
+ u32 jited_len;
+ };
+
+-enum bpf_cgroup_storage_type {
+- BPF_CGROUP_STORAGE_SHARED,
+- BPF_CGROUP_STORAGE_PERCPU,
+- __BPF_CGROUP_STORAGE_MAX
+-};
+-
+-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+-
+ /* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+--
+2.50.1
+
--- /dev/null
+From c0326de872f4d8a096c4d5ed5297051c93f36612 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:32 +0100
+Subject: btrfs: avoid load/store tearing races when checking if an inode was
+ logged
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 986bf6ed44dff7fbae7b43a0882757ee7f5ba21b ]
+
+At inode_logged() we do a couple of lockless checks for ->logged_trans,
+and these are generally safe except the second one, which is prone to
+load or store tearing due to a concurrent call updating ->logged_trans
+(either at btrfs_log_inode() or later at inode_logged()).
+
+In the first case it's safe to compare to the current transaction ID since
+once ->logged_trans is set the current transaction, we never set it to a
+lower value.
+
+In the second case, where we check if it's greater than zero, we are prone
+to load/store tearing races, since a concurrent task may update it to
+the current transaction ID with store tearing - for example, instead of
+a single 64-bit write, the update may be done with two 32-bit writes or
+four 16-bit writes. In that case the reading side at inode_logged() could
+see a positive value that does not match the current transaction and then
+return a false negative.
+
+Fix this by doing the second check while holding the inode's spinlock, add
+some comments about it too. Also add the data_race() annotation to the
+first check to avoid any reports from KCSAN (or similar tools) and comment
+about it.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index afd76dff1d2bb..e5d6bc1bb5e5d 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3398,15 +3398,32 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_key key;
+ int ret;
+
+- if (inode->logged_trans == trans->transid)
++ /*
++ * Quick lockless call, since once ->logged_trans is set to the current
++ * transaction, we never set it to a lower value anywhere else.
++ */
++ if (data_race(inode->logged_trans) == trans->transid)
+ return 1;
+
+ /*
+- * If logged_trans is not 0, then we know the inode logged was not logged
+- * in this transaction, so we can return false right away.
++ * If logged_trans is not 0 and not trans->transid, then we know the
++ * inode was not logged in this transaction, so we can return false
++ * right away. We take the lock to avoid a race caused by load/store
++ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
++ * in this function further below - an update to trans->transid can be
++ * teared into two 32 bits updates for example, in which case we could
++ * see a positive value that is not trans->transid and assume the inode
++ * was not logged when it was.
+ */
+- if (inode->logged_trans > 0)
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == trans->transid) {
++ spin_unlock(&inode->lock);
++ return 1;
++ } else if (inode->logged_trans > 0) {
++ spin_unlock(&inode->lock);
+ return 0;
++ }
++ spin_unlock(&inode->lock);
+
+ /*
+ * If no log tree was created for this root in this transaction, then
+--
+2.50.1
+
--- /dev/null
+From 948f732ffd23155d9f547bb36c1fed4829d940e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:30 +0100
+Subject: btrfs: fix race between logging inode and checking if it was logged
+ before
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ef07b74e1be56f9eafda6aadebb9ebba0743c9f0 ]
+
+There's a race between checking if an inode was logged before and logging
+an inode that can cause us to mark an inode as not logged just after it
+was logged by a concurrent task:
+
+1) We have inode X which was not logged before, neither in the current
+   transaction nor in a past transaction since the inode was loaded into
+   memory, so its ->logged_trans value is 0;
+
+2) We are at transaction N;
+
+3) Task A calls inode_logged() against inode X, sees that ->logged_trans
+ is 0 and there is a log tree and so it proceeds to search in the log
+ tree for an inode item for inode X. It doesn't see any, but before
+ it sets ->logged_trans to N - 1...
+
+4) Task B calls btrfs_log_inode() against inode X, logs the inode and
+   sets ->logged_trans to N;
+
+5) Task A now sets ->logged_trans to N - 1;
+
+6) At this point anyone calling inode_logged() gets 0 (inode not logged)
+ since ->logged_trans is greater than 0 and less than N, but our inode
+ was really logged. As a consequence operations like rename, unlink and
+ link that happen afterwards in the current transaction end up not
+ updating the log when they should.
+
+Fix this by ensuring inode_logged() only updates ->logged_trans in case
+the inode item is not found in the log tree if after taking the inode's
+lock (spinlock struct btrfs_inode::lock) the ->logged_trans value is still
+zero, since the inode lock is what protects setting ->logged_trans at
+btrfs_log_inode().
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 36 ++++++++++++++++++++++++++++++------
+ 1 file changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 9439abf415ae3..26036cf4f51c0 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3356,6 +3356,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode)
++{
++ bool ret = false;
++
++ /*
++ * Do this only if ->logged_trans is still 0 to prevent races with
++ * concurrent logging as we may see the inode not logged when
++ * inode_logged() is called but it gets logged after inode_logged() did
++ * not find it in the log tree and we end up setting ->logged_trans to a
++ * value less than trans->transid after the concurrent logging task has
++ * set it to trans->transid. As a consequence, subsequent rename, unlink
++ * and link operations may end up not logging new names and removing old
++ * names from the log.
++ */
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == 0)
++ inode->logged_trans = trans->transid - 1;
++ else if (inode->logged_trans == trans->transid)
++ ret = true;
++ spin_unlock(&inode->lock);
++
++ return ret;
++}
++
+ /*
+ * Check if an inode was logged in the current transaction. This correctly deals
+ * with the case where the inode was logged but has a logged_trans of 0, which
+@@ -3390,10 +3415,8 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * transaction's ID, to avoid the search below in a future call in case
+ * a log tree gets created after this.
+ */
+- if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
+- inode->logged_trans = trans->transid - 1;
+- return 0;
+- }
++ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
++ return mark_inode_as_not_logged(trans, inode);
+
+ /*
+ * We have a log tree and the inode's logged_trans is 0. We can't tell
+@@ -3447,8 +3470,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * Set logged_trans to a value greater than 0 and less then the
+ * current transaction to avoid doing the search in future calls.
+ */
+- inode->logged_trans = trans->transid - 1;
+- return 0;
++ return mark_inode_as_not_logged(trans, inode);
+ }
+
+ /*
+@@ -3456,7 +3478,9 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * the current transacion's ID, to avoid future tree searches as long as
+ * the inode is not evicted again.
+ */
++ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
++ spin_unlock(&inode->lock);
+
+ /*
+ * If it's a directory, then we must set last_dir_index_offset to the
+--
+2.50.1
+
--- /dev/null
+From e2af8bbb12af0fc41f78c2a3323df18dbf69e8e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 12:11:31 +0100
+Subject: btrfs: fix race between setting last_dir_index_offset and inode
+ logging
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 59a0dd4ab98970086fd096281b1606c506ff2698 ]
+
+At inode_logged() if we find that the inode was not logged before we
+update its ->last_dir_index_offset to (u64)-1 with the goal that the
+next directory log operation will see the (u64)-1 and then figure out
+it must check what was the index of the last logged dir index key and
+update ->last_dir_index_offset to that key's offset (this is done in
+update_last_dir_index_offset()).
+
+This however has a possibility for a time window where a race can happen
+and lead to directory logging skipping dir index keys that should be
+logged. The race happens like this:
+
+1) Task A calls inode_logged(), sees ->logged_trans as 0 and then checks
+ that the inode item was logged before, but before it sets the inode's
+ ->last_dir_index_offset to (u64)-1...
+
+2) Task B is at btrfs_log_inode() which calls inode_logged() early, and
+ that has set ->last_dir_index_offset to (u64)-1;
+
+3) Task B then enters log_directory_changes() which calls
+ update_last_dir_index_offset(). There it sees ->last_dir_index_offset
+ is (u64)-1 and that the inode was logged before (ctx->logged_before is
+ true), and so it searches for the last logged dir index key in the log
+ tree and it finds that it has an offset (index) value of N, so it sets
+ ->last_dir_index_offset to N, so that we can skip index keys that are
+ less than or equal to N (later at process_dir_items_leaf());
+
+4) Task A now sets ->last_dir_index_offset to (u64)-1, undoing the update
+ that task B just did;
+
+5) Task B will now skip every index key when it enters
+ process_dir_items_leaf(), since ->last_dir_index_offset is (u64)-1.
+
+Fix this by making inode_logged() not touch ->last_dir_index_offset and
+initializing it to 0 when an inode is loaded (at btrfs_alloc_inode()) and
+then having update_last_dir_index_offset() treat a value of 0 as meaning
+we must check the log tree and update with the index of the last logged
+index key. This is fine since the minimum possible value for
+->last_dir_index_offset is 1 (BTRFS_DIR_START_INDEX - 1 = 2 - 1 = 1).
+This also simplifies the management of ->last_dir_index_offset and now
+all accesses to it are done under the inode's log_mutex.
+
+Fixes: 0f8ce49821de ("btrfs: avoid inode logging during rename and link when possible")
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/btrfs_inode.h | 2 +-
+ fs/btrfs/inode.c | 1 +
+ fs/btrfs/tree-log.c | 17 ++---------------
+ 3 files changed, 4 insertions(+), 16 deletions(-)
+
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index c4968efc3fc46..a2e471d51a8f0 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -179,7 +179,7 @@ struct btrfs_inode {
+ u64 new_delalloc_bytes;
+ /*
+ * The offset of the last dir index key that was logged.
+- * This is used only for directories.
++ * This is used only for directories. Protected by 'log_mutex'.
+ */
+ u64 last_dir_index_offset;
+ };
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 4502a474a81da..ee5ffeab85bb7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8525,6 +8525,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ ei->last_sub_trans = 0;
+ ei->logged_trans = 0;
+ ei->delalloc_bytes = 0;
++ /* new_delalloc_bytes and last_dir_index_offset are in a union. */
+ ei->new_delalloc_bytes = 0;
+ ei->defrag_bytes = 0;
+ ei->disk_i_size = 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 26036cf4f51c0..afd76dff1d2bb 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3482,19 +3482,6 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ inode->logged_trans = trans->transid;
+ spin_unlock(&inode->lock);
+
+- /*
+- * If it's a directory, then we must set last_dir_index_offset to the
+- * maximum possible value, so that the next attempt to log the inode does
+- * not skip checking if dir index keys found in modified subvolume tree
+- * leaves have been logged before, otherwise it would result in attempts
+- * to insert duplicate dir index keys in the log tree. This must be done
+- * because last_dir_index_offset is an in-memory only field, not persisted
+- * in the inode item or any other on-disk structure, so its value is lost
+- * once the inode is evicted.
+- */
+- if (S_ISDIR(inode->vfs_inode.i_mode))
+- inode->last_dir_index_offset = (u64)-1;
+-
+ return 1;
+ }
+
+@@ -4065,7 +4052,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+
+ /*
+ * If the inode was logged before and it was evicted, then its
+- * last_dir_index_offset is (u64)-1, so we don't the value of the last index
++ * last_dir_index_offset is 0, so we don't know the value of the last index
+ * key offset. If that's the case, search for it and update the inode. This
+ * is to avoid lookups in the log tree every time we try to insert a dir index
+ * key from a leaf changed in the current transaction, and to allow us to always
+@@ -4081,7 +4068,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode,
+
+ lockdep_assert_held(&inode->log_mutex);
+
+- if (inode->last_dir_index_offset != (u64)-1)
++ if (inode->last_dir_index_offset != 0)
+ return 0;
+
+ if (!ctx->logged_before) {
+--
+2.50.1
+
--- /dev/null
+From fb0f320d17fd8209913256bf67c8debfe64587ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 17:42:14 +0200
+Subject: cdc_ncm: Flag Intel OEM version of Fibocom L850-GL as WWAN
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+[ Upstream commit 4a73a36cb704813f588af13d9842d0ba5a185758 ]
+
+This lets NetworkManager/ModemManager know that this is a modem and
+needs to be connected first.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Link: https://patch.msgid.link/20250814154214.250103-1-lkundrak@v3.sk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/cdc_ncm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index d9792fd515a90..22554daaf6ff1 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -2043,6 +2043,13 @@ static const struct usb_device_id cdc_devs[] = {
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
++ /* Intel modem (label from OEM reads Fibocom L850-GL) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&wwan_info,
++ },
++
+ /* DisplayLink docking stations */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+--
+2.50.1
+
--- /dev/null
+From e7e03358900a4b7303f1f0189f37034dd4f4c5fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 May 2025 15:10:58 +0900
+Subject: cpupower: Fix a bug where the -t option of the set subcommand was not
+ working.
+
+From: Shinji Nomoto <fj5851bi@fujitsu.com>
+
+[ Upstream commit b3eaf14f4c63fd6abc7b68c6d7a07c5680a6d8e5 ]
+
+The set subcommand's -t option is documented as being available for boost
+configuration, but it was not actually functioning due to a bug
+in the option handling.
+
+Link: https://lore.kernel.org/r/20250522061122.2149188-2-fj5851bi@fujitsu.com
+Signed-off-by: Shinji Nomoto <fj5851bi@fujitsu.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/power/cpupower/utils/cpupower-set.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index 0677b58374abf..59ace394cf3ef 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -62,8 +62,8 @@ int cmd_set(int argc, char **argv)
+
+ params.params = 0;
+ /* parameter parsing */
+- while ((ret = getopt_long(argc, argv, "b:e:m:",
+- set_opts, NULL)) != -1) {
++ while ((ret = getopt_long(argc, argv, "b:e:m:t:",
++ set_opts, NULL)) != -1) {
+ switch (ret) {
+ case 'b':
+ if (params.perf_bias)
+--
+2.50.1
+
--- /dev/null
+From 10f00a935006e25b2c55f697f7b7c9d611fca527 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:50 +0200
+Subject: drm/amd/display: Don't warn when missing DCE encoder caps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 8246147f1fbaed522b8bcc02ca34e4260747dcfb ]
+
+On some GPUs the VBIOS just doesn't have encoder caps,
+or maybe not for every encoder.
+
+This isn't really a problem and it's handled well,
+so let's not litter the logs with it.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 33e0227ee96e62d034781e91f215e32fd0b1d512)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 136bd93c3b655..0a33f8f117e92 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+@@ -1795,13 +1795,13 @@ void dce60_link_encoder_construct(
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == result) {
++ if (result == BP_RESULT_OK) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+- } else {
++ } else if (result != BP_RESULT_NORECORD) {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+--
+2.50.1
+
--- /dev/null
+From 948ca4d84f3857d5acb712eeb4ba53f3e5b5ffbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 18:07:15 +0800
+Subject: fs: writeback: fix use-after-free in __mark_inode_dirty()
+
+From: Jiufei Xue <jiufei.xue@samsung.com>
+
+[ Upstream commit d02d2c98d25793902f65803ab853b592c7a96b29 ]
+
+A use-after-free issue occurred when __mark_inode_dirty() got the
+bdi_writeback that was in the process of switching.
+
+CPU: 1 PID: 562 Comm: systemd-random- Not tainted 6.6.56-gb4403bd46a8e #1
+......
+pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __mark_inode_dirty+0x124/0x418
+lr : __mark_inode_dirty+0x118/0x418
+sp : ffffffc08c9dbbc0
+........
+Call trace:
+ __mark_inode_dirty+0x124/0x418
+ generic_update_time+0x4c/0x60
+ file_modified+0xcc/0xd0
+ ext4_buffered_write_iter+0x58/0x124
+ ext4_file_write_iter+0x54/0x704
+ vfs_write+0x1c0/0x308
+ ksys_write+0x74/0x10c
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x114
+ el0_svc_common.constprop.0+0xc0/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x40/0xe4
+ el0t_64_sync_handler+0x120/0x12c
+ el0t_64_sync+0x194/0x198
+
+Root cause is:
+
+systemd-random-seed kworker
+----------------------------------------------------------------------
+___mark_inode_dirty inode_switch_wbs_work_fn
+
+ spin_lock(&inode->i_lock);
+ inode_attach_wb
+ locked_inode_to_wb_and_lock_list
+ get inode->i_wb
+ spin_unlock(&inode->i_lock);
+ spin_lock(&wb->list_lock)
+ spin_lock(&inode->i_lock)
+ inode_io_list_move_locked
+ spin_unlock(&wb->list_lock)
+ spin_unlock(&inode->i_lock)
+ spin_lock(&old_wb->list_lock)
+ inode_do_switch_wbs
+ spin_lock(&inode->i_lock)
+ inode->i_wb = new_wb
+ spin_unlock(&inode->i_lock)
+ spin_unlock(&old_wb->list_lock)
+ wb_put_many(old_wb, nr_switched)
+ cgwb_release
+ old wb released
+ wb_wakeup_delayed() accesses wb,
+ then triggers the use-after-free
+ issue
+
+Fix this race condition by holding the inode spinlock until
+wb_wakeup_delayed() has finished.
+
+Signed-off-by: Jiufei Xue <jiufei.xue@samsung.com>
+Link: https://lore.kernel.org/20250728100715.3863241-1-jiufei.xue@samsung.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fs-writeback.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 0a498bc60f557..ed110568d6127 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2536,10 +2536,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ wakeup_bdi = inode_io_list_move_locked(inode, wb,
+ dirty_list);
+
+- spin_unlock(&wb->list_lock);
+- spin_unlock(&inode->i_lock);
+- trace_writeback_dirty_inode_enqueue(inode);
+-
+ /*
+ * If this is the first dirty inode for this bdi,
+ * we have to wake-up the corresponding bdi thread
+@@ -2549,6 +2545,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ if (wakeup_bdi &&
+ (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
+ wb_wakeup_delayed(wb);
++
++ spin_unlock(&wb->list_lock);
++ spin_unlock(&inode->i_lock);
++ trace_writeback_dirty_inode_enqueue(inode);
++
+ return;
+ }
+ }
+--
+2.50.1
+
--- /dev/null
+From 059e3c9a2437177001643ad9632a33c04dbac2ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Save LBT before FPU in setup_sigcontext()
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 112ca94f6c3b3e0b2002a240de43c487a33e0234 ]
+
+Now if preemption happens between protected_save_fpu_context() and
+protected_save_lbt_context(), FTOP context is lost. Because FTOP is
+saved by protected_save_lbt_context() but protected_save_fpu_context()
+disables TM before that. So save LBT before FPU in setup_sigcontext()
+to avoid this potential risk.
+
+Signed-off-by: Hanlu Li <lihanlu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/signal.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
+index 4a3686d133494..0e90cd2df0ea3 100644
+--- a/arch/loongarch/kernel/signal.c
++++ b/arch/loongarch/kernel/signal.c
+@@ -697,6 +697,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
++#ifdef CONFIG_CPU_HAS_LBT
++ if (extctx->lbt.addr)
++ err |= protected_save_lbt_context(extctx);
++#endif
++
+ if (extctx->lasx.addr)
+ err |= protected_save_lasx_context(extctx);
+ else if (extctx->lsx.addr)
+@@ -704,11 +709,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ else if (extctx->fpu.addr)
+ err |= protected_save_fpu_context(extctx);
+
+-#ifdef CONFIG_CPU_HAS_LBT
+- if (extctx->lbt.addr)
+- err |= protected_save_lbt_context(extctx);
+-#endif
+-
+ /* Set the "end" magic */
+ info = (struct sctx_info *)extctx->end.addr;
+ err |= __put_user(0, &info->magic);
+--
+2.50.1
+
--- /dev/null
+bpf-add-cookie-object-to-bpf-maps.patch
+bpf-move-cgroup-iterator-helpers-to-bpf.h.patch
+bpf-move-bpf-map-owner-out-of-common-struct.patch
+bpf-fix-oob-access-in-cgroup-local-storage.patch
+btrfs-fix-race-between-logging-inode-and-checking-if.patch
+btrfs-fix-race-between-setting-last_dir_index_offset.patch
+btrfs-avoid-load-store-tearing-races-when-checking-i.patch
+loongarch-save-lbt-before-fpu-in-setup_sigcontext.patch
+cdc_ncm-flag-intel-oem-version-of-fibocom-l850-gl-as.patch
+drm-amd-display-don-t-warn-when-missing-dce-encoder-.patch
+cpupower-fix-a-bug-where-the-t-option-of-the-set-sub.patch
+bluetooth-hci_sync-avoid-adding-default-advertising-.patch
+fs-writeback-fix-use-after-free-in-__mark_inode_dirt.patch