--- /dev/null
+From 4c46091ee985ae84c60c5e95055d779fcd291d87 Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@linaro.org>
+Date: Tue, 17 May 2022 11:04:20 -0700
+Subject: bpf: Fix KASAN use-after-free Read in compute_effective_progs
+
+From: Tadeusz Struk <tadeusz.struk@linaro.org>
+
+commit 4c46091ee985ae84c60c5e95055d779fcd291d87 upstream.
+
+Syzbot found a Use After Free bug in compute_effective_progs().
+The reproducer creates a number of BPF links, and causes a fault
+injected alloc to fail, while calling bpf_link_detach on them.
+Link detach triggers the link to be freed by bpf_link_free(),
+which calls __cgroup_bpf_detach() and update_effective_progs().
+If the memory allocation in this function fails, the function restores
+the pointer to the bpf_cgroup_link on the cgroup list, but the memory
+gets freed just after it returns. After this, every subsequent call to
+update_effective_progs() causes this already deallocated pointer to be
+dereferenced in prog_list_length(), and triggers KASAN UAF error.
+
+To fix this issue don't preserve the pointer to the prog or link in the
+list, but remove it and replace it with a dummy prog without shrinking
+the table. The subsequent call to __cgroup_bpf_detach() or
+__cgroup_bpf_attach() will correct it.
+
+Fixes: af6eea57437a ("bpf: Implement bpf_link-based cgroup BPF program attachment")
+Reported-by: <syzbot+f264bffdfbd5614f3bb2@syzkaller.appspotmail.com>
+Signed-off-by: Tadeusz Struk <tadeusz.struk@linaro.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: https://syzkaller.appspot.com/bug?id=8ebf179a95c2a2670f7cf1ba62429ec044369db4
+Link: https://lore.kernel.org/bpf/20220517180420.87954-1-tadeusz.struk@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/cgroup.c | 70 ++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 60 insertions(+), 10 deletions(-)
+
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -748,6 +748,60 @@ static struct bpf_prog_list *find_detach
+ }
+
+ /**
++ * purge_effective_progs() - After compute_effective_progs fails to alloc new
++ * cgrp->bpf.inactive table we can recover by
++ * recomputing the array in place.
++ *
++ * @cgrp: The cgroup which descendants to traverse
++ * @prog: A program to detach or NULL
++ * @link: A link to detach or NULL
++ * @atype: Type of detach operation
++ */
++static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
++ struct bpf_cgroup_link *link,
++ enum cgroup_bpf_attach_type atype)
++{
++ struct cgroup_subsys_state *css;
++ struct bpf_prog_array *progs;
++ struct bpf_prog_list *pl;
++ struct list_head *head;
++ struct cgroup *cg;
++ int pos;
++
++ /* recompute effective prog array in place */
++ css_for_each_descendant_pre(css, &cgrp->self) {
++ struct cgroup *desc = container_of(css, struct cgroup, self);
++
++ if (percpu_ref_is_zero(&desc->bpf.refcnt))
++ continue;
++
++ /* find position of link or prog in effective progs array */
++ for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
++ if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
++ continue;
++
++ head = &cg->bpf.progs[atype];
++ list_for_each_entry(pl, head, node) {
++ if (!prog_list_prog(pl))
++ continue;
++ if (pl->prog == prog && pl->link == link)
++ goto found;
++ pos++;
++ }
++ }
++found:
++ BUG_ON(!cg);
++ progs = rcu_dereference_protected(
++ desc->bpf.effective[atype],
++ lockdep_is_held(&cgroup_mutex));
++
++ /* Remove the program from the array */
++ WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
++ "Failed to purge a prog from array at index %d", pos);
++ }
++}
++
++/**
+ * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
+ * propagate the change to descendants
+ * @cgrp: The cgroup which descendants to traverse
+@@ -766,7 +820,6 @@ static int __cgroup_bpf_detach(struct cg
+ struct bpf_prog_list *pl;
+ struct list_head *progs;
+ u32 flags;
+- int err;
+
+ atype = to_cgroup_bpf_attach_type(type);
+ if (atype < 0)
+@@ -788,9 +841,12 @@ static int __cgroup_bpf_detach(struct cg
+ pl->prog = NULL;
+ pl->link = NULL;
+
+- err = update_effective_progs(cgrp, atype);
+- if (err)
+- goto cleanup;
++ if (update_effective_progs(cgrp, atype)) {
++ /* if update effective array failed replace the prog with a dummy prog */
++ pl->prog = old_prog;
++ pl->link = link;
++ purge_effective_progs(cgrp, old_prog, link, atype);
++ }
+
+ /* now can actually delete it from this cgroup list */
+ list_del(&pl->node);
+@@ -802,12 +858,6 @@ static int __cgroup_bpf_detach(struct cg
+ bpf_prog_put(old_prog);
+ static_branch_dec(&cgroup_bpf_enabled_key[atype]);
+ return 0;
+-
+-cleanup:
+- /* restore back prog or link */
+- pl->prog = old_prog;
+- pl->link = link;
+- return err;
+ }
+
+ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
--- /dev/null
+From dc4d31684974d140250f3ee612c3f0cab13b3146 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Tue, 7 Jun 2022 19:48:24 +0800
+Subject: btrfs: reject log replay if there is unsupported RO compat flag
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit dc4d31684974d140250f3ee612c3f0cab13b3146 upstream.
+
+[BUG]
+If we have a btrfs image with dirty log, along with an unsupported RO
+compatible flag:
+
+log_root 30474240
+...
+compat_flags 0x0
+compat_ro_flags 0x40000003
+ ( FREE_SPACE_TREE |
+ FREE_SPACE_TREE_VALID |
+ unknown flag: 0x40000000 )
+
+Then even if we can only mount it RO, we will still cause metadata
+update for log replay:
+
+ BTRFS info (device dm-1): flagging fs with big metadata feature
+ BTRFS info (device dm-1): using free space tree
+ BTRFS info (device dm-1): has skinny extents
+ BTRFS info (device dm-1): start tree-log replay
+
+This is definitely against RO compat flag requirement.
+
+[CAUSE]
+RO compat flag only forces us to do RO mount, but we will still do log
+replay for plain RO mount.
+
+Thus this will result us to do log replay and update metadata.
+
+This can be very problematic for new RO compat flag, for example older
+kernel can not understand v2 cache, and if we allow metadata update on
+RO mount and invalidate/corrupt v2 cache.
+
+[FIX]
+Just reject the mount unless rescue=nologreplay is provided:
+
+ BTRFS error (device dm-1): cannot replay dirty log with unsupported compat_ro features (0x40000000), try rescue=nologreplay
+
+We don't want to set rescue=nologreplay directly, as this would make the
+end user to read the old data, and cause confusion.
+
+Since such a case is really rare, we're mostly fine to just reject the
+mount with an error message, which also includes the proper workaround.
+
+CC: stable@vger.kernel.org #4.9+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/disk-io.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3654,6 +3654,20 @@ int __cold open_ctree(struct super_block
+ err = -EINVAL;
+ goto fail_alloc;
+ }
++ /*
++ * We have unsupported RO compat features; even though mounted RO, we
++ * must not cause any metadata writes, including log replay.
++ * Otherwise we could screw up whatever the new feature requires.
++ */
++ if (unlikely(features && btrfs_super_log_root(disk_super) &&
++ !btrfs_test_opt(fs_info, NOLOGREPLAY))) {
++ btrfs_err(fs_info,
++"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
++ features);
++ err = -EINVAL;
++ goto fail_alloc;
++ }
++
+
+ if (sectorsize < PAGE_SIZE) {
+ struct btrfs_subpage_info *subpage_info;