6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jan 2026 14:24:44 +0000 (15:24 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jan 2026 14:24:44 +0000 (15:24 +0100)
added patches:
bpf-do-not-let-bpf-test-infra-emit-invalid-gso-types-to-stack.patch
bpf-reject-narrower-access-to-pointer-ctx-fields.patch
migrate-correct-lock-ordering-for-hugetlb-file-folios.patch
mm-damon-sysfs-scheme-cleanup-access_pattern-subdirs-on-scheme-dir-setup-failure.patch
mm-damon-sysfs-scheme-cleanup-quotas-subdirs-on-scheme-dir-setup-failure.patch

queue-6.1/bpf-do-not-let-bpf-test-infra-emit-invalid-gso-types-to-stack.patch [new file with mode: 0644]
queue-6.1/bpf-reject-narrower-access-to-pointer-ctx-fields.patch [new file with mode: 0644]
queue-6.1/migrate-correct-lock-ordering-for-hugetlb-file-folios.patch [new file with mode: 0644]
queue-6.1/mm-damon-sysfs-scheme-cleanup-access_pattern-subdirs-on-scheme-dir-setup-failure.patch [new file with mode: 0644]
queue-6.1/mm-damon-sysfs-scheme-cleanup-quotas-subdirs-on-scheme-dir-setup-failure.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/bpf-do-not-let-bpf-test-infra-emit-invalid-gso-types-to-stack.patch b/queue-6.1/bpf-do-not-let-bpf-test-infra-emit-invalid-gso-types-to-stack.patch
new file mode 100644 (file)
index 0000000..d20a106
--- /dev/null
@@ -0,0 +1,78 @@
+From 04a899573fb87273a656f178b5f920c505f68875 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 20 Oct 2025 09:54:41 +0200
+Subject: bpf: Do not let BPF test infra emit invalid GSO types to stack
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 04a899573fb87273a656f178b5f920c505f68875 upstream.
+
+Yinhao et al. reported that their fuzzer tool was able to trigger a
+skb_warn_bad_offload() from netif_skb_features() -> gso_features_check().
+When a BPF program - triggered via BPF test infra - pushes the packet
+to the loopback device via bpf_clone_redirect(), the mentioned offload
+warning can be seen. GSO-related features are then rightfully disabled.
+
+We get into this situation due to convert___skb_to_skb() setting
+gso_segs and gso_size but not gso_type. Technically, it makes sense
+that this warning triggers since the GSO properties are malformed due
+to the unset gso_type. Potentially, the gso_type could be marked
+non-trustworthy by setting it at least to SKB_GSO_DODGY without any other specific
+assumptions, but that also feels wrong given we should not go further
+into the GSO engine in the first place.
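+
+For context, skb_is_gso() keys off gso_size alone (roughly as defined in
+include/linux/skbuff.h), so gso_size != 0 combined with gso_type == 0 is
+exactly the malformed state that trips gso_features_check():
+
+    static inline bool skb_is_gso(const struct sk_buff *skb)
+    {
+            return skb_shinfo(skb)->gso_size;
+    }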
+
+The checks were added in 121d57af308d ("gso: validate gso_type in GSO
+handlers") because there were malicious (syzbot) senders that combine
+a protocol with a non-matching gso_type. If we would want to drop such
+packets, gso_features_check() currently only returns feature flags via
+netif_skb_features(), so one location for potentially dropping such skbs
+could be validate_xmit_unreadable_skb(), but then otoh it would be
+an additional check in the fast-path for a very corner case. Given
+bpf_clone_redirect() is the only place where BPF test infra could emit
+such packets, lets reject them right there.
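+
+A hypothetical userspace sketch of the trigger (not from the original
+report; assumes a loaded BPF_PROG_TYPE_SCHED_CLS prog fd whose program
+calls bpf_clone_redirect()). The test run itself still succeeds, but with
+this fix the helper inside the program observes -EBADMSG instead of
+emitting the skb:
+
+    #include <bpf/bpf.h>        /* libbpf: bpf_prog_test_run_opts() */
+    #include <linux/bpf.h>      /* struct __sk_buff */
+
+    int run_with_typeless_gso(int prog_fd)
+    {
+            struct __sk_buff ctx = {};
+            char pkt[128] = {};     /* dummy payload */
+
+            ctx.gso_segs = 2;       /* GSO metadata is set ... */
+            ctx.gso_size = 64;      /* ... but gso_type stays 0/unset */
+
+            LIBBPF_OPTS(bpf_test_run_opts, opts,
+                    .ctx_in = &ctx,
+                    .ctx_size_in = sizeof(ctx),
+                    .data_in = pkt,
+                    .data_size_in = sizeof(pkt),
+            );
+            return bpf_prog_test_run_opts(prog_fd, &opts);
+    }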
+
+Fixes: 850a88cc4096 ("bpf: Expose __sk_buff wire_len/gso_segs to BPF_PROG_TEST_RUN")
+Fixes: cf62089b0edd ("bpf: Add gso_size to __sk_buff")
+Reported-by: Yinhao Hu <dddddd@hust.edu.cn>
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Reported-by: Dongliang Mu <dzm91@hust.edu.cn>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/20251020075441.127980-1-daniel@iogearbox.net
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bpf/test_run.c |    5 +++++
+ net/core/filter.c  |    7 +++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -1047,6 +1047,11 @@ static int convert___skb_to_skb(struct s
+       if (__skb->gso_segs > GSO_MAX_SEGS)
+               return -EINVAL;
++
++      /* Currently GSO type is zero/unset. If this gets extended with
++       * a small list of accepted GSO types in future, the filter for
++       * an unset GSO type in bpf_clone_redirect() can be lifted.
++       */
+       skb_shinfo(skb)->gso_segs = __skb->gso_segs;
+       skb_shinfo(skb)->gso_size = __skb->gso_size;
+       skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2444,6 +2444,13 @@ BPF_CALL_3(bpf_clone_redirect, struct sk
+       if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
+               return -EINVAL;
++      /* BPF test infra's convert___skb_to_skb() can create type-less
++       * GSO packets. gso_features_check() will detect this as a bad
++       * offload. However, let's not leak them out in the first place.
++       */
++      if (unlikely(skb_is_gso(skb) && !skb_shinfo(skb)->gso_type))
++              return -EBADMSG;
++
+       dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
+       if (unlikely(!dev))
+               return -EINVAL;
diff --git a/queue-6.1/bpf-reject-narrower-access-to-pointer-ctx-fields.patch b/queue-6.1/bpf-reject-narrower-access-to-pointer-ctx-fields.patch
new file mode 100644 (file)
index 0000000..d128f73
--- /dev/null
@@ -0,0 +1,158 @@
+From e09299225d5ba3916c91ef70565f7d2187e4cca0 Mon Sep 17 00:00:00 2001
+From: Paul Chaignon <paul.chaignon@gmail.com>
+Date: Tue, 22 Jul 2025 16:32:32 +0200
+Subject: bpf: Reject narrower access to pointer ctx fields
+
+From: Paul Chaignon <paul.chaignon@gmail.com>
+
+commit e09299225d5ba3916c91ef70565f7d2187e4cca0 upstream.
+
+The following BPF program, simplified from a syzkaller repro, causes a
+kernel warning:
+
+    r0 = *(u8 *)(r1 + 169);
+    exit;
+
+With pointer field sk being at offset 168 in __sk_buff. This access is
+detected as a narrower read in bpf_skb_is_valid_access because it
+doesn't match offsetof(struct __sk_buff, sk). It is therefore allowed
+and later proceeds to bpf_convert_ctx_access. Note that for the
+"is_narrower_load" case in the convert_ctx_accesses(), the insn->off
+is aligned, so the cnt may not be 0 because it matches the
+offsetof(struct __sk_buff, sk) in the bpf_convert_ctx_access. However,
+the target_size stays 0 and the verifier errors with a kernel warning:
+
+    verifier bug: error during ctx access conversion(1)
+
+This patch fixes that to return a proper "invalid bpf_context access
+off=X size=Y" error on the load instruction.
+
+The same issue affects multiple other fields in context structures that
+allow narrow access. Some other non-affected fields (for sk_msg,
+sk_lookup, and sockopt) were also changed to use bpf_ctx_range_ptr for
+consistency.
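+
+For reference, the range-style case labels are GCC case ranges built on
+offsetof()/offsetofend() (roughly as defined in net/core/filter.c), so
+they match every byte of the 8-byte pointer field rather than just its
+start:
+
+    case offsetof(struct __sk_buff, sk):            /* off == 168 only */
+    case bpf_ctx_range_ptr(struct __sk_buff, sk):   /* off == 168..175 */
+
+With the range form, the stray 1-byte load at offset 169 now falls into
+the case and is rejected by the size != sizeof(__u64) check.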
+
+Note this syzkaller crash was reported in the "Closes" link below, which
+used to be about a different bug, fixed in
+commit fce7bd8e385a ("bpf/verifier: Handle BPF_LOAD_ACQ instructions
+in insn_def_regno()"). Because syzbot somehow confused the two bugs,
+the new crash and repro didn't get reported to the mailing list.
+
+Fixes: f96da09473b52 ("bpf: simplify narrower ctx access")
+Fixes: 0df1a55afa832 ("bpf: Warn on internal verifier errors")
+Reported-by: syzbot+0ef84a7bdf5301d4cbec@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0ef84a7bdf5301d4cbec
+Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://patch.msgid.link/3b8dcee67ff4296903351a974ddd9c4dca768b64.1753194596.git.paul.chaignon@gmail.com
+[shung-hsi.yu: offsetof(struct bpf_sock_ops, skb_hwtstamp) case was
+dropped because it was only added in v6.2 with commit 9bb053490f1a
+("bpf: Add hwtstamp field for the sockops prog")]
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/cgroup.c |    8 ++++----
+ net/core/filter.c   |   18 +++++++++---------
+ 2 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -2384,22 +2384,22 @@ static bool cg_sockopt_is_valid_access(i
+       }
+       switch (off) {
+-      case offsetof(struct bpf_sockopt, sk):
++      case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
+               if (size != sizeof(__u64))
+                       return false;
+               info->reg_type = PTR_TO_SOCKET;
+               break;
+-      case offsetof(struct bpf_sockopt, optval):
++      case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
+               if (size != sizeof(__u64))
+                       return false;
+               info->reg_type = PTR_TO_PACKET;
+               break;
+-      case offsetof(struct bpf_sockopt, optval_end):
++      case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
+               if (size != sizeof(__u64))
+                       return false;
+               info->reg_type = PTR_TO_PACKET_END;
+               break;
+-      case offsetof(struct bpf_sockopt, retval):
++      case bpf_ctx_range(struct bpf_sockopt, retval):
+               if (size != size_default)
+                       return false;
+               return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -8522,7 +8522,7 @@ static bool bpf_skb_is_valid_access(int
+               if (size != sizeof(__u64))
+                       return false;
+               break;
+-      case offsetof(struct __sk_buff, sk):
++      case bpf_ctx_range_ptr(struct __sk_buff, sk):
+               if (type == BPF_WRITE || size != sizeof(__u64))
+                       return false;
+               info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
+@@ -9106,7 +9106,7 @@ static bool sock_addr_is_valid_access(in
+                               return false;
+               }
+               break;
+-      case offsetof(struct bpf_sock_addr, sk):
++      case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
+               if (type != BPF_READ)
+                       return false;
+               if (size != sizeof(__u64))
+@@ -9160,17 +9160,17 @@ static bool sock_ops_is_valid_access(int
+                       if (size != sizeof(__u64))
+                               return false;
+                       break;
+-              case offsetof(struct bpf_sock_ops, sk):
++              case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
+                       if (size != sizeof(__u64))
+                               return false;
+                       info->reg_type = PTR_TO_SOCKET_OR_NULL;
+                       break;
+-              case offsetof(struct bpf_sock_ops, skb_data):
++              case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
+                       if (size != sizeof(__u64))
+                               return false;
+                       info->reg_type = PTR_TO_PACKET;
+                       break;
+-              case offsetof(struct bpf_sock_ops, skb_data_end):
++              case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
+                       if (size != sizeof(__u64))
+                               return false;
+                       info->reg_type = PTR_TO_PACKET_END;
+@@ -9245,17 +9245,17 @@ static bool sk_msg_is_valid_access(int o
+               return false;
+       switch (off) {
+-      case offsetof(struct sk_msg_md, data):
++      case bpf_ctx_range_ptr(struct sk_msg_md, data):
+               info->reg_type = PTR_TO_PACKET;
+               if (size != sizeof(__u64))
+                       return false;
+               break;
+-      case offsetof(struct sk_msg_md, data_end):
++      case bpf_ctx_range_ptr(struct sk_msg_md, data_end):
+               info->reg_type = PTR_TO_PACKET_END;
+               if (size != sizeof(__u64))
+                       return false;
+               break;
+-      case offsetof(struct sk_msg_md, sk):
++      case bpf_ctx_range_ptr(struct sk_msg_md, sk):
+               if (size != sizeof(__u64))
+                       return false;
+               info->reg_type = PTR_TO_SOCKET;
+@@ -11444,7 +11444,7 @@ static bool sk_lookup_is_valid_access(in
+               return false;
+       switch (off) {
+-      case offsetof(struct bpf_sk_lookup, sk):
++      case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
+               info->reg_type = PTR_TO_SOCKET_OR_NULL;
+               return size == sizeof(__u64);
diff --git a/queue-6.1/migrate-correct-lock-ordering-for-hugetlb-file-folios.patch b/queue-6.1/migrate-correct-lock-ordering-for-hugetlb-file-folios.patch
new file mode 100644 (file)
index 0000000..a270420
--- /dev/null
@@ -0,0 +1,106 @@
+From b7880cb166ab62c2409046b2347261abf701530e Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 9 Jan 2026 04:13:42 +0000
+Subject: migrate: correct lock ordering for hugetlb file folios
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit b7880cb166ab62c2409046b2347261abf701530e upstream.
+
+Syzbot has found a deadlock (analyzed by Lance Yang):
+
+1) Task (5749): Holds folio_lock, then tries to acquire i_mmap_rwsem (read lock).
+2) Task (5754): Holds i_mmap_rwsem (write lock), then tries to acquire
+folio_lock.
+
+migrate_pages()
+  -> migrate_hugetlbs()
+    -> unmap_and_move_huge_page()     <- Takes folio_lock!
+      -> remove_migration_ptes()
+        -> __rmap_walk_file()
+          -> i_mmap_lock_read()       <- Waits for i_mmap_rwsem(read lock)!
+
+hugetlbfs_fallocate()
+  -> hugetlbfs_punch_hole()           <- Takes i_mmap_rwsem(write lock)!
+    -> hugetlbfs_zero_partial_page()
+     -> filemap_lock_hugetlb_folio()
+      -> filemap_lock_folio()
+        -> __filemap_get_folio        <- Waits for folio_lock!
+
+The migration path is the one taking locks in the wrong order according to
+the documentation at the top of mm/rmap.c.  So expand the scope of the
+existing i_mmap_lock to cover the calls to remove_migration_ptes() too.
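+
+Sketched from the diff below, the resulting order for shared (file-backed)
+hugetlb folios becomes:
+
+    i_mmap_lock_write(mapping);
+    try_to_migrate(src, TTU_RMAP_LOCKED);
+    ...
+    remove_migration_ptes(src, dst, true);  /* rmap walk under the lock */
+    i_mmap_unlock_write(mapping);           /* only dropped afterwards */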
+
+This is (mostly) how it used to be after commit c0d0381ade79.  That was
+removed by 336bf30eb765 for both file & anon hugetlb pages when it should
+only have been removed for anon hugetlb pages.
+
+Link: https://lkml.kernel.org/r/20260109041345.3863089-2-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Fixes: 336bf30eb765 ("hugetlbfs: fix anon huge page migration race")
+Reported-by: syzbot+2d9c96466c978346b55f@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/all/68e9715a.050a0220.1186a4.000d.GAE@google.com
+Debugged-by: Lance Yang <lance.yang@linux.dev>
+Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: Jann Horn <jannh@google.com>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Ying Huang <ying.huang@linux.alibaba.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1357,6 +1357,7 @@ static int unmap_and_move_huge_page(new_
+       struct page *new_hpage;
+       struct anon_vma *anon_vma = NULL;
+       struct address_space *mapping = NULL;
++      enum ttu_flags ttu = 0;
+       /*
+        * Migratability of hugepages depends on architectures and their size.
+@@ -1409,8 +1410,6 @@ static int unmap_and_move_huge_page(new_
+               goto put_anon;
+       if (folio_mapped(src)) {
+-              enum ttu_flags ttu = 0;
+-
+               if (!folio_test_anon(src)) {
+                       /*
+                        * In shared mappings, try_to_unmap could potentially
+@@ -1427,9 +1426,6 @@ static int unmap_and_move_huge_page(new_
+               try_to_migrate(src, ttu);
+               page_was_mapped = 1;
+-
+-              if (ttu & TTU_RMAP_LOCKED)
+-                      i_mmap_unlock_write(mapping);
+       }
+       if (!folio_mapped(src))
+@@ -1437,7 +1433,11 @@ static int unmap_and_move_huge_page(new_
+       if (page_was_mapped)
+               remove_migration_ptes(src,
+-                      rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
++                      rc == MIGRATEPAGE_SUCCESS ? dst : src,
++                              ttu ? true : false);
++
++      if (ttu & TTU_RMAP_LOCKED)
++              i_mmap_unlock_write(mapping);
+ unlock_put_anon:
+       folio_unlock(dst);
diff --git a/queue-6.1/mm-damon-sysfs-scheme-cleanup-access_pattern-subdirs-on-scheme-dir-setup-failure.patch b/queue-6.1/mm-damon-sysfs-scheme-cleanup-access_pattern-subdirs-on-scheme-dir-setup-failure.patch
new file mode 100644 (file)
index 0000000..b7ca035
--- /dev/null
@@ -0,0 +1,49 @@
+From 392b3d9d595f34877dd745b470c711e8ebcd225c Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Wed, 24 Dec 2025 18:30:37 -0800
+Subject: mm/damon/sysfs-scheme: cleanup access_pattern subdirs on scheme dir setup failure
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 392b3d9d595f34877dd745b470c711e8ebcd225c upstream.
+
+When a DAMOS-scheme DAMON sysfs directory setup fails after setup of the
+access_pattern/ directory, the subdirectories of the access_pattern/
+directory are not cleaned up.  As a result, the DAMON sysfs interface is
+nearly broken until the system reboots, and the memory for the unremoved
+directory is leaked.
+
+Clean up the directories under such failures.
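+
+The underlying pattern (a sketch, not the full function): kobject_put()
+on the access_pattern/ kobject alone does not release the child kobjects
+that its subdirectory setup created, so the paired *_rm_dirs() call must
+run first:
+
+    /* leaks: children of access_pattern/ keep their kobjects */
+    kobject_put(&scheme->access_pattern->kobj);
+
+    /* fixed: drop the children, then the parent */
+    damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
+    kobject_put(&scheme->access_pattern->kobj);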
+
+Link: https://lkml.kernel.org/r/20251225023043.18579-5-sj@kernel.org
+Fixes: 9bbb820a5bd5 ("mm/damon/sysfs: support DAMOS quotas")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: chongjiapeng <jiapeng.chong@linux.alibaba.com>
+Cc: <stable@vger.kernel.org> # 5.18.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -856,7 +856,7 @@ static int damon_sysfs_scheme_add_dirs(s
+               return err;
+       err = damon_sysfs_scheme_set_quotas(scheme);
+       if (err)
+-              goto put_access_pattern_out;
++              goto rmdir_put_access_pattern_out;
+       err = damon_sysfs_scheme_set_watermarks(scheme);
+       if (err)
+               goto rmdir_put_quotas_access_pattern_out;
+@@ -872,7 +872,8 @@ rmdir_put_quotas_access_pattern_out:
+       damon_sysfs_quotas_rm_dirs(scheme->quotas);
+       kobject_put(&scheme->quotas->kobj);
+       scheme->quotas = NULL;
+-put_access_pattern_out:
++rmdir_put_access_pattern_out:
++      damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
+       kobject_put(&scheme->access_pattern->kobj);
+       scheme->access_pattern = NULL;
+       return err;
diff --git a/queue-6.1/mm-damon-sysfs-scheme-cleanup-quotas-subdirs-on-scheme-dir-setup-failure.patch b/queue-6.1/mm-damon-sysfs-scheme-cleanup-quotas-subdirs-on-scheme-dir-setup-failure.patch
new file mode 100644 (file)
index 0000000..ea66fa4
--- /dev/null
@@ -0,0 +1,49 @@
+From dc7e1d75fd8c505096d0cddeca9e2efb2b55aaf9 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Wed, 24 Dec 2025 18:30:36 -0800
+Subject: mm/damon/sysfs-scheme: cleanup quotas subdirs on scheme dir setup failure
+
+From: SeongJae Park <sj@kernel.org>
+
+commit dc7e1d75fd8c505096d0cddeca9e2efb2b55aaf9 upstream.
+
+When a DAMOS-scheme DAMON sysfs directory setup fails after setup of the
+quotas/ directory, the subdirectories of the quotas/ directory are not
+cleaned up.  As a result, the DAMON sysfs interface is nearly broken
+until the system reboots, and the memory for the unremoved directory is
+leaked.
+
+Clean up the directories under such failures.
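+
+A sketch of the resulting unwind ladder in damon_sysfs_scheme_add_dirs(),
+condensed from the diff below:
+
+    err = damon_sysfs_scheme_set_watermarks(scheme);
+    if (err)
+            goto rmdir_put_quotas_access_pattern_out;
+    ...
+    rmdir_put_quotas_access_pattern_out:
+            damon_sysfs_quotas_rm_dirs(scheme->quotas);
+            kobject_put(&scheme->quotas->kobj);
+            scheme->quotas = NULL;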
+
+Link: https://lkml.kernel.org/r/20251225023043.18579-4-sj@kernel.org
+Fixes: 1b32234ab087 ("mm/damon/sysfs: support DAMOS watermarks")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: chongjiapeng <jiapeng.chong@linux.alibaba.com>
+Cc: <stable@vger.kernel.org> # 5.18.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -859,7 +859,7 @@ static int damon_sysfs_scheme_add_dirs(s
+               goto put_access_pattern_out;
+       err = damon_sysfs_scheme_set_watermarks(scheme);
+       if (err)
+-              goto put_quotas_access_pattern_out;
++              goto rmdir_put_quotas_access_pattern_out;
+       err = damon_sysfs_scheme_set_stats(scheme);
+       if (err)
+               goto put_watermarks_quotas_access_pattern_out;
+@@ -868,7 +868,8 @@ static int damon_sysfs_scheme_add_dirs(s
+ put_watermarks_quotas_access_pattern_out:
+       kobject_put(&scheme->watermarks->kobj);
+       scheme->watermarks = NULL;
+-put_quotas_access_pattern_out:
++rmdir_put_quotas_access_pattern_out:
++      damon_sysfs_quotas_rm_dirs(scheme->quotas);
+       kobject_put(&scheme->quotas->kobj);
+       scheme->quotas = NULL;
+ put_access_pattern_out:
diff --git a/queue-6.1/series b/queue-6.1/series
index d1948160b5aa405bf3087026277661d472a6ed44..11c0a12a4c6ea8c0dfea147e61d625c6010835e5 100644 (file)
--- a/queue-6.1/series
@@ -180,3 +180,8 @@ can-ems_usb-ems_usb_read_bulk_callback-fix-urb-memory-leak.patch
 can-kvaser_usb-kvaser_usb_read_bulk_callback-fix-urb-memory-leak.patch
 can-mcba_usb-mcba_usb_read_bulk_callback-fix-urb-memory-leak.patch
 can-usb_8dev-usb_8dev_read_bulk_callback-fix-urb-memory-leak.patch
+migrate-correct-lock-ordering-for-hugetlb-file-folios.patch
+bpf-do-not-let-bpf-test-infra-emit-invalid-gso-types-to-stack.patch
+bpf-reject-narrower-access-to-pointer-ctx-fields.patch
+mm-damon-sysfs-scheme-cleanup-quotas-subdirs-on-scheme-dir-setup-failure.patch
+mm-damon-sysfs-scheme-cleanup-access_pattern-subdirs-on-scheme-dir-setup-failure.patch