git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Jan 2024 10:35:05 +0000 (11:35 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Jan 2024 10:35:05 +0000 (11:35 +0100)
added patches:
dm-integrity-don-t-modify-bio-s-immutable-bio_vec-in-integrity_metadata.patch
netfilter-nf_tables-skip-set-commit-for-deleted-destroyed-sets.patch
tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch

queue-5.10/dm-integrity-don-t-modify-bio-s-immutable-bio_vec-in-integrity_metadata.patch [new file with mode: 0644]
queue-5.10/netfilter-nf_tables-skip-set-commit-for-deleted-destroyed-sets.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch [new file with mode: 0644]

diff --git a/queue-5.10/dm-integrity-don-t-modify-bio-s-immutable-bio_vec-in-integrity_metadata.patch b/queue-5.10/dm-integrity-don-t-modify-bio-s-immutable-bio_vec-in-integrity_metadata.patch
new file mode 100644 (file)
index 0000000..61a9d0e
--- /dev/null
@@ -0,0 +1,66 @@
+From b86f4b790c998afdbc88fe1aa55cfe89c4068726 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 5 Dec 2023 16:39:16 +0100
+Subject: dm-integrity: don't modify bio's immutable bio_vec in integrity_metadata()
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit b86f4b790c998afdbc88fe1aa55cfe89c4068726 upstream.
+
+__bio_for_each_segment assumes that the first struct bio_vec argument
+doesn't change - it calls "bio_advance_iter_single((bio), &(iter),
+(bvl).bv_len)" to advance the iterator. Unfortunately, the dm-integrity
+code changes the bio_vec with "bv.bv_len -= pos". When this code path
+is taken, the iterator would be out of sync and dm-integrity would
+report errors. This happens if the machine is out of memory and
+"kmalloc" fails.
+
+Fix this bug by making a copy of "bv" and changing the copy instead.
+
+Fixes: 7eada909bfd7 ("dm: add integrity target")
+Cc: stable@vger.kernel.org     # v4.12+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-integrity.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1657,11 +1657,12 @@ static void integrity_metadata(struct wo
+               sectors_to_process = dio->range.n_sectors;
+               __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++                      struct bio_vec bv_copy = bv;
+                       unsigned pos;
+                       char *mem, *checksums_ptr;
+ again:
+-                      mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
++                      mem = (char *)kmap_atomic(bv_copy.bv_page) + bv_copy.bv_offset;
+                       pos = 0;
+                       checksums_ptr = checksums;
+                       do {
+@@ -1670,7 +1671,7 @@ again:
+                               sectors_to_process -= ic->sectors_per_block;
+                               pos += ic->sectors_per_block << SECTOR_SHIFT;
+                               sector += ic->sectors_per_block;
+-                      } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
++                      } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
+                       kunmap_atomic(mem);
+                       r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+@@ -1691,9 +1692,9 @@ again:
+                       if (!sectors_to_process)
+                               break;
+-                      if (unlikely(pos < bv.bv_len)) {
+-                              bv.bv_offset += pos;
+-                              bv.bv_len -= pos;
++                      if (unlikely(pos < bv_copy.bv_len)) {
++                              bv_copy.bv_offset += pos;
++                              bv_copy.bv_len -= pos;
+                               goto again;
+                       }
+               }
diff --git a/queue-5.10/netfilter-nf_tables-skip-set-commit-for-deleted-destroyed-sets.patch b/queue-5.10/netfilter-nf_tables-skip-set-commit-for-deleted-destroyed-sets.patch
new file mode 100644 (file)
index 0000000..f72e7f7
--- /dev/null
@@ -0,0 +1,33 @@
+From 7315dc1e122c85ffdfc8defffbb8f8b616c2eb1a Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 19 Dec 2023 19:44:49 +0100
+Subject: netfilter: nf_tables: skip set commit for deleted/destroyed sets
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 7315dc1e122c85ffdfc8defffbb8f8b616c2eb1a upstream.
+
+NFT_MSG_DELSET deactivates all elements in the set, skip
+set->ops->commit() to avoid the unnecessary clone (for the pipapo case)
+as well as the sync GC cycle, which could deactivate again expired
+elements in such set.
+
+Fixes: 5f68718b34a5 ("netfilter: nf_tables: GC transaction API to avoid race with control plane")
+Reported-by: Kevin Rich <kevinrich1337@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8377,7 +8377,7 @@ static void nft_set_commit_update(struct
+       list_for_each_entry_safe(set, next, set_update_list, pending_update) {
+               list_del_init(&set->pending_update);
+-              if (!set->ops->commit)
++              if (!set->ops->commit || set->dead)
+                       continue;
+               set->ops->commit(set);
index 2aee73b2491a38001ccefc4899189c264a519060..bd240796ec224fff4ff657f1b5a463acbf2f6a5b 100644 (file)
@@ -67,3 +67,6 @@ scsi-core-use-a-structure-member-to-track-the-scsi-c.patch
 scsi-core-always-send-batch-on-reset-or-error-handli.patch
 ring-buffer-fix-wake-ups-when-buffer_percent-is-set-to-100.patch
 tracing-fix-blocked-reader-of-snapshot-buffer.patch
+netfilter-nf_tables-skip-set-commit-for-deleted-destroyed-sets.patch
+dm-integrity-don-t-modify-bio-s-immutable-bio_vec-in-integrity_metadata.patch
+tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch
diff --git a/queue-5.10/tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch b/queue-5.10/tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch
new file mode 100644 (file)
index 0000000..5275187
--- /dev/null
@@ -0,0 +1,149 @@
+From b022f0c7e404887a7c5229788fc99eff9f9a80d5 Mon Sep 17 00:00:00 2001
+From: Francis Laniel <flaniel@linux.microsoft.com>
+Date: Fri, 20 Oct 2023 13:42:49 +0300
+Subject: tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols
+
+From: Francis Laniel <flaniel@linux.microsoft.com>
+
+commit b022f0c7e404887a7c5229788fc99eff9f9a80d5 upstream.
+
+When a kprobe is attached to a function whose name is not unique (is
+static and shares the name with other functions in the kernel), the
+kprobe is attached to the first function it finds. This is a bug as the
+function that it is attaching to is not necessarily the one that the
+user wants to attach to.
+
+Instead of blindly picking a function to attach to when the name is
+ambiguous, error with EADDRNOTAVAIL to let the user know that this function is not
+unique, and that the user must use another unique function with an
+address offset to get to the function they want to attach to.
+
+Link: https://lore.kernel.org/all/20231020104250.9537-2-flaniel@linux.microsoft.com/
+
+Cc: stable@vger.kernel.org
+Fixes: 413d37d1eb69 ("tracing: Add kprobe-based event tracer")
+Suggested-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Francis Laniel <flaniel@linux.microsoft.com>
+Link: https://lore.kernel.org/lkml/20230819101105.b0c104ae4494a7d1f2eea742@kernel.org/
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_kprobe.c |   74 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/trace/trace_probe.h  |    1 
+ 2 files changed, 75 insertions(+)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -715,6 +715,36 @@ static inline void sanitize_event_name(c
+                       *name = '_';
+ }
++struct count_symbols_struct {
++      const char *func_name;
++      unsigned int count;
++};
++
++static int count_symbols(void *data, const char *name, struct module *unused0,
++                       unsigned long unused1)
++{
++      struct count_symbols_struct *args = data;
++
++      if (strcmp(args->func_name, name))
++              return 0;
++
++      args->count++;
++
++      return 0;
++}
++
++static unsigned int number_of_same_symbols(char *func_name)
++{
++      struct count_symbols_struct args = {
++              .func_name = func_name,
++              .count = 0,
++      };
++
++      kallsyms_on_each_symbol(count_symbols, &args);
++
++      return args.count;
++}
++
+ static int trace_kprobe_create(int argc, const char *argv[])
+ {
+       /*
+@@ -842,6 +872,31 @@ static int trace_kprobe_create(int argc,
+               }
+       }
++      if (symbol && !strchr(symbol, ':')) {
++              unsigned int count;
++
++              count = number_of_same_symbols(symbol);
++              if (count > 1) {
++                      /*
++                       * Users should use ADDR to remove the ambiguity of
++                       * using KSYM only.
++                       */
++                      trace_probe_log_err(0, NON_UNIQ_SYMBOL);
++                      ret = -EADDRNOTAVAIL;
++
++                      goto error;
++              } else if (count == 0) {
++                      /*
++                       * We can return ENOENT earlier than when register the
++                       * kprobe.
++                       */
++                      trace_probe_log_err(0, BAD_PROBE_ADDR);
++                      ret = -ENOENT;
++
++                      goto error;
++              }
++      }
++
+       trace_probe_log_set_index(0);
+       if (event) {
+               ret = traceprobe_parse_event_name(&event, &group, buf,
+@@ -1805,6 +1860,7 @@ static int unregister_kprobe_event(struc
+ }
+ #ifdef CONFIG_PERF_EVENTS
++
+ /* create a trace_kprobe, but don't add it to global lists */
+ struct trace_event_call *
+ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+@@ -1814,6 +1870,24 @@ create_local_trace_kprobe(char *func, vo
+       int ret;
+       char *event;
++      if (func) {
++              unsigned int count;
++
++              count = number_of_same_symbols(func);
++              if (count > 1)
++                      /*
++                       * Users should use addr to remove the ambiguity of
++                       * using func only.
++                       */
++                      return ERR_PTR(-EADDRNOTAVAIL);
++              else if (count == 0)
++                      /*
++                       * We can return ENOENT earlier than when register the
++                       * kprobe.
++                       */
++                      return ERR_PTR(-ENOENT);
++      }
++
+       /*
+        * local trace_kprobes are not added to dyn_event, so they are never
+        * searched in find_trace_kprobe(). Therefore, there is no concern of
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -390,6 +390,7 @@ extern int traceprobe_define_arg_fields(
+       C(BAD_MAXACT,           "Invalid maxactive number"),            \
+       C(MAXACT_TOO_BIG,       "Maxactive is too big"),                \
+       C(BAD_PROBE_ADDR,       "Invalid probed address or symbol"),    \
++      C(NON_UNIQ_SYMBOL,      "The symbol is not unique"),            \
+       C(BAD_RETPROBE,         "Retprobe address must be an function entry"), \
+       C(BAD_ADDR_SUFFIX,      "Invalid probed address suffix"), \
+       C(NO_GROUP_NAME,        "Group name is not specified"),         \