git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
authorSasha Levin <sashal@kernel.org>
Tue, 18 Feb 2025 12:30:07 +0000 (07:30 -0500)
committerSasha Levin <sashal@kernel.org>
Tue, 18 Feb 2025 12:31:35 +0000 (07:31 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
17 files changed:
queue-5.10/arp-use-rcu-protection-in-arp_xmit.patch [new file with mode: 0644]
queue-5.10/clocksource-limit-number-of-cpus-checked-for-clock-s.patch [new file with mode: 0644]
queue-5.10/clocksource-replace-cpumask_weight-with-cpumask_empt.patch [new file with mode: 0644]
queue-5.10/clocksource-replace-deprecated-cpu-hotplug-functions.patch [new file with mode: 0644]
queue-5.10/clocksource-use-migrate_disable-to-avoid-calling-get.patch [new file with mode: 0644]
queue-5.10/clocksource-use-pr_info-for-checking-clocksource-syn.patch [new file with mode: 0644]
queue-5.10/ipv4-use-rcu-protection-in-inet_select_addr.patch [new file with mode: 0644]
queue-5.10/ipv4-use-rcu-protection-in-rt_is_expired.patch [new file with mode: 0644]
queue-5.10/ipv6-use-rcu-protection-in-ip6_default_advmss.patch [new file with mode: 0644]
queue-5.10/ndisc-extend-rcu-protection-in-ndisc_send_skb.patch [new file with mode: 0644]
queue-5.10/ndisc-use-rcu-protection-in-ndisc_alloc_skb.patch [new file with mode: 0644]
queue-5.10/neighbour-delete-redundant-judgment-statements.patch [new file with mode: 0644]
queue-5.10/neighbour-use-rcu-protection-in-__neigh_notify.patch [new file with mode: 0644]
queue-5.10/net-add-dev_net_rcu-helper.patch [new file with mode: 0644]
queue-5.10/net-treat-possible_net_t-net-pointer-as-an-rcu-one-a.patch [new file with mode: 0644]
queue-5.10/openvswitch-use-rcu-protection-in-ovs_vport_cmd_fill.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arp-use-rcu-protection-in-arp_xmit.patch b/queue-5.10/arp-use-rcu-protection-in-arp_xmit.patch
new file mode 100644 (file)
index 0000000..e51742f
--- /dev/null
@@ -0,0 +1,45 @@
+From eb9f44ebc916fc9f9f0b35ed3ef5389b9baa9640 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2025 13:58:36 +0000
+Subject: arp: use RCU protection in arp_xmit()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a42b69f692165ec39db42d595f4f65a4c8f42e44 ]
+
+arp_xmit() can be called without RTNL or RCU protection.
+
+Use RCU protection to avoid potential UAF.
+
+Fixes: 29a26a568038 ("netfilter: Pass struct net into the netfilter hooks")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250207135841.1948589-5-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/arp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 8ae9bd6f91c19..6879e0b70c769 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -637,10 +637,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+  */
+ void arp_xmit(struct sk_buff *skb)
+ {
++      rcu_read_lock();
+       /* Send it off, maybe filter it using firewalling first.  */
+       NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
+-              dev_net(skb->dev), NULL, skb, NULL, skb->dev,
++              dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
+               arp_xmit_finish);
++      rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(arp_xmit);
+-- 
+2.39.5
+
diff --git a/queue-5.10/clocksource-limit-number-of-cpus-checked-for-clock-s.patch b/queue-5.10/clocksource-limit-number-of-cpus-checked-for-clock-s.patch
new file mode 100644 (file)
index 0000000..8b11505
--- /dev/null
@@ -0,0 +1,181 @@
+From f5eb3280b28a9c15bd5f564e965eeb73a40819f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 May 2021 12:01:21 -0700
+Subject: clocksource: Limit number of CPUs checked for clock synchronization
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+[ Upstream commit fa218f1cce6ba40069c8daab8821de7e6be1cdd0 ]
+
+Currently, if skew is detected on a clock marked CLOCK_SOURCE_VERIFY_PERCPU,
+that clock is checked on all CPUs.  This is thorough, but might not be
+what you want on a system with a few tens of CPUs, let alone a few hundred
+of them.
+
+Therefore, by default check only up to eight randomly chosen CPUs.  Also
+provide a new clocksource.verify_n_cpus kernel boot parameter.  A value of
+-1 says to check all of the CPUs, and a non-negative value says to randomly
+select that number of CPUs, without concern about selecting the same CPU
+multiple times.  However, make use of a cpumask so that a given CPU will be
+checked at most once.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de> # For verify_n_cpus=1.
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Feng Tang <feng.tang@intel.com>
+Link: https://lore.kernel.org/r/20210527190124.440372-3-paulmck@kernel.org
+Stable-dep-of: 6bb05a33337b ("clocksource: Use migrate_disable() to avoid calling get_random_u32() in atomic context")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../admin-guide/kernel-parameters.txt         | 10 +++
+ kernel/time/clocksource.c                     | 74 ++++++++++++++++++-
+ 2 files changed, 82 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 097ef49b3d3a0..88110e74b3f7a 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -583,6 +583,16 @@
+                       unstable.  Defaults to three retries, that is,
+                       four attempts to read the clock under test.
++      clocksource.verify_n_cpus= [KNL]
++                      Limit the number of CPUs checked for clocksources
++                      marked with CLOCK_SOURCE_VERIFY_PERCPU that
++                      are marked unstable due to excessive skew.
++                      A negative value says to check all CPUs, while
++                      zero says not to check any.  Values larger than
++                      nr_cpu_ids are silently truncated to nr_cpu_ids.
++                      The actual CPUs are chosen randomly, with
++                      no replacement if the same CPU is chosen twice.
++
+       clearcpuid=BITNUM[,BITNUM...] [X86]
+                       Disable CPUID feature X for the kernel. See
+                       arch/x86/include/asm/cpufeatures.h for the valid bit
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 754e93edb2f79..00cf99cb74496 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -14,6 +14,8 @@
+ #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
+ #include <linux/tick.h>
+ #include <linux/kthread.h>
++#include <linux/prandom.h>
++#include <linux/cpu.h>
+ #include "tick-internal.h"
+ #include "timekeeping_internal.h"
+@@ -201,6 +203,8 @@ void clocksource_mark_unstable(struct clocksource *cs)
+ static ulong max_cswd_read_retries = 3;
+ module_param(max_cswd_read_retries, ulong, 0644);
++static int verify_n_cpus = 8;
++module_param(verify_n_cpus, int, 0644);
+ enum wd_read_status {
+       WD_READ_SUCCESS,
+@@ -263,6 +267,55 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ static u64 csnow_mid;
+ static cpumask_t cpus_ahead;
+ static cpumask_t cpus_behind;
++static cpumask_t cpus_chosen;
++
++static void clocksource_verify_choose_cpus(void)
++{
++      int cpu, i, n = verify_n_cpus;
++
++      if (n < 0) {
++              /* Check all of the CPUs. */
++              cpumask_copy(&cpus_chosen, cpu_online_mask);
++              cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
++              return;
++      }
++
++      /* If no checking desired, or no other CPU to check, leave. */
++      cpumask_clear(&cpus_chosen);
++      if (n == 0 || num_online_cpus() <= 1)
++              return;
++
++      /* Make sure to select at least one CPU other than the current CPU. */
++      cpu = cpumask_next(-1, cpu_online_mask);
++      if (cpu == smp_processor_id())
++              cpu = cpumask_next(cpu, cpu_online_mask);
++      if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
++              return;
++      cpumask_set_cpu(cpu, &cpus_chosen);
++
++      /* Force a sane value for the boot parameter. */
++      if (n > nr_cpu_ids)
++              n = nr_cpu_ids;
++
++      /*
++       * Randomly select the specified number of CPUs.  If the same
++       * CPU is selected multiple times, that CPU is checked only once,
++       * and no replacement CPU is selected.  This gracefully handles
++       * situations where verify_n_cpus is greater than the number of
++       * CPUs that are currently online.
++       */
++      for (i = 1; i < n; i++) {
++              cpu = prandom_u32() % nr_cpu_ids;
++              cpu = cpumask_next(cpu - 1, cpu_online_mask);
++              if (cpu >= nr_cpu_ids)
++                      cpu = cpumask_next(-1, cpu_online_mask);
++              if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
++                      cpumask_set_cpu(cpu, &cpus_chosen);
++      }
++
++      /* Don't verify ourselves. */
++      cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
++}
+ static void clocksource_verify_one_cpu(void *csin)
+ {
+@@ -278,12 +331,22 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+       int cpu, testcpu;
+       s64 delta;
++      if (verify_n_cpus == 0)
++              return;
+       cpumask_clear(&cpus_ahead);
+       cpumask_clear(&cpus_behind);
++      get_online_cpus();
+       preempt_disable();
++      clocksource_verify_choose_cpus();
++      if (cpumask_weight(&cpus_chosen) == 0) {
++              preempt_enable();
++              put_online_cpus();
++              pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
++              return;
++      }
+       testcpu = smp_processor_id();
+-      pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
+-      for_each_online_cpu(cpu) {
++      pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++      for_each_cpu(cpu, &cpus_chosen) {
+               if (cpu == testcpu)
+                       continue;
+               csnow_begin = cs->read(cs);
+@@ -303,6 +366,7 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+                       cs_nsec_min = cs_nsec;
+       }
+       preempt_enable();
++      put_online_cpus();
+       if (!cpumask_empty(&cpus_ahead))
+               pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
+                       cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
+@@ -427,6 +491,12 @@ static void clocksource_watchdog(struct timer_list *unused)
+                               watchdog->name, wdnow, wdlast, watchdog->mask);
+                       pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
+                               cs->name, csnow, cslast, cs->mask);
++                      if (curr_clocksource == cs)
++                              pr_warn("                      '%s' is current clocksource.\n", cs->name);
++                      else if (curr_clocksource)
++                              pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
++                      else
++                              pr_warn("                      No current clocksource.\n");
+                       __clocksource_unstable(cs);
+                       continue;
+               }
+-- 
+2.39.5
+
diff --git a/queue-5.10/clocksource-replace-cpumask_weight-with-cpumask_empt.patch b/queue-5.10/clocksource-replace-cpumask_weight-with-cpumask_empt.patch
new file mode 100644 (file)
index 0000000..f470873
--- /dev/null
@@ -0,0 +1,41 @@
+From 028bcb4c53e50bf2f5220ec90ec1bd37d8bfb8c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Feb 2022 14:49:07 -0800
+Subject: clocksource: Replace cpumask_weight() with cpumask_empty()
+
+From: Yury Norov <yury.norov@gmail.com>
+
+[ Upstream commit 8afbcaf8690dac19ebf570a4e4fef9c59c75bf8e ]
+
+clocksource_verify_percpu() calls cpumask_weight() to check if any bit of a
+given cpumask is set.
+
+This can be done more efficiently with cpumask_empty() because
+cpumask_empty() stops traversing the cpumask as soon as it finds first set
+bit, while cpumask_weight() counts all bits unconditionally.
+
+Signed-off-by: Yury Norov <yury.norov@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20220210224933.379149-24-yury.norov@gmail.com
+Stable-dep-of: 6bb05a33337b ("clocksource: Use migrate_disable() to avoid calling get_random_u32() in atomic context")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/clocksource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index e44fb1e12a281..658b90755dd72 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -338,7 +338,7 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+       cpus_read_lock();
+       preempt_disable();
+       clocksource_verify_choose_cpus();
+-      if (cpumask_weight(&cpus_chosen) == 0) {
++      if (cpumask_empty(&cpus_chosen)) {
+               preempt_enable();
+               cpus_read_unlock();
+               pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
+-- 
+2.39.5
+
diff --git a/queue-5.10/clocksource-replace-deprecated-cpu-hotplug-functions.patch b/queue-5.10/clocksource-replace-deprecated-cpu-hotplug-functions.patch
new file mode 100644 (file)
index 0000000..3285420
--- /dev/null
@@ -0,0 +1,56 @@
+From 98b4e8f388ccf23d0a9370c36b626ba2fcf370fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Aug 2021 16:16:17 +0200
+Subject: clocksource: Replace deprecated CPU-hotplug functions.
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit 698429f9d0e54ce3964151adff886ee5fc59714b ]
+
+The functions get_online_cpus() and put_online_cpus() have been
+deprecated during the CPU hotplug rework. They map directly to
+cpus_read_lock() and cpus_read_unlock().
+
+Replace deprecated CPU-hotplug functions with the official version.
+The behavior remains unchanged.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20210803141621.780504-35-bigeasy@linutronix.de
+Stable-dep-of: 6bb05a33337b ("clocksource: Use migrate_disable() to avoid calling get_random_u32() in atomic context")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/clocksource.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 00cf99cb74496..e44fb1e12a281 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -335,12 +335,12 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+               return;
+       cpumask_clear(&cpus_ahead);
+       cpumask_clear(&cpus_behind);
+-      get_online_cpus();
++      cpus_read_lock();
+       preempt_disable();
+       clocksource_verify_choose_cpus();
+       if (cpumask_weight(&cpus_chosen) == 0) {
+               preempt_enable();
+-              put_online_cpus();
++              cpus_read_unlock();
+               pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
+               return;
+       }
+@@ -366,7 +366,7 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+                       cs_nsec_min = cs_nsec;
+       }
+       preempt_enable();
+-      put_online_cpus();
++      cpus_read_unlock();
+       if (!cpumask_empty(&cpus_ahead))
+               pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
+                       cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
+-- 
+2.39.5
+
diff --git a/queue-5.10/clocksource-use-migrate_disable-to-avoid-calling-get.patch b/queue-5.10/clocksource-use-migrate_disable-to-avoid-calling-get.patch
new file mode 100644 (file)
index 0000000..edaa06d
--- /dev/null
@@ -0,0 +1,82 @@
+From cee6c63c91354905fc0c170534e8186978b2dd4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jan 2025 12:33:23 -0500
+Subject: clocksource: Use migrate_disable() to avoid calling get_random_u32()
+ in atomic context
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 6bb05a33337b2c842373857b63de5c9bf1ae2a09 ]
+
+The following bug report happened with a PREEMPT_RT kernel:
+
+  BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48
+  in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 2012, name: kwatchdog
+  preempt_count: 1, expected: 0
+  RCU nest depth: 0, expected: 0
+  get_random_u32+0x4f/0x110
+  clocksource_verify_choose_cpus+0xab/0x1a0
+  clocksource_verify_percpu.part.0+0x6b/0x330
+  clocksource_watchdog_kthread+0x193/0x1a0
+
+It is due to the fact that clocksource_verify_choose_cpus() is invoked with
+preemption disabled.  This function invokes get_random_u32() to obtain
+random numbers for choosing CPUs.  The batched_entropy_32 local lock and/or
+the base_crng.lock spinlock in driver/char/random.c will be acquired during
+the call. In PREEMPT_RT kernel, they are both sleeping locks and so cannot
+be acquired in atomic context.
+
+Fix this problem by using migrate_disable() to allow smp_processor_id() to
+be reliably used without introducing atomic context. preempt_disable() is
+then called after clocksource_verify_choose_cpus() but before the
+clocksource measurement is being run to avoid introducing unexpected
+latency.
+
+Fixes: 7560c02bdffb ("clocksource: Check per-CPU clock synchronization when marked unstable")
+Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/all/20250131173323.891943-2-longman@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/clocksource.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 21dfee6c0d936..b22508c5d2d96 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -336,10 +336,10 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+       cpumask_clear(&cpus_ahead);
+       cpumask_clear(&cpus_behind);
+       cpus_read_lock();
+-      preempt_disable();
++      migrate_disable();
+       clocksource_verify_choose_cpus();
+       if (cpumask_empty(&cpus_chosen)) {
+-              preempt_enable();
++              migrate_enable();
+               cpus_read_unlock();
+               pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
+               return;
+@@ -347,6 +347,7 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+       testcpu = smp_processor_id();
+       pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
+               cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++      preempt_disable();
+       for_each_cpu(cpu, &cpus_chosen) {
+               if (cpu == testcpu)
+                       continue;
+@@ -367,6 +368,7 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+                       cs_nsec_min = cs_nsec;
+       }
+       preempt_enable();
++      migrate_enable();
+       cpus_read_unlock();
+       if (!cpumask_empty(&cpus_ahead))
+               pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
+-- 
+2.39.5
+
diff --git a/queue-5.10/clocksource-use-pr_info-for-checking-clocksource-syn.patch b/queue-5.10/clocksource-use-pr_info-for-checking-clocksource-syn.patch
new file mode 100644 (file)
index 0000000..cd21d00
--- /dev/null
@@ -0,0 +1,45 @@
+From cf6ecf2af39279a206f5c775ad78955648a89c4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Jan 2025 20:54:41 -0500
+Subject: clocksource: Use pr_info() for "Checking clocksource synchronization"
+ message
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 1f566840a82982141f94086061927a90e79440e5 ]
+
+The "Checking clocksource synchronization" message is normally printed
+when clocksource_verify_percpu() is called for a given clocksource if
+both the CLOCK_SOURCE_UNSTABLE and CLOCK_SOURCE_VERIFY_PERCPU flags
+are set.
+
+It is an informational message and so pr_info() is the correct choice.
+
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
+Acked-by: John Stultz <jstultz@google.com>
+Link: https://lore.kernel.org/all/20250125015442.3740588-1-longman@redhat.com
+Stable-dep-of: 6bb05a33337b ("clocksource: Use migrate_disable() to avoid calling get_random_u32() in atomic context")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/clocksource.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 658b90755dd72..21dfee6c0d936 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -345,7 +345,8 @@ static void clocksource_verify_percpu(struct clocksource *cs)
+               return;
+       }
+       testcpu = smp_processor_id();
+-      pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++      pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
++              cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
+       for_each_cpu(cpu, &cpus_chosen) {
+               if (cpu == testcpu)
+                       continue;
+-- 
+2.39.5
+
diff --git a/queue-5.10/ipv4-use-rcu-protection-in-inet_select_addr.patch b/queue-5.10/ipv4-use-rcu-protection-in-inet_select_addr.patch
new file mode 100644 (file)
index 0000000..8efb401
--- /dev/null
@@ -0,0 +1,41 @@
+From db66daa3c29963a64cd6e3b48b15a396f168d24f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 15:51:14 +0000
+Subject: ipv4: use RCU protection in inet_select_addr()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 719817cd293e4fa389e1f69c396f3f816ed5aa41 ]
+
+inet_select_addr() must use RCU protection to make
+sure the net structure it reads does not disappear.
+
+Fixes: c4544c724322 ("[NETNS]: Process inet_select_addr inside a namespace.")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250205155120.1676781-7-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/devinet.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 6918b3ced6713..2dc94109fc0ea 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1317,10 +1317,11 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
+       __be32 addr = 0;
+       unsigned char localnet_scope = RT_SCOPE_HOST;
+       struct in_device *in_dev;
+-      struct net *net = dev_net(dev);
++      struct net *net;
+       int master_idx;
+       rcu_read_lock();
++      net = dev_net_rcu(dev);
+       in_dev = __in_dev_get_rcu(dev);
+       if (!in_dev)
+               goto no_in_dev;
+-- 
+2.39.5
+
diff --git a/queue-5.10/ipv4-use-rcu-protection-in-rt_is_expired.patch b/queue-5.10/ipv4-use-rcu-protection-in-rt_is_expired.patch
new file mode 100644 (file)
index 0000000..54d1a6a
--- /dev/null
@@ -0,0 +1,44 @@
+From e3ed566b4ce73eef049096743cbb6f3f1a85300b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 15:51:13 +0000
+Subject: ipv4: use RCU protection in rt_is_expired()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit dd205fcc33d92d54eee4d7f21bb073af9bd5ce2b ]
+
+rt_is_expired() must use RCU protection to make
+sure the net structure it reads does not disappear.
+
+Fixes: e84f84f27647 ("netns: place rt_genid into struct net")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250205155120.1676781-6-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/route.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c34386a9d99b4..a2a7f2597e201 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -423,7 +423,13 @@ static inline int ip_rt_proc_init(void)
+ static inline bool rt_is_expired(const struct rtable *rth)
+ {
+-      return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
++      bool res;
++
++      rcu_read_lock();
++      res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
++      rcu_read_unlock();
++
++      return res;
+ }
+ void rt_cache_flush(struct net *net)
+-- 
+2.39.5
+
diff --git a/queue-5.10/ipv6-use-rcu-protection-in-ip6_default_advmss.patch b/queue-5.10/ipv6-use-rcu-protection-in-ip6_default_advmss.patch
new file mode 100644 (file)
index 0000000..7c0c9e4
--- /dev/null
@@ -0,0 +1,49 @@
+From eced208669162f9d5107e67e61773db341184840 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 15:51:18 +0000
+Subject: ipv6: use RCU protection in ip6_default_advmss()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 3c8ffcd248da34fc41e52a46e51505900115fc2a ]
+
+ip6_default_advmss() needs rcu protection to make
+sure the net structure it reads does not disappear.
+
+Fixes: 5578689a4e3c ("[NETNS][IPV6] route6 - make route6 per namespace")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250205155120.1676781-11-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index d7d600cb15a8d..178c56f6f6185 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3056,13 +3056,18 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
+ {
+       struct net_device *dev = dst->dev;
+       unsigned int mtu = dst_mtu(dst);
+-      struct net *net = dev_net(dev);
++      struct net *net;
+       mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
++      rcu_read_lock();
++
++      net = dev_net_rcu(dev);
+       if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
+               mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
++      rcu_read_unlock();
++
+       /*
+        * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+        * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+-- 
+2.39.5
+
diff --git a/queue-5.10/ndisc-extend-rcu-protection-in-ndisc_send_skb.patch b/queue-5.10/ndisc-extend-rcu-protection-in-ndisc_send_skb.patch
new file mode 100644 (file)
index 0000000..77554f5
--- /dev/null
@@ -0,0 +1,72 @@
+From 82bcfde0cf8cc019e342ae70af2658d081a85c68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2025 13:58:39 +0000
+Subject: ndisc: extend RCU protection in ndisc_send_skb()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ed6ae1f325d3c43966ec1b62ac1459e2b8e45640 ]
+
+ndisc_send_skb() can be called without RTNL or RCU held.
+
+Acquire rcu_read_lock() earlier, so that we can use dev_net_rcu()
+and avoid a potential UAF.
+
+Fixes: 1762f7e88eb3 ("[NETNS][IPV6] ndisc - make socket control per namespace")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250207135841.1948589-8-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ndisc.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 2361f4af49e8f..43ad4e5db5941 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -471,16 +471,20 @@ static void ndisc_send_skb(struct sk_buff *skb,
+                          const struct in6_addr *daddr,
+                          const struct in6_addr *saddr)
+ {
++      struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+       struct dst_entry *dst = skb_dst(skb);
+-      struct net *net = dev_net(skb->dev);
+-      struct sock *sk = net->ipv6.ndisc_sk;
+       struct inet6_dev *idev;
++      struct net *net;
++      struct sock *sk;
+       int err;
+-      struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+       u8 type;
+       type = icmp6h->icmp6_type;
++      rcu_read_lock();
++
++      net = dev_net_rcu(skb->dev);
++      sk = net->ipv6.ndisc_sk;
+       if (!dst) {
+               struct flowi6 fl6;
+               int oif = skb->dev->ifindex;
+@@ -488,6 +492,7 @@ static void ndisc_send_skb(struct sk_buff *skb,
+               icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
+               dst = icmp6_dst_alloc(skb->dev, &fl6);
+               if (IS_ERR(dst)) {
++                      rcu_read_unlock();
+                       kfree_skb(skb);
+                       return;
+               }
+@@ -502,7 +507,6 @@ static void ndisc_send_skb(struct sk_buff *skb,
+       ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len);
+-      rcu_read_lock();
+       idev = __in6_dev_get(dst->dev);
+       IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
+-- 
+2.39.5
+
diff --git a/queue-5.10/ndisc-use-rcu-protection-in-ndisc_alloc_skb.patch b/queue-5.10/ndisc-use-rcu-protection-in-ndisc_alloc_skb.patch
new file mode 100644 (file)
index 0000000..7b5793e
--- /dev/null
@@ -0,0 +1,59 @@
+From 446370a943c7eeec1de2a979b2e5ee602fa3a224 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2025 13:58:34 +0000
+Subject: ndisc: use RCU protection in ndisc_alloc_skb()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 628e6d18930bbd21f2d4562228afe27694f66da9 ]
+
+ndisc_alloc_skb() can be called without RTNL or RCU being held.
+
+Add RCU protection to avoid possible UAF.
+
+Fixes: de09334b9326 ("ndisc: Introduce ndisc_alloc_skb() helper.")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250207135841.1948589-3-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ndisc.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index c0a5552733177..2361f4af49e8f 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -417,15 +417,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
+ {
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
+-      struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
+       struct sk_buff *skb;
+       skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
+-      if (!skb) {
+-              ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+-                        __func__);
++      if (!skb)
+               return NULL;
+-      }
+       skb->protocol = htons(ETH_P_IPV6);
+       skb->dev = dev;
+@@ -436,7 +432,9 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
+       /* Manually assign socket ownership as we avoid calling
+        * sock_alloc_send_pskb() to bypass wmem buffer limits
+        */
+-      skb_set_owner_w(skb, sk);
++      rcu_read_lock();
++      skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk);
++      rcu_read_unlock();
+       return skb;
+ }
+-- 
+2.39.5
+
diff --git a/queue-5.10/neighbour-delete-redundant-judgment-statements.patch b/queue-5.10/neighbour-delete-redundant-judgment-statements.patch
new file mode 100644 (file)
index 0000000..0a1fbc0
--- /dev/null
@@ -0,0 +1,40 @@
+From 95371a97eba868d37c743e5da0618b586f175dd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 12:32:45 +0800
+Subject: neighbour: delete redundant judgment statements
+
+From: Li Zetao <lizetao1@huawei.com>
+
+[ Upstream commit c25bdd2ac8cf7da70a226f1a66cdce7af15ff86f ]
+
+The initial value of err is -ENOBUFS, and err is guaranteed to be
+less than 0 before all goto errout. Therefore, on the error path
+of errout, there is no need to repeatedly judge that err is less than 0,
+and delete redundant judgments to make the code more concise.
+
+Signed-off-by: Li Zetao <lizetao1@huawei.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: becbd5850c03 ("neighbour: use RCU protection in __neigh_notify()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/neighbour.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index c187eb951083b..bd017b220cfed 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -3387,8 +3387,7 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
+       rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+       return;
+ errout:
+-      if (err < 0)
+-              rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
++      rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ }
+ void neigh_app_ns(struct neighbour *n)
+-- 
+2.39.5
+
diff --git a/queue-5.10/neighbour-use-rcu-protection-in-__neigh_notify.patch b/queue-5.10/neighbour-use-rcu-protection-in-__neigh_notify.patch
new file mode 100644 (file)
index 0000000..76aefaf
--- /dev/null
@@ -0,0 +1,58 @@
+From 7bc12cbb59ca26276b9f9940c0273f7f7d85af5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2025 13:58:35 +0000
+Subject: neighbour: use RCU protection in __neigh_notify()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit becbd5850c03ed33b232083dd66c6e38c0c0e569 ]
+
+__neigh_notify() can be called without RTNL or RCU protection.
+
+Use RCU protection to avoid potential UAF.
+
+Fixes: 426b5303eb43 ("[NETNS]: Modify the neighbour table code so it handles multiple network namespaces")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250207135841.1948589-4-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/neighbour.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index bd017b220cfed..f04ba63e98515 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -3369,10 +3369,12 @@ static const struct seq_operations neigh_stat_seq_ops = {
+ static void __neigh_notify(struct neighbour *n, int type, int flags,
+                          u32 pid)
+ {
+-      struct net *net = dev_net(n->dev);
+       struct sk_buff *skb;
+       int err = -ENOBUFS;
++      struct net *net;
++      rcu_read_lock();
++      net = dev_net_rcu(n->dev);
+       skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
+       if (skb == NULL)
+               goto errout;
+@@ -3385,9 +3387,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
+               goto errout;
+       }
+       rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+-      return;
++      goto out;
+ errout:
+       rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
++out:
++      rcu_read_unlock();
+ }
+ void neigh_app_ns(struct neighbour *n)
+-- 
+2.39.5
+
diff --git a/queue-5.10/net-add-dev_net_rcu-helper.patch b/queue-5.10/net-add-dev_net_rcu-helper.patch
new file mode 100644 (file)
index 0000000..2cdeb59
--- /dev/null
@@ -0,0 +1,62 @@
+From a95840eb837eeadd12e7642cdf1b4d3af5404a5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 15:51:09 +0000
+Subject: net: add dev_net_rcu() helper
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 482ad2a4ace2740ca0ff1cbc8f3c7f862f3ab507 ]
+
+dev->nd_net can change, readers should either
+use rcu_read_lock() or RTNL.
+
+We currently use a generic helper, dev_net() with
+no debugging support. We probably have many hidden bugs.
+
+Add dev_net_rcu() helper for callers using rcu_read_lock()
+protection.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250205155120.1676781-2-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: dd205fcc33d9 ("ipv4: use RCU protection in rt_is_expired()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h   | 6 ++++++
+ include/net/net_namespace.h | 2 +-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 3380668478e8a..06b37f45b67c9 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2361,6 +2361,12 @@ struct net *dev_net(const struct net_device *dev)
+       return read_pnet(&dev->nd_net);
+ }
++static inline
++struct net *dev_net_rcu(const struct net_device *dev)
++{
++      return read_pnet_rcu(&dev->nd_net);
++}
++
+ static inline
+ void dev_net_set(struct net_device *dev, struct net *net)
+ {
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 0dfcf2f0ef62a..3cf6a5c17b84c 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -340,7 +340,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
+ #endif
+ }
+-static inline struct net *read_pnet_rcu(possible_net_t *pnet)
++static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
+ {
+ #ifdef CONFIG_NET_NS
+       return rcu_dereference(pnet->net);
+-- 
+2.39.5
+
diff --git a/queue-5.10/net-treat-possible_net_t-net-pointer-as-an-rcu-one-a.patch b/queue-5.10/net-treat-possible_net_t-net-pointer-as-an-rcu-one-a.patch
new file mode 100644 (file)
index 0000000..9fc57f9
--- /dev/null
@@ -0,0 +1,65 @@
+From d19a98544f6b1e7b0f15f6520870fb0fa7fb14b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Oct 2023 14:10:23 +0200
+Subject: net: treat possible_net_t net pointer as an RCU one and add
+ read_pnet_rcu()
+
+From: Jiri Pirko <jiri@nvidia.com>
+
+[ Upstream commit 2034d90ae41ae93e30d492ebcf1f06f97a9cfba6 ]
+
+Make the net pointer stored in possible_net_t structure annotated as
+an RCU pointer. Change the access helpers to treat it as such.
+Introduce read_pnet_rcu() helper to allow caller to dereference
+the net pointer under RCU read lock.
+
+Signed-off-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: dd205fcc33d9 ("ipv4: use RCU protection in rt_is_expired()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/net_namespace.h | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index c41e922fdd97e..0dfcf2f0ef62a 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -320,21 +320,30 @@ static inline int check_net(const struct net *net)
+ typedef struct {
+ #ifdef CONFIG_NET_NS
+-      struct net *net;
++      struct net __rcu *net;
+ #endif
+ } possible_net_t;
+ static inline void write_pnet(possible_net_t *pnet, struct net *net)
+ {
+ #ifdef CONFIG_NET_NS
+-      pnet->net = net;
++      rcu_assign_pointer(pnet->net, net);
+ #endif
+ }
+ static inline struct net *read_pnet(const possible_net_t *pnet)
+ {
+ #ifdef CONFIG_NET_NS
+-      return pnet->net;
++      return rcu_dereference_protected(pnet->net, true);
++#else
++      return &init_net;
++#endif
++}
++
++static inline struct net *read_pnet_rcu(possible_net_t *pnet)
++{
++#ifdef CONFIG_NET_NS
++      return rcu_dereference(pnet->net);
+ #else
+       return &init_net;
+ #endif
+-- 
+2.39.5
+
diff --git a/queue-5.10/openvswitch-use-rcu-protection-in-ovs_vport_cmd_fill.patch b/queue-5.10/openvswitch-use-rcu-protection-in-ovs_vport_cmd_fill.patch
new file mode 100644 (file)
index 0000000..0a47caf
--- /dev/null
@@ -0,0 +1,66 @@
+From 53a955d5ad35aee2d3f8a7746c8d92160905bd33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Feb 2025 13:58:37 +0000
+Subject: openvswitch: use RCU protection in ovs_vport_cmd_fill_info()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 90b2f49a502fa71090d9f4fe29a2f51fe5dff76d ]
+
+ovs_vport_cmd_fill_info() can be called without RTNL or RCU.
+
+Use RCU protection and dev_net_rcu() to avoid potential UAF.
+
+Fixes: 9354d4520342 ("openvswitch: reliable interface indentification in port dumps")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20250207135841.1948589-6-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/datapath.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index b625ab5e9a430..b493931433e99 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1980,6 +1980,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ {
+       struct ovs_header *ovs_header;
+       struct ovs_vport_stats vport_stats;
++      struct net *net_vport;
+       int err;
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
+@@ -1996,12 +1997,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+           nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
+               goto nla_put_failure;
+-      if (!net_eq(net, dev_net(vport->dev))) {
+-              int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
++      rcu_read_lock();
++      net_vport = dev_net_rcu(vport->dev);
++      if (!net_eq(net, net_vport)) {
++              int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC);
+               if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
+-                      goto nla_put_failure;
++                      goto nla_put_failure_unlock;
+       }
++      rcu_read_unlock();
+       ovs_vport_get_stats(vport, &vport_stats);
+       if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
+@@ -2019,6 +2023,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+       genlmsg_end(skb, ovs_header);
+       return 0;
++nla_put_failure_unlock:
++      rcu_read_unlock();
+ nla_put_failure:
+       err = -EMSGSIZE;
+ error:
+-- 
+2.39.5
+
index 44d26fc750dbb456b96d42dc858848cb50203648..280bce46312287ad946cc3b687cca61da6c1d55a 100644 (file)
@@ -282,3 +282,19 @@ serial-8250-fix-fifo-underflow-on-flush.patch
 alpha-align-stack-for-page-fault-and-user-unaligned-trap-handlers.patch
 gpio-stmpe-check-return-value-of-stmpe_reg_read-in-stmpe_gpio_irq_sync_unlock.patch
 partitions-mac-fix-handling-of-bogus-partition-table.patch
+clocksource-limit-number-of-cpus-checked-for-clock-s.patch
+clocksource-replace-deprecated-cpu-hotplug-functions.patch
+clocksource-replace-cpumask_weight-with-cpumask_empt.patch
+clocksource-use-pr_info-for-checking-clocksource-syn.patch
+clocksource-use-migrate_disable-to-avoid-calling-get.patch
+net-treat-possible_net_t-net-pointer-as-an-rcu-one-a.patch
+net-add-dev_net_rcu-helper.patch
+ipv4-use-rcu-protection-in-rt_is_expired.patch
+ipv4-use-rcu-protection-in-inet_select_addr.patch
+ipv6-use-rcu-protection-in-ip6_default_advmss.patch
+ndisc-use-rcu-protection-in-ndisc_alloc_skb.patch
+neighbour-delete-redundant-judgment-statements.patch
+neighbour-use-rcu-protection-in-__neigh_notify.patch
+arp-use-rcu-protection-in-arp_xmit.patch
+openvswitch-use-rcu-protection-in-ovs_vport_cmd_fill.patch
+ndisc-extend-rcu-protection-in-ndisc_send_skb.patch