5.10-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 20 Oct 2023 06:13:34 +0000 (08:13 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 20 Oct 2023 06:13:34 +0000 (08:13 +0200)
added patches:
dev_forward_skb-do-not-scrub-skb-mark-within-the-same-name-space.patch
lib-kconfig.debug-do-not-enable-debug_preempt-by-default.patch

queue-5.10/dev_forward_skb-do-not-scrub-skb-mark-within-the-same-name-space.patch [new file with mode: 0644]
queue-5.10/lib-kconfig.debug-do-not-enable-debug_preempt-by-default.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/dev_forward_skb-do-not-scrub-skb-mark-within-the-same-name-space.patch b/queue-5.10/dev_forward_skb-do-not-scrub-skb-mark-within-the-same-name-space.patch
new file mode 100644 (file)
index 0000000..055d813
--- /dev/null
@@ -0,0 +1,45 @@
+From ff70202b2d1ad522275c6aadc8c53519b6a22c57 Mon Sep 17 00:00:00 2001
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Thu, 24 Jun 2021 10:05:05 +0200
+Subject: dev_forward_skb: do not scrub skb mark within the same name space
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+commit ff70202b2d1ad522275c6aadc8c53519b6a22c57 upstream.
+
+The goal is to keep the mark during a bpf_redirect(), as is done for
+legacy encapsulation / decapsulation, when there is no x-netns.
+This was initially done in commit 213dd74aee76 ("skbuff: Do not scrub skb
+mark within the same name space").
+
+When the call to skb_scrub_packet() was added in dev_forward_skb() (commit
+8b27f27797ca ("skb: allow skb_scrub_packet() to be used by tunnels")), the
+second argument (xnet) was set to true to force a call to skb_orphan(). At
+this time, the mark was always cleaned up by skb_scrub_packet(), whatever
+the xnet value was.
+This call to skb_orphan() was removed later in commit
+9c4c325252c5 ("skbuff: preserve sock reference when scrubbing the skb.").
+But this 'true' stayed here without any real reason.
+
+Let's correctly set xnet in ____dev_forward_skb(); this function has access
+to both the previous interface and the new interface.
+
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netdevice.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3972,7 +3972,7 @@ static __always_inline int ____dev_forwa
+               return NET_RX_DROP;
+       }
+
+-      skb_scrub_packet(skb, true);
++      skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
+       skb->priority = 0;
+       return 0;
+ }
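For context, a minimal sketch of how ____dev_forward_skb() reads in the 5.10
tree once the hunk above is applied.  Only the skb_scrub_packet() line comes
from the patch itself; the surrounding lines are assumed from
include/linux/netdevice.h and are shown only to make the xnet change easier
to follow.

/* Sketch only -- not part of the backported patch. */
static __always_inline int ____dev_forward_skb(struct net_device *dev,
                                               struct sk_buff *skb)
{
        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
            unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        /*
         * xnet is true only when the packet really changes network
         * namespace; skb_scrub_packet() clears skb->mark only in that
         * case, so a same-netns bpf_redirect() now keeps the mark.
         */
        skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
        skb->priority = 0;
        return 0;
}

For a redirect that stays inside one namespace, dev_net(dev) == dev_net(skb->dev),
so xnet evaluates to false and skb->mark survives the forward, matching the
behaviour of the legacy encapsulation / decapsulation paths mentioned above.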
diff --git a/queue-5.10/lib-kconfig.debug-do-not-enable-debug_preempt-by-default.patch b/queue-5.10/lib-kconfig.debug-do-not-enable-debug_preempt-by-default.patch
new file mode 100644 (file)
index 0000000..9871663
--- /dev/null
@@ -0,0 +1,85 @@
+From cc6003916ed46d7a67d91ee32de0f9138047d55f Mon Sep 17 00:00:00 2001
+From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Date: Sat, 21 Jan 2023 12:39:42 +0900
+Subject: lib/Kconfig.debug: do not enable DEBUG_PREEMPT by default
+
+From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+
+commit cc6003916ed46d7a67d91ee32de0f9138047d55f upstream.
+
+In workloads where this_cpu operations are frequently performed,
+enabling DEBUG_PREEMPT may result in a significant increase in
+runtime overhead due to frequent invocation of the
+__this_cpu_preempt_check() function.
+
+This can be demonstrated through benchmarks such as hackbench, where this
+configuration results in a 10% reduction in performance, primarily due to
+the added overhead within the memcg charging path.
+
+Therefore, do not enable DEBUG_PREEMPT by default, and make users aware
+of its potential impact on performance in some workloads.
+
+hackbench-process-sockets
+                     debug_preempt      no_debug_preempt
+Amean     1       0.4743 (   0.00%)      0.4295 *   9.45%*
+Amean     4       1.4191 (   0.00%)      1.2650 *  10.86%*
+Amean     7       2.2677 (   0.00%)      2.0094 *  11.39%*
+Amean     12      3.6821 (   0.00%)      3.2115 *  12.78%*
+Amean     21      6.6752 (   0.00%)      5.7956 *  13.18%*
+Amean     30      9.6646 (   0.00%)      8.5197 *  11.85%*
+Amean     48     15.3363 (   0.00%)     13.5559 *  11.61%*
+Amean     79     24.8603 (   0.00%)     22.0597 *  11.27%*
+Amean     96     30.1240 (   0.00%)     26.8073 *  11.01%*
+
+Link: https://lkml.kernel.org/r/20230121033942.350387-1-42.hyeyoo@gmail.com
+Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Acked-by: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Ben Segall <bsegall@google.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Dennis Zhou <dennis@kernel.org>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Valentin Schneider <vschneid@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Luiz Capitulino <luizcap@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/Kconfig.debug |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1136,13 +1136,16 @@ config DEBUG_TIMEKEEPING
+ config DEBUG_PREEMPT
+       bool "Debug preemptible kernel"
+       depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
+-      default y
+       help
+         If you say Y here then the kernel will use a debug variant of the
+         commonly used smp_processor_id() function and will print warnings
+         if kernel code uses it in a preemption-unsafe way. Also, the kernel
+         will detect preemption count underflows.
++
++        This option has potential to introduce high runtime overhead,
++        depending on workload as it triggers debugging routines for each
++        this_cpu operation. It should only be used for debugging purposes.
++
+ menu "Lock Debugging (spinlocks, mutexes, etc...)"
+
+ config LOCK_DEBUGGING_SUPPORT
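For context, the __this_cpu_preempt_check() overhead described above comes from
the per-cpu accessor wrappers.  A minimal sketch, abbreviated from the upstream
include/linux/percpu-defs.h (not part of this patch), of why every this_cpu
operation pays for the check when CONFIG_DEBUG_PREEMPT is enabled:

/* Sketch only -- abbreviated, not part of the backported patch. */
#ifdef CONFIG_DEBUG_PREEMPT
/* Defined in lib/smp_processor_id.c; calls into check_preemption_disabled(). */
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

/*
 * Every __this_cpu_*() accessor expands through the check, so with
 * CONFIG_DEBUG_PREEMPT=y each per-cpu read/write/add makes a call into
 * the debug path; with the option off the hook compiles away.
 */
#define __this_cpu_read(pcp)                                            \
({                                                                      \
        __this_cpu_preempt_check("read");                               \
        raw_cpu_read(pcp);                                              \
})

The option itself is unchanged by this patch; only the "default y" is removed,
so kernels that still want the checks can set CONFIG_DEBUG_PREEMPT=y explicitly.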
diff --git a/queue-5.10/series b/queue-5.10/series
index 86673e00afeab6ebef8d5ebb5dd9538dcc495408..ba420a23c1fc2e789def926b58d5a83ed1e002db 100644 (file)
--- a/queue-5.10/series
@@ -80,3 +80,5 @@ eth-remove-remaining-copies-of-the-napi_poll_weight-define.patch
 rdma-srp-set-scmnd-result-only-when-scmnd-is-not-null.patch
 rdma-srp-fix-srp_abort.patch
 ravb-fix-use-after-free-issue-in-ravb_tx_timeout_work.patch
+dev_forward_skb-do-not-scrub-skb-mark-within-the-same-name-space.patch
+lib-kconfig.debug-do-not-enable-debug_preempt-by-default.patch