5.12-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 12 May 2021 12:15:57 +0000 (14:15 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 12 May 2021 12:15:57 +0000 (14:15 +0200)
added patches:
net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch
smp-fix-smp_call_function_single_async-prototype.patch

queue-5.12/net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch [new file with mode: 0644]
queue-5.12/series
queue-5.12/smp-fix-smp_call_function_single_async-prototype.patch [new file with mode: 0644]

diff --git a/queue-5.12/net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch b/queue-5.12/net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch
new file mode 100644
index 0000000..4e22b20
--- /dev/null
@@ -0,0 +1,46 @@
+From 8d432592f30fcc34ef5a10aac4887b4897884493 Mon Sep 17 00:00:00 2001
+From: Jonathon Reinhart <jonathon.reinhart@gmail.com>
+Date: Sat, 1 May 2021 04:28:22 -0400
+Subject: net: Only allow init netns to set default tcp cong to a restricted algo
+
+From: Jonathon Reinhart <jonathon.reinhart@gmail.com>
+
+commit 8d432592f30fcc34ef5a10aac4887b4897884493 upstream.
+
+tcp_set_default_congestion_control() is netns-safe in that it writes
+to &net->ipv4.tcp_congestion_control, but it also sets
+ca->flags |= TCP_CONG_NON_RESTRICTED which is not namespaced.
+This has the unintended side-effect of changing the global
+net.ipv4.tcp_allowed_congestion_control sysctl, despite the fact that it
+is read-only: 97684f0970f6 ("net: Make tcp_allowed_congestion_control
+readonly in non-init netns")
+
+Resolve this netns "leak" by only allowing the init netns to set the
+default algorithm to one that is restricted. This restriction could be
+removed if tcp_allowed_congestion_control were namespace-ified in the
+future.
+
+This bug was uncovered with
+https://github.com/JonathonReinhart/linux-netns-sysctl-verify
+
+Fixes: 6670e1524477 ("tcp: Namespace-ify sysctl_tcp_default_congestion_control")
+Signed-off-by: Jonathon Reinhart <jonathon.reinhart@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_cong.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -230,6 +230,10 @@ int tcp_set_default_congestion_control(s
+               ret = -ENOENT;
+       } else if (!bpf_try_module_get(ca, ca->owner)) {
+               ret = -EBUSY;
++      } else if (!net_eq(net, &init_net) &&
++                      !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
++              /* Only init netns can set default to a restricted algorithm */
++              ret = -EPERM;
+       } else {
+               prev = xchg(&net->ipv4.tcp_congestion_control, ca);
+               if (prev)
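
A quick way to observe the behaviour the patch above enforces is to attempt the sysctl write from a fresh network namespace. The sketch below is illustrative only and is not part of the queued patch; it assumes a congestion-control algorithm such as dctcp that is available on the system but absent from net.ipv4.tcp_allowed_congestion_control, and it needs root plus CONFIG_NET_NS. With the fix applied, the write from the non-init netns should fail with EPERM rather than silently marking the algorithm TCP_CONG_NON_RESTRICTED for every namespace.

/* Illustrative only -- not part of the queued patch. Assumes "dctcp" (or
 * another algorithm not listed in net.ipv4.tcp_allowed_congestion_control)
 * is available, and that this runs as root with network namespaces enabled.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Enter a fresh, non-init network namespace. */
	if (unshare(CLONE_NEWNET)) {
		perror("unshare(CLONE_NEWNET)");
		return 1;
	}

	/* /proc/sys/net/* resolves against the opener's current netns. */
	int fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With the fix, setting a restricted algorithm from a non-init netns
	 * is rejected with EPERM instead of flipping the algorithm to
	 * TCP_CONG_NON_RESTRICTED globally. */
	if (write(fd, "dctcp", 5) < 0)
		printf("write rejected: %s\n", strerror(errno));
	else
		printf("write accepted (algorithm already allowed?)\n");

	close(fd);
	return 0;
}
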
diff --git a/queue-5.12/series b/queue-5.12/series
index 6eeda0d1fd233c5a6f4a70570222d8b4656ce1e5..8d12da0c3bec6d3f3759e70888676f28f9d2ab38 100644
@@ -671,3 +671,5 @@ afs-fix-speculative-status-fetches.patch
 bpf-fix-alu32-const-subreg-bound-tracking-on-bitwise-operations.patch
 bpf-ringbuf-deny-reserve-of-buffers-larger-than-ringbuf.patch
 bpf-prevent-writable-memory-mapping-of-read-only-ringbuf-pages.patch
+net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch
+smp-fix-smp_call_function_single_async-prototype.patch
diff --git a/queue-5.12/smp-fix-smp_call_function_single_async-prototype.patch b/queue-5.12/smp-fix-smp_call_function_single_async-prototype.patch
new file mode 100644
index 0000000..3ba6b6f
--- /dev/null
@@ -0,0 +1,149 @@
+From foo@baz Wed May 12 02:08:25 PM CEST 2021
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 5 May 2021 23:12:42 +0200
+Subject: smp: Fix smp_call_function_single_async prototype
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 1139aeb1c521eb4a050920ce6c64c36c4f2a3ab7 upstream.
+
+As of commit 966a967116e6 ("smp: Avoid using two cache lines for struct
+call_single_data"), the smp code prefers 32-byte aligned call_single_data
+objects for performance reasons, but the block layer includes an instance
+of this structure in the main 'struct request' that is more sensitive
+to size than to performance here, see 4ccafe032005 ("block: unalign
+call_single_data in struct request").
+
+The result is a violation of the calling conventions that clang correctly
+points out:
+
+block/blk-mq.c:630:39: warning: passing 8-byte aligned argument to 32-byte aligned parameter 2 of 'smp_call_function_single_async' may result in an unaligned pointer access [-Walign-mismatch]
+                smp_call_function_single_async(cpu, &rq->csd);
+
+It does seem that the usage of the call_single_data without cache line
+alignment should still be allowed by the smp code, so just change the
+function prototype so it accepts both, but leave the default alignment
+unchanged for the other users. This seems better to me than adding
+a local hack to shut up an otherwise correct warning in the caller.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Jens Axboe <axboe@kernel.dk>
+Link: https://lkml.kernel.org/r/20210505211300.3174456-1-arnd@kernel.org
+[nc: Fix conflicts]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/smp.h |    2 +-
+ kernel/smp.c        |   20 ++++++++++----------
+ kernel/up.c         |    2 +-
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t co
+ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask);
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+ #ifdef CONFIG_SMP
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_in
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+ /* Record current CSD work for current CPU, NULL to erase. */
+-static void csd_lock_record(call_single_data_t *csd)
++static void csd_lock_record(struct __call_single_data *csd)
+ {
+       if (!csd) {
+               smp_mb(); /* NULL cur_csd after unlock. */
+@@ -125,7 +125,7 @@ static void csd_lock_record(call_single_
+                 /* Or before unlock, as the case may be. */
+ }
+-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
++static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
+ {
+       unsigned int csd_type;
+@@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait
+  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
+  * so waiting on other types gets much less information.
+  */
+-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
++static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+ {
+       int cpu = -1;
+       int cpux;
+@@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wai
+  * previous function call. For multi-cpu calls its even more interesting
+  * as we'll have to ensure no other cpu is observing our csd.
+  */
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+       int bug_id = 0;
+       u64 ts0, ts1;
+@@ -219,17 +219,17 @@ static __always_inline void csd_lock_wai
+ }
+ #else
+-static void csd_lock_record(call_single_data_t *csd)
++static void csd_lock_record(struct __call_single_data *csd)
+ {
+ }
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+       smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+ }
+ #endif
+-static __always_inline void csd_lock(call_single_data_t *csd)
++static __always_inline void csd_lock(struct __call_single_data *csd)
+ {
+       csd_lock_wait(csd);
+       csd->node.u_flags |= CSD_FLAG_LOCK;
+@@ -242,7 +242,7 @@ static __always_inline void csd_lock(cal
+       smp_wmb();
+ }
+-static __always_inline void csd_unlock(call_single_data_t *csd)
++static __always_inline void csd_unlock(struct __call_single_data *csd)
+ {
+       WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
+@@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, st
+  * for execution on the given CPU. data must already have
+  * ->func, ->info, and ->flags set.
+  */
+-static int generic_exec_single(int cpu, call_single_data_t *csd)
++static int generic_exec_single(int cpu, struct __call_single_data *csd)
+ {
+       if (cpu == smp_processor_id()) {
+               smp_call_func_t func = csd->func;
+@@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
+  * NOTE: Be careful, there is unfortunately no current debugging facility to
+  * validate the correctness of this serialization.
+  */
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+       int err = 0;
+--- a/kernel/up.c
++++ b/kernel/up.c
+@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, vo
+ }
+ EXPORT_SYMBOL(smp_call_function_single);
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+       unsigned long flags;
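
The root of the warning fixed above is that call_single_data_t is a typedef of struct __call_single_data carrying a 32-byte alignment attribute, while 'struct request' embeds the plain, 8-byte-aligned struct; a prototype written in terms of the aligned typedef therefore promises an alignment the block-layer caller cannot provide. The standalone sketch below reproduces that pattern outside the kernel; the names (struct csd, csd_t, csd_call, struct holder) are made up for illustration, and only the aligned-typedef-versus-plain-struct shape mirrors the kernel code. Built with clang, the first call may be flagged by -Walign-mismatch, while the fixed prototype taking the plain struct pointer accepts aligned and unaligned instances alike.

/* Standalone sketch of the alignment mismatch; all names are illustrative. */
#include <stdio.h>

struct csd {
	void (*func)(void *info);
	void *info;
};

/* Mirrors call_single_data_t: same struct, but with forced 32-byte alignment. */
typedef struct csd csd_t __attribute__((aligned(32)));

/* Old-style prototype: the parameter type demands 32-byte alignment. */
static void csd_call(csd_t *csd)
{
	csd->func(csd->info);
}

/* Fixed-style prototype: takes the plain struct, as the patch does. */
static void csd_call_fixed(struct csd *csd)
{
	csd->func(csd->info);
}

/* Plays the role of struct request: embeds the csd without extra alignment. */
struct holder {
	long tag;
	struct csd csd;		/* only naturally (8-byte) aligned */
};

static void hello(void *info)
{
	printf("%s\n", (const char *)info);
}

int main(void)
{
	struct holder h = {
		.tag = 1,
		.csd = { .func = hello, .info = "called via csd" },
	};

	csd_call(&h.csd);	/* clang may warn: -Walign-mismatch, like &rq->csd */
	csd_call_fixed(&h.csd);	/* no warning: plain struct pointer */
	return 0;
}
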