From: Greg Kroah-Hartman Date: Wed, 12 May 2021 12:15:08 +0000 (+0200) Subject: 5.4-stable patches X-Git-Tag: v5.4.119~17 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=85d0b2ceaa8f8466f4dcbc00bafc41acdf26fc13;p=thirdparty%2Fkernel%2Fstable-queue.git 5.4-stable patches added patches: net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch smp-fix-smp_call_function_single_async-prototype.patch --- diff --git a/queue-5.4/net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch b/queue-5.4/net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch new file mode 100644 index 00000000000..3372888d869 --- /dev/null +++ b/queue-5.4/net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch @@ -0,0 +1,46 @@ +From 8d432592f30fcc34ef5a10aac4887b4897884493 Mon Sep 17 00:00:00 2001 +From: Jonathon Reinhart +Date: Sat, 1 May 2021 04:28:22 -0400 +Subject: net: Only allow init netns to set default tcp cong to a restricted algo + +From: Jonathon Reinhart + +commit 8d432592f30fcc34ef5a10aac4887b4897884493 upstream. + +tcp_set_default_congestion_control() is netns-safe in that it writes +to &net->ipv4.tcp_congestion_control, but it also sets +ca->flags |= TCP_CONG_NON_RESTRICTED which is not namespaced. +This has the unintended side-effect of changing the global +net.ipv4.tcp_allowed_congestion_control sysctl, despite the fact that it +is read-only: 97684f0970f6 ("net: Make tcp_allowed_congestion_control +readonly in non-init netns") + +Resolve this netns "leak" by only allowing the init netns to set the +default algorithm to one that is restricted. This restriction could be +removed if tcp_allowed_congestion_control were namespace-ified in the +future. + +This bug was uncovered with +https://github.com/JonathonReinhart/linux-netns-sysctl-verify + +Fixes: 6670e1524477 ("tcp: Namespace-ify sysctl_tcp_default_congestion_control") +Signed-off-by: Jonathon Reinhart +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/ipv4/tcp_cong.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/net/ipv4/tcp_cong.c ++++ b/net/ipv4/tcp_cong.c +@@ -229,6 +229,10 @@ int tcp_set_default_congestion_control(s + ret = -ENOENT; + } else if (!try_module_get(ca->owner)) { + ret = -EBUSY; ++ } else if (!net_eq(net, &init_net) && ++ !(ca->flags & TCP_CONG_NON_RESTRICTED)) { ++ /* Only init netns can set default to a restricted algorithm */ ++ ret = -EPERM; + } else { + prev = xchg(&net->ipv4.tcp_congestion_control, ca); + if (prev) diff --git a/queue-5.4/series b/queue-5.4/series index addbea4ecf5..085a36dd1a8 100644 --- a/queue-5.4/series +++ b/queue-5.4/series @@ -239,3 +239,5 @@ net-nfc-digital-fix-a-double-free-in-digital_tg_recv.patch kfifo-fix-ternary-sign-extension-bugs.patch mm-sparse-add-the-missing-sparse_buffer_fini-in-erro.patch mm-memory-failure-unnecessary-amount-of-unmapping.patch +net-only-allow-init-netns-to-set-default-tcp-cong-to-a-restricted-algo.patch +smp-fix-smp_call_function_single_async-prototype.patch diff --git a/queue-5.4/smp-fix-smp_call_function_single_async-prototype.patch b/queue-5.4/smp-fix-smp_call_function_single_async-prototype.patch new file mode 100644 index 00000000000..6c17a2a1c56 --- /dev/null +++ b/queue-5.4/smp-fix-smp_call_function_single_async-prototype.patch @@ -0,0 +1,107 @@ +From foo@baz Wed May 12 02:09:26 PM CEST 2021 +From: Arnd Bergmann +Date: Wed, 5 May 2021 23:12:42 +0200 +Subject: smp: Fix smp_call_function_single_async prototype + +From: Arnd Bergmann + +commit 1139aeb1c521eb4a050920ce6c64c36c4f2a3ab7 upstream. 
+ +As of commit 966a967116e6 ("smp: Avoid using two cache lines for struct +call_single_data"), the smp code prefers 32-byte aligned call_single_data +objects for performance reasons, but the block layer includes an instance +of this structure in the main 'struct request' that is more sensitive +to size than to performance here, see 4ccafe032005 ("block: unalign +call_single_data in struct request"). + +The result is a violation of the calling conventions that clang correctly +points out: + +block/blk-mq.c:630:39: warning: passing 8-byte aligned argument to 32-byte aligned parameter 2 of 'smp_call_function_single_async' may result in an unaligned pointer access [-Walign-mismatch] + smp_call_function_single_async(cpu, &rq->csd); + +It does seem that the usage of the call_single_data without cache line +alignment should still be allowed by the smp code, so just change the +function prototype so it accepts both, but leave the default alignment +unchanged for the other users. This seems better to me than adding +a local hack to shut up an otherwise correct warning in the caller. 
+ +Signed-off-by: Arnd Bergmann +Signed-off-by: Peter Zijlstra (Intel) +Acked-by: Jens Axboe +Link: https://lkml.kernel.org/r/20210505211300.3174456-1-arnd@kernel.org +[nc: Fix conflicts] +Signed-off-by: Nathan Chancellor +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/smp.h | 2 +- + kernel/smp.c | 10 +++++----- + kernel/up.c | 2 +- + 3 files changed, 7 insertions(+), 7 deletions(-) + +--- a/include/linux/smp.h ++++ b/include/linux/smp.h +@@ -57,7 +57,7 @@ void on_each_cpu_cond_mask(bool (*cond_f + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags, const struct cpumask *mask); + +-int smp_call_function_single_async(int cpu, call_single_data_t *csd); ++int smp_call_function_single_async(int cpu, struct __call_single_data *csd); + + #ifdef CONFIG_SMP + +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -104,12 +104,12 @@ void __init call_function_init(void) + * previous function call. For multi-cpu calls its even more interesting + * as we'll have to ensure no other cpu is observing our csd. + */ +-static __always_inline void csd_lock_wait(call_single_data_t *csd) ++static __always_inline void csd_lock_wait(struct __call_single_data *csd) + { + smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK)); + } + +-static __always_inline void csd_lock(call_single_data_t *csd) ++static __always_inline void csd_lock(struct __call_single_data *csd) + { + csd_lock_wait(csd); + csd->flags |= CSD_FLAG_LOCK; +@@ -122,7 +122,7 @@ static __always_inline void csd_lock(cal + smp_wmb(); + } + +-static __always_inline void csd_unlock(call_single_data_t *csd) ++static __always_inline void csd_unlock(struct __call_single_data *csd) + { + WARN_ON(!(csd->flags & CSD_FLAG_LOCK)); + +@@ -139,7 +139,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(cal + * for execution on the given CPU. data must already have + * ->func, ->info, and ->flags set. 
+ */ +-static int generic_exec_single(int cpu, call_single_data_t *csd, ++static int generic_exec_single(int cpu, struct __call_single_data *csd, + smp_call_func_t func, void *info) + { + if (cpu == smp_processor_id()) { +@@ -332,7 +332,7 @@ EXPORT_SYMBOL(smp_call_function_single); + * NOTE: Be careful, there is unfortunately no current debugging facility to + * validate the correctness of this serialization. + */ +-int smp_call_function_single_async(int cpu, call_single_data_t *csd) ++int smp_call_function_single_async(int cpu, struct __call_single_data *csd) + { + int err = 0; + +--- a/kernel/up.c ++++ b/kernel/up.c +@@ -24,7 +24,7 @@ int smp_call_function_single(int cpu, vo + } + EXPORT_SYMBOL(smp_call_function_single); + +-int smp_call_function_single_async(int cpu, call_single_data_t *csd) ++int smp_call_function_single_async(int cpu, struct __call_single_data *csd) + { + unsigned long flags; +