From: Greg Kroah-Hartman
Date: Wed, 12 May 2021 12:14:35 +0000 (+0200)
Subject: 4.14-stable patches
X-Git-Tag: v5.4.119~19
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=489e29f16247c62b5f8e46118ad3a53d648de793;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	smp-fix-smp_call_function_single_async-prototype.patch
---

diff --git a/queue-4.14/series b/queue-4.14/series
index 2c09b7f0245..9bd722acdab 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -229,3 +229,4 @@ powerpc-52xx-fix-an-invalid-asm-expression-addi-used.patch
 net-emac-emac-mac-fix-a-use-after-free-in-emac_mac_t.patch
 net-nfc-digital-fix-a-double-free-in-digital_tg_recv.patch
 kfifo-fix-ternary-sign-extension-bugs.patch
+smp-fix-smp_call_function_single_async-prototype.patch
diff --git a/queue-4.14/smp-fix-smp_call_function_single_async-prototype.patch b/queue-4.14/smp-fix-smp_call_function_single_async-prototype.patch
new file mode 100644
index 00000000000..17f7af38a01
--- /dev/null
+++ b/queue-4.14/smp-fix-smp_call_function_single_async-prototype.patch
@@ -0,0 +1,107 @@
+From foo@baz Wed May 12 02:09:51 PM CEST 2021
+From: Arnd Bergmann
+Date: Wed, 5 May 2021 23:12:42 +0200
+Subject: smp: Fix smp_call_function_single_async prototype
+
+From: Arnd Bergmann
+
+commit 1139aeb1c521eb4a050920ce6c64c36c4f2a3ab7 upstream.
+
+As of commit 966a967116e6 ("smp: Avoid using two cache lines for struct
+call_single_data"), the smp code prefers 32-byte aligned call_single_data
+objects for performance reasons, but the block layer includes an instance
+of this structure in the main 'struct request' that is more sensitive
+to size than to performance here, see 4ccafe032005 ("block: unalign
+call_single_data in struct request").
+
+The result is a violation of the calling conventions that clang correctly
+points out:
+
+block/blk-mq.c:630:39: warning: passing 8-byte aligned argument to 32-byte aligned parameter 2 of 'smp_call_function_single_async' may result in an unaligned pointer access [-Walign-mismatch]
+			smp_call_function_single_async(cpu, &rq->csd);
+
+It does seem that the usage of the call_single_data without cache line
+alignment should still be allowed by the smp code, so just change the
+function prototype so it accepts both, but leave the default alignment
+unchanged for the other users. This seems better to me than adding
+a local hack to shut up an otherwise correct warning in the caller.
+
+Signed-off-by: Arnd Bergmann
+Signed-off-by: Peter Zijlstra (Intel)
+Acked-by: Jens Axboe
+Link: https://lkml.kernel.org/r/20210505211300.3174456-1-arnd@kernel.org
+[nc: Fix conflicts]
+Signed-off-by: Nathan Chancellor
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/smp.h |    2 +-
+ kernel/smp.c        |   10 +++++-----
+ kernel/up.c         |    2 +-
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -53,7 +53,7 @@ void on_each_cpu_cond(bool (*cond_func)(
+ 			smp_call_func_t func, void *info, bool wait,
+ 			gfp_t gfp_flags);
+ 
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+ 
+ #ifdef CONFIG_SMP
+ 
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -103,12 +103,12 @@ void __init call_function_init(void)
+  * previous function call. For multi-cpu calls its even more interesting
+  * as we'll have to ensure no other cpu is observing our csd.
+  */
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+ 	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
+ }
+ 
+-static __always_inline void csd_lock(call_single_data_t *csd)
++static __always_inline void csd_lock(struct __call_single_data *csd)
+ {
+ 	csd_lock_wait(csd);
+ 	csd->flags |= CSD_FLAG_LOCK;
+@@ -121,7 +121,7 @@ static __always_inline void csd_lock(cal
+ 	smp_wmb();
+ }
+ 
+-static __always_inline void csd_unlock(call_single_data_t *csd)
++static __always_inline void csd_unlock(struct __call_single_data *csd)
+ {
+ 	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+ 
+@@ -138,7 +138,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(cal
+  * for execution on the given CPU. data must already have
+  * ->func, ->info, and ->flags set.
+  */
+-static int generic_exec_single(int cpu, call_single_data_t *csd,
++static int generic_exec_single(int cpu, struct __call_single_data *csd,
+ 			       smp_call_func_t func, void *info)
+ {
+ 	if (cpu == smp_processor_id()) {
+@@ -323,7 +323,7 @@ EXPORT_SYMBOL(smp_call_function_single);
+  * NOTE: Be careful, there is unfortunately no current debugging facility to
+  * validate the correctness of this serialization.
+  */
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+ 	int err = 0;
+ 
+--- a/kernel/up.c
++++ b/kernel/up.c
+@@ -23,7 +23,7 @@ int smp_call_function_single(int cpu, vo
+ }
+ EXPORT_SYMBOL(smp_call_function_single);
+ 
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+ 	unsigned long flags;
+ 
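
For readers who want to see the warning this backport silences outside of a kernel tree, here is a minimal standalone sketch of the same pattern. All names below are made up for the example (they are not the kernel's definitions); it only mirrors the shape of the problem: a typedef that raises alignment to 32 bytes, a containing struct that keeps the member at its natural 8-byte alignment, and a function whose parameter uses the over-aligned typedef. Compiling it with a reasonably recent clang (e.g. clang -Wall -c example.c) should reproduce the -Walign-mismatch diagnostic on the first call and stay quiet on the second.

/*
 * Illustrative only -- not kernel code.
 */
struct base_csd {
	void (*func)(void *info);
	void *info;
	unsigned int flags;
};

/* Over-aligned alias, analogous in spirit to call_single_data_t. */
typedef struct base_csd aligned_csd_t __attribute__((aligned(32)));

/* Analogous to the embedding in 'struct request': member kept at natural alignment. */
struct container {
	int tag;
	struct base_csd csd;
};

void call_async_old(aligned_csd_t *csd);	/* parameter type before the fix */
void call_async_new(struct base_csd *csd);	/* parameter type after the fix */

void example(struct container *c)
{
	/*
	 * clang warns here: passing 8-byte aligned argument to 32-byte
	 * aligned parameter 1 of 'call_async_old' may result in an
	 * unaligned pointer access [-Walign-mismatch]
	 */
	call_async_old(&c->csd);

	/* No warning: the parameter only requires the struct's natural alignment. */
	call_async_new(&c->csd);
}

This mirrors the choice the patch makes: the aligned typedef stays as-is for the users that want the cache-line behaviour, while the function accepts the plain struct, so size-sensitive embedded instances such as the one in 'struct request' remain valid callers without any local cast or pragma in the block layer.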