From: Greg Kroah-Hartman Date: Mon, 9 Mar 2026 13:46:14 +0000 (+0100) Subject: 6.12-stable patches X-Git-Tag: v6.19.7~13 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4b77ca2131cc0bce76cc322d1efcbfb62f9b7b65;p=thirdparty%2Fkernel%2Fstable-queue.git 6.12-stable patches added patches: arm-clean-up-the-memset64-c-wrapper.patch --- diff --git a/queue-6.12/arm-clean-up-the-memset64-c-wrapper.patch b/queue-6.12/arm-clean-up-the-memset64-c-wrapper.patch new file mode 100644 index 0000000000..25c32a5297 --- /dev/null +++ b/queue-6.12/arm-clean-up-the-memset64-c-wrapper.patch @@ -0,0 +1,57 @@ +From b52343d1cb47bb27ca32a3f4952cc2fd3cd165bf Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= +Date: Fri, 13 Feb 2026 08:39:29 +0100 +Subject: ARM: clean up the memset64() C wrapper +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Thomas Weißschuh + +commit b52343d1cb47bb27ca32a3f4952cc2fd3cd165bf upstream. + +The current logic to split the 64-bit argument into its 32-bit halves is +byte-order specific and a bit clunky. Use a union instead which is +easier to read and works in all cases. + +GCC still generates the same machine code. + +While at it, rename the arguments of the __memset64() prototype to +actually reflect their semantics. 
+ +Signed-off-by: Thomas Weißschuh +Signed-off-by: Linus Torvalds +Reported-by: Ben Hutchings # for -stable +Link: https://lore.kernel.org/all/1a11526ae3d8664f705b541b8d6ea57b847b49a8.camel@decadent.org.uk/ +Suggested-by: Matthew Wilcox # for -stable +Link: https://lore.kernel.org/all/aZonkWMwpbFhzDJq@casper.infradead.org/ +Signed-off-by: Greg Kroah-Hartman +--- + arch/arm/include/asm/string.h | 14 +++++++++----- + 1 file changed, 9 insertions(+), 5 deletions(-) + +--- a/arch/arm/include/asm/string.h ++++ b/arch/arm/include/asm/string.h +@@ -39,13 +39,17 @@ static inline void *memset32(uint32_t *p + } + + #define __HAVE_ARCH_MEMSET64 +-extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi); ++extern void *__memset64(uint64_t *, uint32_t first, __kernel_size_t, uint32_t second); + static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) + { +- if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) +- return __memset64(p, v, n * 8, v >> 32); +- else +- return __memset64(p, v >> 32, n * 8, v); ++ union { ++ uint64_t val; ++ struct { ++ uint32_t first, second; ++ }; ++ } word = { .val = v }; ++ ++ return __memset64(p, word.first, n * 8, word.second); + } + + /* diff --git a/queue-6.12/series index ab9f63a9e8..8a63a12de2 100644 --- a/queue-6.12/series +++ b/queue-6.12/series @@ -168,3 +168,4 @@ scsi-core-fix-refcount-leak-for-tagset_refcnt.patch selftests-mptcp-more-stable-simult_flows-tests.patch selftests-mptcp-join-check-removing-signal-subflow-endp.patch xattr-switch-to-class-fd.patch +arm-clean-up-the-memset64-c-wrapper.patch