]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
arm64/xorblocks: Switch to 'ksimd' scoped guard API
authorArd Biesheuvel <ardb@kernel.org>
Wed, 1 Oct 2025 11:58:26 +0000 (13:58 +0200)
committerArd Biesheuvel <ardb@kernel.org>
Wed, 12 Nov 2025 08:52:02 +0000 (09:52 +0100)
Switch to the more abstract 'scoped_ksimd()' API, which will be modified
in a future patch to transparently allocate a kernel mode FP/SIMD state
buffer on the stack, so that kernel mode FP/SIMD code remains
preemptible in principle, but without the memory overhead that adds 528
bytes to the size of struct task_struct.

Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
arch/arm64/include/asm/xor.h

index befcd8a7abc98d369473016a30cbd8c29e46f514..c38e3d017a79ecc914bb19bb5332a0202ce6887a 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/hardirq.h>
 #include <asm-generic/xor.h>
 #include <asm/hwcap.h>
-#include <asm/neon.h>
+#include <asm/simd.h>
 
 #ifdef CONFIG_KERNEL_MODE_NEON
 
@@ -19,9 +19,8 @@ static void
 xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
           const unsigned long * __restrict p2)
 {
-       kernel_neon_begin();
-       xor_block_inner_neon.do_2(bytes, p1, p2);
-       kernel_neon_end();
+       scoped_ksimd()
+               xor_block_inner_neon.do_2(bytes, p1, p2);
 }
 
 static void
@@ -29,9 +28,8 @@ xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
           const unsigned long * __restrict p2,
           const unsigned long * __restrict p3)
 {
-       kernel_neon_begin();
-       xor_block_inner_neon.do_3(bytes, p1, p2, p3);
-       kernel_neon_end();
+       scoped_ksimd()
+               xor_block_inner_neon.do_3(bytes, p1, p2, p3);
 }
 
 static void
@@ -40,9 +38,8 @@ xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
           const unsigned long * __restrict p3,
           const unsigned long * __restrict p4)
 {
-       kernel_neon_begin();
-       xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
-       kernel_neon_end();
+       scoped_ksimd()
+               xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
 }
 
 static void
@@ -52,9 +49,8 @@ xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
           const unsigned long * __restrict p4,
           const unsigned long * __restrict p5)
 {
-       kernel_neon_begin();
-       xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
-       kernel_neon_end();
+       scoped_ksimd()
+               xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
 }
 
 static struct xor_block_template xor_block_arm64 = {