ppc64le: Restore optimized strncmp for power10
author    Sachin Monga <smonga@linux.ibm.com>
          Tue, 7 Oct 2025 08:14:04 +0000 (03:14 -0500)
committer Sachin Monga <smonga@linux.ibm.com>
          Tue, 7 Oct 2025 08:25:42 +0000 (03:25 -0500)
This patch addresses the actual cause of CVE-2025-5745.

The non-volatile vector registers are no longer used for the
32-byte load and comparison operations.

Additionally, the assembler workaround used earlier for the
lxvp instruction has been replaced with the actual instruction.
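
(For context: under the ELFv2 ABI, vector registers v20-v31 are non-volatile, so a callee that writes them without saving and restoring them corrupts its caller's state. A rough, compiler- and optimization-dependent sketch of how such a clobber could be observed from C follows; it assumes GCC on ppc64le and that the compiler keeps the register variable pinned in v20 across the call. It is illustrative only, not part of the patch.)

    #include <altivec.h>
    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      /* Pin a known pattern in non-volatile v20; the ABI obliges
         strncmp to preserve it across the call.  */
      register vector unsigned char v20 __asm__ ("v20")
          = vec_splats ((unsigned char) 0xa5);
      __asm__ volatile ("" : "+v" (v20));   /* v20 live before the call */

      int r = strncmp ("power10", "power9", 7);

      __asm__ volatile ("" : "+v" (v20));   /* reread v20 after the call */
      vector unsigned char expect = vec_splats ((unsigned char) 0xa5);
      printf ("strncmp=%d, v20 %s\n", r,
              vec_all_eq (v20, expect) ? "preserved" : "clobbered");
      return 0;
    }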

Signed-off-by: Sachin Monga <smonga@linux.ibm.com>
Co-authored-by: Paul Murphy <paumurph@redhat.com>
sysdeps/powerpc/powerpc64/le/power10/strncmp.S [new file with mode: 0644]
sysdeps/powerpc/powerpc64/multiarch/Makefile
sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S [new file with mode: 0644]
sysdeps/powerpc/powerpc64/multiarch/strncmp.c

diff --git a/sysdeps/powerpc/powerpc64/le/power10/strncmp.S b/sysdeps/powerpc/powerpc64/le/power10/strncmp.S
new file mode 100644 (file)
index 0000000..6e09fcb
--- /dev/null
@@ -0,0 +1,252 @@
+/* Optimized strncmp implementation for PowerPC64/POWER10.
+   Copyright (C) 2025 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* Implements the function
+
+   int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t n [r5])
+
+   The implementation uses unaligned vector accesses to avoid specialized
+   code paths depending on data alignment for the first 32 bytes and uses
+   vectorized loops after that.  */
+
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
+#define COMPARE_16(vreg1,vreg2,offset) \
+       lxv       vreg1+32,offset(r3); \
+       lxv       vreg2+32,offset(r4); \
+       vcmpnezb. v7,vreg1,vreg2;      \
+       bne       cr6,L(different);    \
+       cmpldi    cr7,r5,16;           \
+       ble       cr7,L(ret0);         \
+       addi      r5,r5,-16;
+
+#define COMPARE_32(vreg1,vreg2,offset,label1,label2) \
+       lxvp      vreg1+32,offset(r3);               \
+       lxvp      vreg2+32,offset(r4);               \
+       vcmpnezb. v7,vreg1+1,vreg2+1;                \
+       bne       cr6,L(label1);                     \
+       vcmpnezb. v7,vreg1,vreg2;                    \
+       bne       cr6,L(label2);                     \
+       cmpldi    cr7,r5,32;                         \
+       ble       cr7,L(ret0);                       \
+       addi      r5,r5,-32;
+
+#define TAIL_FIRST_16B(vreg1,vreg2) \
+       vctzlsbb r6,v7;             \
+       cmpld    cr7,r5,r6;         \
+       ble      cr7,L(ret0);       \
+       vextubrx r5,r6,vreg1;       \
+       vextubrx r4,r6,vreg2;       \
+       subf     r3,r4,r5;          \
+       blr;
+
+#define TAIL_SECOND_16B(vreg1,vreg2) \
+       vctzlsbb r6,v7;              \
+       addi     r0,r6,16;           \
+       cmpld    cr7,r5,r0;          \
+       ble      cr7,L(ret0);        \
+       vextubrx r5,r6,vreg1;        \
+       vextubrx r4,r6,vreg2;        \
+       subf     r3,r4,r5;           \
+       blr;
+
+#define CHECK_N_BYTES(reg1,reg2,len_reg) \
+       sldi      r6,len_reg,56;         \
+       lxvl      32+v4,reg1,r6;         \
+       lxvl      32+v5,reg2,r6;         \
+       add       reg1,reg1,len_reg;     \
+       add       reg2,reg2,len_reg;     \
+       vcmpnezb  v7,v4,v5;              \
+       vctzlsbb  r6,v7;                 \
+       cmpld     cr7,r6,len_reg;        \
+       blt       cr7,L(different);      \
+       cmpld     cr7,r5,len_reg;        \
+       ble       cr7,L(ret0);           \
+       sub       r5,r5,len_reg;         \
+
+       .machine  power10
+ENTRY_TOCLESS (STRNCMP, 4)
+       /* Check if size is 0.  */
+       cmpdi    cr0,r5,0
+       beq      cr0,L(ret0)
+       andi.   r7,r3,4095
+       andi.   r8,r4,4095
+       cmpldi  cr0,r7,4096-16
+       cmpldi  cr1,r8,4096-16
+       bgt     cr0,L(crosses)
+       bgt     cr1,L(crosses)
+       COMPARE_16(v4,v5,0)
+       addi    r3,r3,16
+       addi    r4,r4,16
+
+L(crosses):
+       andi.    r7,r3,15
+       subfic   r7,r7,16       /* r7(nalign1) = 16 - (str1 & 15).  */
+       andi.    r9,r4,15
+       subfic   r8,r9,16       /* r8(nalign2) = 16 - (str2 & 15).  */
+       cmpld    cr7,r7,r8
+       beq      cr7,L(same_aligned)
+       blt      cr7,L(nalign1_min)
+
+       /* nalign2 is minimum and s2 pointer is aligned.  */
+       CHECK_N_BYTES(r3,r4,r8)
+       /* Are we on the 64B hunk which crosses a page?  */
+       andi.   r10,r3,63       /* Determine offset into 64B hunk.  */
+       andi.   r8,r3,15        /* The offset into the 16B hunk.  */
+       neg     r7,r3
+       andi.   r9,r7,15        /* Number of bytes after a 16B cross.  */
+       rlwinm. r7,r7,26,0x3F   /* ((r3-4096))>>6&63.  */
+       beq     L(compare_64_pagecross)
+       mtctr   r7
+       b       L(compare_64B_unaligned)
+
+       /* nalign1 is minimum and s1 pointer is aligned.  */
+L(nalign1_min):
+       CHECK_N_BYTES(r3,r4,r7)
+       /* Are we on the 64B hunk which crosses a page?  */
+       andi.   r10,r4,63       /* Determine offset into 64B hunk.  */
+       andi.   r8,r4,15        /* The offset into the 16B hunk.  */
+       neg     r7,r4
+       andi.   r9,r7,15        /* Number of bytes after a 16B cross.  */
+       rlwinm. r7,r7,26,0x3F   /* ((r4-4096))>>6&63.  */
+       beq     L(compare_64_pagecross)
+       mtctr   r7
+
+       .p2align 5
+L(compare_64B_unaligned):
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       COMPARE_16(v4,v5,32)
+       COMPARE_16(v4,v5,48)
+       addi    r3,r3,64
+       addi    r4,r4,64
+       bdnz    L(compare_64B_unaligned)
+
+       /* Carefully cross the page boundary.  Only for the first
+       iteration do we have to get the count of 64B blocks to be checked.
+       From the second iteration onward, the loop counter is always 63.  */
+L(compare_64_pagecross):
+       li      r11, 63
+       mtctr   r11
+       cmpldi  r10,16
+       ble     L(cross_4)
+       cmpldi  r10,32
+       ble     L(cross_3)
+       cmpldi  r10,48
+       ble     L(cross_2)
+L(cross_1):
+       CHECK_N_BYTES(r3,r4,r9)
+       CHECK_N_BYTES(r3,r4,r8)
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       COMPARE_16(v4,v5,32)
+       addi    r3,r3,48
+       addi    r4,r4,48
+       b       L(compare_64B_unaligned)
+L(cross_2):
+       COMPARE_16(v4,v5,0)
+       addi    r3,r3,16
+       addi    r4,r4,16
+       CHECK_N_BYTES(r3,r4,r9)
+       CHECK_N_BYTES(r3,r4,r8)
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       addi    r3,r3,32
+       addi    r4,r4,32
+       b       L(compare_64B_unaligned)
+L(cross_3):
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       addi    r3,r3,32
+       addi    r4,r4,32
+       CHECK_N_BYTES(r3,r4,r9)
+       CHECK_N_BYTES(r3,r4,r8)
+       COMPARE_16(v4,v5,0)
+       addi    r3,r3,16
+       addi    r4,r4,16
+       b       L(compare_64B_unaligned)
+L(cross_4):
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       COMPARE_16(v4,v5,32)
+       addi    r3,r3,48
+       addi    r4,r4,48
+       CHECK_N_BYTES(r3,r4,r9)
+       CHECK_N_BYTES(r3,r4,r8)
+       b       L(compare_64B_unaligned)
+
+L(same_aligned):
+       CHECK_N_BYTES(r3,r4,r7)
+       /* Align s1 to 32B and adjust s2 address.
+          Use lxvp only if both s1 and s2 are 32B aligned.  */
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       COMPARE_16(v4,v5,32)
+       COMPARE_16(v4,v5,48)
+       addi    r3,r3,64
+       addi    r4,r4,64
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       addi    r5,r5,32
+
+       clrldi  r6,r3,59
+       subfic  r7,r6,32
+       add     r3,r3,r7
+       add     r4,r4,r7
+       subf    r5,r7,r5
+       andi.   r7,r4,0x1F
+       beq     cr0,L(32B_aligned_loop)
+
+       .p2align 5
+L(16B_aligned_loop):
+       COMPARE_16(v4,v5,0)
+       COMPARE_16(v4,v5,16)
+       COMPARE_16(v4,v5,32)
+       COMPARE_16(v4,v5,48)
+       addi    r3,r3,64
+       addi    r4,r4,64
+       b       L(16B_aligned_loop)
+
+       /* Calculate and return the difference.  */
+L(different):
+       TAIL_FIRST_16B(v4,v5)
+
+       .p2align 5
+L(32B_aligned_loop):
+       COMPARE_32(v14,v16,0,tail1,tail2)
+       COMPARE_32(v14,v16,32,tail1,tail2)
+       COMPARE_32(v14,v16,64,tail1,tail2)
+       COMPARE_32(v14,v16,96,tail1,tail2)
+       addi    r3,r3,128
+       addi    r4,r4,128
+       b       L(32B_aligned_loop)
+
+L(tail1): TAIL_FIRST_16B(v15,v17)
+L(tail2): TAIL_SECOND_16B(v14,v16)
+
+       .p2align 5
+L(ret0):
+       li      r3,0
+       blr
+
+END(STRNCMP)
+libc_hidden_builtin_def(strncmp)
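
For readers auditing the macros above, here is an illustrative scalar C model (not part of the patch) of what one COMPARE_16 block followed by TAIL_FIRST_16B computes: vcmpnezb flags a byte position where the operands differ or either byte is NUL, vctzlsbb yields the first flagged index, and vextubrx/subf turn that index into the return value.

    #include <stddef.h>

    /* Scalar model of COMPARE_16 + TAIL_FIRST_16B over one 16-byte
       block, assuming 16 readable bytes at both a and b.  Sets *done
       when the caller must stop: a mismatch/NUL was found, or the
       length limit n is reached within this block.  */
    static int
    compare_16_model (const unsigned char *a, const unsigned char *b,
                      size_t n, int *done)
    {
      for (size_t i = 0; i < 16; i++)
        if (a[i] != b[i] || a[i] == '\0' || b[i] == '\0') /* vcmpnezb */
          {
            *done = 1;
            return i < n ? a[i] - b[i] : 0; /* cmpld; vextubrx; subf */
          }
      *done = n <= 16;              /* cmpldi cr7,r5,16; ble L(ret0) */
      return 0;                     /* caller advances by 16, n -= 16 */
    }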
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 818f2879256364d931952196a2f70b7311a0af0a..c9178223a879d0e4ea450d03624016fdc6d5fefb 100644 (file)
@@ -32,7 +32,7 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
 ifneq (,$(filter %le,$(config-machine)))
 sysdep_routines += memcmp-power10 memcpy-power10 memmove-power10 memset-power10 \
                   rawmemchr-power9 rawmemchr-power10 \
-                  strcmp-power9 strcmp-power10 strncmp-power9 \
+                  strcmp-power9 strcmp-power10 strncmp-power9 strncmp-power10 \
                   strcpy-power9 strcat-power10 stpcpy-power9 \
                   strlen-power9 strncpy-power9 stpncpy-power9 strlen-power10
 endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index dde3bec709c26d94968eec02742aa2f13910c293..f2b9cccde3c58a9273647c6efadbb63802211f65 100644 (file)
@@ -164,6 +164,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/powerpc/powerpc64/multiarch/strncmp.c.  */
   IFUNC_IMPL (i, name, strncmp,
 #ifdef __LITTLE_ENDIAN__
+             IFUNC_IMPL_ADD (array, i, strncmp, hwcap2 & PPC_FEATURE2_ARCH_3_1
+                             && hwcap & PPC_FEATURE_HAS_VSX,
+                             __strncmp_power10)
              IFUNC_IMPL_ADD (array, i, strncmp, hwcap2 & PPC_FEATURE2_ARCH_3_00
                              && hwcap & PPC_FEATURE_HAS_ALTIVEC,
                              __strncmp_power9)
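
The hwcap/hwcap2 bits tested here are the same ones user code can query through getauxval. A minimal sketch of the selection check (assuming Linux on ppc64le; the fallback constants are assumptions in case the installed headers predate POWER10):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef PPC_FEATURE2_ARCH_3_1
    # define PPC_FEATURE2_ARCH_3_1 0x00040000  /* POWER10, <bits/hwcap.h> */
    #endif
    #ifndef PPC_FEATURE_HAS_VSX
    # define PPC_FEATURE_HAS_VSX   0x00000080
    #endif

    int
    main (void)
    {
      unsigned long hwcap  = getauxval (AT_HWCAP);
      unsigned long hwcap2 = getauxval (AT_HWCAP2);

      if ((hwcap2 & PPC_FEATURE2_ARCH_3_1) && (hwcap & PPC_FEATURE_HAS_VSX))
        puts ("ifunc would select __strncmp_power10");
      else
        puts ("ifunc would fall back to an older variant");
      return 0;
    }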
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S
new file mode 100644 (file)
index 0000000..bb25bc7
--- /dev/null
@@ -0,0 +1,25 @@
+/* Copyright (C) 2025 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define STRNCMP __strncmp_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/strncmp.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp.c b/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
index 4cfe27fa4571a30540c1da3ef13640a7273344de..0a664a620d6db0d04e22e14fb707ab93b601c1c5 100644 (file)
@@ -29,6 +29,7 @@ extern __typeof (strncmp) __strncmp_ppc attribute_hidden;
 extern __typeof (strncmp) __strncmp_power8 attribute_hidden;
 # ifdef __LITTLE_ENDIAN__
 extern __typeof (strncmp) __strncmp_power9 attribute_hidden;
+extern __typeof (strncmp) __strncmp_power10 attribute_hidden;
 # endif
 # undef strncmp
 
@@ -36,6 +37,9 @@ extern __typeof (strncmp) __strncmp_power9 attribute_hidden;
    ifunc symbol properly.  */
 libc_ifunc_redirected (__redirect_strncmp, strncmp,
 # ifdef __LITTLE_ENDIAN__
+                       (hwcap2 & PPC_FEATURE2_ARCH_3_1
+                        && hwcap & PPC_FEATURE_HAS_VSX)
+                       ? __strncmp_power10 :
                        (hwcap2 & PPC_FEATURE2_ARCH_3_00
                         && hwcap & PPC_FEATURE_HAS_ALTIVEC)
                        ? __strncmp_power9 :
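
Since __strncmp_power10 contains dedicated page-crossing paths (L(crosses), L(compare_64_pagecross)), a quick way to exercise them is to place a string flush against an unmapped guard page so that any overread faults. A minimal POSIX sketch, not part of the patch:

    #include <assert.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main (void)
    {
      long pg = sysconf (_SC_PAGESIZE);
      /* Map two pages and make the second one inaccessible.  */
      char *buf = mmap (NULL, 2 * pg, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert (buf != MAP_FAILED);
      assert (mprotect (buf + pg, pg, PROT_NONE) == 0);

      /* "abc\0" ends exactly at the guard page; a correct strncmp
         must not read past the NUL terminator.  */
      char *s = buf + pg - 4;
      memcpy (s, "abc", 4);
      assert (strncmp (s, "abc", 4) == 0);
      assert (strncmp (s, "abd", 4) < 0);
      assert (strncmp (s, "abc", 100) == 0); /* n beyond the string */

      munmap (buf, 2 * pg);
      return 0;
    }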