1 /* Optimized strcmp implementation for PowerPC64/POWER9.
2 Copyright (C) 2016-2018 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
21 # define STRCMP strcmp
24 /* Implements the function
26 int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
28 The implementation uses unaligned doubleword access for first 32 bytes
29 as in POWER8 patch and uses vectorised loops after that. */
31 /* TODO: Change this to actual instructions when minimum binutils is upgraded
32 to 2.27. Macros are defined below for these newer instructions in order
33 to maintain compatibility. */
/* NOTE(review): the macros below hand-encode POWER9 (ISA 3.0) vector
   instructions as raw .long opcodes so that older binutils can assemble
   this file.  VCTZLSBB = Vector Count Trailing Zero Least-Significant
   Bits Byte.  The continuation lines of VEXTUBRX and VCMPNEZB are not
   visible in this excerpt -- confirm the full field encodings against
   the POWER ISA 3.0 manual.  */
34 #define VCTZLSBB(r,v) .long (0x10010602 | ((r)<<(32-11)) | ((v)<<(32-21)))
36 #define VEXTUBRX(t,a,b) .long (0x1000070d \
41 #define VCMPNEZB(t,a,b) .long (0x10000507 \
46 /* Get 16 bytes for unaligned case.
47 reg1: Vector to hold next 16 bytes.
48 reg2: Address to read from.
49 reg3: Permute control vector. */
50 #define GET16BYTES(reg1, reg2, reg3) \
52 vperm v8, v2, reg1, reg3; \
53 vcmpequb. v8, v0, v8; \
62 vperm reg1, v9, reg1, reg3;
64 /* TODO: change this to .machine power9 when the minimum required binutils
68 ENTRY_TOCLESS (STRCMP, 4)
71 /* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
74 (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
76 with PAGE_SIZE being 4096 and ITER_SIZE being 16. */
/* cr7/cr5 hold the page-cross tests for s1 and s2 respectively;
   presumably r7 and r9 carry the page offsets computed just above --
   the computing lines are not visible in this excerpt, so confirm
   against the full source.  */
80 cmpldi cr7, r7, 4096-16
81 bgt cr7, L(pagecross_check)
82 cmpldi cr5, r9, 4096-16
83 bgt cr5, L(pagecross_check)
85 /* For short strings up to 16 bytes, load both s1 and s2 using
86 unaligned dwords and compare. */
92 bne cr0, L(different_nocmpb)
99 bne cr0, L(different_nocmpb)
105 /* Now it has checked for first 16 bytes. */
108 lvsr v6, 0, r4 /* Compute mask. */
114 lvsr v10, 0, r7 /* Compute mask. */
116 /* Both s1 and s2 are unaligned. */
117 GET16BYTES(v4, r7, v10)
118 GET16BYTES(v5, r4, v6)
123 /* Align s1 to qw and adjust s2 address. */
133 /* There are 2 loops depending on the input alignment.
134 Each loop gets 16 bytes from s1 and s2 and compares.
135 Loop until a mismatch or null occurs. */
/* Unaligned-s2 loop, unrolled four times: each iteration fetches the
   next 16 bytes of s2 via the permute helper and branches out on any
   byte difference (cr6 set by a vector compare on lines not shown
   here).  */
138 GET16BYTES(v5, r4, v6)
142 bne cr6, L(different)
145 GET16BYTES(v5, r4, v6)
149 bne cr6, L(different)
152 GET16BYTES(v5, r4, v6)
156 bne cr6, L(different)
159 GET16BYTES(v5, r4, v6)
/* Aligned-s2 loop: same unrolled compare/branch pattern, but the
   permute fix-up is unnecessary; the loads are on lines not visible
   in this excerpt.  */
173 bne cr6, L(different)
180 bne cr6, L(different)
187 bne cr6, L(different)
196 /* Calculate and return the difference. */
/* Keep only the low-order byte of r10: rldicl with shift 0 and mask
   begin 56 clears bits 0..55 (IBM numbering), i.e. zero-extends the
   byte in bits 56..63.  */
213 rldicl r10, r10, 0, 56
224 bge cr7, L(pagecross)
227 /* If unaligned 16 bytes reads across a 4K page boundary, it uses
228 a byte-by-byte comparison until the page alignment for s1
237 /* Loads a byte from s1 and s2, compare if *s1 is equal to *s2
238 and if *s1 is '\0'. */
245 bne cr7, L(pagecross_ne)
246 beq cr5, L(pagecross_nullfound)
/* Decrement CTR and keep looping while bytes remain before the page
   boundary.  */
247 bdnz L(pagecross_loop)
254 L(pagecross_retdiff):
260 L(pagecross_nullfound):
/* A null terminator was found: compute and return the signed byte
   difference via the common exit path.  */
262 b L(pagecross_retdiff)
264 libc_hidden_builtin_def (strcmp)