/* Optimized memchr implementation for POWER8.
   Copyright (C) 2017-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* void *[r3] memchr (const void *s [r3], int c [r4], size_t n [r5])  */
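/* For reference, a minimal unoptimized C sketch of the semantics this
   routine implements ("memchr_ref" is an illustrative name, not part of
   glibc); the assembly below computes the same result while scanning a
   doubleword or a 64-byte block at a time:

     #include <stddef.h>

     static void *memchr_ref (const void *s, int c, size_t n)
     {
       const unsigned char *p = s;
       unsigned char byte = (unsigned char) c;
       while (n-- != 0)
         {
           if (*p == byte)
             return (void *) p;
           p++;
         }
       return NULL;
     }
*/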
# define MEMCHR __memchr

ENTRY_TOCLESS (MEMCHR)
	/* Calculate the last acceptable address and check for possible
	   addition overflow by using saturated math:
	   r7 = r3 + r5
	   r7 |= -(r7 < x)  */
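	/* An illustrative C model of that saturation (hypothetical names,
	   not part of this file); if s + n wraps around, the end address is
	   clamped to the top of the address space so later end-of-range
	   checks cannot wrap:

	     #include <stdint.h>

	     static uintptr_t saturated_end (uintptr_t s, uintptr_t n)
	     {
	       uintptr_t end = s + n;
	       if (end < s)
	         end = UINTPTR_MAX;
	       return end;
	     }
	*/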
	rlwinm r6, r3, 3, 26, 28 /* Calculate padding. */
#ifdef __LITTLE_ENDIAN__
	ld r12, 0(r8) /* Load doubleword from memory. */
	cmpb r3, r12, r4 /* Check for BYTEs in DWORD1. */
	clrldi r6, r7, 61 /* Byte count - 1 in last dword. */
	clrrdi r7, r7, 3 /* Address of last doubleword. */
	cmpldi cr7, r3, 0 /* Does r3 indicate we got a hit? */
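	/* cmpb produces 0xff in every byte of the result whose corresponding
	   source bytes are equal, and 0x00 elsewhere.  A hedged C model of
	   that behaviour ("cmpb_model" is an illustrative name); with r4
	   holding the search byte replicated into all eight byte positions,
	   a non-zero result means the byte occurs in the loaded doubleword:

	     #include <stdint.h>

	     static uint64_t cmpb_model (uint64_t a, uint64_t b)
	     {
	       uint64_t r = 0;
	       for (int i = 0; i < 8; i++)
	         {
	           uint64_t mask = (uint64_t) 0xff << (8 * i);
	           if ((a & mask) == (b & mask))
	             r |= mask;
	         }
	       return r;
	     }
	*/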
	/* Are we now aligned to a quadword boundary? */

	/* Process one doubleword so the address becomes quadword aligned. */

	/* At this point, r8 is 16B aligned. */
	/* Precompute vbpermq constant. */
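	/* The constant prepared here is later used with vbpermq to gather
	   the most significant bit of each byte of a comparison-result
	   vector into a 16-bit mask, one bit per byte.  A rough C model of
	   that gather, ignoring endian-specific bit ordering ("msb_mask16"
	   is an illustrative name):

	     static unsigned int msb_mask16 (const unsigned char v[16])
	     {
	       unsigned int mask = 0;
	       for (int i = 0; i < 16; i++)
	         mask = (mask << 1) | (unsigned int) (v[i] >> 7);
	       return mask;
	     }

	   Since vcmpequb sets matching bytes to 0xff and all others to 0x00,
	   the mask has one bit set per matching byte.  */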
	/* Are we 64-byte aligned?  If so, jump to the vectorized loop.
	   Note: aligning to a 64-byte boundary will necessarily slow down
	   performance for strings around 64 bytes in length due to the extra
	   comparisons required to check alignment for the vectorized loop.
	   This is a tradeoff we are willing to take in order to speed up the
	   calculation for larger strings.  */
	beq cr0, L(preloop_64B)
	/* The 64B loop requires a 64-byte aligned address, so keep checking
	   16B blocks until that alignment is reached (see the C sketch after
	   these three blocks).  */
	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)

	beq cr0, L(preloop_64B)
	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)

	beq cr0, L(preloop_64B)
	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)
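	/* Sketch of this pre-alignment phase in C (illustrative names; the
	   surrounding code has already guaranteed the range is long enough
	   for these reads):

	     #include <stdint.h>
	     #include <stddef.h>

	     static const void *scan_until_64b_aligned (const unsigned char **pp,
	                                                unsigned char byte)
	     {
	       const unsigned char *p = *pp;
	       while (((uintptr_t) p & 63) != 0)
	         {
	           for (int i = 0; i < 16; i++)
	             if (p[i] == byte)
	               return p + i;
	           p += 16;
	         }
	       *pp = p;
	       return NULL;
	     }
	*/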
	/* At this point it should be 64B aligned.
	   Prepare for the 64B loop. */
	cmpldi r5, 64 /* Check if r5 < 64. */
	srdi r9, r6, 6 /* Number of loop iterations. */
	mtctr r9 /* Set up the counter. */
	li r11, 16 /* Load required offsets. */
	/* Handle r5 > 64.  Loop over the bytes in strides of 64B. */

	lvx v2, 0, r8 /* Load 4 quadwords. */

	vor v11, v11, v12 /* Merge the comparison results into one VR. */
	vcmpequb. v11, v0, v11
	addi r8, r8, 64 /* Adjust address for the next iteration. */
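	/* The shape of this loop in rough C terms (illustrative names):
	   each iteration examines four 16-byte blocks, ORs their per-byte
	   match masks together, and tests the merged result once, so the hot
	   loop takes a single branch per 64 bytes.

	     #include <stddef.h>

	     static const unsigned char *scan_64b_blocks (const unsigned char *p,
	                                                  size_t iterations,
	                                                  unsigned char byte)
	     {
	       while (iterations-- != 0)
	         {
	           int found = 0;
	           for (int i = 0; i < 64; i++)
	             found |= (p[i] == byte);
	           if (found)
	             return p;
	           p += 64;
	         }
	       return NULL;
	     }

	   A non-NULL return is the start of the 64B block containing a
	   match; the code below then pinpoints the exact byte.  */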
	/* Handle the remainder of the 64B loop, or the case where fewer than
	   64 bytes remain. */
	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)

	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)

	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)

	vcmpequb. v11, v0, v6
	bnl cr6, L(found_16B)
	/* Found a match in the 64B loop. */

	/* Permute the first bit of each byte into bits 48-63. */

	/* Shift each component into its correct position for merging. */
#ifdef __LITTLE_ENDIAN__

	/* Merge the results and move to a GPR. */

#ifdef __LITTLE_ENDIAN__
	cntlzd r6, r5 /* Count leading zeros before the match. */
	add r3, r8, r6 /* Compute the address of the matching byte. */
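	/* Illustrative C model of the mask-to-offset step ("first_match_offset"
	   is a hypothetical name, using GCC's __builtin_clzll): the four
	   16-bit vbpermq results are merged into one 64-bit value with one
	   bit per byte of the 64-byte block, and the position of the first
	   set bit is the byte offset of the match.  Shown for the big-endian
	   bit ordering (cntlzd); the little-endian path uses a
	   count-trailing-zeros idiom instead.  The mask is known to be
	   non-zero here because a match was detected.

	     #include <stdint.h>

	     static unsigned int first_match_offset (uint64_t mask)
	     {
	       return (unsigned int) __builtin_clzll (mask);
	     }
	*/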
	/* Found a match in the last 16 bytes. */

	/* Permute the first bit of each byte into bits 48-63. */
	/* Shift each component into its correct position for merging. */
#ifdef __LITTLE_ENDIAN__
	cntlzd r6, r7 /* Count leading zeros before the match. */
	add r3, r8, r6 /* Compute the address of the matching byte. */
	/* r3 has the output of the cmpb instruction, that is, it contains
	   0xff in the same position as BYTE in the original
	   doubleword from the string.  Use that to calculate the pointer.
	   We need to make sure BYTE is *before* the end of the range. */
#ifdef __LITTLE_ENDIAN__
	popcntd r0, r0 /* Count trailing zeros (popcount of the bits below
			  the first match bit). */
	cntlzd r0, r3 /* Count leading zeros before the match. */
	cmpld r8, r7 /* Are we on the last dword? */
	srdi r0, r0, 3 /* Convert leading/trailing zeros to bytes. */
	cmpld cr7, r0, r6 /* If on the last dword, check byte offset. */
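	/* Putting this together in rough C terms (illustrative names; shown
	   for the big-endian case with GCC's __builtin_clzll, and the mask
	   must be non-zero): the first 0xff byte of the cmpb result gives
	   the offset within the doubleword, and the match only counts if it
	   lies before the end of the range.

	     #include <stdint.h>
	     #include <stddef.h>

	     static void *match_from_mask (const unsigned char *dword_addr,
	                                   uint64_t cmpb_mask,
	                                   const unsigned char *last_dword,
	                                   unsigned int last_byte_offset)
	     {
	       unsigned int off = (unsigned int) __builtin_clzll (cmpb_mask) >> 3;
	       if (dword_addr == last_dword && off > last_byte_offset)
	         return NULL;
	       return (void *) (dword_addr + off);
	     }
	*/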
	/* Deals with size <= 32. */

	ld r12, 0(r8) /* Load doubleword from memory. */
	cmpb r3, r12, r4 /* Check for BYTE in DWORD1. */

	clrldi r6, r7, 61 /* Byte count - 1 in last dword. */
	clrrdi r7, r7, 3 /* Address of last doubleword. */
	cmpld r8, r7 /* Are we done already? */

	bne cr6, L(done) /* Found something. */
	beqlr /* Hit end of string (length). */
weak_alias (__memchr, memchr)
libc_hidden_builtin_def (memchr)