]> git.ipfire.org Git - thirdparty/glibc.git/blob - sysdeps/s390/multiarch/wcpcpy-vx.S
Update copyright dates with scripts/update-copyrights.
[thirdparty/glibc.git] / sysdeps / s390 / multiarch / wcpcpy-vx.S
1 /* Vector optimized 32/64 bit S/390 version of wcpcpy.
2 Copyright (C) 2015-2016 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
18
19 #if defined HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc)
20
21 # include "sysdep.h"
22 # include "asm-syntax.h"
23
24 .text
25
26 /* wchar_t * wcpcpy (wchar_t *dest, const wchar_t *src)
27    Copy string src to dest returning a pointer to its end.
28
29 Register usage:
30    -r0=unused in this variant (no border-len check here; listed in the
         byte-string variants it was derived from)
31 -r1=tmp
32 -r2=dest and return value
33 -r3=src
34 -r4=tmp
35 -r5=current_len
36 -v16=part of src
37 -v17=index of zero
38 -v18=part of src
39 */
40 ENTRY(__wcpcpy_vx)     /* wchar_t *__wcpcpy_vx (wchar_t *dest, const wchar_t *src).  */
41 .machine "z13"
42 .machinemode "zarch_nohighgprs"
43
44 vlbb %v16,0(%r3),6 /* Load s until next 4k-byte boundary. */
45 lcbb %r1,0(%r3),6 /* Get bytes to 4k-byte boundary or 16. */
46
47 tmll %r3,3 /* Test if s is 4-byte aligned? */
48 jne .Lfallback /* And use common-code variant if not. */
49
50 vfenezf %v17,%v16,%v16 /* Find element not equal with zero search. */
51 vlgvb %r5,%v17,7 /* Load zero index or 16 if not found. */
52 clrjl %r5,%r1,.Lfound_align /* If found zero within loaded bytes,
53 copy bytes before and return. */
54
55 /* Align s to 16 byte. */
56 risbgn %r4,%r3,60,128+63,0 /* %r4 = bits 60-63 of %r3 'and' 15. */
57 lghi %r5,15 /* current_len = 15. */
58 slr %r5,%r4 /* %r5 = 15 - (s & 15) = highest index to 16byte boundary. */
59
60 vstl %v16,%r5,0(%r2) /* Copy loaded characters - no zero. */
61 ahi %r5,1 /* Start loop at next character. */
62
63 /* Find zero in 16byte aligned loop, unrolled four vectors (64 bytes)
      per iteration.  Each part is only stored after the zero search on it
      proved it contains no terminator. */
64 .Lloop:
65 vl %v16,0(%r5,%r3) /* Load s. */
66 vfenezfs %v17,%v16,%v16 /* Find element not equal with zero search. */
67 je .Lfound_v16_0 /* Jump away if zero was found. */
68 vl %v18,16(%r5,%r3) /* Load next part of s. */
69 vst %v16,0(%r5,%r2) /* Store previous part without zero to dst. */
70 vfenezfs %v17,%v18,%v18 /* Search zero in second part. */
71 je .Lfound_v18_16 /* Jump away if zero was found. */
72 vl %v16,32(%r5,%r3) /* Load third part of s. */
73 vst %v18,16(%r5,%r2) /* Store second part - no zero. */
74 vfenezfs %v17,%v16,%v16 /* Search zero in third part. */
75 je .Lfound_v16_32 /* Jump away if zero was found. */
76 vl %v18,48(%r5,%r3) /* Load fourth part of s. */
77 vst %v16,32(%r5,%r2) /* Store third part - no zero. */
78 vfenezfs %v17,%v18,%v18 /* Search zero in fourth part. */
79 je .Lfound_v18_48 /* Jump away if zero was found. */
80 vst %v18,48(%r5,%r2) /* Store fourth part - no zero. */
81
82 aghi %r5,64 /* current_len += 64 (four vectors consumed). */
83 j .Lloop /* No zero found -> loop. */
84
85 .Lfound_v16_32: /* Zero is in %v16, which was loaded at offset 32. */
86 aghi %r5,32 /* Fold that offset into current_len. */
87 .Lfound_v16_0: /* Zero is in %v16, loaded at offset 0. */
88 la %r3,0(%r5,%r2) /* %r3 = dest + current_len (store target). */
89 vlgvb %r1,%v17,7 /* Load byte index of zero. */
90 aghi %r1,3 /* Also copy remaining bytes of zero. */
91 vstl %v16,%r1,0(%r3) /* Copy characters including zero. */
92 lay %r2,-3(%r1,%r3) /* Return pointer to zero. */
93 br %r14
94
95 .Lfound_v18_48: /* Zero is in %v18, which was loaded at offset 48. */
96 aghi %r5,32 /* Fold the extra 32 bytes into current_len. */
97 .Lfound_v18_16: /* Zero is in %v18, loaded at offset 16. */
98 la %r3,16(%r5,%r2) /* %r3 = dest + current_len + 16 (store target). */
99 vlgvb %r1,%v17,7 /* Load byte index of zero. */
100 aghi %r1,3 /* Also copy remaining bytes of zero. */
101 vstl %v18,%r1,0(%r3) /* Copy characters including zero. */
102 lay %r2,-3(%r1,%r3) /* Return pointer to zero. */
103 br %r14
104
105 .Lfound_align: /* Zero found within the very first (boundary-limited) load. */
106 aghi %r5,3 /* Also copy remaining bytes of zero. */
107 vstl %v16,%r5,0(%r2) /* Copy characters including zero. */
108 lay %r2,-3(%r5,%r2) /* Return pointer to zero. */
109 br %r14
110
111 .Lfallback: /* src not 4-byte aligned: tail-call the common C variant. */
112 jg __wcpcpy_c
113 END(__wcpcpy_vx)
114 #endif /* HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc) */