#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Poly1305 hash for C64x+.
#
# October 2015
#
# Performance is [incredible for a 32-bit processor] 1.82 cycles per
# processed byte. Comparison to compiler-generated code is problematic,
# because results were observed to vary from 2.1 to 7.6 cpb depending
# on the compiler's ability to inline small functions. The compiler also
# disables interrupts for some reason, thus making interrupt response
# time dependent on input length. This module, on the other hand, is
# free from such a limitation.
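#
# A quick sketch of the arithmetic implemented below: Poly1305 absorbs
# each 16-byte block m as
#
#	h = (h + m + padbit*2^128) * r  mod  2^130-5
#
# where h is kept in four 32-bit limbs h0-h3 plus a small top limb h4,
# and r in four clamped 32-bit words r0-r3. Since 2^128 is congruent
# to 5/4 modulo 2^130-5, product terms of weight 2^128 and above are
# folded back using s_i = 5*r_i/4 = r_i + (r_i>>2), which is exact
# because the clamped r1-r3 are multiples of 4.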

$output=pop;
open STDOUT,">$output" or die "can't open $output: $!";

($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
($D0,$D1,$D2,$D3)=         ("A9","B9","A11","B11");
($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
($THREE,$R0b,$S2a)=("B7","B5","A5");
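# $H0-$H4 hold the hash limbs, $D0-$D3 double as input words and as
# ADDU carry-outs, and $R0-$R3/$S1-$S3 hold r and the precomputed s
# values; the "a"/"b" suffixed names are copies kept in the opposite
# register file to avoid cross-path stalls.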

$code.=<<___;
	.text

	.if	.ASSEMBLER_VERSION<7000000
	.asg	0,__TI_EABI__
	.endif
	.if	__TI_EABI__
	.asg	poly1305_init,_poly1305_init
	.asg	poly1305_blocks,_poly1305_blocks
	.asg	poly1305_emit,_poly1305_emit
	.endif

	.asg	B3,RA
	.asg	A15,FP
	.asg	B15,SP

	.if	.LITTLE_ENDIAN
	.asg	MV,SWAP2
	.asg	MV.L,SWAP4
	.endif

	.global	_poly1305_init
_poly1305_init:
	.asmfunc
	LDNDW	*${INPB}[0],B17:B16	; load key material
	LDNDW	*${INPB}[1],A17:A16

||	ZERO	B9:B8
||	MVK	-1,B0
	STDW	B9:B8,*${CTXA}[0]	; initialize h1:h0
||	SHRU	B0,4,B0			; 0x0fffffff
||	MVK	-4,B1
	STDW	B9:B8,*${CTXA}[1]	; initialize h3:h2
||	AND	B0,B1,B1		; 0x0ffffffc
	STW	B8,*${CTXA}[4]		; initialize h4

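; The key is specified little-endian; on big-endian silicon each of
; the four 32-bit key words is byte-reversed below (SWAP2 exchanges
; half-words, SWAP4 the bytes within each half-word).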
	.if	.BIG_ENDIAN
	SWAP2	B16,B17
||	SWAP2	B17,B16
	SWAP2	A16,A17
||	SWAP2	A17,A16
	SWAP4	B16,B16
||	SWAP4	A16,A16
	SWAP4	B17,B17
||	SWAP4	A17,A17
	.endif

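; Clamp the key as the Poly1305 spec requires: r0 &= 0x0fffffff and
; r1..r3 &= 0x0ffffffc, then precompute s_i = r_i + (r_i>>2). Note
; that s3 is computed twice, into B19 and B17, so that the s3:s1 and
; s3:s2 pairs can each be stored with a single STDW.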
	AND	B16,B0,B20		; r0 = key[0] & 0x0fffffff
||	AND	B17,B1,B22		; r1 = key[1] & 0x0ffffffc
||	EXTU	B17,4,6,B16		; r1>>2
	AND	A16,B1,B21		; r2 = key[2] & 0x0ffffffc
||	AND	A17,B1,A23		; r3 = key[3] & 0x0ffffffc
||	BNOP	RA
	SHRU	B21,2,B18
||	ADD	B22,B16,B16		; s1 = r1 + r1>>2

	STDW	B21:B20,*${CTXA}[3]	; save r2:r0
||	ADD	B21,B18,B18		; s2 = r2 + r2>>2
||	SHRU	A23,2,B17
||	MV	A23,B23
	STDW	B23:B22,*${CTXA}[4]	; save r3:r1
||	ADD	B23,B17,B19		; s3 = r3 + r3>>2
||	ADD	B23,B17,B17		; s3 = r3 + r3>>2
	STDW	B17:B16,*${CTXA}[5]	; save s3:s1
	STDW	B19:B18,*${CTXA}[6]	; save s3:s2
||	ZERO	A4			; return 0
	.endasmfunc

	.global	_poly1305_blocks
	.align	32
_poly1305_blocks:
	.asmfunc	stack_usage(40)
	SHRU	$LEN,4,A2		; A2 is loop counter, number of blocks
  [!A2]	BNOP	RA			; no data
|| [A2]	STW	FP,*SP--(40)		; save frame pointer and alloca(40)
|| [A2]	MV	SP,FP
   [A2]	STDW	B13:B12,*SP[4]		; ABI says so
|| [A2]	MV	$CTXA,$S3b		; borrow $S3b
   [A2]	STDW	B11:B10,*SP[3]
|| [A2]	STDW	A13:A12,*FP[-3]
   [A2]	STDW	A11:A10,*FP[-4]

|| [A2]	LDDW	*${S3b}[0],B25:B24	; load h1:h0
   [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
   [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	LDDW	*${CTXA}[1],B29:B28	; load h3:h2, B28 is h2
	LDNW	*${INPB}[-2],$D2	; load inp[2]
	LDNW	*${INPB}[-1],$D3	; load inp[3]

	LDDW	*${CTXA}[3],$R2:$R0	; load r2:r0
||	LDDW	*${S3b}[4],$R3:$R1	; load r3:r1
||	SWAP2	$D0,$D0

	LDDW	*${CTXA}[5],$S3:$S1	; load s3:s1
||	LDDW	*${S3b}[6],$S3b:$S2	; load s3:s2
||	SWAP4	$D0,$D0
||	SWAP2	$D1,$D1

	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
||	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
||	SWAP4	$D1,$D1
	ADDU	$D1,B25,$D1:$H1		; h1+=inp[1]
||	MVK	3,$THREE
||	SWAP2	$D2,$D2
	LDW	*${CTXA}[4],$H4		; load h4
||	SWAP4	$D2,$D2
||	MV	B29,B30			; B30 is h3
	MV	$R0,$R0b

loop?:
	MPY32U	$H0,$R0,A17:A16
||	MPY32U	B27,$R1,B17:B16		; MPY32U	$H0,$R1,B17:B16
||	ADDU	$D0,$D1:$H1,B25:B24	; ADDU	$D0,$D1:$H1,$D1:$H1
||	ADDU	$D2,B28,$D2:$H2		; h2+=inp[2]
||	SWAP2	$D3,$D3
	MPY32U	$H0,$R2,A19:A18
||	MPY32U	B27,$R3,B19:B18		; MPY32U	$H0,$R3,B19:B18
||	ADD	$D0,$H1,A24		; A-copy of B24
||	SWAP4	$D3,$D3
|| [A2]	SUB	A2,1,A2			; decrement loop counter

	MPY32U	A24,$S3,A21:A20		; MPY32U	$H1,$S3,A21:A20
||	MPY32U	B24,$R0b,B21:B20	; MPY32U	$H1,$R0,B21:B20
||	ADDU	B25,$D2:$H2,$D2:$H2	; ADDU	$D1,$D2:$H2,$D2:$H2
||	ADDU	$D3,B30,$D3:$H3		; h3+=inp[3]
||	ADD	B25,$H2,B25		; B-copy of $H2
	MPY32U	A24,$R1,A23:A22		; MPY32U	$H1,$R1,A23:A22
||	MPY32U	B24,$R2,B23:B22		; MPY32U	$H1,$R2,B23:B22

	MPY32U	$H2,$S2,A25:A24
||	MPY32U	B25,$S3b,B25:B24	; MPY32U	$H2,$S3,B25:B24
||	ADDU	$D2,$D3:$H3,$D3:$H3
||	ADD	$PADBIT,$H4,$H4		; h4+=padbit
	MPY32U	$H2,$R0,A27:A26
||	MPY32U	$H2,$R1,B27:B26
||	ADD	$D3,$H4,$H4
||	MV	$S2,$S2a

	MPY32U	$H3,$S1,A29:A28
||	MPY32U	$H3,$S2,B29:B28
||	ADD	A21,A17,A21		; start accumulating "d3:d0"
||	ADD	B21,B17,B21
||	ADDU	A20,A16,A17:A16
||	ADDU	B20,B16,B17:B16
|| [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
	MPY32U	$H3,$S3,A31:A30
||	MPY32U	$H3,$R0b,B31:B30
||	ADD	A23,A19,A23
||	ADD	B23,B19,B23
||	ADDU	A22,A18,A19:A18
||	ADDU	B22,B18,B19:B18
|| [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	MPY32	$H4,$S1,B20
||	MPY32	$H4,$S2a,A20
||	ADD	A25,A21,A21
||	ADD	B25,B21,B21
||	ADDU	A24,A17:A16,A17:A16
||	ADDU	B24,B17:B16,B17:B16
|| [A2]	LDNW	*${INPB}[-2],$D2	; load inp[2]
	MPY32	$H4,$S3b,B22
||	ADD	A27,A23,A23
||	ADD	B27,B23,B23
||	ADDU	A26,A19:A18,A19:A18
||	ADDU	B26,B19:B18,B19:B18
|| [A2]	LDNW	*${INPB}[-1],$D3	; load inp[3]

	MPY32	$H4,$R0b,$H4
||	ADD	A29,A21,A21		; final hi("d0")
||	ADD	B29,B21,B21		; final hi("d1")
||	ADDU	A28,A17:A16,A17:A16	; final lo("d0")
||	ADDU	B28,B17:B16,B17:B16
	ADD	A31,A23,A23		; final hi("d2")
||	ADD	B31,B23,B23		; final hi("d3")
||	ADDU	A30,A19:A18,A19:A18
||	ADDU	B30,B19:B18,B19:B18
	ADDU	B20,B17:B16,B17:B16	; final lo("d1")
||	ADDU	A20,A19:A18,A19:A18	; final lo("d2")
	ADDU	B22,B19:B18,B19:B18	; final lo("d3")

||	ADD	A17,A21,A21		; "flatten" "d3:d0"
	MV	A19,B29			; move to avoid cross-path stalls
	ADDU	A21,B17:B16,B27:B26	; B26 is h1
	ADD	B21,B27,B27
||	DMV	B29,A18,B29:B28		; move to avoid cross-path stalls
	ADDU	B27,B29:B28,B29:B28	; B28 is h2
|| [A2]	SWAP2	$D0,$D0
	ADD	A23,B29,B29
|| [A2]	SWAP4	$D0,$D0
	ADDU	B29,B19:B18,B31:B30	; B30 is h3
	ADD	B23,B31,B31
||	MV	A16,B24			; B24 is h0
|| [A2]	SWAP2	$D1,$D1
	ADD	B31,$H4,$H4
|| [A2]	SWAP4	$D1,$D1

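; Last reduction step: 2^130 is congruent to 5 mod 2^130-5, so fold
; 5*(h4>>2) back into h0 (ADDAW scales one operand by 4, giving
; B16+4*B16 = 5*B16) and keep only the low two bits of h4.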
	SHRU	$H4,2,B16		; last reduction step
||	AND	$H4,$THREE,$H4
	ADDAW	B16,B16,B16		; 5*(h4>>2)
|| [A2]	BNOP	loop?

	ADDU	B24,B16,B25:B24		; B24 is h0
|| [A2]	SWAP2	$D2,$D2
	ADDU	B26,B25,B27:B26		; B26 is h1
|| [A2]	SWAP4	$D2,$D2
	ADDU	B28,B27,B29:B28		; B28 is h2
|| [A2]	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
|| [A2]	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
	ADDU	B30,B29,B31:B30		; B30 is h3
	ADD	B31,$H4,$H4
|| [A2]	ADDU	$D1,B26,$D1:$H1		; h1+=inp[1]
;;===== branch to loop? is taken here

	LDDW	*FP[-4],A11:A10		; ABI says so
	LDDW	*FP[-3],A13:A12
||	LDDW	*SP[3],B11:B10
	LDDW	*SP[4],B13:B12
||	MV	B26,B25
||	BNOP	RA
	LDW	*++SP(40),FP		; restore frame pointer
||	MV	B30,B29
	STDW	B25:B24,*${CTXA}[0]	; save h1:h0
	STDW	B29:B28,*${CTXA}[1]	; save h3:h2
	STW	$H4,*${CTXA}[4]		; save h4
	NOP	1
	.endasmfunc
___
{
my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);
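# poly1305_emit(ctx,mac,nonce) recycles poly1305_blocks' argument
# registers: B4 now points at the MAC, and the nonce pointer in A6 is
# mirrored into B6 so nonce words can be loaded on both datapaths.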

$code.=<<___;
	.global	_poly1305_emit
	.align	32
_poly1305_emit:
	.asmfunc
	LDDW	*${CTXA}[0],A17:A16	; load h1:h0
	LDDW	*${CTXA}[1],A19:A18	; load h3:h2
	LDW	*${CTXA}[4],A20		; load h4
	MV	$NONCEA,$NONCEB

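; Compare h to the modulus: compute h+5 and, if the sum overflows into
; bit 130, select its low 128 bits instead of h; the MAC is then
; (h + nonce) mod 2^128, written out little-endian.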
	MVK	5,A22			; compare to modulus
	ADDU	A16,A22,A23:A22
||	LDW	*${NONCEA}[0],A8
||	LDW	*${NONCEB}[1],B8
	ADDU	A17,A23,A25:A24
||	LDW	*${NONCEA}[2],A9
||	LDW	*${NONCEB}[3],B9
	ADDU	A18,A25,A27:A26
	ADDU	A19,A27,A29:A28
	ADD	A20,A29,A29

	SHRU	A29,2,A2		; check for overflow in 130-th bit

   [A2]	MV	A22,A16			; select
|| [A2]	MV	A24,A17
   [A2]	MV	A26,A18
|| [A2]	MV	A28,A19

||	ADDU	A8,A16,A23:A22		; accumulate nonce
	ADDU	B8,A17,A25:A24
||	SWAP2	A22,A22
	ADDU	A23,A25:A24,A25:A24
	ADDU	A9,A18,A27:A26
||	SWAP2	A24,A24
	ADDU	A25,A27:A26,A27:A26
||	ADD	B9,A19,A28
	ADD	A27,A28,A28
||	SWAP2	A26,A26

	.if	.BIG_ENDIAN
	SWAP2	A28,A28
||	SWAP4	A22,A22
||	SWAP4	A24,B24
	SWAP4	A26,A26
	SWAP4	A28,A28
||	MV	B24,A24
	.endif

	BNOP	RA,1
	STNW	A22,*${MAC}[0]		; write the result
	STNW	A24,*${MAC}[1]
	STNW	A26,*${MAC}[2]
	STNW	A28,*${MAC}[3]
	.endasmfunc
___
}
$code.=<<___;
	.sect	.const
	.cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
	.align	4
___

print $code;