#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Poly1305 hash for C64x+.
#
# October 2015
#
# Performance is [incredible for a 32-bit processor] 1.82 cycles per
# processed byte. Comparison to compiler-generated code is problematic,
# because results were observed to vary from 2.1 to 7.6 cpb depending
# on the compiler's ability to inline small functions. The compiler also
# disables interrupts for some reason, making interrupt response time
# dependent on input length. This module, on the other hand, is free
# from such a limitation.

$output=pop;
open STDOUT,">$output" or die "can't open $output: $!";

($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
($D0,$D1,$D2,$D3)=("A9","B9","A11","B11");
($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
($THREE,$R0b,$S2a)=("B7","B5","A5");

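# Reference model (an added sketch, not part of the CRYPTOGAMS build and
# never called by this generator): plain-Perl Poly1305 over Math::BigInt,
# showing the arithmetic the assembly below implements with 32-bit limbs.
# Assumes a Math::BigInt recent enough to provide from_bytes/to_bytes.
sub poly1305_ref {
	use Math::BigInt;
	my ($key,$msg) = @_;			# 32-byte key, message string
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	my $r = Math::BigInt->from_bytes(scalar reverse substr($key,0,16));
	my $s = Math::BigInt->from_bytes(scalar reverse substr($key,16,16));
	$r->band(Math::BigInt->from_hex("0ffffffc0ffffffc0ffffffc0fffffff"));
	my $h = Math::BigInt->bzero();
	for (my $off=0; $off<length($msg); $off+=16) {
		my $blk = substr($msg,$off,16);
		my $n = Math::BigInt->from_bytes(scalar reverse $blk);
		$n->badd(Math::BigInt->bone()->blsft(8*length($blk)));	# pad bit
		$h->badd($n)->bmul($r)->bmod($p);	# h = (h+n)*r mod p
	}
	$h->badd($s)->bmod(Math::BigInt->new(2)->bpow(128));	# add nonce
	my $tag = $h->to_bytes();
	return scalar reverse(("\0" x (16-length($tag))) . $tag);
}
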
$code.=<<___;
	.text

	.if	.ASSEMBLER_VERSION<7000000
	.asg	0,__TI_EABI__
	.endif
	.if	__TI_EABI__
	.asg	poly1305_init,_poly1305_init
	.asg	poly1305_blocks,_poly1305_blocks
	.asg	poly1305_emit,_poly1305_emit
	.endif

	.asg	B3,RA
	.asg	A15,FP
	.asg	B15,SP

	.if	.LITTLE_ENDIAN
	.asg	MV,SWAP2
	.asg	MV.L,SWAP4
	.endif

	.global	_poly1305_init
_poly1305_init:
	.asmfunc
	LDNDW	*${INPB}[0],B17:B16	; load key material
	LDNDW	*${INPB}[1],A17:A16

||	ZERO	B9:B8
||	MVK	-1,B0
	STDW	B9:B8,*${CTXA}[0]	; initialize h1:h0
||	SHRU	B0,4,B0			; 0x0fffffff
||	MVK	-4,B1
	STDW	B9:B8,*${CTXA}[1]	; initialize h3:h2
||	AND	B0,B1,B1		; 0x0ffffffc
	STW	B8,*${CTXA}[4]		; initialize h4

	.if	.BIG_ENDIAN
	SWAP2	B16,B17
||	SWAP2	B17,B16
	SWAP2	A16,A17
||	SWAP2	A17,A16
	SWAP4	B16,B16
||	SWAP4	A16,A16
	SWAP4	B17,B17
||	SWAP4	A17,A17
	.endif

	AND	B16,B0,B20		; r0 = key[0] & 0x0fffffff
||	AND	B17,B1,B22		; r1 = key[1] & 0x0ffffffc
||	EXTU	B17,4,6,B16		; r1>>2
	AND	A16,B1,B21		; r2 = key[2] & 0x0ffffffc
||	AND	A17,B1,A23		; r3 = key[3] & 0x0ffffffc
||	BNOP	RA
	SHRU	B21,2,B18
||	ADD	B22,B16,B16		; s1 = r1 + r1>>2

	STDW	B21:B20,*${CTXA}[3]	; save r2:r0
||	ADD	B21,B18,B18		; s2 = r2 + r2>>2
||	SHRU	A23,2,B17
||	MV	A23,B23
	STDW	B23:B22,*${CTXA}[4]	; save r3:r1
||	ADD	B23,B17,B19		; s3 = r3 + r3>>2 (copy for s3:s2)
||	ADD	B23,B17,B17		; s3 = r3 + r3>>2 (copy for s3:s1)
	STDW	B17:B16,*${CTXA}[5]	; save s3:s1
	STDW	B19:B18,*${CTXA}[6]	; save s3:s2
||	ZERO	A4			; return 0
	.endasmfunc
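
;; The s_i values stored above equal 5*(r_i/4): clamping forces r1, r2,
;; r3 to be multiples of 4, so a product term h_j*r_i weighted 2^128
;; equals (r_i/4)*h_j*2^130, and 2^130 == 5 (mod 2^130-5) reduces it to
;; s_i*h_j with s_i = r_i + (r_i>>2).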

	.global	_poly1305_blocks
	.align	32
_poly1305_blocks:
	.asmfunc	stack_usage(40)
	SHRU	$LEN,4,A2		; A2 is loop counter, number of blocks
  [!A2]	BNOP	RA			; no data
|| [A2]	STW	FP,*SP--(40)		; save frame pointer and alloca(40)
|| [A2]	MV	SP,FP
   [A2]	STDW	B13:B12,*SP[4]		; ABI says so
|| [A2]	MV	$CTXA,$S3b		; borrow $S3b
   [A2]	STDW	B11:B10,*SP[3]
|| [A2]	STDW	A13:A12,*FP[-3]
   [A2]	STDW	A11:A10,*FP[-4]

|| [A2]	LDDW	*${S3b}[0],B25:B24	; load h1:h0
   [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
   [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	LDDW	*${CTXA}[1],B29:B28	; load h3:h2, B28 is h2
	LDNW	*${INPB}[-2],$D2	; load inp[2]
	LDNW	*${INPB}[-1],$D3	; load inp[3]

	LDDW	*${CTXA}[3],$R2:$R0	; load r2:r0
||	LDDW	*${S3b}[4],$R3:$R1	; load r3:r1
||	SWAP2	$D0,$D0

	LDDW	*${CTXA}[5],$S3:$S1	; load s3:s1
||	LDDW	*${S3b}[6],$S3b:$S2	; load s3:s2
||	SWAP4	$D0,$D0
||	SWAP2	$D1,$D1

	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
||	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
||	SWAP4	$D1,$D1
	ADDU	$D1,B25,$D1:$H1		; h1+=inp[1]
||	MVK	3,$THREE
||	SWAP2	$D2,$D2
	LDW	*${CTXA}[4],$H4		; load h4
||	SWAP4	$D2,$D2
||	MV	B29,B30			; B30 is h3
	MV	$R0,$R0b

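;; Each iteration computes d = (h+inp)*r as a 4x4 schoolbook multiply
;; over 32-bit limbs, issued pairwise on the A- and B-side multipliers;
;; limb products whose weight reaches 2^128 use the s_i aliases instead
;; of r_i, and the next block is loaded and byte-swapped in the same
;; packets to hide latency.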
loop?:
	MPY32U	$H0,$R0,A17:A16
||	MPY32U	B27,$R1,B17:B16		; MPY32U	$H0,$R1,B17:B16
||	ADDU	$D0,$D1:$H1,B25:B24	; ADDU	$D0,$D1:$H1,$D1:$H1
||	ADDU	$D2,B28,$D2:$H2		; h2+=inp[2]
||	SWAP2	$D3,$D3
	MPY32U	$H0,$R2,A19:A18
||	MPY32U	B27,$R3,B19:B18		; MPY32U	$H0,$R3,B19:B18
||	ADD	$D0,$H1,A24		; A-copy of B24
||	SWAP4	$D3,$D3
|| [A2]	SUB	A2,1,A2			; decrement loop counter

	MPY32U	A24,$S3,A21:A20		; MPY32U	$H1,$S3,A21:A20
||	MPY32U	B24,$R0b,B21:B20	; MPY32U	$H1,$R0,B21:B20
||	ADDU	B25,$D2:$H2,$D2:$H2	; ADDU	$D1,$D2:$H2,$D2:$H2
||	ADDU	$D3,B30,$D3:$H3		; h3+=inp[3]
||	ADD	B25,$H2,B25		; B-copy of $H2
	MPY32U	A24,$R1,A23:A22		; MPY32U	$H1,$R1,A23:A22
||	MPY32U	B24,$R2,B23:B22		; MPY32U	$H1,$R2,B23:B22

	MPY32U	$H2,$S2,A25:A24
||	MPY32U	B25,$S3b,B25:B24	; MPY32U	$H2,$S3,B25:B24
||	ADDU	$D2,$D3:$H3,$D3:$H3
||	ADD	$PADBIT,$H4,$H4		; h4+=padbit
	MPY32U	$H2,$R0,A27:A26
||	MPY32U	$H2,$R1,B27:B26
||	ADD	$D3,$H4,$H4
||	MV	$S2,$S2a

	MPY32U	$H3,$S1,A29:A28
||	MPY32U	$H3,$S2,B29:B28
||	ADD	A21,A17,A21		; start accumulating "d3:d0"
||	ADD	B21,B17,B21
||	ADDU	A20,A16,A17:A16
||	ADDU	B20,B16,B17:B16
|| [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
	MPY32U	$H3,$S3,A31:A30
||	MPY32U	$H3,$R0b,B31:B30
||	ADD	A23,A19,A23
||	ADD	B23,B19,B23
||	ADDU	A22,A18,A19:A18
||	ADDU	B22,B18,B19:B18
|| [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	MPY32	$H4,$S1,B20
||	MPY32	$H4,$S2a,A20
||	ADD	A25,A21,A21
||	ADD	B25,B21,B21
||	ADDU	A24,A17:A16,A17:A16
||	ADDU	B24,B17:B16,B17:B16
|| [A2]	LDNW	*${INPB}[-2],$D2	; load inp[2]
	MPY32	$H4,$S3b,B22
||	ADD	A27,A23,A23
||	ADD	B27,B23,B23
||	ADDU	A26,A19:A18,A19:A18
||	ADDU	B26,B19:B18,B19:B18
|| [A2]	LDNW	*${INPB}[-1],$D3	; load inp[3]

	MPY32	$H4,$R0b,$H4
||	ADD	A29,A21,A21		; final hi("d0")
||	ADD	B29,B21,B21		; final hi("d1")
||	ADDU	A28,A17:A16,A17:A16	; final lo("d0")
||	ADDU	B28,B17:B16,B17:B16
	ADD	A31,A23,A23		; final hi("d2")
||	ADD	B31,B23,B23		; final hi("d3")
||	ADDU	A30,A19:A18,A19:A18
||	ADDU	B30,B19:B18,B19:B18
	ADDU	B20,B17:B16,B17:B16	; final lo("d1")
||	ADDU	A20,A19:A18,A19:A18	; final lo("d2")
	ADDU	B22,B19:B18,B19:B18	; final lo("d3")

||	ADD	A17,A21,A21		; "flatten" "d3:d0"
	MV	A19,B29			; move to avoid cross-path stalls
	ADDU	A21,B17:B16,B27:B26	; B26 is h1
	ADD	B21,B27,B27
||	DMV	B29,A18,B29:B28		; move to avoid cross-path stalls
	ADDU	B27,B29:B28,B29:B28	; B28 is h2
|| [A2]	SWAP2	$D0,$D0
	ADD	A23,B29,B29
|| [A2]	SWAP4	$D0,$D0
	ADDU	B29,B19:B18,B31:B30	; B30 is h3
	ADD	B23,B31,B31
||	MV	A16,B24			; B24 is h0
|| [A2]	SWAP2	$D1,$D1
	ADD	B31,$H4,$H4
|| [A2]	SWAP4	$D1,$D1

	SHRU	$H4,2,B16		; last reduction step
||	AND	$H4,$THREE,$H4
	ADDAW	B16,B16,B16		; 5*(h4>>2)
|| [A2]	BNOP	loop?

	ADDU	B24,B16,B25:B24		; B24 is h0
|| [A2]	SWAP2	$D2,$D2
	ADDU	B26,B25,B27:B26		; B26 is h1
|| [A2]	SWAP4	$D2,$D2
	ADDU	B28,B27,B29:B28		; B28 is h2
|| [A2]	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
|| [A2]	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
	ADDU	B30,B29,B31:B30		; B30 is h3
	ADD	B31,$H4,$H4
|| [A2]	ADDU	$D1,B26,$D1:$H1		; h1+=inp[1]
;;===== branch to loop? is taken here

	LDDW	*FP[-4],A11:A10		; ABI says so
	LDDW	*FP[-3],A13:A12
||	LDDW	*SP[3],B11:B10
	LDDW	*SP[4],B13:B12
||	MV	B26,B25
||	BNOP	RA
	LDW	*++SP(40),FP		; restore frame pointer
||	MV	B30,B29
	STDW	B25:B24,*${CTXA}[0]	; save h1:h0
	STDW	B29:B28,*${CTXA}[1]	; save h3:h2
	STW	$H4,*${CTXA}[4]		; save h4
	NOP	1
	.endasmfunc
___
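
# A sketch of the "last reduction step" in the loop above (hypothetical
# helper, not used by the generator; assumes a 64-bit perl): bits 130 and
# up of the accumulator are worth 5 at bit 0 modulo 2^130-5, so they are
# folded back and h is left only partially reduced until poly1305_emit.
sub lazy_reduce_ref {
	my ($h4,@h) = @_;		# top limb (bits 128+), then h0..h3
	my $c = 5*($h4>>2);		# 2^130 == 5 (mod 2^130-5)
	$h4 &= 3;			# keep bits 128..129
	for my $i (0..3) {		# propagate the fold through h0..h3
		$c += $h[$i];
		$h[$i] = $c & 0xffffffff;
		$c >>= 32;
	}
	return ($h4+$c, @h);		# partially reduced, not yet mod p
}
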
{
my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);

$code.=<<___;
	.global	_poly1305_emit
	.align	32
_poly1305_emit:
	.asmfunc
	LDDW	*${CTXA}[0],A17:A16	; load h1:h0
	LDDW	*${CTXA}[1],A19:A18	; load h3:h2
	LDW	*${CTXA}[4],A20		; load h4
	MV	$NONCEA,$NONCEB

	MVK	5,A22			; compare to modulus
	ADDU	A16,A22,A23:A22
||	LDW	*${NONCEA}[0],A8
||	LDW	*${NONCEB}[1],B8
	ADDU	A17,A23,A25:A24
||	LDW	*${NONCEA}[2],A9
||	LDW	*${NONCEB}[3],B9
	ADDU	A18,A25,A27:A26
	ADDU	A19,A27,A29:A28
	ADD	A20,A29,A29

	SHRU	A29,2,A2		; check for overflow in 130-th bit

   [A2]	MV	A22,A16			; select
|| [A2]	MV	A24,A17
   [A2]	MV	A26,A18
|| [A2]	MV	A28,A19

||	ADDU	A8,A16,A23:A22		; accumulate nonce
	ADDU	B8,A17,A25:A24
||	SWAP2	A22,A22
	ADDU	A23,A25:A24,A25:A24
	ADDU	A9,A18,A27:A26
||	SWAP2	A24,A24
	ADDU	A25,A27:A26,A27:A26
||	ADD	B9,A19,A28
	ADD	A27,A28,A28
||	SWAP2	A26,A26

	.if	.BIG_ENDIAN
	SWAP2	A28,A28
||	SWAP4	A22,A22
||	SWAP4	A24,B24
	SWAP4	A26,A26
	SWAP4	A28,A28
||	MV	B24,A24
	.endif

	BNOP	RA,1
	STNW	A22,*${MAC}[0]		; write the result
	STNW	A24,*${MAC}[1]
	STNW	A26,*${MAC}[2]
	STNW	A28,*${MAC}[3]
	.endasmfunc
___
}
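
# A sketch of the final reduction in poly1305_emit (hypothetical helper
# over Math::BigInt, not used by the generator): add 5 and keep the sum
# only if it carries into bit 130; that is a branch-free way to compute
# h mod 2^130-5 before the nonce is folded in mod 2^128.
sub emit_ref {
	use Math::BigInt;
	my ($h,$nonce) = @_;		# Math::BigInt accumulator and nonce
	my $g = $h->copy()->badd(5);	# h + 5 == h - (2^130-5) + 2^130
	$h = $g if $g->copy()->brsft(130)->bcmp(0);	# select on overflow
	return $h->copy()->badd($nonce)->bmod(Math::BigInt->new(2)->bpow(128));
}
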
$code.=<<___;
	.sect	.const
	.cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
	.align	4
___

print $code;