# Source: git.ipfire.org Git mirror — thirdparty/openssl.git, crypto/md5/asm/md5-x86_64.pl
# md5/asm/md5-[586|x86_64].pl: +15% on Atom.
1 #!/usr/bin/perl -w
2 #
3 # MD5 optimized for AMD64.
4 #
5 # Author: Marc Bevand <bevand_m (at) epita.fr>
6 # Licence: I hereby disclaim the copyright on this code and place it
7 # in the public domain.
8 #
9
10 use strict;
11
12 my $code;
13
# round1_step() emits the assembly for one MD5 round-1 step:
#	dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
# where F(x,y,z) = (x & y) | (~x & z), computed here without a "not"
# as z ^ (x & (y ^ z)).
# Register protocol carried from step to step:
#	%r10d = X[k_next]	(message word prefetched for the NEXT step)
#	%r11d = z'		(copy of z for the next step)
# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
sub round1_step
{
    # $pos    -- -1 first step of the round / 0 middle / 1 last
    # $dst    -- destination state register of this step
    # $x,$y,$z -- the other three state registers, in MD5 order
    # $k_next -- index of the message word to prefetch for the next step
    # $T_i    -- additive constant T[i] = floor(2^32*abs(sin(i)))
    # $s      -- left-rotate amount
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: preload X[0] and seed z' (= initial %edx).
    $code .= "	mov	0*4(%rsi), %r10d	/* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= "	mov	%edx, %r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor	$y, %r11d		/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	and	$x, %r11d		/* x & ... */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	xor	$z, %r11d		/* z ^ ... */
	add	%r11d, $dst		/* dst += ... */
	rol	\$$s, $dst		/* dst <<< s */
	mov	$y, %r11d		/* (NEXT STEP) z' = $y */
	add	$x, $dst		/* dst += x */
EOF
}
36
# round2_step() emits the assembly for one MD5 round-2 step:
#	dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
# where G(x,y,z) = (x & z) | (y & ~z), built from TWO copies of z:
# one is complemented and ANDed with y, the other ANDed with x.
# Register protocol carried from step to step:
#	%r10d = X[k_next]	(message word prefetched for the NEXT step)
#	%r11d = z'		(copy of z, consumed via "not")
#	%r12d = z'		(second copy of z, consumed via "and x")
# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
sub round2_step
{
    # Arguments as in round1_step().
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: seed both copies of z.  X[1], the first word this
    # round uses, was already prefetched by the final round-1 step.
    $code .= "	mov	%edx, %r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= "	mov	%edx, %r12d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	not	%r11d			/* not z */
	and	$x, %r12d		/* x & z */
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	and	$y, %r11d		/* y & (not z) */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	or	%r11d, %r12d		/* (y & (not z)) | (x & z) */
	mov	$y, %r11d		/* (NEXT STEP) z' = $y */
	add	%r12d, $dst		/* dst += ... */
	mov	$y, %r12d		/* (NEXT STEP) z' = $y */
	rol	\$$s, $dst		/* dst <<< s */
	add	$x, $dst		/* dst += x */
EOF
}
62
# round3_step() emits the assembly for one MD5 round-3 step:
#	dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
# where H(x,y,z) = x ^ y ^ z.
# Register protocol carried from step to step:
#	%r10d = X[k_next]	(message word prefetched for the NEXT step)
#	%r11d = y'		(copy of y for the next step)
# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
# $round3_alter is closure state that flips on every call so that the
# "rol"/"mov" pair below alternates order between consecutive steps --
# a scheduling tweak for in-order cores (the "+15% on Atom" change).
{ my $round3_alter=0;
sub round3_step
{
    # Arguments as in round1_step().
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: seed y' (= %ecx).  X[5], the first word this
    # round uses, was prefetched by the final round-2 step.
    $code .= "	mov	%ecx, %r11d		/* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	xor	$z, %r11d		/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	xor	$x, %r11d		/* x ^ ... */
	add	%r11d, $dst		/* dst += ... */
EOF
    # Emit the rotate and the next-step "mov" in alternating order.
    $code .= <<EOF if ($round3_alter);
	rol	\$$s, $dst		/* dst <<< s */
	mov	$x, %r11d		/* (NEXT STEP) y' = $x */
EOF
    $code .= <<EOF if (!$round3_alter);
	mov	$x, %r11d		/* (NEXT STEP) y' = $x */
	rol	\$$s, $dst		/* dst <<< s */
EOF
    $code .= <<EOF;
	add	$x, $dst		/* dst += x */
EOF
    $round3_alter^=1;
}
}
94
# round4_step() emits the assembly for one MD5 round-4 step:
#	dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
# where I(x,y,z) = y ^ (x | ~z).
# Register protocol carried from step to step:
#	%r10d = X[k_next]	(message word prefetched for the NEXT step)
#	%r11d = ~z'		(complement of z for the next step; built as
#				 "mov $0xffffffff; xor z" because x86 "not"
#				 would destroy the value still needed as y)
# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
sub round4_step
{
    # Arguments as in round1_step().
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: seed ~z' (= ~%edx).  X[0], the first word this
    # round uses, was prefetched by the final round-3 step.
    $code .= "	mov	\$0xffffffff, %r11d\n" if ($pos == -1);
    $code .= "	xor	%edx, %r11d		/* (NEXT STEP) not z' = not %edx*/\n"
        if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	or	$x, %r11d		/* x | ... */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	xor	$y, %r11d		/* y ^ ... */
	add	%r11d, $dst		/* dst += ... */
	mov	\$0xffffffff, %r11d
	rol	\$$s, $dst		/* dst <<< s */
	xor	$y, %r11d		/* (NEXT STEP) not z' = not $y */
	add	$x, $dst		/* dst += x */
EOF
}
118
# Command-line handling, shared convention of the perlasm scripts:
#   md5-x86_64.pl <flavour> [<output>]   or   md5-x86_64.pl <output>
# $flavour selects the assembler dialect (elf, macosx, mingw64, nasm, ...);
# a single argument containing a dot is taken to be the output file name.
my $flavour = shift;
my $output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

# Win64 builds need the SEH handler/unwind tables appended below.
my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the perlasm translator next to this script or in ../../perlasm.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# $flavour and/or $output may legitimately be undef here.
no warnings qw(uninitialized);
# Pipe everything we print through the translator.  The open used to be
# unchecked, so a missing or non-executable translator silently produced
# empty output; quoting $xlate also keeps paths containing spaces working.
open OUT,"| \"$^X\" \"$xlate\" $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;
133
# Emit the function prologue and per-call setup.  Per the SysV AMD64 ABI:
# rdi = MD5_CTX*, rsi = input data, rdx = number of 64-byte blocks.
# The five callee-saved registers pushed here match the five restored in
# the epilogue heredoc below; r8d/r9d/r14d/r15d carry the previous
# A/B/C/D values across each block iteration.
$code .= <<EOF;
.text
.align 16

.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
	push	%rbp
	push	%rbx
	push	%r12
	push	%r14
	push	%r15
.Lprologue:

	# rdi = arg #1 (ctx, MD5_CTX pointer)
	# rsi = arg #2 (ptr, data pointer)
	# rdx = arg #3 (nbr, number of 16-word blocks to process)
	mov	%rdi, %rbp		# rbp = ctx
	shl	\$6, %rdx		# rdx = nbr in bytes
	lea	(%rsi,%rdx), %rdi	# rdi = end
	mov	0*4(%rbp), %eax		# eax = ctx->A
	mov	1*4(%rbp), %ebx		# ebx = ctx->B
	mov	2*4(%rbp), %ecx		# ecx = ctx->C
	mov	3*4(%rbp), %edx		# edx = ctx->D
	# end is 'rdi'
	# ptr is 'rsi'
	# A is 'eax'
	# B is 'ebx'
	# C is 'ecx'
	# D is 'edx'

	cmp	%rdi, %rsi		# cmp end with ptr
	je	.Lend			# jmp if ptr == end

	# BEGIN of loop over 16-word blocks
.Lloop:	# save old values of A, B, C, D
	mov	%eax, %r8d
	mov	%ebx, %r9d
	mov	%ecx, %r14d
	mov	%edx, %r15d
EOF
# The 64 MD5 steps.  Message-word order, shift amounts and the additive
# constants T[i] are exactly those of RFC 1321; each call also names the
# word to prefetch for the following step.

# Round 1: F(), words X[0..15] in order, shifts 7/12/17/22.
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '1','0x49b40821','22');

# Round 2: G(), words X[(1+5k) mod 16], shifts 5/9/14/20.
round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '5','0x8d2a4c8a','20');

# Round 3: H(), words X[(5+3k) mod 16], shifts 4/11/16/23.
round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

# Round 4: I(), words X[7k mod 16], shifts 6/10/15/21.
round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# Emit the end of the block loop, the state write-back and the function
# epilogue.  The five "mov (%rsp)..." loads plus "add $40,%rsp" undo the
# five pushes from the prologue (rbp,rbx,r12,r14,r15 = 40 bytes); the
# .Lepilogue label bounds the region covered by the Win64 SEH handler.
$code .= <<EOF;
	# add old values of A, B, C, D
	add	%r8d, %eax
	add	%r9d, %ebx
	add	%r14d, %ecx
	add	%r15d, %edx

	# loop control
	add	\$64, %rsi		# ptr += 64
	cmp	%rdi, %rsi		# cmp end with ptr
	jb	.Lloop			# jmp if ptr < end
	# END of loop over 16-word blocks

.Lend:
	mov	%eax, 0*4(%rbp)		# ctx->A = A
	mov	%ebx, 1*4(%rbp)		# ctx->B = B
	mov	%ecx, 2*4(%rbp)		# ctx->C = C
	mov	%edx, 3*4(%rbp)		# ctx->D = D

	mov	(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r12
	mov	24(%rsp),%rbx
	mov	32(%rsp),%rbp
	add	\$40,%rsp
.Lepilogue:
	ret
.size md5_block_asm_data_order,.-md5_block_asm_data_order
EOF
271
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
# Win64 only: emit an SEH handler plus .pdata/.xdata unwind tables.
# If an exception hits strictly between .Lprologue and .Lepilogue, the
# handler recovers the five callee-saved registers pushed by the
# prologue (rbp,rbx,r12,r14,r15 at 40..8 below rsp+40) and rewrites the
# caller's CONTEXT so stack unwinding can continue; outside that window
# the register frame is already intact and only Rsp/Rsi/Rdi are fixed up.
my $rec="%rcx";
my $frame="%rdx";
my $context="%r8";
my $disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lin_prologue

	lea	40(%rax),%rax

	mov	-8(%rax),%rbp
	mov	-16(%rax),%rbx
	mov	-24(%rax),%r12
	mov	-32(%rax),%r14
	mov	-40(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_md5_block_asm_data_order
	.rva	.LSEH_end_md5_block_asm_data_order
	.rva	.LSEH_info_md5_block_asm_data_order

.section	.xdata
.align	8
.LSEH_info_md5_block_asm_data_order:
	.byte	9,0,0,0
	.rva	se_handler
___
}
375
# Emit the generated assembly into the translator pipe (STDOUT was
# re-aliased to OUT above).
print $code;

# close() flushes buffered output and reaps the translator child; a
# failure here means the assembly file is truncated or the translator
# died, so it must be fatal rather than silently ignored.
close STDOUT or die "error closing STDOUT: $!";