/* Libgcc Target specific implementation.
   Copyright (C) 2012-2020 Free Software Foundation, Inc.
   Contributed by KPIT Cummins Infosystems Limited.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifdef L_mulsi3
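/* The routine below forms a 32-bit product from 16-bit partial
   products (muluw for the full low product, mulw for the truncated
   cross terms).  A rough C-level sketch, with illustrative variable
   names rather than the actual register assignment:

     #include <stdint.h>

     uint32_t
     mulsi3_sketch (uint32_t a, uint32_t b)
     {
       uint16_t al = (uint16_t) a, ah = (uint16_t) (a >> 16);
       uint16_t bl = (uint16_t) b, bh = (uint16_t) (b >> 16);

       uint32_t res = (uint32_t) al * bl;       // full low x low product
       // Only the low 16 bits of each cross product can reach the
       // upper half of a truncated 32-bit result.
       res += (uint32_t) (uint16_t) ((uint32_t) al * bh) << 16;
       res += (uint32_t) (uint16_t) ((uint32_t) ah * bl) << 16;
       return res;
     }  */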
        .text
        .align 4
        .globl ___mulsi3
___mulsi3:
        movw r4,r0
        movw r2,r1
        /* Extended multiplication between the 2 lower words */
        muluw r1,(r1,r0)

        /* Multiply the lower word of each parameter */
        mulw r2,r5

        /* With the higher word of the other */
        mulw r3,r4

        /* Add products to the higher part of the final result */
        addw r4,r1
        addw r5,r1
        jump (ra)
#endif

#ifdef L_divdi3
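/* ___divdi3 reduces both operands to their magnitudes, divides them
   with ___udivmoddi3 and then fixes up the sign of the quotient.  A
   C-level sketch of that flow; the three-operand interface of
   ___udivmoddi3 (third operand selecting remainder vs. quotient) is
   inferred from the callers in this file:

     // interface inferred from this file, not a published prototype
     extern unsigned long long __udivmoddi3 (unsigned long long num,
                                             unsigned long long den,
                                             int modwanted);

     long long
     divdi3_sketch (long long num, long long den)
     {
       int neg = 0;
       unsigned long long unum, uden, ures;

       if (num < 0) { unum = -(unsigned long long) num; neg ^= 1; }
       else         { unum = (unsigned long long) num; }
       if (den < 0) { uden = -(unsigned long long) den; neg ^= 1; }
       else         { uden = (unsigned long long) den; }

       ures = __udivmoddi3 (unum, uden, 0);     // 0 selects the quotient
       return neg ? -(long long) ures : (long long) ures;
     }  */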
        .text
        .align 4
        .globl ___divdi3

___divdi3:
        push $4, r7, ra

        /* Param #1 Long Long low bit first */
        loadd 12(sp), (r1, r0)
        loadd 16(sp), (r3, r2)

        /* Param #2 Long Long low bit first */
        loadd 20(sp), (r5, r4)
        loadd 24(sp), (r7, r6)

        /* Set neg to 0 */
        movw $0, r10

        subd $16, (sp)

        /* Compare if param1 is greater than 0 */
        cmpw $0, r3
        ble L4

        /* Invert param1 and neg */
        movd $-1, (r9, r8)      /* Temp set to FFFFFFFF */
        xord (r9, r8), (r1, r0) /* Xor low bits of param 1 with temp */
        xord (r9, r8), (r3, r2) /* Xor high bits of param 1 with temp */
        addd $1, (r1, r0)       /* Add 1 to low bits of param 1 */
        xorw $1, r10            /* Invert neg */
        bcc L4                  /* If no carry occurred go to L4 */
        addd $1, (r3, r2)       /* Add 1 to high bits of param 1 */

L4:     stord (r1, r0), 0(sp)
        stord (r3, r2), 4(sp)

        /* Compare if param2 is greater than 0 */
        cmpw $0, r7
        ble L5

        /* Invert param2 and neg */
        movd $-1, (r9, r8)      /* Temp set to FFFFFFFF */
        xord (r9, r8), (r5, r4) /* Xor low bits of param 2 with temp */
        xord (r9, r8), (r7, r6) /* Xor high bits of param 2 with temp */
        addd $1, (r5, r4)       /* Add 1 to low bits of param 2 */
        xorw $1, r10            /* Invert neg */
        bcc L5                  /* If no carry occurred go to L5 */
        addd $1, (r7, r6)       /* Add 1 to high bits of param 2 */

L5:     stord (r5, r4), 8(sp)
        stord (r7, r6), 12(sp)
        movw $0, r2

        /* Call udivmoddi3 */
#ifdef __PIC__
        loadd ___udivmoddi3@cGOT(r12), (r1,r0)
        jal (r1,r0)
#else
        bal (ra), ___udivmoddi3
#endif

        /* If (neg) */
        addd $16, (sp)
        cmpw $0, r10            /* Compare 0 with neg */
        beq Lexit__

        /* Neg = -Neg */
        xord (r9, r8), (r1, r0) /* Xor low bits of ures with temp */
        xord (r9, r8), (r3, r2) /* Xor high bits of ures with temp */
        addd $1, (r1, r0)       /* Add 1 to low bits of ures */
        bcc Lexit__
        addd $1, (r3, r2)       /* Add 1 to high bit of ures */

Lexit__:
# ifdef __ID_SHARED_LIB__
        pop $2, r12
# endif
        popret $4, r7, ra
#endif

#ifdef L_lshrdi3
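/* ___lshrdi3 performs a logical right shift of a 64-bit value held in
   two register pairs; the code below first negates the count so it
   can be handed to the CR16 lshd instruction, then splits the work
   into a "shift by moving" case and a "spill the overflow" case.  A
   C-level sketch for counts in the usual 0..63 range:

     #include <stdint.h>

     uint64_t
     lshrdi3_sketch (uint64_t u, unsigned int b)
     {
       uint32_t lo = (uint32_t) u, hi = (uint32_t) (u >> 32);

       if (b >= 32)
         {                      // low half comes entirely from the high half
           lo = hi >> (b - 32);
           hi = 0;
         }
       else if (b > 0)
         {                      // bits falling out of hi spill into lo
           lo = (lo >> b) | (hi << (32 - b));
           hi >>= b;
         }
       // b == 0 leaves the value unchanged
       return ((uint64_t) hi << 32) | lo;
     }  */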
        .text
        .align 4
        .globl ___lshrdi3

___lshrdi3:
        push $3, r7

        /* Load parameters from stack in this order */
        movw r2, r6             /* Number of shifts */
        loadd 6(sp), (r1, r0)   /* Low bits */
        loadd 10(sp), (r3, r2)  /* High bits */

        xorw $-1, r6            /* Invert number of shifts */
        addw $1, r6             /* Add 1 to complete the negation */

        movw r6, r7             /* Copy number of shifts */

        tbit $15, r6            /* Test if number is negative */
        bfs L2                  /* If negative jump to L2 */

        movd (r1, r0), (r9, r8) /* Copy low bits */

        subw $32, r7            /* Calc how many bits will overflow */
        /* Shift the temp low bit to the right to see the overflowing bits */
        lshd r7, (r9, r8)

        cmpw $32, r6            /* If number of shifts is higher than 31 */
        blt L1                  /* Shift by moving */

        lshd r6, (r3, r2)       /* Shift high bits */
        lshd r6, (r1, r0)       /* Shift low bits */
        addd (r9, r8), (r3, r2) /* Add overflow to the high bits */
        popret $3, r7           /* Return */

L1:     movd $0, (r1, r0)       /* Reset low bits */
        movd (r9, r8), (r3, r2) /* Move the overflow from the low bits */
        popret $3, r7           /* Return */

L2:     movd (r3, r2), (r9, r8) /* Copy high bits */

        addw $32, r7            /* Calc how many bits will overflow */
        /* Shift the temp low bit to the left to see the overflowing bits */
        lshd r7, (r9, r8)

        cmpw $-32, r6           /* If number of shifts is lower than -31 */
        bgt L3                  /* Shift by moving */

        lshd r6, (r1, r0)       /* Shift low bits */
        lshd r6, (r3, r2)       /* Shift high bits */
        addd (r9, r8), (r1, r0) /* Add overflow to the low bits */
        popret $3, r7           /* Return */

L3:     movd $0, (r3, r2)       /* Reset the high bits */
        movd (r9, r8), (r1, r0) /* Move the overflow from the high bits */
        popret $3, r7           /* Return */
#endif

#ifdef L_moddi3
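/* ___moddi3 mirrors ___divdi3, but requests the remainder from
   ___udivmoddi3 and gives the result the sign of the dividend only,
   as C requires.  A C-level sketch (same inferred ___udivmoddi3
   interface as above):

     // interface inferred from this file, not a published prototype
     extern unsigned long long __udivmoddi3 (unsigned long long num,
                                             unsigned long long den,
                                             int modwanted);

     long long
     moddi3_sketch (long long num, long long den)
     {
       int neg = 0;
       unsigned long long unum, uden, ures;

       if (num < 0) { unum = -(unsigned long long) num; neg = 1; }
       else         { unum = (unsigned long long) num; }
       uden = den < 0 ? -(unsigned long long) den : (unsigned long long) den;

       ures = __udivmoddi3 (unum, uden, 1);     // 1 selects the remainder
       return neg ? -(long long) ures : (long long) ures;
     }  */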
        .text
        .align 4
        .globl ___moddi3

___moddi3:
        push $4, r7, ra

        /* Param #1 Long Long low bit first */
        loadd 12(sp), (r1, r0)
        loadd 16(sp), (r3, r2)

        /* Param #2 Long Long low bit first */
        loadd 20(sp), (r5, r4)
        loadd 24(sp), (r7, r6)

        subd $18, (sp)

        /* Set neg to 0 */
        storw $0, 16(sp)

        movd $-1, (r9, r8)      /* Temp set to FFFFFFFF */

        /* Compare if param1 is greater than 0 */
        cmpw $0, r3
        ble L4

        /* Invert param1 and neg */
        xord (r9, r8), (r1, r0) /* Xor low bits of param 1 with temp */
        xord (r9, r8), (r3, r2) /* Xor high bits of param 1 with temp */
        addd $1, (r1, r0)       /* Add 1 to low bits of param 1 */
        storw $1, 16(sp)
        bcc L4                  /* If no carry occurred go to L4 */
        addd $1, (r3, r2)       /* Add 1 to high bits of param 1 */

L4:     stord (r1, r0), 0(sp)
        stord (r3, r2), 4(sp)

        /* Compare if param2 is greater than 0 */
        cmpw $0, r7
        ble L5

        /* Invert param2 and neg */
        xord (r9, r8), (r5, r4) /* Xor low bits of param 2 with temp */
        xord (r9, r8), (r7, r6) /* Xor high bits of param 2 with temp */
        addd $1, (r5, r4)       /* Add 1 to low bits of param 2 */
        bcc L5                  /* If no carry occurred go to L5 */
        addd $1, (r7, r6)       /* Add 1 to high bits of param 2 */

L5:     stord (r5, r4), 8(sp)
        stord (r7, r6), 12(sp)
        movw $1, r2

        /* Call udivmoddi3 */
#ifdef __PIC__
        loadd ___udivmoddi3@cGOT(r12), (r1,r0)
        jal (r1,r0)
#else
        bal (ra), ___udivmoddi3
#endif

        /* If (neg) */
        loadw 16(sp), r10       /* Load neg from stack */
        addd $18, (sp)
        cmpw $0, r10            /* Compare 0 with neg */
        beq Lexit__

        /* Neg = -Neg */
        xord (r9, r8), (r1, r0) /* Xor low bits of ures with temp */
        xord (r9, r8), (r3, r2) /* Xor high bits of ures with temp */
        addd $1, (r1, r0)       /* Add 1 to low bits of ures */
        bcc Lexit__
        addd $1, (r3, r2)       /* Add 1 to high bit of ures */
Lexit__:
# ifdef __ID_SHARED_LIB__
        pop $2, r12
# endif
        popret $4, r7, ra
#endif

#ifdef L_muldi3
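/* ___muldi3 reduces both operands to their magnitudes, builds the
   64-bit product from 16-bit hardware multiplies (muluw/macuw) and
   restores the sign at the end.  A C-level sketch using 32-bit halves
   instead of the 16-bit pieces, to show which partial products survive
   truncation to 64 bits:

     #include <stdint.h>

     int64_t
     muldi3_sketch (int64_t a, int64_t b)
     {
       int neg = 0;
       uint64_t ua, ub;

       if (a < 0) { ua = -(uint64_t) a; neg ^= 1; } else { ua = (uint64_t) a; }
       if (b < 0) { ub = -(uint64_t) b; neg ^= 1; } else { ub = (uint64_t) b; }

       uint32_t al = (uint32_t) ua, ah = (uint32_t) (ua >> 32);
       uint32_t bl = (uint32_t) ub, bh = (uint32_t) (ub >> 32);

       uint64_t res = (uint64_t) al * bl;               // full low x low product
       res += (uint64_t) (al * bh + ah * bl) << 32;     // cross terms, low 32 bits only

       return (int64_t) (neg ? 0 - res : res);
     }  */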
        .text
        .align 4
        .globl ___muldi3

___muldi3:
        push $2, r13
        push $7, r7

        /* Param #1 Long Long low bit first */
        loadd 18(sp), (r1, r0)
        loadd 22(sp), (r3, r2)

        /* Param #2 Long Long low bit first */
        loadd 26(sp), (r5, r4)
        loadd 30(sp), (r7, r6)

        /* Clear r13, r12 */
        movd $0, (r12)
        movd $0, (r13)

        /* Set neg */
        movw $0, r10

        /* Compare if param1 is greater than 0 */
        cmpw $0, r3
        ble L1

        /* Invert param1 and neg */
        movd $-1, (r9, r8)      /* Temp set to FFFFFFFF */
        xord (r9, r8), (r1, r0) /* Xor low bits of param 1 with temp */
        xord (r9, r8), (r3, r2) /* Xor high bits of param 1 with temp */
        addd $1, (r1, r0)       /* Add 1 to low bits of param 1 */
        xorw $1, r10            /* Invert neg */
        bcc L1                  /* If no carry occurred go to L1 */
        addd $1, (r3, r2)       /* Add 1 to high bits of param 1 */

L1:     /* Compare if param2 is greater than 0 */
        cmpw $0, r7
        ble L2

        /* Invert param2 and neg */
        movd $-1, (r9, r8)      /* Temp set to FFFFFFFF */
        xord (r9, r8), (r5, r4) /* Xor low bits of param 2 with temp */
        xord (r9, r8), (r7, r6) /* Xor high bits of param 2 with temp */
        addd $1, (r5, r4)       /* Add 1 to low bits of param 2 */
        xorw $1, r10            /* Invert neg */
        bcc L2                  /* If no carry occurred go to L2 */
        addd $1, (r7, r6)       /* Add 1 to high bits of param 2 */

L2:     storw r10, 18(sp)       /* Store neg to stack so we can use r10 */

        /* B*D */
        /* Bl*Dl */
        macuw r0, r4, (r12)     /* Multiply r0 and r4 and add to r12 */

        /* Bh*Dl */
        movd $0, (r9, r8)       /* Clear r9, r8 */
        macuw r1, r4, (r9, r8)  /* Multiply Bh*Dl and add result to (r9, r8) */
        movw r9, r10            /* Shift left: r9 to r10 */
        lshd $16, (r9, r8)      /* Shift left: r8 to r9 */
        movw $0, r11            /* Clear r11 */
        addd (r9, r8), (r12)    /* Add (r9, r8) to r12 */
        bcc L3                  /* If no carry occurred go to L3 */
        addd $1, (r13)          /* If carry occurred add 1 to r13 */

L3:     addd (r11, r10), (r13)  /* Add (r11, r10) to r13 */

        /* Bl*Dh */
        movd $0, (r9, r8)       /* Clear (r9, r8) */
        macuw r0, r5, (r9, r8)  /* Multiply r0 and r5 and store in (r9, r8) */
        movw r9, r10            /* Shift left: r9 to r10 */
        lshd $16, (r9, r8)      /* Shift left: r8 to r9 */
        addd (r9, r8), (r12)    /* Add (r9, r8) to r12 */
        bcc L4                  /* If no carry occurred go to L4 */
        addd $1, (r13)          /* If carry occurred add 1 to r13 */

L4:     addd (r11, r10), (r13)  /* Add (r11, r10) to r13 */

        /* Bh*Dh */
        movd $0, (r9, r8)       /* Clear (r9, r8) */
        macuw r1, r5, (r9, r8)  /* Multiply r1 and r5 and add to (r9, r8) */
        addd (r9, r8), (r13)    /* Add (r9, r8) to result */

        /* A*D */
        /* Al*Dl */
        movd $0, (r11, r10)     /* Clear (r11, r10) */
        macuw r2, r4, (r11, r10) /* Multiply r2 and r4 and add to (r11, r10) */

        addd (r13), (r11, r10)  /* Add r13 to (r11, r10) */

        /* Al*Dh */
        movd $0, (r9, r8)       /* Clear (r9, r8) */
        macuw r2, r5, (r9, r8)  /* Multiply r2 and r5 and add to (r9, r8) */
        addw r8, r11            /* Add r8 to r11 */

        /* Ah*Dl */
        muluw r3, (r5, r4)      /* Multiply r3 and r4 and store in (r5, r4) */
        addw r4, r11            /* Add r4 to r11 */

        /* B*C */
        /* Bl*Cl */
        movd $0, (r9, r8)       /* Clear (r9, r8) */
        macuw r0, r6, (r9, r8)  /* Multiply r0 and r6 and add to (r9, r8) */
        addd (r9, r8), (r11, r10) /* Add (r9, r8) to result */

        /* Bl*Ch */
        movd $0, (r9, r8)       /* Clear (r9, r8) */
        macuw r0, r7, (r9, r8)  /* Multiply r0 and r7 and add to (r9, r8) */
        addw r8, r11            /* Add r8 to r11 */

        loadw 18(sp), r8        /* Load neg from stack */

        /* Bh*Cl */
        muluw r1, (r7, r6)      /* Multiply r1 and r6 and store in (r7, r6) */
        addw r6, r11            /* Add r6 to r11 */

E1:     movd (r11, r10), (r3, r2)
        movd (r12), (r1, r0)

        /* If (neg) */
        cmpw $0, r8             /* Compare 0 with neg */
        beq Lexit__

        /* Neg = -Neg */
        movd $-1, (r9, r8)      /* Temp set to FFFFFFFF */
        xord (r9, r8), (r1, r0) /* Xor low bits of result with temp */
        xord (r9, r8), (r3, r2) /* Xor high bits of result with temp */
        addd $1, (r1, r0)       /* Add 1 to low bits of result */
        bcc Lexit__
        addd $1, (r3, r2)       /* Add 1 to high bit of result */
Lexit__:
        pop $7, r7
        popret $2, r13
#endif

#ifdef L_negdi2
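/* ___negdi2 negates a 64-bit value by inverting both halves and
   propagating the +1 carry from the low half into the high half,
   mirroring the xord/addd/jcc sequence below.  A C-level sketch:

     #include <stdint.h>

     int64_t
     negdi2_sketch (int64_t x)
     {
       uint32_t lo = (uint32_t) x;
       uint32_t hi = (uint32_t) ((uint64_t) x >> 32);

       lo = ~lo;                        // invert both halves ...
       hi = ~hi;
       lo += 1;                         // ... add 1 to the low half ...
       if (lo == 0)                     // ... and carry into the high half
         hi += 1;

       return (int64_t) (((uint64_t) hi << 32) | lo);
     }  */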
        .text
        .align 4
        .globl ___negdi2

___negdi2:
        /* Load the parameter from the stack in this order */
        loadd 0(sp), (r1, r0)
        loadd 4(sp), (r3, r2)

        movd $-1, (r6, r5)      /* Set temp to FFFFFFFF */
        xord (r6, r5), (r1, r0) /* Xor low bits with temp */
        xord (r6, r5), (r3, r2) /* Xor high bits with temp */
        addd $1, (r1, r0)       /* Add one */
        jcc (ra)
        addd $1, (r3, r2)       /* Add the carry to the high bits */
        jump (ra)
#endif

#ifdef L_udivdi3
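/* ___udivdi3 is a thin wrapper: it clears the "remainder wanted" flag
   in r2 and branches to ___udivmoddi3.  Roughly (with the same
   inferred ___udivmoddi3 interface as above):

     // interface inferred from this file, not a published prototype
     extern unsigned long long __udivmoddi3 (unsigned long long num,
                                             unsigned long long den,
                                             int modwanted);

     unsigned long long
     udivdi3_sketch (unsigned long long num, unsigned long long den)
     {
       return __udivmoddi3 (num, den, 0);       // 0 selects the quotient
     }  */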
        .text
        .align 4
        .globl ___udivdi3

___udivdi3:
        movw $0, r2
        br ___udivmoddi3
#endif

#ifdef L_udivmoddi3
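/* ___udivmoddi3 is a classic shift-and-subtract divider: the divisor
   is shifted left until it would overtake the dividend, then quotient
   bits are collected while both are shifted back down.  Division by
   zero raises the dvz exception (see label LE).  A C-level sketch of
   the loop structure the comments below refer to; the third parameter
   is the flag stored from r2 by the callers:

     unsigned long long
     udivmoddi3_sketch (unsigned long long num, unsigned long long den,
                        int modwanted)
     {
       unsigned long long bit = 1, res = 0;
       // den == 0 is assumed to have been trapped already (dvz)

       // align den with the dividend, remembering the scale in bit
       while (den < num && !(den & (1ULL << 63)))
         {
           den <<= 1;
           bit <<= 1;
         }

       // subtract shifted copies of den while walking bit back down
       while (bit)
         {
           if (num >= den)
             {
               num -= den;
               res |= bit;
             }
           bit >>= 1;
           den >>= 1;
         }

       return modwanted ? num : res;
     }  */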
        .text
        .align 4
        .globl ___udivmoddi3

___udivmoddi3:
        push $2, r13
        push $7, r7

        /* Param #1 Long Long low bit first */
        loadd 18(sp), (r1, r0)
        storw r2, 18(sp)        /* Store modulo flag on stack */
        loadd 22(sp), (r3, r2)

        /* Param #2 Long Long low bit first */
        loadd 26(sp), (r5, r4)
        loadd 30(sp), (r7, r6)

        /* Set ures to 0 */
        movd $0, (r13)
        movd $0, (r12)

        cmpd (r12), (r5, r4)
        beq LE

L5:     movd $1, (r9, r8)       /* Store 1 in low bits of bit */
        movd $0, (r11, r10)     /* Store 0 in high bits of bit */

L6:     /* While (den < num && !(den & (1LL<<63))) */
        /* Compare high bits from param 1 and param 2 */
        cmpd (r7, r6), (r3, r2)
        bhi L10                 /* If param 2 is greater go to L10 */
        bne L8                  /* If param 1 is greater go to L8 */
        cmpd (r5, r4), (r1, r0) /* Compare low bits from param 1 and param 2 */
        /* If param 2 is greater or the same go to L10 */
        bhs L10

L8:     /* Check if most significant bit of param 2 is set */
        tbit $15, r7
        bfs L10                 /* If PSR is set go to L10 */

        /* Shift bit */
        lshd $1, (r11, r10)     /* Shift left: high bits of bit */
        /* Check if most significant bit of bit is set */
        tbit $15, r9
        lshd $1, (r9, r8)       /* Shift left: low bits of bit */
        bfs L28                 /* If PSR is set go to L28 */

L9:     /* Shift b */
        lshd $1, (r7, r6)       /* Shift left: high bits of param 2 */
        /* Check if most significant bit of param 2 is set */
        tbit $15, r5
        lshd $1, (r5, r4)       /* Shift left: low bits of param 2 */
        bfc L6                  /* If PSR is not set go to L6 */
        addw $1, r6             /* Add 1 to the highest bits of b */
        br L6                   /* Go to L6 */

L10:    /* While (bit) */
        cmpd $0, (r11, r10)
        bne L11
        cmpd $0, (r9, r8)
        beq E1

L11:    /* If (num >= den) */
        cmpd (r3, r2), (r7, r6) /* Compare high bits of param 1 and param 2 */
        blo L15                 /* If param 1 lower than param 2 go to L15 */
        bne L12                 /* If not equal go to L12 */
        cmpd (r1, r0), (r5, r4) /* Compare low bits of param 1 and param 2 */
        blo L15                 /* If param 1 lower than param 2 go to L15 */

L12:    /* Ures |= bit */
        ord (r11, r10), (r13)
        ord (r9, r8), (r12)

        /* Num -= den */
        subd (r7, r6), (r3, r2) /* Subtract highest 32 bits from each other */
        subd (r5, r4), (r1, r0) /* Subtract lowest 32 bits from each other */
        bcc L15                 /* If no carry occurred go to L15 */
        subd $1, (r3, r2)       /* Subtract the carry */

L15:    /* Shift bit to the right */
        lshd $-1, (r9, r8)      /* Shift right: low bits of bit */
        /* Check if least significant bit of high bits is set */
        tbit $0, r10
        lshd $-1, (r11, r10)    /* Shift right: high bits of bit */
        bfs L18                 /* If PSR is set go to L18 */

L17:    /* Shift param#2 to the right */
        lshd $-1, (r5, r4)      /* Shift right: low bits of param 2 */
        /* Check if least significant bit of high bits is set */
        tbit $0, r6
        lshd $-1, (r7, r6)      /* Shift right: high bits of param 2 */
        bfc L10                 /* If PSR is not set go to L10 */
        /* Or with 0x8000 to set most significant bit */
        orw $32768, r5
        br L10                  /* Go to L10 */

L18:    /* Or with 0x8000 to set most significant bit */
        orw $32768, r9
        br L17

L28:    /* Left shift bit */
        addw $1, r10            /* Add 1 to highest bits of bit */
        br L9                   /* Go to L9 */

LE:     cmpd (r12), (r7, r6)
        bne L5
        excp dvz
        br Lexit__

E1:     loadw 18(sp), r4
        cmpw $0, r4
        bne Lexit__

        /* Return result */
        movd (r12), (r1, r0)
        movd (r13), (r3, r2)
Lexit__:
        pop $7, r7
        popret $2, r13
#endif

#ifdef L_umoddi3
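/* ___umoddi3, like ___udivdi3 above, only sets the flag in r2 (here to
   1, requesting the remainder) and branches to ___udivmoddi3:

     // interface inferred from this file, not a published prototype
     extern unsigned long long __udivmoddi3 (unsigned long long num,
                                             unsigned long long den,
                                             int modwanted);

     unsigned long long
     umoddi3_sketch (unsigned long long num, unsigned long long den)
     {
       return __udivmoddi3 (num, den, 1);       // 1 selects the remainder
     }  */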
        .text
        .align 4
        .globl ___umoddi3

___umoddi3:
        movw $1, r2
        br ___udivmoddi3
#endif