]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/avr/libgcc.S
vec.h (VEC_reserve_exact): Define.
[thirdparty/gcc.git] / gcc / config / avr / libgcc.S
CommitLineData
90e7678c
DC
1/* -*- Mode: Asm -*- */
2/* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
3 Contributed by Denis Chertykov <denisc@overta.ru>
4
5This file is free software; you can redistribute it and/or modify it
6under the terms of the GNU General Public License as published by the
7Free Software Foundation; either version 2, or (at your option) any
8later version.
9
10In addition to the permissions in the GNU General Public License, the
11Free Software Foundation gives you unlimited permission to link the
f7af368f
JL
12compiled version of this file into combinations with other programs,
13and to distribute those combinations without any restriction coming
14from the use of this file. (The General Public License restrictions
15do apply in other respects; for example, they cover modification of
16the file, and distribution when not linked into a combine
17executable.)
90e7678c
DC
18
19This file is distributed in the hope that it will be useful, but
20WITHOUT ANY WARRANTY; without even the implied warranty of
21MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22General Public License for more details.
23
24You should have received a copy of the GNU General Public License
25along with this program; see the file COPYING. If not, write to
39d14dda
KC
26the Free Software Foundation, 51 Franklin Street, Fifth Floor,
27Boston, MA 02110-1301, USA. */
90e7678c 28
90e7678c
DC
/* Fixed registers and I/O addresses used throughout this file
   (avr-gcc register conventions).  */
#define __zero_reg__ r1		/* assumed always zero; temporarily clobbered by MUL */
#define __tmp_reg__ r0		/* scratch register, never saved */
#define __SREG__ 0x3f		/* I/O address of the status register */
#define __SP_H__ 0x3e		/* I/O address of the stack pointer, high byte */
#define __SP_L__ 0x3d		/* I/O address of the stack pointer, low byte */

/* Most of the functions here are called directly from avr.md
   patterns, instead of using the standard libcall mechanisms.
   This can make better code because GCC knows exactly which
   of the call-used registers (not all of them) are clobbered.  */

	.section .text.libgcc, "ax", @progbits

/* Copy the low byte of a 2-byte value -- or, on cores with MOVW,
   both bytes at once.  Always used as a pair with mov_h below so the
   net effect is a 2-byte copy either way.  */
	.macro	mov_l  r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
	movw	\r_dest, \r_src
#else
	mov	\r_dest, \r_src
#endif
	.endm

/* High-byte half of the 2-byte copy: a no-op when the MOVW in mov_l
   already copied both bytes.  */
	.macro	mov_h  r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
	; empty
#else
	mov	\r_dest, \r_src
#endif
	.endm


/* Note: mulqi3, mulhi3 are open-coded on the enhanced core. */
#if !defined (__AVR_ENHANCED__)
/*******************************************************
    Multiplication  8 x 8
*******************************************************/
#if defined (L_mulqi3)

#define	r_arg2	r22		/* multiplicand */
#define	r_arg1	r24		/* multiplier */
#define	r_res	__tmp_reg__	/* result */

/* 8 x 8 -> 8 multiply by shift-and-add: for each set bit of the
   multiplier, accumulate the (progressively doubled) multiplicand.
   In:  r24 = multiplier, r22 = multiplicand.
   Out: r24 = low 8 bits of the product.
   Clobbers: r0 (__tmp_reg__), r22.  */
	.global	__mulqi3
	.func	__mulqi3
__mulqi3:
	clr	r_res		; clear result
__mulqi3_loop:
	sbrc	r_arg1,0
	add	r_res,r_arg2	; multiplier bit 0 set -> accumulate
	add	r_arg2,r_arg2	; shift multiplicand
	breq	__mulqi3_exit	; while multiplicand != 0
	lsr	r_arg1		; next multiplier bit into bit 0
	brne	__mulqi3_loop	; exit if multiplier = 0
__mulqi3_exit:
	mov	r_arg1,r_res	; result to return register
	ret

#undef r_arg2
#undef r_arg1
#undef r_res

.endfunc
#endif	/* defined (L_mulqi3) */


#if defined (L_mulqihi3)
/* signed 8 x 8 -> 16 multiply: sign-extend both 8-bit arguments to
   16 bits and tail-jump to __mulhi3.
   In:  r24 = arg1 (signed), r22 = arg2 (signed).
   Out: r25:r24 = 16-bit product (computed by __mulhi3).
   Bug fix: the original executed "dec r22" in the second extension,
   which corrupted a negative arg2 instead of writing 0xff into its
   high byte r23; the sign extension must decrement r23.  */
	.global	__mulqihi3
	.func	__mulqihi3
__mulqihi3:
	clr	r25
	sbrc	r24, 7
	dec	r25		; r25 = 0xff if arg1 < 0
	clr	r23
	sbrc	r22, 7
	dec	r23		; r23 = 0xff if arg2 < 0 (was: dec r22 -- wrong)
	rjmp	__mulhi3
	.endfunc
#endif /* defined (L_mulqihi3) */

#if defined (L_umulqihi3)
/* unsigned 8 x 8 -> 16 multiply: zero-extend both arguments to
   16 bits and tail-jump to __mulhi3.
   In:  r24, r22 = unsigned 8-bit operands.
   Out: r25:r24 = 16-bit product (computed by __mulhi3).  */
	.global	__umulqihi3
	.func	__umulqihi3
__umulqihi3:
	clr	r25
	clr	r23
	rjmp	__mulhi3
	.endfunc
#endif /* defined (L_umulqihi3) */

/*******************************************************
    Multiplication  16 x 16
*******************************************************/
#if defined (L_mulhi3)
#define	r_arg1L	r24		/* multiplier Low */
#define	r_arg1H	r25		/* multiplier High */
#define	r_arg2L	r22		/* multiplicand Low */
#define	r_arg2H	r23		/* multiplicand High */
#define	r_resL	__tmp_reg__	/* result Low */
#define	r_resH	r21		/* result High */

/* 16 x 16 -> 16 multiply by shift-and-add (classic core only; the
   enhanced core open-codes this with MUL).
   In:  r25:r24 = multiplier, r23:r22 = multiplicand.
   Out: r25:r24 = low 16 bits of the product.
   Clobbers: r0, r21, r23:r22.  */
	.global	__mulhi3
	.func	__mulhi3
__mulhi3:
	clr	r_resH		; clear result
	clr	r_resL		; clear result
__mulhi3_loop:
	sbrs	r_arg1L,0
	rjmp	__mulhi3_skip1
	add	r_resL,r_arg2L	; result + multiplicand
	adc	r_resH,r_arg2H
__mulhi3_skip1:
	add	r_arg2L,r_arg2L	; shift multiplicand
	adc	r_arg2H,r_arg2H

	cp	r_arg2L,__zero_reg__
	cpc	r_arg2H,__zero_reg__
	breq	__mulhi3_exit	; while multiplicand != 0

	lsr	r_arg1H		; gets LSB of multiplier
	ror	r_arg1L
	sbiw	r_arg1L,0	; test the whole 16-bit multiplier
	brne	__mulhi3_loop	; exit if multiplier = 0
__mulhi3_exit:
	mov	r_arg1H,r_resH	; result to return register
	mov	r_arg1L,r_resL
	ret

#undef r_arg1L
#undef r_arg1H
#undef r_arg2L
#undef r_arg2H
#undef r_resL
#undef r_resH

.endfunc
#endif /* defined (L_mulhi3) */
#endif /* !defined (__AVR_ENHANCED__) */

#if defined (L_mulhisi3)
/* signed 16 x 16 -> 32 multiply: copy arg1 into r19:r18, sign-extend
   both operands to 32 bits and tail-jump to __mulsi3.
   In:  r25:r24 = arg1, r23:r22 = arg2 (both signed 16-bit).
   Out: r25:r22 = 32-bit product (computed by __mulsi3).  */
	.global	__mulhisi3
	.func	__mulhisi3
__mulhisi3:
	mov_l	r18, r24	; arg1 -> r19:r18 (multiplicand slot)
	mov_h	r19, r25
	clr	r24
	sbrc	r23, 7
	dec	r24		; r24 = 0xff if arg2 < 0
	mov	r25, r24	; r25:r24 = sign extension of arg2
	clr	r20
	sbrc	r19, 7
	dec	r20		; r20 = 0xff if arg1 < 0
	mov	r21, r20	; r21:r20 = sign extension of arg1
	rjmp	__mulsi3
	.endfunc
#endif /* defined (L_mulhisi3) */

#if defined (L_umulhisi3)
/* unsigned 16 x 16 -> 32 multiply: copy arg1 into r19:r18, zero-extend
   both operands to 32 bits and tail-jump to __mulsi3.
   In:  r25:r24 = arg1, r23:r22 = arg2 (both unsigned 16-bit).
   Out: r25:r22 = 32-bit product (computed by __mulsi3).  */
	.global	__umulhisi3
	.func	__umulhisi3
__umulhisi3:
	mov_l	r18, r24	; arg1 -> r19:r18 (multiplicand slot)
	mov_h	r19, r25
	clr	r24		; zero-extend arg2
	clr	r25
	clr	r20		; zero-extend arg1
	clr	r21
	rjmp	__mulsi3
	.endfunc
#endif /* defined (L_umulhisi3) */

#if defined (L_mulsi3)
/*******************************************************
    Multiplication  32 x 32
*******************************************************/
#define	r_arg1L	 r22		/* multiplier Low */
#define	r_arg1H	 r23
#define	r_arg1HL r24
#define	r_arg1HH r25		/* multiplier High */


#define	r_arg2L	 r18		/* multiplicand Low */
#define	r_arg2H	 r19
#define	r_arg2HL r20
#define	r_arg2HH r21		/* multiplicand High */

#define	r_resL	 r26		/* result Low */
#define	r_resH	 r27
#define	r_resHL	 r30
#define	r_resHH	 r31		/* result High */


/* 32 x 32 -> 32 multiply.
   In:  r25:r22 = multiplier, r21:r18 = multiplicand.
   Out: r25:r22 = low 32 bits of the product.
   Enhanced core: sums only the 8x8 MUL partial products that
   contribute to the low 32 bits.  Classic core: shift-and-add.
   Clobbers: r0, r31:r30, r27:r26 (and r1 transiently on enhanced).  */
	.global	__mulsi3
	.func	__mulsi3
__mulsi3:
#if defined (__AVR_ENHANCED__)
	mul	r_arg1L, r_arg2L
	movw	r_resL, r0		; product bytes 0..1
	mul	r_arg1H, r_arg2H
	movw	r_resHL, r0		; product bytes 2..3 (first term)
	mul	r_arg1HL, r_arg2L	; terms landing in bytes 2..3
	add	r_resHL, r0
	adc	r_resHH, r1
	mul	r_arg1L, r_arg2HL
	add	r_resHL, r0
	adc	r_resHH, r1
	mul	r_arg1HH, r_arg2L	; terms touching byte 3 only
	add	r_resHH, r0		; (carries fall off the top)
	mul	r_arg1HL, r_arg2H
	add	r_resHH, r0
	mul	r_arg1H, r_arg2HL
	add	r_resHH, r0
	mul	r_arg1L, r_arg2HH
	add	r_resHH, r0
	clr	r_arg1HH	; use instead of __zero_reg__ to add carry
	mul	r_arg1H, r_arg2L	; terms landing in bytes 1..2
	add	r_resH, r0
	adc	r_resHL, r1
	adc	r_resHH, r_arg1HH	; add carry
	mul	r_arg1L, r_arg2H
	add	r_resH, r0
	adc	r_resHL, r1
	adc	r_resHH, r_arg1HH	; add carry
	movw	r_arg1L, r_resL		; result to return registers
	movw	r_arg1HL, r_resHL
	clr	r1		; __zero_reg__ clobbered by "mul"
	ret
#else
	clr	r_resHH		; clear result
	clr	r_resHL		; clear result
	clr	r_resH		; clear result
	clr	r_resL		; clear result
__mulsi3_loop:
	sbrs	r_arg1L,0
	rjmp	__mulsi3_skip1
	add	r_resL,r_arg2L		; result + multiplicand
	adc	r_resH,r_arg2H
	adc	r_resHL,r_arg2HL
	adc	r_resHH,r_arg2HH
__mulsi3_skip1:
	add	r_arg2L,r_arg2L		; shift multiplicand
	adc	r_arg2H,r_arg2H
	adc	r_arg2HL,r_arg2HL
	adc	r_arg2HH,r_arg2HH

	lsr	r_arg1HH	; gets LSB of multiplier
	ror	r_arg1HL
	ror	r_arg1H
	ror	r_arg1L
	brne	__mulsi3_loop	; low byte nonzero -> keep going
	sbiw	r_arg1HL,0	; r_arg1L is known 0 here: test HL/HH ...
	cpc	r_arg1H,r_arg1L	; ... and chain Z with (r_arg1H == 0)
	brne	__mulsi3_loop		; exit if multiplier = 0
__mulsi3_exit:
	mov_h	r_arg1HH,r_resHH	; result to return register
	mov_l	r_arg1HL,r_resHL
	mov_h	r_arg1H,r_resH
	mov_l	r_arg1L,r_resL
	ret
#endif /* !defined (__AVR_ENHANCED__) */
#undef r_arg1L
#undef r_arg1H
#undef r_arg1HL
#undef r_arg1HH


#undef	r_arg2L
#undef	r_arg2H
#undef	r_arg2HL
#undef	r_arg2HH

#undef r_resL
#undef r_resH
#undef r_resHL
#undef r_resHH

.endfunc
#endif /* defined (L_mulsi3) */

/*******************************************************
       Division 8 / 8 => (result + remainder)
*******************************************************/
#define	r_rem	r25	/* remainder */
#define	r_arg1	r24	/* dividend, quotient */
#define	r_arg2	r22	/* divisor */
#define	r_cnt	r23	/* loop count */

#if defined (L_udivmodqi4)
/* unsigned 8 / 8 division by restoring shift-and-subtract, one
   quotient bit per iteration.  The quotient is built with inverted
   bits (C is complemented by the compare/subtract) and corrected
   with COM at the end.
   In:  r24 = dividend, r22 = divisor.
   Out: r24 = quotient, r25 = remainder.
   Clobbers: r23.  */
	.global	__udivmodqi4
	.func	__udivmodqi4
__udivmodqi4:
	sub	r_rem,r_rem	; clear remainder and carry
	ldi	r_cnt,9		; init loop counter
	rjmp	__udivmodqi4_ep	; jump to entry point
__udivmodqi4_loop:
	rol	r_rem		; shift dividend into remainder
	cp	r_rem,r_arg2	; compare remainder & divisor
	brcs	__udivmodqi4_ep	; remainder < divisor
	sub	r_rem,r_arg2	; subtract divisor from remainder
__udivmodqi4_ep:
	rol	r_arg1		; shift dividend (with CARRY)
	dec	r_cnt		; decrement loop counter
	brne	__udivmodqi4_loop
	com	r_arg1		; complement result
				; because C flag was complemented in loop
	ret
	.endfunc
#endif /* defined (L_udivmodqi4) */
333
#if defined (L_divmodqi4)
/* signed 8 / 8 division: strip the signs, call __udivmodqi4, then fix
   up the signs (remainder takes the sign of the dividend, kept in T;
   the quotient is negative iff the operand signs differ, kept in
   r0 bit 7).
   In:  r24 = dividend, r22 = divisor.
   Out: r24 = quotient, r25 = remainder.
   Clobbers: r0, r23; uses the T flag.  */
	.global	__divmodqi4
	.func	__divmodqi4
__divmodqi4:
	bst	r_arg1,7	; store sign of dividend
	mov	__tmp_reg__,r_arg1
	eor	__tmp_reg__,r_arg2; r0.7 is sign of result
	sbrc	r_arg1,7
	neg	r_arg1		; dividend negative : negate
	sbrc	r_arg2,7
	neg	r_arg2		; divisor negative : negate
	rcall	__udivmodqi4	; do the unsigned div/mod
	brtc	__divmodqi4_1
	neg	r_rem		; correct remainder sign
__divmodqi4_1:
	sbrc	__tmp_reg__,7
	neg	r_arg1		; correct result sign
__divmodqi4_exit:
	ret
	.endfunc
#endif /* defined (L_divmodqi4) */

#undef r_rem
#undef r_arg1
#undef r_arg2
#undef r_cnt


/*******************************************************
       Division 16 / 16 => (result + remainder)
*******************************************************/
#define	r_remL	r26	/* remainder Low */
#define	r_remH	r27	/* remainder High */

/* return: remainder */
#define	r_arg1L	r24	/* dividend Low */
#define	r_arg1H	r25	/* dividend High */

/* return: quotient */
#define	r_arg2L	r22	/* divisor Low */
#define	r_arg2H	r23	/* divisor High */

#define	r_cnt	r21	/* loop count */

#if defined (L_udivmodhi4)
/* unsigned 16 / 16 restoring division, one quotient bit per
   iteration; quotient bits come out inverted and are fixed with COM.
   In:  r25:r24 = dividend, r23:r22 = divisor.
   Out: r23:r22 = quotient, r25:r24 = remainder (div() layout).
   Clobbers: r21, r27:r26.  */
	.global	__udivmodhi4
	.func	__udivmodhi4
__udivmodhi4:
	sub	r_remL,r_remL
	sub	r_remH,r_remH	; clear remainder and carry
	ldi	r_cnt,17	; init loop counter
	rjmp	__udivmodhi4_ep	; jump to entry point
__udivmodhi4_loop:
	rol	r_remL		; shift dividend into remainder
	rol	r_remH
	cp	r_remL,r_arg2L	; compare remainder & divisor
	cpc	r_remH,r_arg2H
	brcs	__udivmodhi4_ep	; remainder < divisor
	sub	r_remL,r_arg2L	; subtract divisor from remainder
	sbc	r_remH,r_arg2H
__udivmodhi4_ep:
	rol	r_arg1L		; shift dividend (with CARRY)
	rol	r_arg1H
	dec	r_cnt		; decrement loop counter
	brne	__udivmodhi4_loop
	com	r_arg1L		; complement: C was inverted in the loop
	com	r_arg1H
; div/mod results to return registers, as for the div() function
	mov_l	r_arg2L, r_arg1L	; quotient
	mov_h	r_arg2H, r_arg1H
	mov_l	r_arg1L, r_remL		; remainder
	mov_h	r_arg1H, r_remH
	ret
	.endfunc
#endif /* defined (L_udivmodhi4) */

#if defined (L_divmodhi4)
/* signed 16 / 16 division, layered on __udivmodhi4.
   T holds the dividend's sign, r0 bit 7 the quotient's sign.  The
   _neg1/_neg2 helpers negate a 16-bit value using the
   COM / NEG / SBCI 0xff idiom (two's complement across bytes).
   Also exported as "_div".
   In:  r25:r24 = dividend, r23:r22 = divisor.
   Out: r23:r22 = quotient, r25:r24 = remainder.
   Clobbers: r0, r21, r27:r26; uses the T flag.  */
	.global	__divmodhi4
	.func	__divmodhi4
__divmodhi4:
	.global	_div
_div:
	bst	r_arg1H,7	; store sign of dividend
	mov	__tmp_reg__,r_arg1H
	eor	__tmp_reg__,r_arg2H   ; r0.7 is sign of result
	rcall	__divmodhi4_neg1 ; dividend negative : negate
	sbrc	r_arg2H,7
	rcall	__divmodhi4_neg2 ; divisor negative : negate
	rcall	__udivmodhi4	; do the unsigned div/mod
	rcall	__divmodhi4_neg1 ; correct remainder sign
	tst	__tmp_reg__
	brpl	__divmodhi4_exit
__divmodhi4_neg2:
	com	r_arg2H
	neg	r_arg2L		; correct divisor/result sign
	sbci	r_arg2H,0xff
__divmodhi4_exit:
	ret
__divmodhi4_neg1:
	brtc	__divmodhi4_exit ; only negate when T (dividend sign) set
	com	r_arg1H
	neg	r_arg1L		; correct dividend/remainder sign
	sbci	r_arg1H,0xff
	ret
	.endfunc
#endif /* defined (L_divmodhi4) */

#undef r_remH
#undef r_remL

#undef r_arg1H
#undef r_arg1L

#undef r_arg2H
#undef r_arg2L

#undef r_cnt

/*******************************************************
       Division 32 / 32 => (result + remainder)
*******************************************************/
#define	r_remHH	r31	/* remainder High */
#define	r_remHL	r30
#define	r_remH	r27
#define	r_remL	r26	/* remainder Low */

/* return: remainder */
#define	r_arg1HH r25	/* dividend High */
#define	r_arg1HL r24
#define	r_arg1H	 r23
#define	r_arg1L	 r22	/* dividend Low */

/* return: quotient */
#define	r_arg2HH r21	/* divisor High */
#define	r_arg2HL r20
#define	r_arg2H	 r19
#define	r_arg2L	 r18	/* divisor Low */

#define	r_cnt	__zero_reg__  /* loop count (0 after the loop!) */

#if defined (L_udivmodsi4)
/* unsigned 32 / 32 restoring division, one quotient bit per
   iteration.  __zero_reg__ is borrowed as the loop counter: it counts
   down to 0, so its required value is restored when the loop exits.
   In:  r25:r22 = dividend, r21:r18 = divisor.
   Out: r21:r18 = quotient, r25:r22 = remainder (ldiv() layout).
   Clobbers: r27:r26, r31:r30.  */
	.global	__udivmodsi4
	.func	__udivmodsi4
__udivmodsi4:
	ldi	r_remL, 33	; init loop counter
	mov	r_cnt, r_remL
	sub	r_remL,r_remL
	sub	r_remH,r_remH	; clear remainder and carry
	mov_l	r_remHL, r_remL
	mov_h	r_remHH, r_remH
	rjmp	__udivmodsi4_ep	; jump to entry point
__udivmodsi4_loop:
	rol	r_remL		; shift dividend into remainder
	rol	r_remH
	rol	r_remHL
	rol	r_remHH
	cp	r_remL,r_arg2L	; compare remainder & divisor
	cpc	r_remH,r_arg2H
	cpc	r_remHL,r_arg2HL
	cpc	r_remHH,r_arg2HH
	brcs	__udivmodsi4_ep	; remainder < divisor
	sub	r_remL,r_arg2L	; subtract divisor from remainder
	sbc	r_remH,r_arg2H
	sbc	r_remHL,r_arg2HL
	sbc	r_remHH,r_arg2HH
__udivmodsi4_ep:
	rol	r_arg1L		; shift dividend (with CARRY)
	rol	r_arg1H
	rol	r_arg1HL
	rol	r_arg1HH
	dec	r_cnt		; decrement loop counter
	brne	__udivmodsi4_loop
				; __zero_reg__ now restored (r_cnt == 0)
	com	r_arg1L		; complement: C was inverted in the loop
	com	r_arg1H
	com	r_arg1HL
	com	r_arg1HH
; div/mod results to return registers, as for the ldiv() function
	mov_l	r_arg2L, r_arg1L	; quotient
	mov_h	r_arg2H, r_arg1H
	mov_l	r_arg2HL, r_arg1HL
	mov_h	r_arg2HH, r_arg1HH
	mov_l	r_arg1L, r_remL		; remainder
	mov_h	r_arg1H, r_remH
	mov_l	r_arg1HL, r_remHL
	mov_h	r_arg1HH, r_remHH
	ret
	.endfunc
#endif /* defined (L_udivmodsi4) */

#if defined (L_divmodsi4)
/* signed 32 / 32 division, layered on __udivmodsi4; sign handling as
   in __divmodhi4 (T = dividend sign, r0 bit 7 = quotient sign; the
   ROL moves the quotient sign into C for the branch).
   In:  r25:r22 = dividend, r21:r18 = divisor.
   Out: r21:r18 = quotient, r25:r22 = remainder.
   Clobbers: r0, r27:r26, r31:r30; uses the T flag.  */
	.global	__divmodsi4
	.func	__divmodsi4
__divmodsi4:
	bst	r_arg1HH,7	; store sign of dividend
	mov	__tmp_reg__,r_arg1HH
	eor	__tmp_reg__,r_arg2HH   ; r0.7 is sign of result
	rcall	__divmodsi4_neg1 ; dividend negative : negate
	sbrc	r_arg2HH,7
	rcall	__divmodsi4_neg2 ; divisor negative : negate
	rcall	__udivmodsi4	; do the unsigned div/mod
	rcall	__divmodsi4_neg1 ; correct remainder sign
	rol	__tmp_reg__	; C = sign of quotient
	brcc	__divmodsi4_exit
__divmodsi4_neg2:
	com	r_arg2HH
	com	r_arg2HL
	com	r_arg2H
	neg	r_arg2L		; correct divisor/quotient sign
	sbci	r_arg2H,0xff
	sbci	r_arg2HL,0xff
	sbci	r_arg2HH,0xff
__divmodsi4_exit:
	ret
__divmodsi4_neg1:
	brtc	__divmodsi4_exit ; only negate when T (dividend sign) set
	com	r_arg1HH
	com	r_arg1HL
	com	r_arg1H
	neg	r_arg1L		; correct dividend/remainder sign
	sbci	r_arg1H, 0xff
	sbci	r_arg1HL,0xff
	sbci	r_arg1HH,0xff
	ret
	.endfunc
#endif /* defined (L_divmodsi4) */

/**********************************
 * This is a prologue subroutine
 **********************************/
#if defined (L_prologue)

/* Shared function prologue: push all callee-saved registers
   (r2-r17, r28, r29), allocate r27:r26 bytes of stack frame, and set
   Y (r29:r28) to the new stack pointer as frame pointer.  Returns to
   the caller via IJMP -- the caller loads Z with its continuation
   address before jumping here.
   SP is rewritten with interrupts disabled; SREG is restored between
   the two OUTs, relying on the AVR rule that the instruction after
   an I-bit-enabling write to SREG executes before any interrupt.  */
	.global	__prologue_saves__
	.func	__prologue_saves__
__prologue_saves__:
	push r2
	push r3
	push r4
	push r5
	push r6
	push r7
	push r8
	push r9
	push r10
	push r11
	push r12
	push r13
	push r14
	push r15
	push r16
	push r17
	push r28
	push r29
	in	r28,__SP_L__
	in	r29,__SP_H__
	sub	r28,r26		; allocate frame: SP -= r27:r26
	sbc	r29,r27
	in	__tmp_reg__,__SREG__
	cli			; no interrupts while SP is inconsistent
	out	__SP_H__,r29
	out	__SREG__,__tmp_reg__
	out	__SP_L__,r28	; executes in the interrupt shadow
	ijmp			; continue at the address in Z
.endfunc
#endif /* defined (L_prologue) */

/*
 * This is an epilogue subroutine
 */
#if defined (L_epilogue)

/* Shared function epilogue: reload all callee-saved registers from
   the frame (addressed through Y), release the frame and return.
   r30 is expected to hold the frame size (set by the caller -- TODO
   confirm against avr.c epilogue emission).  The caller's saved
   r28/r29 are loaded into r26/r27 first and only moved into Y after
   SP has been switched, so Y stays valid while it is still the
   frame pointer.  Same SREG/SP interrupt-shadow idiom as in
   __prologue_saves__.  */
	.global	__epilogue_restores__
	.func	__epilogue_restores__
__epilogue_restores__:
	ldd	r2,Y+18
	ldd	r3,Y+17
	ldd	r4,Y+16
	ldd	r5,Y+15
	ldd	r6,Y+14
	ldd	r7,Y+13
	ldd	r8,Y+12
	ldd	r9,Y+11
	ldd	r10,Y+10
	ldd	r11,Y+9
	ldd	r12,Y+8
	ldd	r13,Y+7
	ldd	r14,Y+6
	ldd	r15,Y+5
	ldd	r16,Y+4
	ldd	r17,Y+3
	ldd	r26,Y+2		; caller's r28 (moved into Y below)
	ldd	r27,Y+1		; caller's r29
	add	r28,r30		; release frame: Y += r30
	adc	r29,__zero_reg__
	in	__tmp_reg__,__SREG__
	cli			; no interrupts while SP is inconsistent
	out	__SP_H__,r29
	out	__SREG__,__tmp_reg__
	out	__SP_L__,r28	; executes in the interrupt shadow
	mov_l	r28, r26	; restore caller's frame pointer
	mov_h	r29, r27
	ret
.endfunc
#endif /* defined (L_epilogue) */

#ifdef L_exit
/* _exit: placed in .fini9 so execution falls through the .fini8 ...
   .fini1 sections (inserted by the linker script) and finally hangs
   in an endless loop in .fini0.  "exit" is a weak alias.  */
	.section .fini9,"ax",@progbits
	.global _exit
	.func	_exit
_exit:
	.weak	exit
exit:

	/* Code from .fini8 ... .fini1 sections inserted by ld script.  */

	.section .fini0,"ax",@progbits
__stop_program:
	rjmp	__stop_program	; endless loop: nothing to return to
	.endfunc
#endif /* defined (L_exit) */

#ifdef L_cleanup
/* Do-nothing _cleanup stub; weak so a C library can override it.  */
	.weak	_cleanup
	.func	_cleanup
_cleanup:
	ret
.endfunc
#endif /* defined (L_cleanup) */

#ifdef L_tablejump
/* Jump through a code address stored in program memory.
   __tablejump2__: Z = word index into the table; doubled into a byte
   address and falls through.
   __tablejump__:  Z = byte address (in flash) of a 2-byte target
   address; loads it and transfers control there.
   Clobbers: r0, Z.  */
	.global	__tablejump2__
	.func	__tablejump2__
__tablejump2__:
	lsl	r30		; Z *= 2: word index -> byte address
	rol	r31
	.global	__tablejump__
__tablejump__:
#if defined (__AVR_ENHANCED__)
	lpm	__tmp_reg__, Z+	; load target address low byte
	lpm	r31, Z		; load target address high byte
	mov	r30, __tmp_reg__
	ijmp			; jump to the loaded address
#else
	lpm			; classic core: LPM only into r0
	adiw	r30, 1
	push	r0		; push target address ...
	lpm
	push	r0		; ... then reach it with RET
	ret
#endif
	.endfunc
#endif /* defined (L_tablejump) */

/* __do_copy_data is only necessary if there is anything in .data section.
   Does not use RAMPZ - crt*.o provides a replacement for >64K devices.  */

#ifdef L_copy_data
	.section .init4,"ax",@progbits
	.global __do_copy_data
/* Startup: copy the .data image from flash (Z) to RAM (X).
   r17 caches hi8(__data_end) for the end-of-loop compare.  */
__do_copy_data:
	ldi	r17, hi8(__data_end)
	ldi	r26, lo8(__data_start)	; X = destination in RAM
	ldi	r27, hi8(__data_start)
	ldi	r30, lo8(__data_load_start)	; Z = source in flash
	ldi	r31, hi8(__data_load_start)
	rjmp	.do_copy_data_start	; test condition first (may be empty)
.do_copy_data_loop:
#if defined (__AVR_HAVE_LPMX__)
	lpm	r0, Z+
#else
	lpm
	adiw	r30, 1
#endif
	st	X+, r0
.do_copy_data_start:
	cpi	r26, lo8(__data_end)
	cpc	r27, r17
	brne	.do_copy_data_loop
#endif /* L_copy_data */

/* __do_clear_bss is only necessary if there is anything in .bss section.  */

#ifdef L_clear_bss
	.section .init4,"ax",@progbits
	.global __do_clear_bss
/* Startup: zero-fill .bss via X; r17 caches hi8(__bss_end) for the
   end-of-loop compare.  */
__do_clear_bss:
	ldi	r17, hi8(__bss_end)
	ldi	r26, lo8(__bss_start)	; X = start of .bss
	ldi	r27, hi8(__bss_start)
	rjmp	.do_clear_bss_start	; test condition first (may be empty)
.do_clear_bss_loop:
	st	X+, __zero_reg__
.do_clear_bss_start:
	cpi	r26, lo8(__bss_end)
	cpc	r27, r17
	brne	.do_clear_bss_loop
#endif /* L_clear_bss */

/* __do_global_ctors and __do_global_dtors are only necessary
   if there are any constructors/destructors.  */

/* XCALL: CALL on devices with the MEGA (>8K flash) core, where RCALL
   cannot reach everywhere; RCALL otherwise.  */
#if defined (__AVR_MEGA__)
#define XCALL call
#else
#define XCALL rcall
#endif

#ifdef L_ctors
	.section .init6,"ax",@progbits
	.global __do_global_ctors
/* Startup: invoke each constructor listed in .ctors, walking the
   table backwards from __ctors_end to __ctors_start (Y = cursor),
   dispatching through __tablejump__.  r17 caches hi8(__ctors_start)
   for the compare.  */
__do_global_ctors:
	ldi	r17, hi8(__ctors_start)
	ldi	r28, lo8(__ctors_end)
	ldi	r29, hi8(__ctors_end)
	rjmp	.do_global_ctors_start	; test condition first (may be empty)
.do_global_ctors_loop:
	sbiw	r28, 2		; step back one 2-byte table entry
	mov_h	r31, r29	; Z = entry address for __tablejump__
	mov_l	r30, r28
	XCALL	__tablejump__
.do_global_ctors_start:
	cpi	r28, lo8(__ctors_start)
	cpc	r29, r17
	brne	.do_global_ctors_loop
#endif /* L_ctors */

#ifdef L_dtors
	.section .fini6,"ax",@progbits
	.global __do_global_dtors
/* Shutdown: invoke each destructor listed in .dtors, walking the
   table forwards from __dtors_start to __dtors_end (Y = cursor),
   dispatching through __tablejump__.  r17 caches hi8(__dtors_end)
   for the compare.  */
__do_global_dtors:
	ldi	r17, hi8(__dtors_end)
	ldi	r28, lo8(__dtors_start)
	ldi	r29, hi8(__dtors_start)
	rjmp	.do_global_dtors_start	; test condition first (may be empty)
.do_global_dtors_loop:
	mov_h	r31, r29	; Z = entry address for __tablejump__
	mov_l	r30, r28
	XCALL	__tablejump__
	adiw	r28, 2		; advance to the next 2-byte table entry
.do_global_dtors_start:
	cpi	r28, lo8(__dtors_end)
	cpc	r29, r17
	brne	.do_global_dtors_loop
#endif /* L_dtors */
778