1 /* Subroutines for insn-output.c for Intel 860
2 Copyright (C) 1989, 91, 97, 98, 1999 Free Software Foundation, Inc.
5 Written by Richard Stallman (rms@ai.mit.edu).
7 Hacked substantially by Ron Guilmette (rfg@netcom.com) to cater
8 to the whims of the System V Release 4 assembler.
10 This file is part of GNU CC.
12 GNU CC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2, or (at your option)
17 GNU CC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GNU CC; see the file COPYING. If not, write to
24 the Free Software Foundation, 59 Temple Place - Suite 330,
25 Boston, MA 02111-1307, USA. */
34 #include "hard-reg-set.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "insn-flags.h"
41 #include "insn-attr.h"
/* NOTE(review): this extraction is missing interior lines (the embedded
   original line numbers jump, e.g. the #endif for the #ifndef below is
   not visible); do not edit without the complete file.  */
/* Forward declaration: helper that finds an incrementable address reg.  */
45 static rtx
find_addr_reg ();
47 #ifndef I860_REG_PREFIX
48 #define I860_REG_PREFIX ""
/* Prefix prepended to register names in emitted assembly (SVR4 vs. other
   assemblers differ); defaults to the empty string above.  */
51 char *i860_reg_prefix
= I860_REG_PREFIX
;
53 /* Save information from a "cmpxx" operation until the branch is emitted. */
55 rtx i860_compare_op0
, i860_compare_op1
;
/* NOTE(review): truncated extraction — the function header comment, the
   parameter declaration for OP, braces, case labels of the switch, and
   several returns are missing (embedded line numbers jump 68->80, 84->101).
   Surviving text says: return non-zero if this SET source pattern can be
   evaluated safely (i.e. will not trap), given mode MODE.  FP modes are
   rejected for arithmetic because the operation may trap.  */
57 /* Return non-zero if this pattern, can be evaluated safely, even if it
60 safe_insn_src_p (op
, mode
)
62 enum machine_mode mode
;
64 /* Just experimenting. */
66 /* No floating point src is safe if it contains an arithmetic
67 operation, since that operation may trap. */
68 switch (GET_CODE (op
))
/* NOTE(review): the case labels for the branches below are elided;
   presumably MEM, NEG/NOT, and PLUS/MINUS-style cases — confirm against
   the full file.  */
80 return CONSTANT_ADDRESS_P (XEXP (op
, 0));
82 /* We never need to negate or complement constants. */
84 return (mode
!= SFmode
&& mode
!= DFmode
);
101 return (mode
!= SFmode
&& mode
!= DFmode
);
/* Reject two-operand forms whose constant operand does not fit the
   machine's small-immediate range (SMALL_INT).  */
108 if ((GET_CODE (XEXP (op
, 0)) == CONST_INT
&& ! SMALL_INT (XEXP (op
, 0)))
109 || (GET_CODE (XEXP (op
, 1)) == CONST_INT
&& ! SMALL_INT (XEXP (op
, 1))))
118 /* Return 1 if REG is clobbered in IN.
119 Return 2 if REG is used in IN.
120 Return 3 if REG is both used and clobbered in IN.
121 Return 0 if neither. */
/* NOTE(review): truncated extraction — parameter declarations, the `set'
   and `used' local declarations, braces, and the final returns are missing
   (embedded numbering jumps 124->128, 177->180).  Code byte-identical;
   comments only.  */
124 reg_clobbered_p (reg
, in
)
128 register enum rtx_code code
;
133 code
= GET_CODE (in
);
135 if (code
== SET
|| code
== CLOBBER
)
137 rtx dest
= SET_DEST (in
);
/* Strip wrappers so DEST is the underlying register (if any).  */
141 while (GET_CODE (dest
) == STRICT_LOW_PART
142 || GET_CODE (dest
) == SUBREG
143 || GET_CODE (dest
) == SIGN_EXTRACT
144 || GET_CODE (dest
) == ZERO_EXTRACT
)
145 dest
= XEXP (dest
, 0);
149 else if (GET_CODE (dest
) == REG
150 && refers_to_regno_p (REGNO (reg
),
151 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
155 /* Anything that sets just part of the register
156 is considered using as well as setting it.
157 But note that a straight SUBREG of a single-word value
158 clobbers the entire value. */
159 if (dest
!= SET_DEST (in
)
160 && ! (GET_CODE (SET_DEST (in
)) == SUBREG
161 || UNITS_PER_WORD
>= GET_MODE_SIZE (GET_MODE (dest
))))
/* Test whether the register range [REGNO, REGNO+NREGS) is referenced
   in the relevant subexpression; the trailing arguments are elided.  */
168 used
= refers_to_regno_p (REGNO (reg
),
169 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
172 used
= refers_to_regno_p (REGNO (reg
),
173 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
/* Encode the answer: bit 0 = clobbered, bit 1 = used.  */
177 return set
+ used
* 2;
180 if (refers_to_regno_p (REGNO (reg
),
181 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
187 /* Return non-zero if OP can be written to without screwing up
188 GCC's model of what's going on. It is assumed that this operand
189 appears in the dest position of a SET insn in a conditional
190 branch's delay slot. AFTER is the label to start looking from. */
/* NOTE(review): truncated extraction — parameter declarations, the bodies
   of several `if's, the switch's case labels, and the function's default
   return are missing.  Code byte-identical; comments only.  */
192 operand_clobbered_before_used_after (op
, after
)
196 /* Just experimenting. */
197 if (GET_CODE (op
) == CC0
)
199 if (GET_CODE (op
) == REG
)
/* The stack pointer gets special treatment (result elided here).  */
203 if (op
== stack_pointer_rtx
)
206 /* Scan forward from the label, to see if the value of OP
207 is clobbered before the first use. */
209 for (insn
= NEXT_INSN (after
); insn
; insn
= NEXT_INSN (insn
))
211 if (GET_CODE (insn
) == NOTE
)
213 if (GET_CODE (insn
) == INSN
214 || GET_CODE (insn
) == JUMP_INSN
215 || GET_CODE (insn
) == CALL_INSN
)
/* Dispatch on the 0..3 used/clobbered code from reg_clobbered_p;
   the case labels themselves are elided in this extraction.  */
217 switch (reg_clobbered_p (op
, PATTERN (insn
)))
227 /* If we reach another label without clobbering OP,
228 then we cannot safely write it here. */
229 else if (GET_CODE (insn
) == CODE_LABEL
)
231 if (GET_CODE (insn
) == JUMP_INSN
)
233 if (condjump_p (insn
))
235 /* This is a jump insn which has already
236 been mangled. We can't tell what it does. */
237 if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
239 if (! JUMP_LABEL (insn
))
241 /* Keep following jumps. */
242 insn
= JUMP_LABEL (insn
);
248 /* In both of these cases, the first insn executed
249 for this op will be a orh whatever%h,%?r0,%?r31,
250 which is tolerable. */
251 if (GET_CODE (op
) == MEM
)
252 return (CONSTANT_ADDRESS_P (XEXP (op
, 0)));
257 /* Return non-zero if this pattern, as a source to a "SET",
258 is known to yield an instruction of unit size. */
/* NOTE(review): truncated extraction — the case labels of the switch and
   several returns are missing (embedded numbering jumps 268->273,
   328->337).  Code byte-identical; comments only.  */
260 single_insn_src_p (op
, mode
)
262 enum machine_mode mode
;
264 switch (GET_CODE (op
))
267 /* This is not always a single insn src, technically,
268 but output_delayed_branch knows how to deal with it. */
273 /* This is not a single insn src, technically,
274 but output_delayed_branch knows how to deal with it. */
283 /* We never need to negate or complement constants. */
285 return (mode
!= DFmode
);
292 /* Detect cases that require multiple instructions. */
293 if (CONSTANT_P (XEXP (op
, 1))
294 && !(GET_CODE (XEXP (op
, 1)) == CONST_INT
295 && SMALL_INT (XEXP (op
, 1))))
307 /* Not doing floating point, since they probably
308 take longer than the branch slot they might fill. */
309 return (mode
!= SFmode
&& mode
!= DFmode
);
/* An AND with a complemented operand (andnot form); constants whose low
   16 bits are zero still fit in one insn via the high-half form.  */
312 if (GET_CODE (XEXP (op
, 1)) == NOT
)
314 rtx arg
= XEXP (XEXP (op
, 1), 0);
316 && !(GET_CODE (arg
) == CONST_INT
318 || (INTVAL (arg
) & 0xffff) == 0)))
323 /* Both small and round numbers take one instruction;
325 if (CONSTANT_P (XEXP (op
, 1))
326 && !(GET_CODE (XEXP (op
, 1)) == CONST_INT
327 && (SMALL_INT (XEXP (op
, 1))
328 || (INTVAL (XEXP (op
, 1)) & 0xffff) == 0)))
/* SUBREG case: only word 0 of a multiword value is a single insn;
   recurse on the inner register.  */
337 if (SUBREG_WORD (op
) != 0)
339 return single_insn_src_p (SUBREG_REG (op
), mode
);
341 /* Not doing floating point, since they probably
342 take longer than the branch slot they might fill. */
356 /* Return non-zero only if OP is a register of mode MODE,
/* NOTE(review): truncated — the continuation of the comment above ("...or
   the constant zero", presumably), the `rtx op;' declaration, and the
   function braces are elided.  Predicate: OP is const0_rtx, a register of
   mode MODE, or MODE's zero constant.  */
359 reg_or_0_operand (op
, mode
)
361 enum machine_mode mode
;
363 return (op
== const0_rtx
|| register_operand (op
, mode
)
364 || op
== CONST0_RTX (mode
));
367 /* Return truth value of whether OP can be used as an operands in a three
368 address add/subtract insn (such as add %o1,7,%l2) of mode MODE. */
/* NOTE(review): truncated extraction — `rtx op;' and braces elided.
   Predicate: a register, or a CONST_INT in the add/sub immediate range.  */
371 arith_operand (op
, mode
)
373 enum machine_mode mode
;
375 return (register_operand (op
, mode
)
376 || (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
)));
379 /* Return 1 if OP is a valid first operand for a logical insn of mode MODE. */
/* NOTE(review): truncated extraction — `rtx op;' and braces elided.
   Predicate: a register, or a CONST_INT acceptable to LOGIC_INT.  */
382 logic_operand (op
, mode
)
384 enum machine_mode mode
;
386 return (register_operand (op
, mode
)
387 || (GET_CODE (op
) == CONST_INT
&& LOGIC_INT (op
)));
390 /* Return 1 if OP is a valid first operand for a shift insn of mode MODE. */
/* NOTE(review): truncated extraction — `rtx op;' and braces elided.
   Predicate: a register, or any CONST_INT (shift counts need no range
   macro here).  */
393 shift_operand (op
, mode
)
395 enum machine_mode mode
;
397 return (register_operand (op
, mode
)
398 || (GET_CODE (op
) == CONST_INT
));
401 /* Return 1 if OP is a valid first operand for either a logical insn
402 or an add insn of mode MODE. */
/* NOTE(review): truncated extraction — `rtx op;' and braces elided.
   Constant must satisfy BOTH SMALL_INT and LOGIC_INT since it may be
   used by either insn class.  */
405 compare_operand (op
, mode
)
407 enum machine_mode mode
;
409 return (register_operand (op
, mode
)
410 || (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
) && LOGIC_INT (op
)));
413 /* Return truth value of whether OP can be used as the 5-bit immediate
414 operand of a bte or btne insn. */
/* NOTE(review): truncated extraction — `rtx op;' and braces elided.
   Accepts a register or an unsigned 5-bit constant (< 0x20).  */
417 bte_operand (op
, mode
)
419 enum machine_mode mode
;
421 return (register_operand (op
, mode
)
422 || (GET_CODE (op
) == CONST_INT
423 && (unsigned) INTVAL (op
) < 0x20));
426 /* Return 1 if OP is an indexed memory reference of mode MODE. */
/* NOTE(review): truncated extraction — `rtx op;' and braces elided.
   Matches (mem:MODE (plus:SI reg reg)), i.e. register+register
   addressing.  */
429 indexed_operand (op
, mode
)
431 enum machine_mode mode
;
433 return (GET_CODE (op
) == MEM
&& GET_MODE (op
) == mode
434 && GET_CODE (XEXP (op
, 0)) == PLUS
435 && GET_MODE (XEXP (op
, 0)) == SImode
436 && register_operand (XEXP (XEXP (op
, 0), 0), SImode
)
437 && register_operand (XEXP (XEXP (op
, 0), 1), SImode
));
440 /* Return 1 if OP is a suitable source operand for a load insn
/* NOTE(review): truncated — the rest of the comment above, `rtx op;',
   and braces are elided.  Accepts ordinary memory operands plus the
   reg+reg indexed form recognized by indexed_operand.  */
444 load_operand (op
, mode
)
446 enum machine_mode mode
;
448 return (memory_operand (op
, mode
) || indexed_operand (op
, mode
));
451 /* Return truth value of whether OP is a integer which fits the
452 range constraining immediate operands in add/subtract insns. */
/* NOTE(review): the line carrying this function's name is elided in this
   extraction (numbering jumps 452->457); presumably `small_int (op, mode)'
   — confirm against the full file.  Predicate: CONST_INT in SMALL_INT
   range.  */
457 enum machine_mode mode
;
459 return (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
));
462 /* Return truth value of whether OP is a integer which fits the
463 range constraining immediate operands in logic insns. */
/* NOTE(review): the line carrying this function's name is elided in this
   extraction (numbering jumps 463->468); presumably `logic_int (op, mode)'
   — confirm against the full file.  Predicate: CONST_INT acceptable to
   LOGIC_INT.  */
468 enum machine_mode mode
;
470 return (GET_CODE (op
) == CONST_INT
&& LOGIC_INT (op
));
473 /* Test for a valid operand for a call instruction.
474 Don't allow the arg pointer register or virtual regs
475 since they may change into reg + const, which the patterns
/* NOTE(review): truncated — the tail of the comment above, the parameter
   declarations, braces, and both return statements are elided.  Accepts a
   MEM whose address is a constant, or a hard/pseudo register other than
   the arg pointer and the virtual registers.  */
479 call_insn_operand (op
, mode
)
481 enum machine_mode mode
;
483 if (GET_CODE (op
) == MEM
484 && (CONSTANT_ADDRESS_P (XEXP (op
, 0))
485 || (GET_CODE (XEXP (op
, 0)) == REG
486 && XEXP (op
, 0) != arg_pointer_rtx
487 && !(REGNO (XEXP (op
, 0)) >= FIRST_PSEUDO_REGISTER
488 && REGNO (XEXP (op
, 0)) <= LAST_VIRTUAL_REGISTER
))))
493 /* Return the best assembler insn template
494 for moving operands[1] into operands[0] as a fullword. */
/* NOTE(review): truncated extraction — the return type, parameter
   declaration, the `xoperands' local, several braces and fall-through
   returns are elided (embedded numbering jumps 517->524, 561->566).
   The visible logic tracks whether %r31 already holds the high half of a
   constant address (CC_KNOW_HI_R31/CC_HI_R31_ADJ in cc_status) to avoid
   re-emitting the `orh' that loads it.  Code byte-identical; comments
   only.  */
497 singlemove_string (operands
)
500 if (GET_CODE (operands
[0]) == MEM
)
502 if (GET_CODE (operands
[1]) != MEM
)
503 if (CONSTANT_ADDRESS_P (XEXP (operands
[0], 0)))
/* Emit `orh' to load the address high half into r31 only when r31 is
   not already known to hold it from a previous insn.  */
505 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
506 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
507 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
510 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
512 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
513 cc_status
.mdep
= XEXP (operands
[0], 0);
514 return "st.l %r1,%L0(%?r31)";
517 return "st.l %r1,%0";
/* Memory-to-memory move: bounce the word through f32 (two recursive
   single moves).  f0-is-zero knowledge is invalidated first.  */
524 cc_status
.flags
&= ~CC_F0_IS_0
;
525 xoperands
[0] = gen_rtx (REG
, SFmode
, 32);
526 xoperands
[1] = operands
[1];
527 output_asm_insn (singlemove_string (xoperands
), xoperands
);
528 xoperands
[1] = xoperands
[0];
529 xoperands
[0] = operands
[0];
530 output_asm_insn (singlemove_string (xoperands
), xoperands
);
535 if (GET_CODE (operands
[1]) == MEM
)
537 if (CONSTANT_ADDRESS_P (XEXP (operands
[1], 0)))
539 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
540 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
541 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
544 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
546 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
547 cc_status
.mdep
= XEXP (operands
[1], 0);
548 return "ld.l %L1(%?r31),%0";
550 return "ld.l %m1,%0";
/* Constant source: pick the cheapest one-insn form by bit pattern —
   zero, low-16-only, sign-extended 16-bit, or high-16-only.  */
552 if (GET_CODE (operands
[1]) == CONST_INT
)
554 if (operands
[1] == const0_rtx
)
555 return "mov %?r0,%0";
556 if((INTVAL (operands
[1]) & 0xffff0000) == 0)
557 return "or %L1,%?r0,%0";
558 if((INTVAL (operands
[1]) & 0xffff8000) == 0xffff8000)
559 return "adds %1,%?r0,%0";
560 if((INTVAL (operands
[1]) & 0x0000ffff) == 0)
561 return "orh %H1,%?r0,%0";
566 /* Output assembler code to perform a doubleword move insn
567 with operands OPERANDS. */
/* NOTE(review): truncated extraction — the return type, the `latehalf'
   and `xops' declarations, many braces, the classification assignments
   (optype0/optype1 = ...), abort() calls, and several guard `if's around
   the addreg adjustments are elided.  Do not edit logic without the
   complete file.  Code byte-identical; comments only.  */
570 output_move_double (operands
)
573 enum { REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
} optype0
, optype1
;
575 rtx addreg0
= 0, addreg1
= 0;
576 int highest_first
= 0;
577 int no_addreg1_decrement
= 0;
579 /* First classify both operands. */
581 if (REG_P (operands
[0]))
583 else if (offsettable_memref_p (operands
[0]))
585 else if (GET_CODE (operands
[0]) == MEM
)
590 if (REG_P (operands
[1]))
592 else if (CONSTANT_P (operands
[1]))
594 else if (offsettable_memref_p (operands
[1]))
596 else if (GET_CODE (operands
[1]) == MEM
)
601 /* Check for the cases that the operand constraints are not
602 supposed to allow to happen. Abort if we get one,
603 because generating code for these cases is painful. */
605 if (optype0
== RNDOP
|| optype1
== RNDOP
)
608 /* If an operand is an unoffsettable memory ref, find a register
609 we can increment temporarily to make it refer to the second word. */
611 if (optype0
== MEMOP
)
612 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
614 if (optype1
== MEMOP
)
615 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
617 /* ??? Perhaps in some cases move double words
618 if there is a spare pair of floating regs. */
620 /* Ok, we can do one word at a time.
621 Normally we do the low-numbered word first,
622 but if either operand is autodecrementing then we
623 do the high-numbered word first.
625 In either case, set up in LATEHALF the operands to use
626 for the high-numbered word and in some cases alter the
627 operands in OPERANDS to be suitable for the low-numbered word. */
629 if (optype0
== REGOP
)
630 latehalf
[0] = gen_rtx (REG
, SImode
, REGNO (operands
[0]) + 1);
631 else if (optype0
== OFFSOP
)
632 latehalf
[0] = adj_offsettable_operand (operands
[0], 4);
634 latehalf
[0] = operands
[0];
636 if (optype1
== REGOP
)
637 latehalf
[1] = gen_rtx (REG
, SImode
, REGNO (operands
[1]) + 1);
638 else if (optype1
== OFFSOP
)
639 latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
640 else if (optype1
== CNSTOP
)
642 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
643 split_double (operands
[1], &operands
[1], &latehalf
[1]);
644 else if (CONSTANT_P (operands
[1]))
645 latehalf
[1] = const0_rtx
;
648 latehalf
[1] = operands
[1];
650 /* If the first move would clobber the source of the second one,
651 do them in the other order.
653 RMS says "This happens only for registers;
654 such overlap can't happen in memory unless the user explicitly
655 sets it up, and that is an undefined circumstance."
657 but it happens on the sparc when loading parameter registers,
658 so I am going to define that circumstance, and make it work
661 if (optype0
== REGOP
&& optype1
== REGOP
662 && REGNO (operands
[0]) == REGNO (latehalf
[1]))
664 CC_STATUS_PARTIAL_INIT
;
665 /* Make any unoffsettable addresses point at high-numbered word. */
667 output_asm_insn ("adds 0x4,%0,%0", &addreg0
);
669 output_asm_insn ("adds 0x4,%0,%0", &addreg1
);
672 output_asm_insn (singlemove_string (latehalf
), latehalf
);
674 /* Undo the adds we just did. */
676 output_asm_insn ("adds -0x4,%0,%0", &addreg0
);
678 output_asm_insn ("adds -0x4,%0,%0", &addreg1
);
680 /* Do low-numbered word. */
681 return singlemove_string (operands
);
683 else if (optype0
== REGOP
&& optype1
!= REGOP
684 && reg_overlap_mentioned_p (operands
[0], operands
[1]))
686 /* If both halves of dest are used in the src memory address,
687 add the two regs and put them in the low reg (operands[0]).
688 Then it works to load latehalf first. */
689 if (reg_mentioned_p (operands
[0], XEXP (operands
[1], 0))
690 && reg_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
693 xops
[0] = latehalf
[0];
694 xops
[1] = operands
[0];
695 output_asm_insn ("adds %1,%0,%1", xops
);
696 operands
[1] = gen_rtx (MEM
, DImode
, operands
[0]);
697 latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
701 /* Only one register in the dest is used in the src memory address,
702 and this is the first register of the dest, so we want to do
703 the late half first here also. */
704 else if (! reg_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
706 /* Only one register in the dest is used in the src memory address,
707 and this is the second register of the dest, so we want to do
708 the late half last. If addreg1 is set, and addreg1 is the same
709 register as latehalf, then we must suppress the trailing decrement,
710 because it would clobber the value just loaded. */
711 else if (addreg1
&& reg_mentioned_p (addreg1
, latehalf
[0]))
712 no_addreg1_decrement
= 1;
715 /* Normal case: do the two words, low-numbered first.
716 Overlap case (highest_first set): do high-numbered word first. */
719 output_asm_insn (singlemove_string (operands
), operands
);
721 CC_STATUS_PARTIAL_INIT
;
722 /* Make any unoffsettable addresses point at high-numbered word. */
724 output_asm_insn ("adds 0x4,%0,%0", &addreg0
);
726 output_asm_insn ("adds 0x4,%0,%0", &addreg1
);
729 output_asm_insn (singlemove_string (latehalf
), latehalf
);
731 /* Undo the adds we just did. */
733 output_asm_insn ("adds -0x4,%0,%0", &addreg0
);
734 if (addreg1
&& !no_addreg1_decrement
)
735 output_asm_insn ("adds -0x4,%0,%0", &addreg1
);
738 output_asm_insn (singlemove_string (operands
), operands
);
/* Output a doubleword floating-point move for OPERANDS.
   NOTE(review): truncated extraction — the return type, the header
   comment, parameter declarations, braces, and the recursive/fallback
   tail (numbering jumps 802->810) are elided.  The visible logic:
   fp<->fp uses fmov.dd; int<->fp pairs transfer word-at-a-time via
   ixfr/fxfr, bumping both register numbers for the second word; memory
   forms use fld.d/fst.d, with the same r31 high-half caching as
   singlemove_string.  Code byte-identical; comments only.  */
744 output_fp_move_double (operands
)
747 /* If the source operand is any sort of zero, use f0 instead. */
749 if (operands
[1] == CONST0_RTX (GET_MODE (operands
[1])))
750 operands
[1] = gen_rtx (REG
, DFmode
, F0_REGNUM
);
752 if (FP_REG_P (operands
[0]))
754 if (FP_REG_P (operands
[1]))
755 return "fmov.dd %1,%0";
756 if (GET_CODE (operands
[1]) == REG
)
758 output_asm_insn ("ixfr %1,%0", operands
);
759 operands
[0] = gen_rtx (REG
, VOIDmode
, REGNO (operands
[0]) + 1);
760 operands
[1] = gen_rtx (REG
, VOIDmode
, REGNO (operands
[1]) + 1);
763 if (operands
[1] == CONST0_RTX (DFmode
))
764 return "fmov.dd f0,%0";
765 if (CONSTANT_ADDRESS_P (XEXP (operands
[1], 0)))
767 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
768 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
769 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
772 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
774 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
775 cc_status
.mdep
= XEXP (operands
[1], 0);
776 return "fld.d %L1(%?r31),%0";
778 return "fld.d %1,%0";
780 else if (FP_REG_P (operands
[1]))
782 if (GET_CODE (operands
[0]) == REG
)
784 output_asm_insn ("fxfr %1,%0", operands
);
785 operands
[0] = gen_rtx (REG
, VOIDmode
, REGNO (operands
[0]) + 1);
786 operands
[1] = gen_rtx (REG
, VOIDmode
, REGNO (operands
[1]) + 1);
789 if (CONSTANT_ADDRESS_P (XEXP (operands
[0], 0)))
791 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
792 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
793 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
796 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
798 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
799 cc_status
.mdep
= XEXP (operands
[0], 0);
800 return "fst.d %1,%L0(%?r31)";
802 return "fst.d %1,%0";
810 /* Return a REG that occurs in ADDR with coefficient 1.
811 ADDR can be effectively incremented by incrementing REG. */
/* NOTE(review): truncated extraction — the function header line
   (`find_addr_reg (addr)'), its braces, and the final return/abort are
   elided (numbering jumps 811->817, 830->837).  The loop walks PLUS
   nodes, following the register (or non-constant) arm until a bare REG
   is reached.  Code byte-identical; comments only.  */
817 while (GET_CODE (addr
) == PLUS
)
819 if (GET_CODE (XEXP (addr
, 0)) == REG
)
820 addr
= XEXP (addr
, 0);
821 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
822 addr
= XEXP (addr
, 1);
823 else if (CONSTANT_P (XEXP (addr
, 0)))
824 addr
= XEXP (addr
, 1);
825 else if (CONSTANT_P (XEXP (addr
, 1)))
826 addr
= XEXP (addr
, 0);
830 if (GET_CODE (addr
) == REG
)
837 /* Return a template for a load instruction with mode MODE and
838 arguments from the string ARGS.
840 This string is in static storage. */
/* NOTE(review): almost the entire body is elided in this extraction
   (numbering jumps 844->880): the `buf'/`opcode' declarations and the
   switch that selects the ld.* mnemonic by mode are missing.  Only the
   final formatting into the static buffer survives.  */
843 load_opcode (mode
, args
, reg
)
844 enum machine_mode mode
;
880 sprintf (buf
, "%s %s", opcode
, args
);
884 /* Return a template for a store instruction with mode MODE and
885 arguments from the string ARGS.
887 This string is in static storage. */
/* NOTE(review): almost the entire body is elided in this extraction
   (numbering jumps 891->927): the `buf'/`opcode' declarations and the
   switch that selects the st.* mnemonic by mode are missing.  Only the
   final formatting into the static buffer survives.  */
890 store_opcode (mode
, args
, reg
)
891 enum machine_mode mode
;
927 sprintf (buf
, "%s %s", opcode
, args
);
931 /* Output a store-in-memory whose operands are OPERANDS[0,1].
932 OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.
934 This function returns a template for an insn.
935 This is in static storage.
937 It may also output some insns directly.
938 It may alter the values of operands[0] and operands[1]. */
/* NOTE(review): truncated extraction — return type, braces, and a few
   connective lines are elided.  Records in cc_status that r31 will hold
   the high half of ADDRESS, emitting the `orh' only when the previous
   insn did not already establish it.  Code byte-identical; comments
   only.  */
941 output_store (operands
)
944 enum machine_mode mode
= GET_MODE (operands
[0]);
945 rtx address
= XEXP (operands
[0], 0);
948 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
949 cc_status
.mdep
= address
;
951 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
952 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
953 && address
== cc_prev_status
.mdep
))
956 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
957 cc_prev_status
.mdep
= address
;
960 /* Store zero in two parts when appropriate. */
961 if (mode
== DFmode
&& operands
[1] == CONST0_RTX (DFmode
))
962 return store_opcode (DFmode
, "%r1,%L0(%?r31)", operands
[1]);
964 /* Code below isn't smart enough to move a doubleword in two parts,
965 so use output_move_double to do that in the cases that require it. */
966 if ((mode
== DImode
|| mode
== DFmode
)
967 && ! FP_REG_P (operands
[1]))
968 return output_move_double (operands
);
970 return store_opcode (mode
, "%r1,%L0(%?r31)", operands
[1]);
973 /* Output a load-from-memory whose operands are OPERANDS[0,1].
974 OPERANDS[0] is a reg, and OPERANDS[1] is a mem.
976 This function returns a template for an insn.
977 This is in static storage.
979 It may also output some insns directly.
980 It may alter the values of operands[0] and operands[1]. */
/* NOTE(review): truncated extraction — return type, braces, and a few
   connective lines are elided.  Mirrors output_store but for loads;
   same r31 high-half caching via cc_status.  Code byte-identical;
   comments only.  */
983 output_load (operands
)
986 enum machine_mode mode
= GET_MODE (operands
[0]);
987 rtx address
= XEXP (operands
[1], 0);
989 /* We don't bother trying to see if we know %hi(address).
990 This is because we are doing a load, and if we know the
991 %hi value, we probably also know that value in memory. */
992 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
993 cc_status
.mdep
= address
;
995 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
996 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
997 && address
== cc_prev_status
.mdep
998 && cc_prev_status
.mdep
== cc_status
.mdep
))
1001 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
1002 cc_prev_status
.mdep
= address
;
1005 /* Code below isn't smart enough to move a doubleword in two parts,
1006 so use output_move_double to do that in the cases that require it. */
1007 if ((mode
== DImode
|| mode
== DFmode
)
1008 && ! FP_REG_P (operands
[0]))
1009 return output_move_double (operands
);
1011 return load_opcode (mode
, "%L1(%?r31),%0", operands
[0]);
1015 /* Load the address specified by OPERANDS[3] into the register
1016 specified by OPERANDS[0].
1018 OPERANDS[3] may be the result of a sum, hence it could either be:
1023 (3) REG + REG + CONST_INT
1024 (4) REG + REG (special case of 3).
1026 Note that (3) is not a legitimate address.
1027 All cases are handled here. */
/* NOTE(review): truncated extraction — cases (1)/(2) of the comment,
   the `base'/`offset' declarations, several returns, an abort for
   non-PLUS forms, and the assignment of operands[6] in the first PLUS
   branch are elided.  Note also operands[6..8] are scratch slots written
   by this function (callers must reserve them).  Code byte-identical;
   comments only.  */
1030 output_load_address (operands
)
1035 if (CONSTANT_P (operands
[3]))
1037 output_asm_insn ("mov %3,%0", operands
);
1041 if (REG_P (operands
[3]))
1043 if (REGNO (operands
[0]) != REGNO (operands
[3]))
1044 output_asm_insn ("shl %?r0,%3,%0", operands
);
1048 if (GET_CODE (operands
[3]) != PLUS
)
1051 base
= XEXP (operands
[3], 0);
1052 offset
= XEXP (operands
[3], 1);
1054 if (GET_CODE (base
) == CONST_INT
)
1061 if (GET_CODE (offset
) != CONST_INT
)
1063 /* Operand is (PLUS (REG) (REG)). */
1065 offset
= const0_rtx
;
1071 operands
[7] = offset
;
1072 CC_STATUS_PARTIAL_INIT
;
1073 if (SMALL_INT (offset
))
1074 output_asm_insn ("adds %7,%6,%0", operands
);
1076 output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands
);
1078 else if (GET_CODE (base
) == PLUS
)
1080 operands
[6] = XEXP (base
, 0);
1081 operands
[7] = XEXP (base
, 1);
1082 operands
[8] = offset
;
1084 CC_STATUS_PARTIAL_INIT
;
1085 if (SMALL_INT (offset
))
1086 output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands
);
1088 output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands
);
1095 /* Output code to place a size count SIZE in register REG.
1096 Because block moves are pipelined, we don't include the
1097 first element in the transfer of SIZE to REG.
1098 For this, we subtract ALIGN. (Actually, I think it is not
1099 right to subtract on this machine, so right now we don't.) */
/* NOTE(review): truncated extraction — the `xoperands' declaration, the
   xoperands[0] = reg assignment, braces, and the lhs of the GEN_INT
   assignment at original line 1120 are elided.  Code byte-identical;
   comments only.  */
1102 output_size_for_block_move (size
, reg
, align
)
1103 rtx size
, reg
, align
;
1108 xoperands
[1] = size
;
1109 xoperands
[2] = align
;
/* r31-high-half knowledge is invalidated: the move below may clobber
   state the cc tracking depends on.  */
1112 cc_status
.flags
&= ~ CC_KNOW_HI_R31
;
1113 output_asm_insn (singlemove_string (xoperands
), xoperands
);
1115 if (GET_CODE (size
) == REG
)
1116 output_asm_insn ("sub %2,%1,%0", xoperands
);
1120 = GEN_INT (INTVAL (size
) - INTVAL (align
));
1121 cc_status
.flags
&= ~ CC_KNOW_HI_R31
;
1122 output_asm_insn ("mov %1,%0", xoperands
);
1127 /* Emit code to perform a block move.
1129 OPERANDS[0] is the destination.
1130 OPERANDS[1] is the source.
1131 OPERANDS[2] is the size.
1132 OPERANDS[3] is the known safe alignment.
1133 OPERANDS[4..6] are pseudos we can safely clobber as temps. */
/* NOTE(review): truncated extraction — the `xoperands'/`zoperands'
   declarations, the loop-index declaration `i', the `chunk_size'
   declaration and its assignments in the alignment ladder, the align>4
   clamp condition, the CC_STATUS_INIT at original line ~1228, various
   braces and early returns after the unrolled special cases are all
   elided.  Do not edit logic without the complete file.  Code
   byte-identical; comments only.  */
1136 output_block_move (operands
)
1139 /* A vector for our computed operands. Note that load_output_address
1140 makes use of (and can clobber) up to the 8th element of this vector. */
1143 static int movstrsi_label
= 0;
1145 rtx temp1
= operands
[4];
1146 rtx alignrtx
= operands
[3];
1147 int align
= INTVAL (alignrtx
);
1150 xoperands
[0] = operands
[0];
1151 xoperands
[1] = operands
[1];
1152 xoperands
[2] = temp1
;
1154 /* We can't move more than four bytes at a time
1155 because we have only one register to move them through. */
1159 alignrtx
= GEN_INT (4);
1162 /* Recognize special cases of block moves. These occur
1163 when GNU C++ is forced to treat something as BLKmode
1164 to keep it in memory, when its mode could be represented
1165 with something smaller.
1167 We cannot do this for global variables, since we don't know
1168 what pages they don't cross. Sigh. */
1169 if (GET_CODE (operands
[2]) == CONST_INT
1170 && ! CONSTANT_ADDRESS_P (operands
[0])
1171 && ! CONSTANT_ADDRESS_P (operands
[1]))
1173 int size
= INTVAL (operands
[2]);
1174 rtx op0
= xoperands
[0];
1175 rtx op1
= xoperands
[1];
/* Fully unrolled copies for small constant sizes: word, halfword, or
   byte granularity depending on alignment, up to 16 chunks, copied
   backwards through r31.  */
1177 if ((align
& 3) == 0 && (size
& 3) == 0 && (size
>> 2) <= 16)
1179 if (memory_address_p (SImode
, plus_constant (op0
, size
))
1180 && memory_address_p (SImode
, plus_constant (op1
, size
)))
1182 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1183 for (i
= (size
>>2)-1; i
>= 0; i
--)
1185 xoperands
[0] = plus_constant (op0
, i
* 4);
1186 xoperands
[1] = plus_constant (op1
, i
* 4);
1187 output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
1193 else if ((align
& 1) == 0 && (size
& 1) == 0 && (size
>> 1) <= 16)
1195 if (memory_address_p (HImode
, plus_constant (op0
, size
))
1196 && memory_address_p (HImode
, plus_constant (op1
, size
)))
1198 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1199 for (i
= (size
>>1)-1; i
>= 0; i
--)
1201 xoperands
[0] = plus_constant (op0
, i
* 2);
1202 xoperands
[1] = plus_constant (op1
, i
* 2);
1203 output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
1209 else if (size
<= 16)
1211 if (memory_address_p (QImode
, plus_constant (op0
, size
))
1212 && memory_address_p (QImode
, plus_constant (op1
, size
)))
1214 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1215 for (i
= size
-1; i
>= 0; i
--)
1217 xoperands
[0] = plus_constant (op0
, i
);
1218 xoperands
[1] = plus_constant (op1
, i
);
1219 output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
1227 /* Since we clobber untold things, nix the condition codes. */
1230 /* This is the size of the transfer.
1231 Either use the register which already contains the size,
1232 or use a free register (used by no operands). */
1233 output_size_for_block_move (operands
[2], operands
[4], alignrtx
);
1236 /* Also emit code to decrement the size value by ALIGN. */
1237 zoperands
[0] = operands
[0];
1238 zoperands
[3] = plus_constant (operands
[0], align
);
1239 output_load_address (zoperands
);
1242 /* Generate number for unique label. */
1244 xoperands
[3] = GEN_INT (movstrsi_label
++);
1246 /* Calculate the size of the chunks we will be trying to move first. */
1249 if ((align
& 3) == 0)
1251 else if ((align
& 1) == 0)
1257 /* Copy the increment (negative) to a register for bla insn. */
1259 xoperands
[4] = GEN_INT (- chunk_size
);
1260 xoperands
[5] = operands
[5];
1261 output_asm_insn ("adds %4,%?r0,%5", xoperands
);
1263 /* Predecrement the loop counter. This happens again also in the `bla'
1264 instruction which precedes the loop, but we need to have it done
1265 two times before we enter the loop because of the bizarre semantics
1266 of the bla instruction. */
1268 output_asm_insn ("adds %5,%2,%2", xoperands
);
1270 /* Check for the case where the original count was less than or equal to
1271 zero. Avoid going through the loop at all if the original count was
1272 indeed less than or equal to zero. Note that we treat the count as
1273 if it were a signed 32-bit quantity here, rather than an unsigned one,
1274 even though we really shouldn't. We have to do this because of the
1275 semantics of the `ble' instruction, which assume that the count is
1276 a signed 32-bit value. Anyway, in practice it won't matter because
1277 nobody is going to try to do a memcpy() of more than half of the
1278 entire address space (i.e. 2 gigabytes) anyway. */
1280 output_asm_insn ("bc .Le%3", xoperands
);
1282 /* Make available a register which is a temporary. */
1284 xoperands
[6] = operands
[6];
1286 /* Now the actual loop.
1287 In xoperands, elements 1 and 0 are the input and output vectors.
1288 Element 2 is the loop index. Element 5 is the increment. */
1290 output_asm_insn ("subs %1,%5,%1", xoperands
);
1291 output_asm_insn ("bla %5,%2,.Lm%3", xoperands
);
1292 output_asm_insn ("adds %0,%2,%6", xoperands
);
1293 output_asm_insn ("\n.Lm%3:", xoperands
); /* Label for bla above. */
1294 output_asm_insn ("\n.Ls%3:", xoperands
); /* Loop start label. */
1295 output_asm_insn ("adds %5,%6,%6", xoperands
);
1297 /* NOTE: The code here which is supposed to handle the cases where the
1298 sources and destinations are known to start on a 4 or 2 byte boundary
1299 are currently broken. They fail to do anything about the overflow
1300 bytes which might still need to be copied even after we have copied
1301 some number of words or halfwords. Thus, for now we use the lowest
1302 common denominator, i.e. the code which just copies some number of
1303 totally unaligned individual bytes. (See the calculation of
1304 chunk_size above. */
1306 if (chunk_size
== 4)
1308 output_asm_insn ("ld.l %2(%1),%?r31", xoperands
);
1309 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1310 output_asm_insn ("st.l %?r31,8(%6)", xoperands
);
1312 else if (chunk_size
== 2)
1314 output_asm_insn ("ld.s %2(%1),%?r31", xoperands
);
1315 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1316 output_asm_insn ("st.s %?r31,4(%6)", xoperands
);
1318 else /* chunk_size == 1 */
1320 output_asm_insn ("ld.b %2(%1),%?r31", xoperands
);
1321 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1322 output_asm_insn ("st.b %?r31,2(%6)", xoperands
);
1324 output_asm_insn ("\n.Le%3:", xoperands
); /* Here if count <= 0. */
1330 /* Output a delayed branch insn with the delay insn in its
1331 branch slot. The delayed branch insn template is in TEMPLATE,
1332 with operands OPERANDS. The insn in its delay slot is INSN.
1334 As a special case, since we know that all memory transfers are via
1335 ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
1336 reference around the branch as
1340 ld/st l%x(%?r31),...
1342 As another special case, we handle loading (SYMBOL_REF ...) and
1343 other large constants around branches as well:
1350 /* ??? Disabled because this re-recognition is incomplete and causes
1351 constrain_operands to segfault. Anyone who cares should fix up
1352 the code to use the DBR pass. */
/* Output a delayed branch insn with the delay insn in its
   branch slot.  The delayed branch insn template is in TEMPLATE,
   with operands OPERANDS.  The insn in its delay slot is INSN.

   Returns "" so that final() emits nothing further itself.

   NOTE(review): recovered from a mangled copy; brace-only lines, the
   parameter declarations, `rtx xoperands[2];'/`xoperands[1] = src;',
   `CC_STATUS_INIT;', `abort ();', `if (template == 0)' and the final
   `return "";' were reconstructed from context -- verify against the
   original before committing.  */

char *
output_delayed_branch (template, operands, insn)
     char *template;
     rtx *operands;
     rtx insn;
{
  /* The delay-slot insn is element 1 of the PARALLEL; its SET
     destination is element 0.  */
  rtx src = XVECEXP (PATTERN (insn), 0, 1);
  rtx dest = XVECEXP (PATTERN (insn), 0, 0);

  /* See if we are doing some branch together with setting some register
     to some 32-bit value which does (or may) have some of the high-order
     16 bits set.  If so, we need to set the register in two stages.  One
     stage must be done before the branch, and the other one can be done
     in the delay slot.  */

  if ( (GET_CODE (src) == CONST_INT
	&& ((unsigned) INTVAL (src) & (unsigned) 0xffff0000) != (unsigned) 0)
      || (GET_CODE (src) == SYMBOL_REF)
      || (GET_CODE (src) == LABEL_REF)
      || (GET_CODE (src) == CONST))
    {
      rtx xoperands[2];
      xoperands[0] = dest;
      xoperands[1] = src;

      CC_STATUS_PARTIAL_INIT;
      /* Output the `orh' insn.  */
      output_asm_insn ("orh %H1,%?r0,%0", xoperands);

      /* Output the branch instruction next.  */
      output_asm_insn (template, operands);

      /* Now output the `or' insn.  */
      output_asm_insn ("or %L1,%0,%0", xoperands);
    }
  else if ((GET_CODE (src) == MEM
	    && CONSTANT_ADDRESS_P (XEXP (src, 0)))
	   || (GET_CODE (dest) == MEM
	       && CONSTANT_ADDRESS_P (XEXP (dest, 0))))
    {
      /* A memory transfer to/from a constant address: split the ld/st
	 around the branch, doing the `orh' of the high half of the
	 address before the branch and the ld/st (relative to r31) in
	 the delay slot.  */
      rtx xoperands[2];
      char *split_template;
      xoperands[0] = dest;
      xoperands[1] = src;

      /* Output the `orh' insn, unless cc_prev_status says r31 already
	 holds the right high-half value.  */
      if (GET_CODE (src) == MEM)
	{
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h1,%?r0,%?r31", xoperands);
	    }
	  split_template = load_opcode (GET_MODE (dest),
					"%L1(%?r31),%0", dest);
	}
      else
	{
	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
		 && cc_prev_status.mdep == XEXP (operands[0], 0)))
	    {
	      CC_STATUS_INIT;
	      output_asm_insn ("orh %h0,%?r0,%?r31", xoperands);
	    }
	  split_template = store_opcode (GET_MODE (dest),
					 "%r1,%L0(%?r31)", src);
	}

      /* Output the branch instruction next.  */
      output_asm_insn (template, operands);

      /* Now output the load or store.
	 No need to do a CC_STATUS_INIT, because we are branching anyway.  */
      output_asm_insn (split_template, xoperands);
    }
  else
    {
      /* General case: build a fresh SET insn for the delay slot,
	 re-recognize it, and output its template after the branch.  */
      int insn_code_number;
      rtx pat = gen_rtx (SET, VOIDmode, dest, src);
      rtx delay_insn = gen_rtx (INSN, VOIDmode, 0, 0, 0, pat, -1, 0, 0);
      int i;

      /* Output the branch instruction first.  */
      output_asm_insn (template, operands);

      /* Now recognize the insn which we put in its delay slot.
	 We must do this after outputting the branch insn,
	 since operands may just be a pointer to `recog_operand'.  */
      INSN_CODE (delay_insn) = insn_code_number
	= recog (pat, delay_insn, NULL_PTR);
      if (insn_code_number == -1)
	abort ();

      for (i = 0; i < insn_n_operands[insn_code_number]; i++)
	{
	  if (GET_CODE (recog_operand[i]) == SUBREG)
	    recog_operand[i] = alter_subreg (recog_operand[i]);
	}

      insn_extract (delay_insn);
      if (! constrain_operands (1))
	fatal_insn_not_found (delay_insn);

      /* A null template means the output routine must be called to
	 produce the assembler text.  */
      template = insn_template[insn_code_number];
      if (template == 0)
	template = (*insn_outfun[insn_code_number]) (recog_operand,
						     delay_insn);
      output_asm_insn (template, recog_operand);
    }
  return "";
}
1469 /* Output a newly constructed insn DELAY_INSN. */
/* Output a newly constructed insn DELAY_INSN.

   Recognizes DELAY_INSN, fixes up any SUBREG operands that final has
   not yet scanned, updates the cc_status tracking, and emits the
   insn's assembler template.  Returns "".

   NOTE(review): recovered from a mangled copy; brace-only lines, the
   parameter declaration, `char *template;'/`int i;', the two
   `abort ();' calls, `if (template == 0)' and the final `return "";'
   were reconstructed from context -- verify before committing.  */

char *
output_delay_insn (delay_insn)
     rtx delay_insn;
{
  char *template;
  int insn_code_number;
  int i;

  /* Now recognize the insn which we put in its delay slot.
     We must do this after outputting the branch insn,
     since operands may just be a pointer to `recog_operand'.  */
  insn_code_number = recog_memoized (delay_insn);
  if (insn_code_number == -1)
    abort ();

  /* Extract the operands of this delay insn.  */
  INSN_CODE (delay_insn) = insn_code_number;
  insn_extract (delay_insn);

  /* It is possible that this insn has not been properly scanned by final
     yet.  If this insn's operands don't appear in the peephole's
     actual operands, then they won't be fixed up by final, so we
     make sure they get fixed up here.  -- This is a kludge.  */
  for (i = 0; i < insn_n_operands[insn_code_number]; i++)
    {
      if (GET_CODE (recog_operand[i]) == SUBREG)
	recog_operand[i] = alter_subreg (recog_operand[i]);
    }

#ifdef REGISTER_CONSTRAINTS
  if (! constrain_operands (1))
    abort ();
#endif

  cc_prev_status = cc_status;

  /* Update `cc_status' for this instruction.
     The instruction's output routine may change it further.
     If the output routine for a jump insn needs to depend
     on the cc status, it should look at cc_prev_status.  */

  NOTICE_UPDATE_CC (PATTERN (delay_insn), delay_insn);

  /* Now get the template for what this insn would
     have been, without the branch.  */

  template = insn_template[insn_code_number];
  if (template == 0)
    template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn);
  output_asm_insn (template, recog_operand);
  return "";
}
1524 /* Special routine to convert an SFmode value represented as a
1525 CONST_DOUBLE into its equivalent unsigned long bit pattern.
1526 We convert the value from a double precision floating-point
1527 value to single precision first, and thence to a bit-wise
1528 equivalent unsigned long value. This routine is used when
1529 generating an immediate move of an SFmode value directly
1530 into a general register because the svr4 assembler doesn't
1531 grok floating literals in instruction operand contexts. */
/* Convert the CONST_DOUBLE rtx X, which must carry an SFmode value,
   into the bit pattern of the equivalent single-precision float,
   returned as an unsigned long.  Used when moving an SFmode constant
   directly into a general register, because the svr4 assembler does
   not grok floating literals in instruction operand contexts.

   NOTE(review): recovered from a mangled copy; the `abort ();' and the
   union store / return tail were reconstructed from context -- verify
   before committing.  */

unsigned long
sfmode_constant_to_ulong (x)
     rtx x;
{
  REAL_VALUE_TYPE d;
  union { float f; unsigned long i; } u2;

  if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != SFmode)
    abort ();

#if TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT
 error IEEE emulation needed	/* deliberate hard error: host/target
				   float formats differ */
#endif
  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
  /* Narrow to single precision, then read back the raw bits.  */
  u2.f = d;
  return u2.i;
}
1551 /* This function generates the assembly code for function entry.
1552 The macro FUNCTION_PROLOGUE in i860.h is defined to call this function.
1554 ASM_FILE is a stdio stream to output the code to.
1555 SIZE is an int: how many units of temporary storage to allocate.
1557 Refer to the array `regs_ever_live' to determine which registers
1558 to save; `regs_ever_live[I]' is nonzero if register number I
1559 is ever used in the function. This macro is responsible for
1560 knowing which registers should not be saved even if used.
1562 NOTE: `frame_lower_bytes' is the count of bytes which will lie
1563 between the new `fp' value and the new `sp' value after the
1564 prologue is done. `frame_upper_bytes' is the count of bytes
1565 that will lie between the new `fp' and the *old* `sp' value
1566 after the new `fp' is setup (in the prologue). The upper
1567 part of each frame always includes at least 2 words (8 bytes)
1568 to hold the saved frame pointer and the saved return address.
1570 The svr4 ABI for the i860 now requires that the values of the
1571 stack pointer and frame pointer registers be kept aligned to
1572 16-byte boundaries at all times. We obey that restriction here.
1574 The svr4 ABI for the i860 is entirely vague when it comes to specifying
1575 exactly where the "preserved" registers should be saved. The native
1576 svr4 C compiler I now have doesn't help to clarify the requirements
1577 very much because it is plainly out-of-date and non-ABI-compliant
1578 (in at least one important way, i.e. how it generates function
1581 The native svr4 C compiler saves the "preserved" registers (i.e.
1582 r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative
1583 offsets from the frame pointer).
1585 Previous versions of GCC also saved the "preserved" registers in the
1586 "negative" part of the frame, but they saved them using positive
1587 offsets from the (adjusted) stack pointer (after it had been adjusted
1588 to allocate space for the new frame). That's just plain wrong
1589 because if the current function calls alloca(), the stack pointer
1590 will get moved, and it will be impossible to restore the registers
1591 properly again after that.
1593 Both compilers handled parameter registers (i.e. r16-r27 and f8-f15)
1594 by copying their values either into various "preserved" registers or
1595 into stack slots in the lower part of the current frame (as seemed
1596 appropriate, depending upon subsequent usage of these values).
1598 Here we want to save the preserved registers at some offset from the
1599 frame pointer register so as to avoid any possible problems arising
1600 from calls to alloca(). We can either save them at small positive
1601 offsets from the frame pointer, or at small negative offsets from
1602 the frame pointer. If we save them at small negative offsets from
1603 the frame pointer (i.e. in the lower part of the frame) then we
1604 must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how
1605 many bytes of space we plan to use in the lower part of the frame
1606 for this purpose. Since other parts of the compiler reference the
1607 value of STARTING_FRAME_OFFSET long before final() calls this function,
1608 we would have to go ahead and assume the worst-case storage requirements
1609 for saving all of the "preserved" registers (and use that number, i.e.
1610 `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in
1611 the lower part of the frame. That could potentially be very wasteful,
1612 and that wastefulness could really hamper people compiling for embedded
1613 i860 targets with very tight limits on stack space. Thus, we choose
1614 here to save the preserved registers in the upper part of the
1615 frame, so that we can decide at the very last minute how much (or how
1616 little) space we must allocate for this purpose.
1618 To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved
1619 registers must always be saved so that the saved values of registers
1620 with higher numbers are at higher addresses. We obey that restriction
1623 There are two somewhat different ways that you can generate prologues
1624 here... i.e. pedantically ABI-compliant, and the "other" way. The
1625 "other" way is more consistent with what is currently generated by the
1626 "native" svr4 C compiler for the i860. That's important if you want
1627 to use the current (as of 8/91) incarnation of svr4 SDB for the i860.
1628 The SVR4 SDB for the i860 insists on having function prologues be
1631 To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES
1632 in the i860svr4.h file. (By default this is *not* defined).
1634 The differences between the ABI-compliant and non-ABI-compliant prologues
1635 are that (a) the ABI version seems to require the use of *signed*
1636 (rather than unsigned) adds and subtracts, and (b) the ordering of
1637 the various steps (e.g. saving preserved registers, saving the
1638 return address, setting up the new frame pointer value) is different.
1640 For strict ABI compliance, it seems to be the case that the very last
1641 thing that is supposed to happen in the prologue is getting the frame
1642 pointer set to its new value (but only after everything else has
1643 already been properly setup). We do that here, but only if the symbol
1644 I860_STRICT_ABI_PROLOGUES is defined.
1647 #ifndef STACK_ALIGNMENT
1648 #define STACK_ALIGNMENT 16
1651 extern char call_used_regs
[];
1652 extern int leaf_function_p ();
1654 char *current_function_original_name
;
1656 static int must_preserve_r1
;
1657 static unsigned must_preserve_bytes
;
/* Emit the assembly for function entry to ASM_FILE; LOCAL_BYTES is the
   size in bytes of this function's local (automatic) storage.  See the
   long comment above for the frame-layout rules being followed.

   Also sets the file-scope `must_preserve_r1' and `must_preserve_bytes'
   which function_epilogue later relies on.

   NOTE(review): recovered from a mangled copy; the `void' return type,
   brace-only lines and the trailing `i860_reg_prefix);' argument of
   each register-save fprintf were reconstructed from context -- verify
   before committing.  */

void
function_prologue (asm_file, local_bytes)
     register FILE *asm_file;
     register unsigned local_bytes;
{
  register unsigned frame_lower_bytes;
  register unsigned frame_upper_bytes;
  register unsigned total_fsize;
  register unsigned preserved_reg_bytes = 0;
  register unsigned i;
  register unsigned preserved_so_far = 0;

  /* r1 (the return address) must be saved unless this is known to be a
     leaf function at -O2 or better; the upper frame then holds the
     saved fp (4 bytes) plus optionally the saved r1 (4 more).  */
  must_preserve_r1 = (optimize < 2 || ! leaf_function_p ());
  must_preserve_bytes = 4 + (must_preserve_r1 ? 4 : 0);

  /* Count registers that need preserving.  Ignore r0.  It never needs
     preserving.  */

  for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (regs_ever_live[i] && ! call_used_regs[i])
	preserved_reg_bytes += 4;
    }

  /* Round-up the frame_lower_bytes so that it's a multiple of 16.  */

  frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;

  /* The upper part of each frame will contain the saved fp,
     the saved r1, and stack slots for all of the other "preserved"
     registers that we find we will need to save & restore.  */

  frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;

  /* Round-up the frame_upper_bytes so that it's a multiple of 16.  */

  frame_upper_bytes
    = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;

  total_fsize = frame_upper_bytes + frame_lower_bytes;

#ifndef I860_STRICT_ABI_PROLOGUES

  /* There are two kinds of function prologues.
     You use the "small" version if the total frame size is
     small enough so that it can fit into an immediate 16-bit
     value in one instruction.  Otherwise, you use the "large"
     version of the function prologue.  */

  if (total_fsize > 0x7fff)
    {
      /* Adjust the stack pointer.  The ABI sez to do this using `adds',
	 but the native C compiler on svr4 uses `addu'.  */

      fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
	frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);

      /* Save the old frame pointer.  */

      fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
	i860_reg_prefix, i860_reg_prefix);

      /* Setup the new frame pointer.  The ABI sez to do this after
	 preserving registers (using adds), but that's not what the
	 native C compiler on svr4 does.  */

      fprintf (asm_file, "\taddu 0,%ssp,%sfp\n",
	i860_reg_prefix, i860_reg_prefix);

      /* Get the value of frame_lower_bytes into r31.  */

      fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
	frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
      fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
	frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);

      /* Now re-adjust the stack pointer using the value in r31.
	 The ABI sez to do this with `subs' but SDB may prefer `subu'.  */

      fprintf (asm_file, "\tsubu %ssp,%sr31,%ssp\n",
	i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);

      /* Preserve registers.  The ABI sez to do this before setting
	 up the new frame pointer, but that's not what the native
	 C compiler on svr4 does.  */

      for (i = 1; i < 32; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
	    i860_reg_prefix, reg_names[i],
	    must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      for (i = 32; i < 64; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
	    i860_reg_prefix, reg_names[i],
	    must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      /* Save the return address.  */

      if (must_preserve_r1)
	fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
	  i860_reg_prefix, i860_reg_prefix);
    }
  else
    {
      /* Adjust the stack pointer.  The ABI sez to do this using `adds',
	 but the native C compiler on svr4 uses `addu'.  */

      fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
	total_fsize, i860_reg_prefix, i860_reg_prefix);

      /* Save the old frame pointer.  */

      fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
	i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);

      /* Setup the new frame pointer.  The ABI sez to do this after
	 preserving registers and after saving the return address,
	 (and it sez to do this using adds), but that's not what the
	 native C compiler on svr4 does.  */

      fprintf (asm_file, "\taddu %d,%ssp,%sfp\n",
	frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);

      /* Preserve registers.  The ABI sez to do this before setting
	 up the new frame pointer, but that's not what the native
	 compiler on svr4 does.  */

      for (i = 1; i < 32; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
	    i860_reg_prefix, reg_names[i],
	    must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      for (i = 32; i < 64; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
	    i860_reg_prefix, reg_names[i],
	    must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      /* Save the return address.  The ABI sez to do this earlier,
	 and also via an offset from %sp, but the native C compiler
	 on svr4 does it later (i.e. now) and uses an offset from
	 %fp.  */

      if (must_preserve_r1)
	fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
	  i860_reg_prefix, i860_reg_prefix);
    }

#else /* defined(I860_STRICT_ABI_PROLOGUES) */

  /* There are two kinds of function prologues.
     You use the "small" version if the total frame size is
     small enough so that it can fit into an immediate 16-bit
     value in one instruction.  Otherwise, you use the "large"
     version of the function prologue.  */

  if (total_fsize > 0x7fff)
    {
      /* Adjust the stack pointer (thereby allocating a new frame).  */

      fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
	frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);

      /* Save the caller's frame pointer.  */

      fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
	i860_reg_prefix, i860_reg_prefix);

      /* Save return address.  */

      if (must_preserve_r1)
	fprintf (asm_file, "\tst.l %sr1,4(%ssp)\n",
	  i860_reg_prefix, i860_reg_prefix);

      /* Get the value of frame_lower_bytes into r31 for later use.  */

      fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
	frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
      fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
	frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);

      /* Now re-adjust the stack pointer using the value in r31.  */

      fprintf (asm_file, "\tsubs %ssp,%sr31,%ssp\n",
	i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);

      /* Pre-compute value to be used as the new frame pointer.  */

      fprintf (asm_file, "\tadds %ssp,%sr31,%sr31\n",
	i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);

      /* Preserve registers.  */

      for (i = 1; i < 32; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tst.l %s%s,%d(%sr31)\n",
	    i860_reg_prefix, reg_names[i],
	    must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      for (i = 32; i < 64; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tfst.l %s%s,%d(%sr31)\n",
	    i860_reg_prefix, reg_names[i],
	    must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      /* Actually set the new value of the frame pointer.  */

      fprintf (asm_file, "\tmov %sr31,%sfp\n",
	i860_reg_prefix, i860_reg_prefix);
    }
  else
    {
      /* Adjust the stack pointer.  */

      fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
	total_fsize, i860_reg_prefix, i860_reg_prefix);

      /* Save the caller's frame pointer.  */

      fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
	i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);

      /* Save the return address.  */

      if (must_preserve_r1)
	fprintf (asm_file, "\tst.l %sr1,%d(%ssp)\n",
	  i860_reg_prefix, frame_lower_bytes + 4, i860_reg_prefix);

      /* Preserve registers.  */

      for (i = 1; i < 32; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tst.l %s%s,%d(%ssp)\n",
	    i860_reg_prefix, reg_names[i],
	    frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      for (i = 32; i < 64; i++)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  fprintf (asm_file, "\tfst.l %s%s,%d(%ssp)\n",
	    i860_reg_prefix, reg_names[i],
	    frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
	    i860_reg_prefix);

      /* Setup the new frame pointer.  */

      fprintf (asm_file, "\tadds %d,%ssp,%sfp\n",
	frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
    }

#endif /* defined(I860_STRICT_ABI_PROLOGUES) */

#ifdef ASM_OUTPUT_PROLOGUE_SUFFIX
  ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file);
#endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */
}
1924 /* This function generates the assembly code for function exit.
1925 The macro FUNCTION_EPILOGUE in i860.h is defined to call this function.
1927 ASM_FILE is a stdio stream to output the code to.
1928 SIZE is an int: how many units of temporary storage to allocate.
1930 The function epilogue should not depend on the current stack pointer!
1931 It should use the frame pointer only. This is mandatory because
1932 of alloca; we also take advantage of it to omit stack adjustments
1935 Note that when we go to restore the preserved register values we must
1936 not try to address their slots by using offsets from the stack pointer.
1937 That's because the stack pointer may have been moved during the function
1938 execution due to a call to alloca(). Rather, we must restore all
1939 preserved registers via offsets from the frame pointer value.
1941 Note also that when the current frame is being "popped" (by adjusting
1942 the value of the stack pointer) on function exit, we must (for the
1943 sake of alloca) set the new value of the stack pointer based upon
1944 the current value of the frame pointer. We can't just add what we
1945 believe to be the (static) frame size to the stack pointer because
1946 if we did that, and alloca() had been called during this function,
1947 we would end up returning *without* having fully deallocated all of
1948 the space grabbed by alloca. If that happened, and a function
1949 containing one or more alloca() calls was called over and over again,
1950 then the stack would grow without limit!
1952 Finally note that the epilogues generated here are completely ABI
1953 compliant. They go out of their way to ensure that the value in
1954 the frame pointer register is never less than the value in the stack
1955 pointer register. It's not clear why this relationship needs to be
1956 maintained at all times, but maintaining it only costs one extra
1957 instruction, so what the hell.
/* This corresponds to a version 4 TDESC structure.  Lower numbered
   versions successively omit the last word of the structure.  We
   don't try to handle version 5 here.  */

/* NOTE(review): the `version', `reg_packing' and `reserved' bit-fields
   and the TDESC `flags'/`type' members were missing from the mangled
   copy; they were reconstructed from the uses in function_epilogue
   (which sets flags->version and flags->reg_packing).  The bit-field
   widths sum to 32 bits.  Verify against the original.  */

typedef struct TDESC_flags {
	int version:4;		/* TDESC format version (4 = full) */
	int reg_packing:1;
	int callable_block:1;
	int reserved:4;
	int fregs:6;	/* fp regs 2-7 */
	int iregs:16;	/* regs 0-15 */
} TDESC_flags;

typedef struct TDESC {
	int flags;
	int integer_reg_offset;		/* same as must_preserve_bytes */
	int floating_point_reg_offset;
	unsigned int positive_frame_size;	/* same as frame_upper_bytes */
	unsigned int negative_frame_size;	/* same as frame_lower_bytes */
	int type;
} TDESC;
1982 function_epilogue (asm_file
, local_bytes
)
1983 register FILE *asm_file
;
1984 register unsigned local_bytes
;
1986 register unsigned frame_upper_bytes
;
1987 register unsigned frame_lower_bytes
;
1988 register unsigned preserved_reg_bytes
= 0;
1989 register unsigned i
;
1990 register unsigned restored_so_far
= 0;
1991 register unsigned int_restored
;
1992 register unsigned mask
;
1993 unsigned intflags
=0;
1994 register TDESC_flags
*flags
= (TDESC_flags
*) &intflags
;
1997 flags
->reg_packing
= 1;
1998 flags
->iregs
= 8; /* old fp always gets saved */
2000 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
2002 frame_lower_bytes
= (local_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
2004 /* Count the number of registers that were preserved in the prologue.
2005 Ignore r0. It is never preserved. */
2007 for (i
= 1; i
< FIRST_PSEUDO_REGISTER
; i
++)
2009 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
2010 preserved_reg_bytes
+= 4;
2013 /* The upper part of each frame will contain only saved fp,
2014 the saved r1, and stack slots for all of the other "preserved"
2015 registers that we find we will need to save & restore. */
2017 frame_upper_bytes
= must_preserve_bytes
+ preserved_reg_bytes
;
2019 /* Round-up frame_upper_bytes so that t is a multiple of 16. */
2022 = (frame_upper_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
2024 /* Restore all of the "preserved" registers that need restoring. */
2028 for (i
= 1; i
< 32; i
++, mask
<<=1)
2029 if (regs_ever_live
[i
] && ! call_used_regs
[i
]) {
2030 fprintf (asm_file
, "\tld.l %d(%sfp),%s%s\n",
2031 must_preserve_bytes
+ (4 * restored_so_far
++),
2032 i860_reg_prefix
, i860_reg_prefix
, reg_names
[i
]);
2033 if (i
> 3 && i
< 16)
2034 flags
->iregs
|= mask
;
2037 int_restored
= restored_so_far
;
2040 for (i
= 32; i
< 64; i
++) {
2041 if (regs_ever_live
[i
] && ! call_used_regs
[i
]) {
2042 fprintf (asm_file
, "\tfld.l %d(%sfp),%s%s\n",
2043 must_preserve_bytes
+ (4 * restored_so_far
++),
2044 i860_reg_prefix
, i860_reg_prefix
, reg_names
[i
]);
2045 if (i
> 33 & i
< 40)
2046 flags
->fregs
|= mask
;
2048 if (i
> 33 && i
< 40)
2052 /* Get the value we plan to use to restore the stack pointer into r31. */
2054 fprintf (asm_file
, "\tadds %d,%sfp,%sr31\n",
2055 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
2057 /* Restore the return address and the old frame pointer. */
2059 if (must_preserve_r1
) {
2060 fprintf (asm_file
, "\tld.l 4(%sfp),%sr1\n",
2061 i860_reg_prefix
, i860_reg_prefix
);
2065 fprintf (asm_file
, "\tld.l 0(%sfp),%sfp\n",
2066 i860_reg_prefix
, i860_reg_prefix
);
2068 /* Return and restore the old stack pointer value. */
2070 fprintf (asm_file
, "\tbri %sr1\n\tmov %sr31,%ssp\n",
2071 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
2073 #ifdef OUTPUT_TDESC /* Output an ABI-compliant TDESC entry */
2074 if (! frame_lower_bytes
) {
2076 if (! frame_upper_bytes
) {
2078 if (restored_so_far
== int_restored
) /* No FP saves */
2082 assemble_name(asm_file
,current_function_original_name
);
2083 fputs(".TDESC:\n", asm_file
);
2084 fprintf(asm_file
, "%s 0x%0x\n", ASM_LONG
, intflags
);
2085 fprintf(asm_file
, "%s %d\n", ASM_LONG
,
2086 int_restored
? must_preserve_bytes
: 0);
2087 if (flags
->version
> 1) {
2088 fprintf(asm_file
, "%s %d\n", ASM_LONG
,
2089 (restored_so_far
== int_restored
) ? 0 : must_preserve_bytes
+
2090 (4 * int_restored
));
2091 if (flags
->version
> 2) {
2092 fprintf(asm_file
, "%s %d\n", ASM_LONG
, frame_upper_bytes
);
2093 if (flags
->version
> 3)
2094 fprintf(asm_file
, "%s %d\n", ASM_LONG
, frame_lower_bytes
);
2098 fprintf(asm_file
, "%s ", ASM_LONG
);
2099 assemble_name(asm_file
, current_function_original_name
);
2100 fprintf(asm_file
, "\n%s ", ASM_LONG
);
2101 assemble_name(asm_file
, current_function_original_name
);
2102 fputs(".TDESC\n", asm_file
);
/* Expand a library call to __builtin_saveregs.

   Returns a pseudo holding the address of the register-save block.
   NOTE(review): the `static rtx' header line and the final
   `return ret;' were missing from the mangled copy and were
   reconstructed from context -- verify the storage class against the
   original.  */

static rtx
i860_saveregs ()
{
  rtx fn = gen_rtx_SYMBOL_REF (Pmode, "__builtin_saveregs");
  rtx save = gen_reg_rtx (Pmode);
  rtx valreg = LIBCALL_VALUE (Pmode);
  rtx ret;

  /* The return value register overlaps the first argument register.
     Save and restore it around the call.  */
  emit_move_insn (save, valreg);
  ret = emit_library_call_value (fn, NULL_RTX, 1, Pmode, 0);
  /* Make sure the result lives in a pseudo, not a hard register.  */
  if (GET_CODE (ret) != REG || REGNO (ret) < FIRST_PSEUDO_REGISTER)
    ret = copy_to_reg (ret);
  emit_move_insn (valreg, save);
  return ret;
}
/* Build the machine-specific record type used as `va_list'.

   The record has four fields: __ireg_used / __freg_used (counts of
   named integer/float parameter words), __reg_base (address of the
   register-save block) and __mem_ptr (next stack argument).  Field
   order differs between the svr4 layout and the default layout.

   NOTE(review): the `tree' header, the field types of __reg_base and
   __mem_ptr, the `#else'/`#endif' lines and the final
   `return record;' were missing from the mangled copy; pointer types
   were assumed for the two address fields -- verify before
   committing.  */

tree
i860_build_va_list ()
{
  tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
  tree record;

  record = make_node (RECORD_TYPE);

  field_ireg_used = build_decl (FIELD_DECL, get_identifier ("__ireg_used"),
				unsigned_type_node);
  field_freg_used = build_decl (FIELD_DECL, get_identifier ("__freg_used"),
				unsigned_type_node);
  field_reg_base = build_decl (FIELD_DECL, get_identifier ("__reg_base"),
			       ptr_type_node);
  field_mem_ptr = build_decl (FIELD_DECL, get_identifier ("__mem_ptr"),
			      ptr_type_node);

  DECL_FIELD_CONTEXT (field_ireg_used) = record;
  DECL_FIELD_CONTEXT (field_freg_used) = record;
  DECL_FIELD_CONTEXT (field_reg_base) = record;
  DECL_FIELD_CONTEXT (field_mem_ptr) = record;

#ifdef I860_SVR4_VA_LIST
  TYPE_FIELDS (record) = field_ireg_used;
  TREE_CHAIN (field_ireg_used) = field_freg_used;
  TREE_CHAIN (field_freg_used) = field_reg_base;
  TREE_CHAIN (field_reg_base) = field_mem_ptr;
#else
  TYPE_FIELDS (record) = field_reg_base;
  TREE_CHAIN (field_reg_base) = field_mem_ptr;
  TREE_CHAIN (field_mem_ptr) = field_ireg_used;
  TREE_CHAIN (field_ireg_used) = field_freg_used;
#endif

  layout_type (record);
  return record;
}
/* Implement `va_start'.  VALIST is the user's va_list object; NEXTARG
   points at the first anonymous argument passed on the stack;
   STDARG_P is non-zero for ISO stdarg (`...') functions.

   Dumps the parameter registers via expand_builtin_saveregs, then
   either copies the whole saved va_list block into VALIST (varargs
   case) or fills in the four fields individually (stdarg case).

   NOTE(review): the parameter declarations, local declarations,
   brace-only lines, `if (stdarg_p)' and the `#else'/`#endif' of the
   field-chain selection were missing from the mangled copy and were
   reconstructed from context -- verify before committing.  */

void
i860_va_start (stdarg_p, valist, nextarg)
     int stdarg_p;
     tree valist;
     rtx nextarg;
{
  tree saveregs, t;

  saveregs = make_tree (build_pointer_type (va_list_type_node),
			expand_builtin_saveregs ());
  saveregs = build1 (INDIRECT_REF, va_list_type_node, saveregs);

  if (stdarg_p)
    {
      tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
      tree ireg_used, freg_used, reg_base, mem_ptr;

#ifdef I860_SVR4_VA_LIST
      field_ireg_used = TYPE_FIELDS (va_list_type_node);
      field_freg_used = TREE_CHAIN (field_ireg_used);
      field_reg_base = TREE_CHAIN (field_freg_used);
      field_mem_ptr = TREE_CHAIN (field_reg_base);
#else
      field_reg_base = TYPE_FIELDS (va_list_type_node);
      field_mem_ptr = TREE_CHAIN (field_reg_base);
      field_ireg_used = TREE_CHAIN (field_mem_ptr);
      field_freg_used = TREE_CHAIN (field_ireg_used);
#endif

      ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
			 valist, field_ireg_used);
      freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
			 valist, field_freg_used);
      reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
			valist, field_reg_base);
      mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
		       valist, field_mem_ptr);

      /* __ireg_used <- number of named integer argument words.  */
      t = build_int_2 (current_function_args_info.ints, 0);
      t = build (MODIFY_EXPR, TREE_TYPE (ireg_used), ireg_used, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* __freg_used <- named FP bytes, rounded up to a doubleword.  */
      t = build_int_2 (ROUNDUP (current_function_args_info.floats, 8), 0);
      t = build (MODIFY_EXPR, TREE_TYPE (freg_used), freg_used, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* __reg_base <- base of the register-save block, taken from the
	 block __builtin_saveregs just produced.  */
      t = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
		 saveregs, field_reg_base);
      t = build (MODIFY_EXPR, TREE_TYPE (reg_base), reg_base, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* __mem_ptr <- first anonymous argument passed on the stack.  */
      t = make_tree (ptr_type_node, nextarg);
      t = build (MODIFY_EXPR, TREE_TYPE (mem_ptr), mem_ptr, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      /* Old-style varargs: just copy the entire saved block.  */
      t = build (MODIFY_EXPR, va_list_type_node, valist, saveregs);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
2233 #define NUM_PARM_FREGS 8
2234 #define NUM_PARM_IREGS 12
2235 #ifdef I860_SVR4_VARARGS
2236 #define FREG_OFFSET 0
2237 #define IREG_OFFSET (NUM_PARM_FREGS * UNITS_PER_WORD)
2239 #define FREG_OFFSET (NUM_PARM_IREGS * UNITS_PER_WORD)
2240 #define IREG_OFFSET 0
2244 i860_va_arg (valist
, type
)
2247 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2248 tree type_ptr_node
, t
;
2249 rtx lab_over
= NULL_RTX
;
2251 HOST_WIDE_INT align
;
2253 #ifdef I860_SVR4_VA_LIST
2254 field_ireg_used
= TYPE_FIELDS (va_list_type_node
);
2255 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2256 field_reg_base
= TREE_CHAIN (field_freg_used
);
2257 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2259 field_reg_base
= TYPE_FIELDS (va_list_type_node
);
2260 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2261 field_ireg_used
= TREE_CHAIN (field_mem_ptr
);
2262 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2265 field_ireg_used
= build (COMPONENT_REF
, TREE_TYPE (field_ireg_used
),
2266 valist
, field_ireg_used
);
2267 field_freg_used
= build (COMPONENT_REF
, TREE_TYPE (field_freg_used
),
2268 valist
, field_freg_used
);
2269 field_reg_base
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2270 valist
, field_reg_base
);
2271 field_mem_ptr
= build (COMPONENT_REF
, TREE_TYPE (field_mem_ptr
),
2272 valist
, field_mem_ptr
);
2274 ret
= gen_reg_rtx (Pmode
);
2275 type_ptr_node
= build_pointer_type (type
);
2277 if (! AGGREGATE_TYPE_P (type
))
2279 int nparm
, incr
, ofs
;
2283 if (FLOAT_TYPE_P (type
))
2285 field
= field_freg_used
;
2286 nparm
= NUM_PARM_FREGS
;
2292 field
= field_ireg_used
;
2293 nparm
= NUM_PARM_IREGS
;
2294 incr
= int_size_in_bytes (type
) / UNITS_PER_WORD
;
2298 lab_false
= gen_label_rtx ();
2299 lab_over
= gen_label_rtx ();
2301 emit_cmp_and_jump_insns (expand_expr (field
, NULL_RTX
, 0, 0),
2302 GEN_INT (nparm
- incr
), GT
, const0_rtx
,
2303 TYPE_MODE (TREE_TYPE (field
)),
2304 TREE_UNSIGNED (field
), 0, lab_false
);
2306 t
= fold (build (POSTINCREMENT_EXPR
, TREE_TYPE (field
), field
,
2307 build_int_2 (incr
, 0)));
2308 TREE_SIDE_EFFECTS (t
) = 1;
2310 t
= fold (build (MULT_EXPR
, TREE_TYPE (field
), field
,
2311 build_int_2 (UNITS_PER_WORD
, 0)));
2312 TREE_SIDE_EFFECTS (t
) = 1;
2314 t
= fold (build (PLUS_EXPR
, ptr_type_node
, field_reg_base
,
2315 fold (build (PLUS_EXPR
, TREE_TYPE (field
), t
,
2316 build_int_2 (ofs
, 0)))));
2317 TREE_SIDE_EFFECTS (t
) = 1;
2319 val
= expand_expr (t
, ret
, VOIDmode
, EXPAND_NORMAL
);
2321 emit_move_insn (ret
, val
);
2323 emit_jump_insn (gen_jump (lab_over
));
2325 emit_label (lab_false
);
2328 align
= TYPE_ALIGN (type
);
2329 if (align
< BITS_PER_WORD
)
2330 align
= BITS_PER_WORD
;
2331 align
/= BITS_PER_UNIT
;
2333 t
= build (PLUS_EXPR
, ptr_type_node
, field_mem_ptr
,
2334 build_int_2 (align
- 1, 0));
2335 t
= build (BIT_AND_EXPR
, ptr_type_node
, t
, build_int_2 (-align
, -1));
2337 val
= expand_expr (t
, ret
, VOIDmode
, EXPAND_NORMAL
);
2339 emit_move_insn (ret
, val
);
2341 t
= fold (build (PLUS_EXPR
, ptr_type_node
,
2342 make_tree (ptr_type_node
, ret
),
2343 build_int_2 (int_size_in_bytes (type
), 0)));
2344 t
= build (MODIFY_EXPR
, ptr_type_node
, field_mem_ptr
, t
);
2345 TREE_SIDE_EFFECTS (t
) = 1;
2346 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2349 emit_label (lab_over
);