1 /* Subroutines for insn-output.c for Intel 860
2 Copyright (C) 1989, 91, 97, 98, 1999 Free Software Foundation, Inc.
5 Written by Richard Stallman (rms@ai.mit.edu).
7 Hacked substantially by Ron Guilmette (rfg@netcom.com) to cater
8 to the whims of the System V Release 4 assembler.
10 This file is part of GNU CC.
12 GNU CC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2, or (at your option)
17 GNU CC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GNU CC; see the file COPYING. If not, write to
24 the Free Software Foundation, 59 Temple Place - Suite 330,
25 Boston, MA 02111-1307, USA. */
34 #include "hard-reg-set.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "insn-flags.h"
41 #include "insn-attr.h"
/* NOTE(review): this file is a corrupted numbered listing (original line
   numbers are embedded in the text and many lines are missing); code below
   is kept byte-identical, with review comments added.  */
/* Forward declaration of the address-register search helper defined later.  */
45 static rtx
find_addr_reg ();
/* Textual prefix for register names in emitted assembly; defaults to "".
   NOTE(review): the matching #endif is among the lines missing here.  */
47 #ifndef I860_REG_PREFIX
48 #define I860_REG_PREFIX ""
51 char *i860_reg_prefix
= I860_REG_PREFIX
;
53 /* Save information from a "cmpxx" operation until the branch is emitted. */
55 rtx i860_compare_op0
, i860_compare_op1
;
/* NOTE(review): corrupted listing -- the rest of this head comment, the
   function's opening brace, and several switch case labels are missing.
   Code kept verbatim.  */
57 /* Return non-zero if this pattern, can be evaluated safely, even if it
60 safe_insn_src_p (op
, mode
)
62 enum machine_mode mode
;
64 /* Just experimenting. */
66 /* No floating point src is safe if it contains an arithmetic
67 operation, since that operation may trap. */
68 switch (GET_CODE (op
))
/* Presumably the MEM case: safe only for a constant address -- TODO confirm
   against an intact copy of this file.  */
80 return CONSTANT_ADDRESS_P (XEXP (op
, 0));
82 /* We never need to negate or complement constants. */
84 return (mode
!= SFmode
&& mode
!= DFmode
);
101 return (mode
!= SFmode
&& mode
!= DFmode
);
/* Binary-operation case: large immediates need extra insns and so are
   presumably rejected here -- the surrounding lines are missing.  */
108 if ((GET_CODE (XEXP (op
, 0)) == CONST_INT
&& ! SMALL_INT (XEXP (op
, 0)))
109 || (GET_CODE (XEXP (op
, 1)) == CONST_INT
&& ! SMALL_INT (XEXP (op
, 1))))
/* NOTE(review): corrupted listing -- opening brace, several branches, and
   the final arguments of the refers_to_regno_p calls are missing.  Code
   kept verbatim.  */
118 /* Return 1 if REG is clobbered in IN.
119 Return 2 if REG is used in IN.
120 Return 3 if REG is both used and clobbered in IN.
121 Return 0 if neither. */
124 reg_clobbered_p (reg
, in
)
128 register enum rtx_code code
;
133 code
= GET_CODE (in
)
;
135 if (code
== SET
|| code
== CLOBBER
)
137 rtx dest
= SET_DEST (in
);
/* Strip wrappers to find the register actually being stored into.  */
141 while (GET_CODE (dest
) == STRICT_LOW_PART
142 || GET_CODE (dest
) == SUBREG
143 || GET_CODE (dest
) == SIGN_EXTRACT
144 || GET_CODE (dest
) == ZERO_EXTRACT
)
145 dest
= XEXP (dest
, 0);
149 else if (GET_CODE (dest
) == REG
150 && refers_to_regno_p (REGNO (reg
),
151 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
155 /* Anything that sets just part of the register
156 is considered using as well as setting it.
157 But note that a straight SUBREG of a single-word value
158 clobbers the entire value. */
159 if (dest
!= SET_DEST (in
)
160 && ! (GET_CODE (SET_DEST (in
)) == SUBREG
161 || UNITS_PER_WORD
>= GET_MODE_SIZE (GET_MODE (dest
))))
168 used
= refers_to_regno_p (REGNO (reg
),
169 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
172 used
= refers_to_regno_p (REGNO (reg
),
173 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
/* Encode the result: bit 0 = clobbered, bit 1 = used.  */
177 return set
+ used
* 2;
180 if (refers_to_regno_p (REGNO (reg
),
181 REGNO (reg
) + HARD_REGNO_NREGS (reg
, GET_MODE (reg
)),
/* NOTE(review): corrupted listing -- braces, returns, and several
   statements are missing from this function.  Code kept verbatim.  */
187 /* Return non-zero if OP can be written to without screwing up
188 GCC's model of what's going on. It is assumed that this operand
189 appears in the dest position of a SET insn in a conditional
190 branch's delay slot. AFTER is the label to start looking from. */
192 operand_clobbered_before_used_after (op
, after
)
196 /* Just experimenting. */
197 if (GET_CODE (op
) == CC0
)
199 if (GET_CODE (op
) == REG
)
203 if (op
== stack_pointer_rtx
)
206 /* Scan forward from the label, to see if the value of OP
207 is clobbered before the first use. */
209 for (insn
= NEXT_INSN (after
); insn
; insn
= NEXT_INSN (insn
))
211 if (GET_CODE (insn
) == NOTE
)
213 if (GET_CODE (insn
) == INSN
214 || GET_CODE (insn
) == JUMP_INSN
215 || GET_CODE (insn
) == CALL_INSN
)
217 switch (reg_clobbered_p (op
, PATTERN (insn
)))
227 /* If we reach another label without clobbering OP,
228 then we cannot safely write it here. */
229 else if (GET_CODE (insn
) == CODE_LABEL
)
231 if (GET_CODE (insn
) == JUMP_INSN
)
233 if (condjump_p (insn
))
235 /* This is a jump insn which has already
236 been mangled. We can't tell what it does. */
237 if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
239 if (! JUMP_LABEL (insn
))
241 /* Keep following jumps. */
242 insn
= JUMP_LABEL (insn
);
248 /* In both of these cases, the first insn executed
249 for this op will be a orh whatever%h,%?r0,%?r31,
250 which is tolerable. */
251 if (GET_CODE (op
) == MEM
)
252 return (CONSTANT_ADDRESS_P (XEXP (op
, 0)));
/* NOTE(review): corrupted listing -- case labels and braces missing.
   Code kept verbatim.  */
257 /* Return non-zero if this pattern, as a source to a "SET",
258 is known to yield an instruction of unit size. */
260 single_insn_src_p (op
, mode
)
262 enum machine_mode mode
;
264 switch (GET_CODE (op
))
267 /* This is not always a single insn src, technically,
268 but output_delayed_branch knows how to deal with it. */
273 /* This is not a single insn src, technically,
274 but output_delayed_branch knows how to deal with it. */
283 /* We never need to negate or complement constants. */
285 return (mode
!= DFmode
);
292 /* Detect cases that require multiple instructions. */
293 if (CONSTANT_P (XEXP (op
, 1))
294 && !(GET_CODE (XEXP (op
, 1)) == CONST_INT
295 && SMALL_INT (XEXP (op
, 1))))
307 /* Not doing floating point, since they probably
308 take longer than the branch slot they might fill. */
309 return (mode
!= SFmode
&& mode
!= DFmode
);
/* Presumably the AND case: (and x (not y)) maps to the andnot insn.  */
312 if (GET_CODE (XEXP (op
, 1)) == NOT
)
314 rtx arg
= XEXP (XEXP (op
, 1), 0);
316 && !(GET_CODE (arg
) == CONST_INT
318 || (INTVAL (arg
) & 0xffff) == 0)))
323 /* Both small and round numbers take one instruction;
325 if (CONSTANT_P (XEXP (op
, 1))
326 && !(GET_CODE (XEXP (op
, 1)) == CONST_INT
327 && (SMALL_INT (XEXP (op
, 1))
328 || (INTVAL (XEXP (op
, 1)) & 0xffff) == 0)))
337 if (SUBREG_WORD (op
) != 0)
339 return single_insn_src_p (SUBREG_REG (op
), mode
);
341 /* Not doing floating point, since they probably
342 take longer than the branch slot they might fill. */
356 /* Return non-zero only if OP is a register of mode MODE,
359 reg_or_0_operand (op
, mode
)
361 enum machine_mode mode
;
363 return (op
== const0_rtx
|| register_operand (op
, mode
)
364 || op
== CONST0_RTX (mode
));
367 /* Return truth value of whether OP can be used as an operands in a three
368 address add/subtract insn (such as add %o1,7,%l2) of mode MODE. */
371 arith_operand (op
, mode
)
373 enum machine_mode mode
;
375 return (register_operand (op
, mode
)
376 || (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
)));
379 /* Return 1 if OP is a valid first operand for a logical insn of mode MODE. */
382 logic_operand (op
, mode
)
384 enum machine_mode mode
;
386 return (register_operand (op
, mode
)
387 || (GET_CODE (op
) == CONST_INT
&& LOGIC_INT (op
)));
390 /* Return 1 if OP is a valid first operand for a shift insn of mode MODE. */
393 shift_operand (op
, mode
)
395 enum machine_mode mode
;
397 return (register_operand (op
, mode
)
398 || (GET_CODE (op
) == CONST_INT
));
401 /* Return 1 if OP is a valid first operand for either a logical insn
402 or an add insn of mode MODE. */
405 compare_operand (op
, mode
)
407 enum machine_mode mode
;
409 return (register_operand (op
, mode
)
410 || (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
) && LOGIC_INT (op
)));
413 /* Return truth value of whether OP can be used as the 5-bit immediate
414 operand of a bte or btne insn. */
417 bte_operand (op
, mode
)
419 enum machine_mode mode
;
421 return (register_operand (op
, mode
)
422 || (GET_CODE (op
) == CONST_INT
423 && (unsigned) INTVAL (op
) < 0x20));
426 /* Return 1 if OP is an indexed memory reference of mode MODE. */
429 indexed_operand (op
, mode
)
431 enum machine_mode mode
;
433 return (GET_CODE (op
) == MEM
&& GET_MODE (op
) == mode
434 && GET_CODE (XEXP (op
, 0)) == PLUS
435 && GET_MODE (XEXP (op
, 0)) == SImode
436 && register_operand (XEXP (XEXP (op
, 0), 0), SImode
)
437 && register_operand (XEXP (XEXP (op
, 0), 1), SImode
));
440 /* Return 1 if OP is a suitable source operand for a load insn
444 load_operand (op
, mode
)
446 enum machine_mode mode
;
448 return (memory_operand (op
, mode
) || indexed_operand (op
, mode
));
451 /* Return truth value of whether OP is a integer which fits the
452 range constraining immediate operands in add/subtract insns. */
457 enum machine_mode mode
;
459 return (GET_CODE (op
) == CONST_INT
&& SMALL_INT (op
));
462 /* Return truth value of whether OP is a integer which fits the
463 range constraining immediate operands in logic insns. */
468 enum machine_mode mode
;
470 return (GET_CODE (op
) == CONST_INT
&& LOGIC_INT (op
));
473 /* Test for a valid operand for a call instruction.
474 Don't allow the arg pointer register or virtual regs
475 since they may change into reg + const, which the patterns
479 call_insn_operand (op
, mode
)
481 enum machine_mode mode
;
483 if (GET_CODE (op
) == MEM
484 && (CONSTANT_ADDRESS_P (XEXP (op
, 0))
485 || (GET_CODE (XEXP (op
, 0)) == REG
486 && XEXP (op
, 0) != arg_pointer_rtx
487 && !(REGNO (XEXP (op
, 0)) >= FIRST_PSEUDO_REGISTER
488 && REGNO (XEXP (op
, 0)) <= LAST_VIRTUAL_REGISTER
))))
/* NOTE(review): corrupted listing -- braces, declarations (e.g. the
   xoperands array) and several lines are missing.  Code kept verbatim.
   The cc_status/cc_prev_status CC_KNOW_HI_R31 logic tracks whether r31
   already holds the %hi part of an address, to skip redundant `orh's.  */
493 /* Return the best assembler insn template
494 for moving operands[1] into operands[0] as a fullword. */
497 singlemove_string (operands
)
500 if (GET_CODE (operands
[0]) == MEM
)
502 if (GET_CODE (operands
[1]) != MEM
)
503 if (CONSTANT_ADDRESS_P (XEXP (operands
[0], 0)))
505 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
506 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
507 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
510 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
512 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
513 cc_status
.mdep
= XEXP (operands
[0], 0);
514 return "st.l %r1,%L0(%?r31)";
517 return "st.l %r1,%0";
/* Mem-to-mem move: bounce the word through float reg f32 -- presumably;
   surrounding lines are missing.  */
524 cc_status
.flags
&= ~CC_F0_IS_0
;
525 xoperands
[0] = gen_rtx_REG (SFmode
, 32);
526 xoperands
[1] = operands
[1];
527 output_asm_insn (singlemove_string (xoperands
), xoperands
);
528 xoperands
[1] = xoperands
[0];
529 xoperands
[0] = operands
[0];
530 output_asm_insn (singlemove_string (xoperands
), xoperands
);
535 if (GET_CODE (operands
[1]) == MEM
)
537 if (CONSTANT_ADDRESS_P (XEXP (operands
[1], 0)))
539 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
540 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
541 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
544 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
546 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
547 cc_status
.mdep
= XEXP (operands
[1], 0);
548 return "ld.l %L1(%?r31),%0";
550 return "ld.l %m1,%0";
/* Immediate loads: pick the cheapest single-insn encoding.  */
552 if (GET_CODE (operands
[1]) == CONST_INT
)
554 if (operands
[1] == const0_rtx
)
555 return "mov %?r0,%0";
556 if((INTVAL (operands
[1]) & 0xffff0000) == 0)
557 return "or %L1,%?r0,%0";
558 if((INTVAL (operands
[1]) & 0xffff8000) == 0xffff8000)
559 return "adds %1,%?r0,%0";
560 if((INTVAL (operands
[1]) & 0x0000ffff) == 0)
561 return "orh %H1,%?r0,%0";
/* NOTE(review): corrupted listing -- braces, optype assignments, the
   latehalf/xops declarations and other lines are missing.  Code kept
   verbatim; ordering of the two word moves is intricate, so no rewrite
   is attempted.  */
566 /* Output assembler code to perform a doubleword move insn
567 with operands OPERANDS. */
570 output_move_double (operands
)
573 enum { REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
} optype0
, optype1
;
575 rtx addreg0
= 0, addreg1
= 0;
576 int highest_first
= 0;
577 int no_addreg1_decrement
= 0;
579 /* First classify both operands. */
581 if (REG_P (operands
[0]))
583 else if (offsettable_memref_p (operands
[0]))
585 else if (GET_CODE (operands
[0]) == MEM
)
590 if (REG_P (operands
[1]))
592 else if (CONSTANT_P (operands
[1]))
594 else if (offsettable_memref_p (operands
[1]))
596 else if (GET_CODE (operands
[1]) == MEM
)
601 /* Check for the cases that the operand constraints are not
602 supposed to allow to happen. Abort if we get one,
603 because generating code for these cases is painful. */
605 if (optype0
== RNDOP
|| optype1
== RNDOP
)
608 /* If an operand is an unoffsettable memory ref, find a register
609 we can increment temporarily to make it refer to the second word. */
611 if (optype0
== MEMOP
)
612 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
614 if (optype1
== MEMOP
)
615 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
617 /* ??? Perhaps in some cases move double words
618 if there is a spare pair of floating regs. */
620 /* Ok, we can do one word at a time.
621 Normally we do the low-numbered word first,
622 but if either operand is autodecrementing then we
623 do the high-numbered word first.
625 In either case, set up in LATEHALF the operands to use
626 for the high-numbered word and in some cases alter the
627 operands in OPERANDS to be suitable for the low-numbered word. */
629 if (optype0
== REGOP
)
630 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
631 else if (optype0
== OFFSOP
)
632 latehalf
[0] = adj_offsettable_operand (operands
[0], 4);
634 latehalf
[0] = operands
[0];
636 if (optype1
== REGOP
)
637 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
638 else if (optype1
== OFFSOP
)
639 latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
640 else if (optype1
== CNSTOP
)
642 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
643 split_double (operands
[1], &operands
[1], &latehalf
[1]);
644 else if (CONSTANT_P (operands
[1]))
645 latehalf
[1] = const0_rtx
;
648 latehalf
[1] = operands
[1];
650 /* If the first move would clobber the source of the second one,
651 do them in the other order.
653 RMS says "This happens only for registers;
654 such overlap can't happen in memory unless the user explicitly
655 sets it up, and that is an undefined circumstance."
657 but it happens on the sparc when loading parameter registers,
658 so I am going to define that circumstance, and make it work
661 if (optype0
== REGOP
&& optype1
== REGOP
662 && REGNO (operands
[0]) == REGNO (latehalf
[1]))
664 CC_STATUS_PARTIAL_INIT
;
665 /* Make any unoffsettable addresses point at high-numbered word. */
667 output_asm_insn ("adds 0x4,%0,%0", &addreg0
);
669 output_asm_insn ("adds 0x4,%0,%0", &addreg1
);
672 output_asm_insn (singlemove_string (latehalf
), latehalf
);
674 /* Undo the adds we just did. */
676 output_asm_insn ("adds -0x4,%0,%0", &addreg0
);
678 output_asm_insn ("adds -0x4,%0,%0", &addreg1
);
680 /* Do low-numbered word. */
681 return singlemove_string (operands
);
683 else if (optype0
== REGOP
&& optype1
!= REGOP
684 && reg_overlap_mentioned_p (operands
[0], operands
[1]))
686 /* If both halves of dest are used in the src memory address,
687 add the two regs and put them in the low reg (operands[0]).
688 Then it works to load latehalf first. */
689 if (reg_mentioned_p (operands
[0], XEXP (operands
[1], 0))
690 && reg_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
693 xops
[0] = latehalf
[0];
694 xops
[1] = operands
[0];
695 output_asm_insn ("adds %1,%0,%1", xops
);
696 operands
[1] = gen_rtx_MEM (DImode
, operands
[0]);
697 latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
701 /* Only one register in the dest is used in the src memory address,
702 and this is the first register of the dest, so we want to do
703 the late half first here also. */
704 else if (! reg_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
706 /* Only one register in the dest is used in the src memory address,
707 and this is the second register of the dest, so we want to do
708 the late half last. If addreg1 is set, and addreg1 is the same
709 register as latehalf, then we must suppress the trailing decrement,
710 because it would clobber the value just loaded. */
711 else if (addreg1
&& reg_mentioned_p (addreg1
, latehalf
[0]))
712 no_addreg1_decrement
= 1;
715 /* Normal case: do the two words, low-numbered first.
716 Overlap case (highest_first set): do high-numbered word first. */
719 output_asm_insn (singlemove_string (operands
), operands
);
721 CC_STATUS_PARTIAL_INIT
;
722 /* Make any unoffsettable addresses point at high-numbered word. */
724 output_asm_insn ("adds 0x4,%0,%0", &addreg0
);
726 output_asm_insn ("adds 0x4,%0,%0", &addreg1
);
729 output_asm_insn (singlemove_string (latehalf
), latehalf
);
731 /* Undo the adds we just did. */
733 output_asm_insn ("adds -0x4,%0,%0", &addreg0
);
734 if (addreg1
&& !no_addreg1_decrement
)
735 output_asm_insn ("adds -0x4,%0,%0", &addreg1
);
738 output_asm_insn (singlemove_string (operands
), operands
);
/* Output the assembler template for a doubleword floating-point move.
   NOTE(review): corrupted listing -- the head comment, return type and
   braces are missing.  Code kept verbatim.  */
744 output_fp_move_double (operands
)
747 /* If the source operand is any sort of zero, use f0 instead. */
749 if (operands
[1] == CONST0_RTX (GET_MODE (operands
[1])))
750 operands
[1] = gen_rtx_REG (DFmode
, F0_REGNUM
);
752 if (FP_REG_P (operands
[0]))
754 if (FP_REG_P (operands
[1]))
755 return "fmov.dd %1,%0";
756 if (GET_CODE (operands
[1]) == REG
)
/* Integer reg -> FP reg: transfer word by word via ixfr, bumping both
   regnos to reach the second word.  */
758 output_asm_insn ("ixfr %1,%0", operands
);
759 operands
[0] = gen_rtx_REG (VOIDmode
, REGNO (operands
[0]) + 1);
760 operands
[1] = gen_rtx_REG (VOIDmode
, REGNO (operands
[1]) + 1);
763 if (operands
[1] == CONST0_RTX (DFmode
))
764 return "fmov.dd f0,%0";
765 if (CONSTANT_ADDRESS_P (XEXP (operands
[1], 0)))
767 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
768 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
769 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
772 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
774 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
775 cc_status
.mdep
= XEXP (operands
[1], 0);
776 return "fld.d %L1(%?r31),%0";
778 return "fld.d %1,%0";
780 else if (FP_REG_P (operands
[1]))
782 if (GET_CODE (operands
[0]) == REG
)
/* FP reg -> integer reg: transfer word by word via fxfr.  */
784 output_asm_insn ("fxfr %1,%0", operands
);
785 operands
[0] = gen_rtx_REG (VOIDmode
, REGNO (operands
[0]) + 1);
786 operands
[1] = gen_rtx_REG (VOIDmode
, REGNO (operands
[1]) + 1);
789 if (CONSTANT_ADDRESS_P (XEXP (operands
[0], 0)))
791 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
792 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
793 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
796 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
798 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
799 cc_status
.mdep
= XEXP (operands
[0], 0);
800 return "fst.d %1,%L0(%?r31)";
802 return "fst.d %1,%0";
810 /* Return a REG that occurs in ADDR with coefficient 1.
811 ADDR can be effectively incremented by incrementing REG. */
817 while (GET_CODE (addr
) == PLUS
)
819 if (GET_CODE (XEXP (addr
, 0)) == REG
)
820 addr
= XEXP (addr
, 0);
821 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
822 addr
= XEXP (addr
, 1);
823 else if (CONSTANT_P (XEXP (addr
, 0)))
824 addr
= XEXP (addr
, 1);
825 else if (CONSTANT_P (XEXP (addr
, 1)))
826 addr
= XEXP (addr
, 0);
830 if (GET_CODE (addr
) == REG
)
/* NOTE(review): corrupted listing -- nearly the whole body (the switch
   selecting an opcode string per mode, and the buf declaration) is
   missing; only the final sprintf survives.  Code kept verbatim.  */
837 /* Return a template for a load instruction with mode MODE and
838 arguments from the string ARGS.
840 This string is in static storage. */
843 load_opcode (mode
, args
, reg
)
844 enum machine_mode mode
;
/* Format "<opcode> <args>" into the static buffer and presumably return
   it -- the return statement is among the missing lines.  */
880 sprintf (buf
, "%s %s", opcode
, args
);
/* NOTE(review): corrupted listing -- nearly the whole body (the switch
   selecting an opcode string per mode, and the buf declaration) is
   missing; only the final sprintf survives.  Code kept verbatim.  */
884 /* Return a template for a store instruction with mode MODE and
885 arguments from the string ARGS.
887 This string is in static storage. */
890 store_opcode (mode
, args
, reg
)
891 enum machine_mode mode
;
/* Format "<opcode> <args>" into the static buffer and presumably return
   it -- the return statement is among the missing lines.  */
927 sprintf (buf
, "%s %s", opcode
, args
);
931 /* Output a store-in-memory whose operands are OPERANDS[0,1].
932 OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.
934 This function returns a template for an insn.
935 This is in static storage.
937 It may also output some insns directly.
938 It may alter the values of operands[0] and operands[1]. */
941 output_store (operands
)
944 enum machine_mode mode
= GET_MODE (operands
[0]);
945 rtx address
= XEXP (operands
[0], 0);
948 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
949 cc_status
.mdep
= address
;
951 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
952 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
953 && address
== cc_prev_status
.mdep
))
956 output_asm_insn ("orh %h0,%?r0,%?r31", operands
);
957 cc_prev_status
.mdep
= address
;
960 /* Store zero in two parts when appropriate. */
961 if (mode
== DFmode
&& operands
[1] == CONST0_RTX (DFmode
))
962 return store_opcode (DFmode
, "%r1,%L0(%?r31)", operands
[1]);
964 /* Code below isn't smart enough to move a doubleword in two parts,
965 so use output_move_double to do that in the cases that require it. */
966 if ((mode
== DImode
|| mode
== DFmode
)
967 && ! FP_REG_P (operands
[1]))
968 return output_move_double (operands
);
970 return store_opcode (mode
, "%r1,%L0(%?r31)", operands
[1]);
973 /* Output a load-from-memory whose operands are OPERANDS[0,1].
974 OPERANDS[0] is a reg, and OPERANDS[1] is a mem.
976 This function returns a template for an insn.
977 This is in static storage.
979 It may also output some insns directly.
980 It may alter the values of operands[0] and operands[1]. */
983 output_load (operands
)
986 enum machine_mode mode
= GET_MODE (operands
[0]);
987 rtx address
= XEXP (operands
[1], 0);
989 /* We don't bother trying to see if we know %hi(address).
990 This is because we are doing a load, and if we know the
991 %hi value, we probably also know that value in memory. */
992 cc_status
.flags
|= CC_KNOW_HI_R31
| CC_HI_R31_ADJ
;
993 cc_status
.mdep
= address
;
995 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
996 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
997 && address
== cc_prev_status
.mdep
998 && cc_prev_status
.mdep
== cc_status
.mdep
))
1001 output_asm_insn ("orh %h1,%?r0,%?r31", operands
);
1002 cc_prev_status
.mdep
= address
;
1005 /* Code below isn't smart enough to move a doubleword in two parts,
1006 so use output_move_double to do that in the cases that require it. */
1007 if ((mode
== DImode
|| mode
== DFmode
)
1008 && ! FP_REG_P (operands
[0]))
1009 return output_move_double (operands
);
1011 return load_opcode (mode
, "%L1(%?r31),%0", operands
[0]);
/* NOTE(review): corrupted listing -- braces, returns, declarations of
   base/offset and cases (1)/(2) of the head comment are missing.  Code
   kept verbatim.  */
1015 /* Load the address specified by OPERANDS[3] into the register
1016 specified by OPERANDS[0].
1018 OPERANDS[3] may be the result of a sum, hence it could either be:
1023 (3) REG + REG + CONST_INT
1024 (4) REG + REG (special case of 3).
1026 Note that (3) is not a legitimate address.
1027 All cases are handled here. */
1030 output_load_address (operands
)
1035 if (CONSTANT_P (operands
[3]))
1037 output_asm_insn ("mov %3,%0", operands
);
1041 if (REG_P (operands
[3]))
1043 if (REGNO (operands
[0]) != REGNO (operands
[3]))
1044 output_asm_insn ("shl %?r0,%3,%0", operands
);
1048 if (GET_CODE (operands
[3]) != PLUS
)
1051 base
= XEXP (operands
[3], 0);
1052 offset
= XEXP (operands
[3], 1);
1054 if (GET_CODE (base
) == CONST_INT
)
1061 if (GET_CODE (offset
) != CONST_INT
)
1063 /* Operand is (PLUS (REG) (REG)). */
1065 offset
= const0_rtx
;
1071 operands
[7] = offset
;
1072 CC_STATUS_PARTIAL_INIT
;
1073 if (SMALL_INT (offset
))
1074 output_asm_insn ("adds %7,%6,%0", operands
);
1076 output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands
);
1078 else if (GET_CODE (base
) == PLUS
)
1080 operands
[6] = XEXP (base
, 0);
1081 operands
[7] = XEXP (base
, 1);
1082 operands
[8] = offset
;
1084 CC_STATUS_PARTIAL_INIT
;
1085 if (SMALL_INT (offset
))
1086 output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands
);
1088 output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands
);
/* NOTE(review): corrupted listing -- braces and the xoperands declaration
   are missing.  Code kept verbatim.  */
1095 /* Output code to place a size count SIZE in register REG.
1096 Because block moves are pipelined, we don't include the
1097 first element in the transfer of SIZE to REG.
1098 For this, we subtract ALIGN. (Actually, I think it is not
1099 right to subtract on this machine, so right now we don't.) */
1102 output_size_for_block_move (size
, reg
, align
)
1103 rtx size
, reg
, align
;
1108 xoperands
[1] = size
;
1109 xoperands
[2] = align
;
/* r31 may be clobbered by the moves below, so forget what it held.  */
1112 cc_status
.flags
&= ~ CC_KNOW_HI_R31
;
1113 output_asm_insn (singlemove_string (xoperands
), xoperands
);
1115 if (GET_CODE (size
) == REG
)
1116 output_asm_insn ("sub %2,%1,%0", xoperands
);
/* Constant size: fold the subtraction at compile time.  */
1119 xoperands
[1] = GEN_INT (INTVAL (size
) - INTVAL (align
));
1120 cc_status
.flags
&= ~ CC_KNOW_HI_R31
;
1121 output_asm_insn ("mov %1,%0", xoperands
);
/* NOTE(review): corrupted listing -- braces, declarations (xoperands,
   zoperands, i, chunk_size), returns, and several statements are missing.
   Code kept verbatim; the loop emission is order-sensitive, so no rewrite
   is attempted.  */
1126 /* Emit code to perform a block move.
1128 OPERANDS[0] is the destination.
1129 OPERANDS[1] is the source.
1130 OPERANDS[2] is the size.
1131 OPERANDS[3] is the known safe alignment.
1132 OPERANDS[4..6] are pseudos we can safely clobber as temps. */
1135 output_block_move (operands
)
1138 /* A vector for our computed operands. Note that load_output_address
1139 makes use of (and can clobber) up to the 8th element of this vector. */
1142 static int movstrsi_label
= 0;
1144 rtx temp1
= operands
[4];
1145 rtx alignrtx
= operands
[3];
1146 int align
= INTVAL (alignrtx
);
1149 xoperands
[0] = operands
[0];
1150 xoperands
[1] = operands
[1];
1151 xoperands
[2] = temp1
;
1153 /* We can't move more than four bytes at a time
1154 because we have only one register to move them through. */
1158 alignrtx
= GEN_INT (4);
1161 /* Recognize special cases of block moves. These occur
1162 when GNU C++ is forced to treat something as BLKmode
1163 to keep it in memory, when its mode could be represented
1164 with something smaller.
1166 We cannot do this for global variables, since we don't know
1167 what pages they don't cross. Sigh. */
1168 if (GET_CODE (operands
[2]) == CONST_INT
1169 && ! CONSTANT_ADDRESS_P (operands
[0])
1170 && ! CONSTANT_ADDRESS_P (operands
[1]))
1172 int size
= INTVAL (operands
[2]);
1173 rtx op0
= xoperands
[0];
1174 rtx op1
= xoperands
[1];
/* Word-aligned small copy: emit up to 16 unrolled ld.l/st.l pairs.  */
1176 if ((align
& 3) == 0 && (size
& 3) == 0 && (size
>> 2) <= 16)
1178 if (memory_address_p (SImode
, plus_constant (op0
, size
))
1179 && memory_address_p (SImode
, plus_constant (op1
, size
)))
1181 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1182 for (i
= (size
>>2)-1; i
>= 0; i
--)
1184 xoperands
[0] = plus_constant (op0
, i
* 4);
1185 xoperands
[1] = plus_constant (op1
, i
* 4);
1186 output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
/* Halfword-aligned small copy: unrolled ld.s/st.s pairs.  */
1192 else if ((align
& 1) == 0 && (size
& 1) == 0 && (size
>> 1) <= 16)
1194 if (memory_address_p (HImode
, plus_constant (op0
, size
))
1195 && memory_address_p (HImode
, plus_constant (op1
, size
)))
1197 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1198 for (i
= (size
>>1)-1; i
>= 0; i
--)
1200 xoperands
[0] = plus_constant (op0
, i
* 2);
1201 xoperands
[1] = plus_constant (op1
, i
* 2);
1202 output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
/* Byte copy of at most 16 bytes: unrolled ld.b/st.b pairs.  */
1208 else if (size
<= 16)
1210 if (memory_address_p (QImode
, plus_constant (op0
, size
))
1211 && memory_address_p (QImode
, plus_constant (op1
, size
)))
1213 cc_status
.flags
&= ~CC_KNOW_HI_R31
;
1214 for (i
= size
-1; i
>= 0; i
--)
1216 xoperands
[0] = plus_constant (op0
, i
);
1217 xoperands
[1] = plus_constant (op1
, i
);
1218 output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
1226 /* Since we clobber untold things, nix the condition codes. */
1229 /* This is the size of the transfer.
1230 Either use the register which already contains the size,
1231 or use a free register (used by no operands). */
1232 output_size_for_block_move (operands
[2], operands
[4], alignrtx
);
1235 /* Also emit code to decrement the size value by ALIGN. */
1236 zoperands
[0] = operands
[0];
1237 zoperands
[3] = plus_constant (operands
[0], align
);
1238 output_load_address (zoperands
);
1241 /* Generate number for unique label. */
1243 xoperands
[3] = GEN_INT (movstrsi_label
++);
1245 /* Calculate the size of the chunks we will be trying to move first. */
1248 if ((align
& 3) == 0)
1250 else if ((align
& 1) == 0)
1256 /* Copy the increment (negative) to a register for bla insn. */
1258 xoperands
[4] = GEN_INT (- chunk_size
);
1259 xoperands
[5] = operands
[5];
1260 output_asm_insn ("adds %4,%?r0,%5", xoperands
);
1262 /* Predecrement the loop counter. This happens again also in the `bla'
1263 instruction which precedes the loop, but we need to have it done
1264 two times before we enter the loop because of the bizarre semantics
1265 of the bla instruction. */
1267 output_asm_insn ("adds %5,%2,%2", xoperands
);
1269 /* Check for the case where the original count was less than or equal to
1270 zero. Avoid going through the loop at all if the original count was
1271 indeed less than or equal to zero. Note that we treat the count as
1272 if it were a signed 32-bit quantity here, rather than an unsigned one,
1273 even though we really shouldn't. We have to do this because of the
1274 semantics of the `ble' instruction, which assume that the count is
1275 a signed 32-bit value. Anyway, in practice it won't matter because
1276 nobody is going to try to do a memcpy() of more than half of the
1277 entire address space (i.e. 2 gigabytes) anyway. */
1279 output_asm_insn ("bc .Le%3", xoperands
);
1281 /* Make available a register which is a temporary. */
1283 xoperands
[6] = operands
[6];
1285 /* Now the actual loop.
1286 In xoperands, elements 1 and 0 are the input and output vectors.
1287 Element 2 is the loop index. Element 5 is the increment. */
1289 output_asm_insn ("subs %1,%5,%1", xoperands
);
1290 output_asm_insn ("bla %5,%2,.Lm%3", xoperands
);
1291 output_asm_insn ("adds %0,%2,%6", xoperands
);
1292 output_asm_insn ("\n.Lm%3:", xoperands
); /* Label for bla above. */
1293 output_asm_insn ("\n.Ls%3:", xoperands
); /* Loop start label. */
1294 output_asm_insn ("adds %5,%6,%6", xoperands
);
1296 /* NOTE: The code here which is supposed to handle the cases where the
1297 sources and destinations are known to start on a 4 or 2 byte boundary
1298 are currently broken. They fail to do anything about the overflow
1299 bytes which might still need to be copied even after we have copied
1300 some number of words or halfwords. Thus, for now we use the lowest
1301 common denominator, i.e. the code which just copies some number of
1302 totally unaligned individual bytes. (See the calculation of
1303 chunk_size above. */
1305 if (chunk_size
== 4)
1307 output_asm_insn ("ld.l %2(%1),%?r31", xoperands
);
1308 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1309 output_asm_insn ("st.l %?r31,8(%6)", xoperands
);
1311 else if (chunk_size
== 2)
1313 output_asm_insn ("ld.s %2(%1),%?r31", xoperands
);
1314 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1315 output_asm_insn ("st.s %?r31,4(%6)", xoperands
);
1317 else /* chunk_size == 1 */
1319 output_asm_insn ("ld.b %2(%1),%?r31", xoperands
);
1320 output_asm_insn ("bla %5,%2,.Ls%3", xoperands
);
1321 output_asm_insn ("st.b %?r31,2(%6)", xoperands
);
1323 output_asm_insn ("\n.Le%3:", xoperands
); /* Here if count <= 0. */
1329 /* Output a delayed branch insn with the delay insn in its
1330 branch slot. The delayed branch insn template is in TEMPLATE,
1331 with operands OPERANDS. The insn in its delay slot is INSN.
1333 As a special case, since we know that all memory transfers are via
1334 ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
1335 reference around the branch as
1339 ld/st l%x(%?r31),...
1341 As another special case, we handle loading (SYMBOL_REF ...) and
1342 other large constants around branches as well:
1349 /* ??? Disabled because this re-recognition is incomplete and causes
1350 constrain_operands to segfault. Anyone who cares should fix up
1351 the code to use the DBR pass. */
1354 output_delayed_branch (template, operands
, insn
)
1359 rtx src
= XVECEXP (PATTERN (insn
), 0, 1);
1360 rtx dest
= XVECEXP (PATTERN (insn
), 0, 0);
1362 /* See if we are doing some branch together with setting some register
1363 to some 32-bit value which does (or may) have some of the high-order
1364 16 bits set. If so, we need to set the register in two stages. One
1365 stage must be done before the branch, and the other one can be done
1366 in the delay slot. */
1368 if ( (GET_CODE (src
) == CONST_INT
1369 && ((unsigned) INTVAL (src
) & (unsigned) 0xffff0000) != (unsigned) 0)
1370 || (GET_CODE (src
) == SYMBOL_REF
)
1371 || (GET_CODE (src
) == LABEL_REF
)
1372 || (GET_CODE (src
) == CONST
))
1375 xoperands
[0] = dest
;
1378 CC_STATUS_PARTIAL_INIT
;
1379 /* Output the `orh' insn. */
1380 output_asm_insn ("orh %H1,%?r0,%0", xoperands
);
1382 /* Output the branch instruction next. */
1383 output_asm_insn (template, operands
);
1385 /* Now output the `or' insn. */
1386 output_asm_insn ("or %L1,%0,%0", xoperands
);
1388 else if ((GET_CODE (src
) == MEM
1389 && CONSTANT_ADDRESS_P (XEXP (src
, 0)))
1390 || (GET_CODE (dest
) == MEM
1391 && CONSTANT_ADDRESS_P (XEXP (dest
, 0))))
1394 char *split_template
;
1395 xoperands
[0] = dest
;
1398 /* Output the `orh' insn. */
1399 if (GET_CODE (src
) == MEM
)
1401 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
1402 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
1403 && cc_prev_status
.mdep
== XEXP (operands
[1], 0)))
1406 output_asm_insn ("orh %h1,%?r0,%?r31", xoperands
);
1408 split_template
= load_opcode (GET_MODE (dest
),
1409 "%L1(%?r31),%0", dest
);
1413 if (! ((cc_prev_status
.flags
& CC_KNOW_HI_R31
)
1414 && (cc_prev_status
.flags
& CC_HI_R31_ADJ
)
1415 && cc_prev_status
.mdep
== XEXP (operands
[0], 0)))
1418 output_asm_insn ("orh %h0,%?r0,%?r31", xoperands
);
1420 split_template
= store_opcode (GET_MODE (dest
),
1421 "%r1,%L0(%?r31)", src
);
1424 /* Output the branch instruction next. */
1425 output_asm_insn (template, operands
);
1427 /* Now output the load or store.
1428 No need to do a CC_STATUS_INIT, because we are branching anyway. */
1429 output_asm_insn (split_template
, xoperands
);
1433 int insn_code_number
;
1434 rtx pat
= gen_rtx_SET (VOIDmode
, dest
, src
);
1435 rtx delay_insn
= gen_rtx_INSN (VOIDmode
, 0, 0, 0, pat
, -1, 0, 0);
1438 /* Output the branch instruction first. */
1439 output_asm_insn (template, operands
);
1441 /* Now recognize the insn which we put in its delay slot.
1442 We must do this after outputting the branch insn,
1443 since operands may just be a pointer to `recog_data.operand'. */
1444 INSN_CODE (delay_insn
) = insn_code_number
1445 = recog (pat
, delay_insn
, NULL_PTR
);
1446 if (insn_code_number
== -1)
1449 for (i
= 0; i
< insn_data
[insn_code_number
].n_operands
; i
++)
1451 if (GET_CODE (recog_data
.operand
[i
]) == SUBREG
)
1452 recog_data
.operand
[i
] = alter_subreg (recog_data
.operand
[i
]);
1455 insn_extract (delay_insn
);
1456 if (! constrain_operands (1))
1457 fatal_insn_not_found (delay_insn
);
1459 template = insn_data
[insn_code_number
].template;
1461 template = ((*insn_data
[insn_code_number
].outfun
)
1462 (recog_data
.operand
, delay_insn
));
1463 output_asm_insn (template, recog_data
.operand
);
1469 /* Output a newly constructed insn DELAY_INSN. */
1471 output_delay_insn (delay_insn
)
1475 int insn_code_number
;
1478 /* Now recognize the insn which we put in its delay slot.
1479 We must do this after outputting the branch insn,
1480 since operands may just be a pointer to `recog_data.operand'. */
1481 insn_code_number
= recog_memoized (delay_insn
);
1482 if (insn_code_number
== -1)
1485 /* Extract the operands of this delay insn. */
1486 INSN_CODE (delay_insn
) = insn_code_number
;
1487 insn_extract (delay_insn
);
1489 /* It is possible that this insn has not been properly scanned by final
1490 yet. If this insn's operands don't appear in the peephole's
1491 actual operands, then they won't be fixed up by final, so we
1492 make sure they get fixed up here. -- This is a kludge. */
1493 for (i
= 0; i
< insn_data
[insn_code_number
].n_operands
; i
++)
1495 if (GET_CODE (recog_data
.operand
[i
]) == SUBREG
)
1496 recog_data
.operand
[i
] = alter_subreg (recog_data
.operand
[i
]);
1499 #ifdef REGISTER_CONSTRAINTS
1500 if (! constrain_operands (1))
1504 cc_prev_status
= cc_status
;
1506 /* Update `cc_status' for this instruction.
1507 The instruction's output routine may change it further.
1508 If the output routine for a jump insn needs to depend
1509 on the cc status, it should look at cc_prev_status. */
1511 NOTICE_UPDATE_CC (PATTERN (delay_insn
), delay_insn
);
1513 /* Now get the template for what this insn would
1514 have been, without the branch. */
1516 template = insn_data
[insn_code_number
].template;
1518 template = ((*insn_data
[insn_code_number
].outfun
)
1519 (recog_data
.operand
, delay_insn
));
1520 output_asm_insn (template, recog_data
.operand
);
1525 /* Special routine to convert an SFmode value represented as a
1526 CONST_DOUBLE into its equivalent unsigned long bit pattern.
1527 We convert the value from a double precision floating-point
1528 value to single precision first, and thence to a bit-wise
1529 equivalent unsigned long value. This routine is used when
1530 generating an immediate move of an SFmode value directly
1531 into a general register because the svr4 assembler doesn't
1532 grok floating literals in instruction operand contexts. */
1535 sfmode_constant_to_ulong (x
)
1539 union { float f
; unsigned long i
; } u2
;
1541 if (GET_CODE (x
) != CONST_DOUBLE
|| GET_MODE (x
) != SFmode
)
1544 #if TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT
1545 error IEEE emulation needed
1547 REAL_VALUE_FROM_CONST_DOUBLE (d
, x
);
1552 /* This function generates the assembly code for function entry.
1553 The macro FUNCTION_PROLOGUE in i860.h is defined to call this function.
1555 ASM_FILE is a stdio stream to output the code to.
1556 SIZE is an int: how many units of temporary storage to allocate.
1558 Refer to the array `regs_ever_live' to determine which registers
1559 to save; `regs_ever_live[I]' is nonzero if register number I
1560 is ever used in the function. This macro is responsible for
1561 knowing which registers should not be saved even if used.
1563 NOTE: `frame_lower_bytes' is the count of bytes which will lie
1564 between the new `fp' value and the new `sp' value after the
1565 prologue is done. `frame_upper_bytes' is the count of bytes
1566 that will lie between the new `fp' and the *old* `sp' value
1567 after the new `fp' is setup (in the prologue). The upper
1568 part of each frame always includes at least 2 words (8 bytes)
1569 to hold the saved frame pointer and the saved return address.
1571 The svr4 ABI for the i860 now requires that the values of the
1572 stack pointer and frame pointer registers be kept aligned to
1573 16-byte boundaries at all times. We obey that restriction here.
1575 The svr4 ABI for the i860 is entirely vague when it comes to specifying
1576 exactly where the "preserved" registers should be saved. The native
1577 svr4 C compiler I now have doesn't help to clarify the requirements
1578 very much because it is plainly out-of-date and non-ABI-compliant
1579 (in at least one important way, i.e. how it generates function
1582 The native svr4 C compiler saves the "preserved" registers (i.e.
1583 r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative
1584 offsets from the frame pointer).
1586 Previous versions of GCC also saved the "preserved" registers in the
1587 "negative" part of the frame, but they saved them using positive
1588 offsets from the (adjusted) stack pointer (after it had been adjusted
1589 to allocate space for the new frame). That's just plain wrong
1590 because if the current function calls alloca(), the stack pointer
1591 will get moved, and it will be impossible to restore the registers
1592 properly again after that.
1594 Both compilers handled parameter registers (i.e. r16-r27 and f8-f15)
1595 by copying their values either into various "preserved" registers or
1596 into stack slots in the lower part of the current frame (as seemed
1597 appropriate, depending upon subsequent usage of these values).
1599 Here we want to save the preserved registers at some offset from the
1600 frame pointer register so as to avoid any possible problems arising
1601 from calls to alloca(). We can either save them at small positive
1602 offsets from the frame pointer, or at small negative offsets from
1603 the frame pointer. If we save them at small negative offsets from
1604 the frame pointer (i.e. in the lower part of the frame) then we
1605 must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how
1606 many bytes of space we plan to use in the lower part of the frame
1607 for this purpose. Since other parts of the compiler reference the
1608 value of STARTING_FRAME_OFFSET long before final() calls this function,
1609 we would have to go ahead and assume the worst-case storage requirements
1610 for saving all of the "preserved" registers (and use that number, i.e.
1611 `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in
1612 the lower part of the frame. That could potentially be very wasteful,
1613 and that wastefulness could really hamper people compiling for embedded
1614 i860 targets with very tight limits on stack space. Thus, we choose
1615 here to save the preserved registers in the upper part of the
1616 frame, so that we can decide at the very last minute how much (or how
1617 little) space we must allocate for this purpose.
1619 To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved
1620 registers must always be saved so that the saved values of registers
1621 with higher numbers are at higher addresses. We obey that restriction
1624 There are two somewhat different ways that you can generate prologues
1625 here... i.e. pedantically ABI-compliant, and the "other" way. The
1626 "other" way is more consistent with what is currently generated by the
1627 "native" svr4 C compiler for the i860. That's important if you want
1628 to use the current (as of 8/91) incarnation of svr4 SDB for the i860.
1629 The SVR4 SDB for the i860 insists on having function prologues be
1632 To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES
1633 in the i860svr4.h file. (By default this is *not* defined).
1635 The differences between the ABI-compliant and non-ABI-compliant prologues
1636 are that (a) the ABI version seems to require the use of *signed*
1637 (rather than unsigned) adds and subtracts, and (b) the ordering of
1638 the various steps (e.g. saving preserved registers, saving the
1639 return address, setting up the new frame pointer value) is different.
1641 For strict ABI compliance, it seems to be the case that the very last
1642 thing that is supposed to happen in the prologue is getting the frame
1643 pointer set to its new value (but only after everything else has
1644 already been properly setup). We do that here, but only if the symbol
1645 I860_STRICT_ABI_PROLOGUES is defined.
1648 #ifndef STACK_ALIGNMENT
1649 #define STACK_ALIGNMENT 16
1652 extern char call_used_regs
[];
1653 extern int leaf_function_p ();
1655 char *current_function_original_name
;
1657 static int must_preserve_r1
;
1658 static unsigned must_preserve_bytes
;
1661 function_prologue (asm_file
, local_bytes
)
1662 register FILE *asm_file
;
1663 register unsigned local_bytes
;
1665 register unsigned frame_lower_bytes
;
1666 register unsigned frame_upper_bytes
;
1667 register unsigned total_fsize
;
1668 register unsigned preserved_reg_bytes
= 0;
1669 register unsigned i
;
1670 register unsigned preserved_so_far
= 0;
1672 must_preserve_r1
= (optimize
< 2 || ! leaf_function_p ());
1673 must_preserve_bytes
= 4 + (must_preserve_r1
? 4 : 0);
1675 /* Count registers that need preserving. Ignore r0. It never needs
1678 for (i
= 1; i
< FIRST_PSEUDO_REGISTER
; i
++)
1680 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1681 preserved_reg_bytes
+= 4;
1684 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
1686 frame_lower_bytes
= (local_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
1688 /* The upper part of each frame will contain the saved fp,
1689 the saved r1, and stack slots for all of the other "preserved"
1690 registers that we find we will need to save & restore. */
1692 frame_upper_bytes
= must_preserve_bytes
+ preserved_reg_bytes
;
1694 /* Round-up the frame_upper_bytes so that it's a multiple of 16. */
1697 = (frame_upper_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
1699 total_fsize
= frame_upper_bytes
+ frame_lower_bytes
;
1701 #ifndef I860_STRICT_ABI_PROLOGUES
1703 /* There are two kinds of function prologues.
1704 You use the "small" version if the total frame size is
1705 small enough so that it can fit into an immediate 16-bit
1706 value in one instruction. Otherwise, you use the "large"
1707 version of the function prologue. */
1709 if (total_fsize
> 0x7fff)
1711 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1712 but the native C compiler on svr4 uses `addu'. */
1714 fprintf (asm_file
, "\taddu -%d,%ssp,%ssp\n",
1715 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1717 /* Save the old frame pointer. */
1719 fprintf (asm_file
, "\tst.l %sfp,0(%ssp)\n",
1720 i860_reg_prefix
, i860_reg_prefix
);
1722 /* Setup the new frame pointer. The ABI sez to do this after
1723 preserving registers (using adds), but that's not what the
1724 native C compiler on svr4 does. */
1726 fprintf (asm_file
, "\taddu 0,%ssp,%sfp\n",
1727 i860_reg_prefix
, i860_reg_prefix
);
1729 /* Get the value of frame_lower_bytes into r31. */
1731 fprintf (asm_file
, "\torh %d,%sr0,%sr31\n",
1732 frame_lower_bytes
>> 16, i860_reg_prefix
, i860_reg_prefix
);
1733 fprintf (asm_file
, "\tor %d,%sr31,%sr31\n",
1734 frame_lower_bytes
& 0xffff, i860_reg_prefix
, i860_reg_prefix
);
1736 /* Now re-adjust the stack pointer using the value in r31.
1737 The ABI sez to do this with `subs' but SDB may prefer `subu'. */
1739 fprintf (asm_file
, "\tsubu %ssp,%sr31,%ssp\n",
1740 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
1742 /* Preserve registers. The ABI sez to do this before setting
1743 up the new frame pointer, but that's not what the native
1744 C compiler on svr4 does. */
1746 for (i
= 1; i
< 32; i
++)
1747 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1748 fprintf (asm_file
, "\tst.l %s%s,%d(%sfp)\n",
1749 i860_reg_prefix
, reg_names
[i
],
1750 must_preserve_bytes
+ (4 * preserved_so_far
++),
1753 for (i
= 32; i
< 64; i
++)
1754 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1755 fprintf (asm_file
, "\tfst.l %s%s,%d(%sfp)\n",
1756 i860_reg_prefix
, reg_names
[i
],
1757 must_preserve_bytes
+ (4 * preserved_so_far
++),
1760 /* Save the return address. */
1762 if (must_preserve_r1
)
1763 fprintf (asm_file
, "\tst.l %sr1,4(%sfp)\n",
1764 i860_reg_prefix
, i860_reg_prefix
);
1768 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1769 but the native C compiler on svr4 uses `addu'. */
1771 fprintf (asm_file
, "\taddu -%d,%ssp,%ssp\n",
1772 total_fsize
, i860_reg_prefix
, i860_reg_prefix
);
1774 /* Save the old frame pointer. */
1776 fprintf (asm_file
, "\tst.l %sfp,%d(%ssp)\n",
1777 i860_reg_prefix
, frame_lower_bytes
, i860_reg_prefix
);
1779 /* Setup the new frame pointer. The ABI sez to do this after
1780 preserving registers and after saving the return address,
1781 (and its saz to do this using adds), but that's not what the
1782 native C compiler on svr4 does. */
1784 fprintf (asm_file
, "\taddu %d,%ssp,%sfp\n",
1785 frame_lower_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1787 /* Preserve registers. The ABI sez to do this before setting
1788 up the new frame pointer, but that's not what the native
1789 compiler on svr4 does. */
1791 for (i
= 1; i
< 32; i
++)
1792 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1793 fprintf (asm_file
, "\tst.l %s%s,%d(%sfp)\n",
1794 i860_reg_prefix
, reg_names
[i
],
1795 must_preserve_bytes
+ (4 * preserved_so_far
++),
1798 for (i
= 32; i
< 64; i
++)
1799 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1800 fprintf (asm_file
, "\tfst.l %s%s,%d(%sfp)\n",
1801 i860_reg_prefix
, reg_names
[i
],
1802 must_preserve_bytes
+ (4 * preserved_so_far
++),
1805 /* Save the return address. The ABI sez to do this earlier,
1806 and also via an offset from %sp, but the native C compiler
1807 on svr4 does it later (i.e. now) and uses an offset from
1810 if (must_preserve_r1
)
1811 fprintf (asm_file
, "\tst.l %sr1,4(%sfp)\n",
1812 i860_reg_prefix
, i860_reg_prefix
);
1815 #else /* defined(I860_STRICT_ABI_PROLOGUES) */
1817 /* There are two kinds of function prologues.
1818 You use the "small" version if the total frame size is
1819 small enough so that it can fit into an immediate 16-bit
1820 value in one instruction. Otherwise, you use the "large"
1821 version of the function prologue. */
1823 if (total_fsize
> 0x7fff)
1825 /* Adjust the stack pointer (thereby allocating a new frame). */
1827 fprintf (asm_file
, "\tadds -%d,%ssp,%ssp\n",
1828 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1830 /* Save the caller's frame pointer. */
1832 fprintf (asm_file
, "\tst.l %sfp,0(%ssp)\n",
1833 i860_reg_prefix
, i860_reg_prefix
);
1835 /* Save return address. */
1837 if (must_preserve_r1
)
1838 fprintf (asm_file
, "\tst.l %sr1,4(%ssp)\n",
1839 i860_reg_prefix
, i860_reg_prefix
);
1841 /* Get the value of frame_lower_bytes into r31 for later use. */
1843 fprintf (asm_file
, "\torh %d,%sr0,%sr31\n",
1844 frame_lower_bytes
>> 16, i860_reg_prefix
, i860_reg_prefix
);
1845 fprintf (asm_file
, "\tor %d,%sr31,%sr31\n",
1846 frame_lower_bytes
& 0xffff, i860_reg_prefix
, i860_reg_prefix
);
1848 /* Now re-adjust the stack pointer using the value in r31. */
1850 fprintf (asm_file
, "\tsubs %ssp,%sr31,%ssp\n",
1851 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
1853 /* Pre-compute value to be used as the new frame pointer. */
1855 fprintf (asm_file
, "\tadds %ssp,%sr31,%sr31\n",
1856 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
1858 /* Preserve registers. */
1860 for (i
= 1; i
< 32; i
++)
1861 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1862 fprintf (asm_file
, "\tst.l %s%s,%d(%sr31)\n",
1863 i860_reg_prefix
, reg_names
[i
],
1864 must_preserve_bytes
+ (4 * preserved_so_far
++),
1867 for (i
= 32; i
< 64; i
++)
1868 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1869 fprintf (asm_file
, "\tfst.l %s%s,%d(%sr31)\n",
1870 i860_reg_prefix
, reg_names
[i
],
1871 must_preserve_bytes
+ (4 * preserved_so_far
++),
1874 /* Actually set the new value of the frame pointer. */
1876 fprintf (asm_file
, "\tmov %sr31,%sfp\n",
1877 i860_reg_prefix
, i860_reg_prefix
);
1881 /* Adjust the stack pointer. */
1883 fprintf (asm_file
, "\tadds -%d,%ssp,%ssp\n",
1884 total_fsize
, i860_reg_prefix
, i860_reg_prefix
);
1886 /* Save the caller's frame pointer. */
1888 fprintf (asm_file
, "\tst.l %sfp,%d(%ssp)\n",
1889 i860_reg_prefix
, frame_lower_bytes
, i860_reg_prefix
);
1891 /* Save the return address. */
1893 if (must_preserve_r1
)
1894 fprintf (asm_file
, "\tst.l %sr1,%d(%ssp)\n",
1895 i860_reg_prefix
, frame_lower_bytes
+ 4, i860_reg_prefix
);
1897 /* Preserve registers. */
1899 for (i
= 1; i
< 32; i
++)
1900 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1901 fprintf (asm_file
, "\tst.l %s%s,%d(%ssp)\n",
1902 i860_reg_prefix
, reg_names
[i
],
1903 frame_lower_bytes
+ must_preserve_bytes
+ (4 * preserved_so_far
++),
1906 for (i
= 32; i
< 64; i
++)
1907 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
1908 fprintf (asm_file
, "\tfst.l %s%s,%d(%ssp)\n",
1909 i860_reg_prefix
, reg_names
[i
],
1910 frame_lower_bytes
+ must_preserve_bytes
+ (4 * preserved_so_far
++),
1913 /* Setup the new frame pointer. */
1915 fprintf (asm_file
, "\tadds %d,%ssp,%sfp\n",
1916 frame_lower_bytes
, i860_reg_prefix
, i860_reg_prefix
);
1918 #endif /* defined(I860_STRICT_ABI_PROLOGUES) */
1920 #ifdef ASM_OUTPUT_PROLOGUE_SUFFIX
1921 ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file
);
1922 #endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */
1925 /* This function generates the assembly code for function exit.
1926 The macro FUNCTION_EPILOGUE in i860.h is defined to call this function.
1928 ASM_FILE is a stdio stream to output the code to.
1929 SIZE is an int: how many units of temporary storage to allocate.
1931 The function epilogue should not depend on the current stack pointer!
1932 It should use the frame pointer only. This is mandatory because
1933 of alloca; we also take advantage of it to omit stack adjustments
1936 Note that when we go to restore the preserved register values we must
1937 not try to address their slots by using offsets from the stack pointer.
1938 That's because the stack pointer may have been moved during the function
1939 execution due to a call to alloca(). Rather, we must restore all
1940 preserved registers via offsets from the frame pointer value.
1942 Note also that when the current frame is being "popped" (by adjusting
1943 the value of the stack pointer) on function exit, we must (for the
1944 sake of alloca) set the new value of the stack pointer based upon
1945 the current value of the frame pointer. We can't just add what we
1946 believe to be the (static) frame size to the stack pointer because
1947 if we did that, and alloca() had been called during this function,
1948 we would end up returning *without* having fully deallocated all of
1949 the space grabbed by alloca. If that happened, and a function
1950 containing one or more alloca() calls was called over and over again,
1951 then the stack would grow without limit!
1953 Finally note that the epilogues generated here are completely ABI
1954 compliant. They go out of their way to insure that the value in
1955 the frame pointer register is never less than the value in the stack
1956 pointer register. It's not clear why this relationship needs to be
1957 maintained at all times, but maintaining it only costs one extra
1958 instruction, so what the hell.
1961 /* This corresponds to a version 4 TDESC structure. Lower numbered
1962 versions successively omit the last word of the structure. We
1963 don't try to handle version 5 here. */
1965 typedef struct TDESC_flags
{
1968 int callable_block
:1;
1970 int fregs
:6; /* fp regs 2-7 */
1971 int iregs
:16; /* regs 0-15 */
1974 typedef struct TDESC
{
1976 int integer_reg_offset
; /* same as must_preserve_bytes */
1977 int floating_point_reg_offset
;
1978 unsigned int positive_frame_size
; /* same as frame_upper_bytes */
1979 unsigned int negative_frame_size
; /* same as frame_lower_bytes */
1983 function_epilogue (asm_file
, local_bytes
)
1984 register FILE *asm_file
;
1985 register unsigned local_bytes
;
1987 register unsigned frame_upper_bytes
;
1988 register unsigned frame_lower_bytes
;
1989 register unsigned preserved_reg_bytes
= 0;
1990 register unsigned i
;
1991 register unsigned restored_so_far
= 0;
1992 register unsigned int_restored
;
1993 register unsigned mask
;
1994 unsigned intflags
=0;
1995 register TDESC_flags
*flags
= (TDESC_flags
*) &intflags
;
1998 flags
->reg_packing
= 1;
1999 flags
->iregs
= 8; /* old fp always gets saved */
2001 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
2003 frame_lower_bytes
= (local_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
2005 /* Count the number of registers that were preserved in the prologue.
2006 Ignore r0. It is never preserved. */
2008 for (i
= 1; i
< FIRST_PSEUDO_REGISTER
; i
++)
2010 if (regs_ever_live
[i
] && ! call_used_regs
[i
])
2011 preserved_reg_bytes
+= 4;
2014 /* The upper part of each frame will contain only saved fp,
2015 the saved r1, and stack slots for all of the other "preserved"
2016 registers that we find we will need to save & restore. */
2018 frame_upper_bytes
= must_preserve_bytes
+ preserved_reg_bytes
;
2020 /* Round-up frame_upper_bytes so that t is a multiple of 16. */
2023 = (frame_upper_bytes
+ STACK_ALIGNMENT
- 1) & -STACK_ALIGNMENT
;
2025 /* Restore all of the "preserved" registers that need restoring. */
2029 for (i
= 1; i
< 32; i
++, mask
<<=1)
2030 if (regs_ever_live
[i
] && ! call_used_regs
[i
]) {
2031 fprintf (asm_file
, "\tld.l %d(%sfp),%s%s\n",
2032 must_preserve_bytes
+ (4 * restored_so_far
++),
2033 i860_reg_prefix
, i860_reg_prefix
, reg_names
[i
]);
2034 if (i
> 3 && i
< 16)
2035 flags
->iregs
|= mask
;
2038 int_restored
= restored_so_far
;
2041 for (i
= 32; i
< 64; i
++) {
2042 if (regs_ever_live
[i
] && ! call_used_regs
[i
]) {
2043 fprintf (asm_file
, "\tfld.l %d(%sfp),%s%s\n",
2044 must_preserve_bytes
+ (4 * restored_so_far
++),
2045 i860_reg_prefix
, i860_reg_prefix
, reg_names
[i
]);
2046 if (i
> 33 & i
< 40)
2047 flags
->fregs
|= mask
;
2049 if (i
> 33 && i
< 40)
2053 /* Get the value we plan to use to restore the stack pointer into r31. */
2055 fprintf (asm_file
, "\tadds %d,%sfp,%sr31\n",
2056 frame_upper_bytes
, i860_reg_prefix
, i860_reg_prefix
);
2058 /* Restore the return address and the old frame pointer. */
2060 if (must_preserve_r1
) {
2061 fprintf (asm_file
, "\tld.l 4(%sfp),%sr1\n",
2062 i860_reg_prefix
, i860_reg_prefix
);
2066 fprintf (asm_file
, "\tld.l 0(%sfp),%sfp\n",
2067 i860_reg_prefix
, i860_reg_prefix
);
2069 /* Return and restore the old stack pointer value. */
2071 fprintf (asm_file
, "\tbri %sr1\n\tmov %sr31,%ssp\n",
2072 i860_reg_prefix
, i860_reg_prefix
, i860_reg_prefix
);
2074 #ifdef OUTPUT_TDESC /* Output an ABI-compliant TDESC entry */
2075 if (! frame_lower_bytes
) {
2077 if (! frame_upper_bytes
) {
2079 if (restored_so_far
== int_restored
) /* No FP saves */
2083 assemble_name(asm_file
,current_function_original_name
);
2084 fputs(".TDESC:\n", asm_file
);
2085 fprintf(asm_file
, "%s 0x%0x\n", ASM_LONG
, intflags
);
2086 fprintf(asm_file
, "%s %d\n", ASM_LONG
,
2087 int_restored
? must_preserve_bytes
: 0);
2088 if (flags
->version
> 1) {
2089 fprintf(asm_file
, "%s %d\n", ASM_LONG
,
2090 (restored_so_far
== int_restored
) ? 0 : must_preserve_bytes
+
2091 (4 * int_restored
));
2092 if (flags
->version
> 2) {
2093 fprintf(asm_file
, "%s %d\n", ASM_LONG
, frame_upper_bytes
);
2094 if (flags
->version
> 3)
2095 fprintf(asm_file
, "%s %d\n", ASM_LONG
, frame_lower_bytes
);
2099 fprintf(asm_file
, "%s ", ASM_LONG
);
2100 assemble_name(asm_file
, current_function_original_name
);
2101 fprintf(asm_file
, "\n%s ", ASM_LONG
);
2102 assemble_name(asm_file
, current_function_original_name
);
2103 fputs(".TDESC\n", asm_file
);
2109 /* Expand a library call to __builtin_saveregs. */
2113 rtx fn
= gen_rtx_SYMBOL_REF (Pmode
, "__builtin_saveregs");
2114 rtx save
= gen_reg_rtx (Pmode
);
2115 rtx valreg
= LIBCALL_VALUE (Pmode
);
2118 /* The return value register overlaps the first argument register.
2119 Save and restore it around the call. */
2120 emit_move_insn (save
, valreg
);
2121 ret
= emit_library_call_value (fn
, NULL_RTX
, 1, Pmode
, 0);
2122 if (GET_CODE (ret
) != REG
|| REGNO (ret
) < FIRST_PSEUDO_REGISTER
)
2123 ret
= copy_to_reg (ret
);
2124 emit_move_insn (valreg
, save
);
2130 i860_build_va_list ()
2132 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2135 record
= make_node (RECORD_TYPE
);
2137 field_ireg_used
= build_decl (FIELD_DECL
, get_identifier ("__ireg_used"),
2138 unsigned_type_node
);
2139 field_freg_used
= build_decl (FIELD_DECL
, get_identifier ("__freg_used"),
2140 unsigned_type_node
);
2141 field_reg_base
= build_decl (FIELD_DECL
, get_identifier ("__reg_base"),
2143 field_mem_ptr
= build_decl (FIELD_DECL
, get_identifier ("__mem_ptr"),
2146 DECL_FIELD_CONTEXT (field_ireg_used
) = record
;
2147 DECL_FIELD_CONTEXT (field_freg_used
) = record
;
2148 DECL_FIELD_CONTEXT (field_reg_base
) = record
;
2149 DECL_FIELD_CONTEXT (field_mem_ptr
) = record
;
2151 #ifdef I860_SVR4_VA_LIST
2152 TYPE_FIELDS (record
) = field_ireg_used
;
2153 TREE_CHAIN (field_ireg_used
) = field_freg_used
;
2154 TREE_CHAIN (field_freg_used
) = field_reg_base
;
2155 TREE_CHAIN (field_reg_base
) = field_mem_ptr
;
2157 TYPE_FIELDS (record
) = field_reg_base
;
2158 TREE_CHAIN (field_reg_base
) = field_mem_ptr
;
2159 TREE_CHAIN (field_mem_ptr
) = field_ireg_used
;
2160 TREE_CHAIN (field_ireg_used
) = field_freg_used
;
2163 layout_type (record
);
2168 i860_va_start (stdarg_p
, valist
, nextarg
)
2175 saveregs
= make_tree (build_pointer_type (va_list_type_node
),
2176 expand_builtin_saveregs ());
2177 saveregs
= build1 (INDIRECT_REF
, va_list_type_node
, saveregs
);
2181 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2182 tree ireg_used
, freg_used
, reg_base
, mem_ptr
;
2184 #ifdef I860_SVR4_VA_LIST
2185 field_ireg_used
= TYPE_FIELDS (va_list_type_node
);
2186 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2187 field_reg_base
= TREE_CHAIN (field_freg_used
);
2188 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2190 field_reg_base
= TYPE_FIELDS (va_list_type_node
);
2191 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2192 field_ireg_used
= TREE_CHAIN (field_mem_ptr
);
2193 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2196 ireg_used
= build (COMPONENT_REF
, TREE_TYPE (field_ireg_used
),
2197 valist
, field_ireg_used
);
2198 freg_used
= build (COMPONENT_REF
, TREE_TYPE (field_freg_used
),
2199 valist
, field_freg_used
);
2200 reg_base
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2201 valist
, field_reg_base
);
2202 mem_ptr
= build (COMPONENT_REF
, TREE_TYPE (field_mem_ptr
),
2203 valist
, field_mem_ptr
);
2205 t
= build_int_2 (current_function_args_info
.ints
, 0);
2206 t
= build (MODIFY_EXPR
, TREE_TYPE (ireg_used
), ireg_used
, t
);
2207 TREE_SIDE_EFFECTS (t
) = 1;
2208 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2210 t
= build_int_2 (ROUNDUP (current_function_args_info
.floats
, 8), 0);
2211 t
= build (MODIFY_EXPR
, TREE_TYPE (freg_used
), freg_used
, t
);
2212 TREE_SIDE_EFFECTS (t
) = 1;
2213 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2215 t
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2216 saveregs
, field_reg_base
);
2217 t
= build (MODIFY_EXPR
, TREE_TYPE (reg_base
), reg_base
, t
);
2218 TREE_SIDE_EFFECTS (t
) = 1;
2219 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2221 t
= make_tree (ptr_type_node
, nextarg
);
2222 t
= build (MODIFY_EXPR
, TREE_TYPE (mem_ptr
), mem_ptr
, t
);
2223 TREE_SIDE_EFFECTS (t
) = 1;
2224 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2228 t
= build (MODIFY_EXPR
, va_list_type_node
, valist
, saveregs
);
2229 TREE_SIDE_EFFECTS (t
) = 1;
2230 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
/* Layout of the register-save area used by va_arg: 8 float parameter
   registers and 12 integer parameter registers; their relative order
   inside the area differs between the svr4 and non-svr4 varargs
   conventions.  (The #else/#endif lines were lost in this copy and
   have been restored.)  */
#define NUM_PARM_FREGS	8
#define NUM_PARM_IREGS	12
#ifdef I860_SVR4_VARARGS
#define FREG_OFFSET	0
#define IREG_OFFSET	(NUM_PARM_FREGS * UNITS_PER_WORD)
#else
#define FREG_OFFSET	(NUM_PARM_IREGS * UNITS_PER_WORD)
#define IREG_OFFSET	0
#endif
2245 i860_va_arg (valist
, type
)
2248 tree field_ireg_used
, field_freg_used
, field_reg_base
, field_mem_ptr
;
2249 tree type_ptr_node
, t
;
2250 rtx lab_over
= NULL_RTX
;
2252 HOST_WIDE_INT align
;
2254 #ifdef I860_SVR4_VA_LIST
2255 field_ireg_used
= TYPE_FIELDS (va_list_type_node
);
2256 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2257 field_reg_base
= TREE_CHAIN (field_freg_used
);
2258 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2260 field_reg_base
= TYPE_FIELDS (va_list_type_node
);
2261 field_mem_ptr
= TREE_CHAIN (field_reg_base
);
2262 field_ireg_used
= TREE_CHAIN (field_mem_ptr
);
2263 field_freg_used
= TREE_CHAIN (field_ireg_used
);
2266 field_ireg_used
= build (COMPONENT_REF
, TREE_TYPE (field_ireg_used
),
2267 valist
, field_ireg_used
);
2268 field_freg_used
= build (COMPONENT_REF
, TREE_TYPE (field_freg_used
),
2269 valist
, field_freg_used
);
2270 field_reg_base
= build (COMPONENT_REF
, TREE_TYPE (field_reg_base
),
2271 valist
, field_reg_base
);
2272 field_mem_ptr
= build (COMPONENT_REF
, TREE_TYPE (field_mem_ptr
),
2273 valist
, field_mem_ptr
);
2275 ret
= gen_reg_rtx (Pmode
);
2276 type_ptr_node
= build_pointer_type (type
);
2278 if (! AGGREGATE_TYPE_P (type
))
2280 int nparm
, incr
, ofs
;
2284 if (FLOAT_TYPE_P (type
))
2286 field
= field_freg_used
;
2287 nparm
= NUM_PARM_FREGS
;
2293 field
= field_ireg_used
;
2294 nparm
= NUM_PARM_IREGS
;
2295 incr
= int_size_in_bytes (type
) / UNITS_PER_WORD
;
2299 lab_false
= gen_label_rtx ();
2300 lab_over
= gen_label_rtx ();
2302 emit_cmp_and_jump_insns (expand_expr (field
, NULL_RTX
, 0, 0),
2303 GEN_INT (nparm
- incr
), GT
, const0_rtx
,
2304 TYPE_MODE (TREE_TYPE (field
)),
2305 TREE_UNSIGNED (field
), 0, lab_false
);
2307 t
= fold (build (POSTINCREMENT_EXPR
, TREE_TYPE (field
), field
,
2308 build_int_2 (incr
, 0)));
2309 TREE_SIDE_EFFECTS (t
) = 1;
2311 t
= fold (build (MULT_EXPR
, TREE_TYPE (field
), field
,
2312 build_int_2 (UNITS_PER_WORD
, 0)));
2313 TREE_SIDE_EFFECTS (t
) = 1;
2315 t
= fold (build (PLUS_EXPR
, ptr_type_node
, field_reg_base
,
2316 fold (build (PLUS_EXPR
, TREE_TYPE (field
), t
,
2317 build_int_2 (ofs
, 0)))));
2318 TREE_SIDE_EFFECTS (t
) = 1;
2320 val
= expand_expr (t
, ret
, VOIDmode
, EXPAND_NORMAL
);
2322 emit_move_insn (ret
, val
);
2324 emit_jump_insn (gen_jump (lab_over
));
2326 emit_label (lab_false
);
2329 align
= TYPE_ALIGN (type
);
2330 if (align
< BITS_PER_WORD
)
2331 align
= BITS_PER_WORD
;
2332 align
/= BITS_PER_UNIT
;
2334 t
= build (PLUS_EXPR
, ptr_type_node
, field_mem_ptr
,
2335 build_int_2 (align
- 1, 0));
2336 t
= build (BIT_AND_EXPR
, ptr_type_node
, t
, build_int_2 (-align
, -1));
2338 val
= expand_expr (t
, ret
, VOIDmode
, EXPAND_NORMAL
);
2340 emit_move_insn (ret
, val
);
2342 t
= fold (build (PLUS_EXPR
, ptr_type_node
,
2343 make_tree (ptr_type_node
, ret
),
2344 build_int_2 (int_size_in_bytes (type
), 0)));
2345 t
= build (MODIFY_EXPR
, ptr_type_node
, field_mem_ptr
, t
);
2346 TREE_SIDE_EFFECTS (t
) = 1;
2347 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
2350 emit_label (lab_over
);