1 /* Subroutines for insn-output.c for Intel 860
2 Copyright (C) 1989, 91, 97, 98, 1999 Free Software Foundation, Inc.
3 Derived from sparc.c.
4
5 Written by Richard Stallman (rms@ai.mit.edu).
6
7 Hacked substantially by Ron Guilmette (rfg@netcom.com) to cater
8 to the whims of the System V Release 4 assembler.
9
10 This file is part of GNU CC.
11
12 GNU CC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2, or (at your option)
15 any later version.
16
17 GNU CC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GNU CC; see the file COPYING. If not, write to
24 the Free Software Foundation, 59 Temple Place - Suite 330,
25 Boston, MA 02111-1307, USA. */
26
27
28 #include "config.h"
29 #include <stdio.h>
30 #include "flags.h"
31 #include "rtl.h"
32 #include "tree.h"
33 #include "regs.h"
34 #include "hard-reg-set.h"
35 #include "real.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "insn-flags.h"
39 #include "output.h"
40 #include "recog.h"
41 #include "insn-attr.h"
42 #include "function.h"
43 #include "expr.h"
44
45 static rtx find_addr_reg ();
46
47 #ifndef I860_REG_PREFIX
48 #define I860_REG_PREFIX ""
49 #endif
50
51 char *i860_reg_prefix = I860_REG_PREFIX;
52
53 /* Save information from a "cmpxx" operation until the branch is emitted. */
54
55 rtx i860_compare_op0, i860_compare_op1;
56 \f
57 /* Return non-zero if this pattern can be evaluated safely, even if it
58 was not asked for. */
59 int
60 safe_insn_src_p (op, mode)
61 rtx op;
62 enum machine_mode mode;
63 {
64 /* Just experimenting. */
65
66 /* No floating point src is safe if it contains an arithmetic
67 operation, since that operation may trap. */
68 switch (GET_CODE (op))
69 {
70 case CONST_INT:
71 case LABEL_REF:
72 case SYMBOL_REF:
73 case CONST:
74 return 1;
75
76 case REG:
77 return 1;
78
79 case MEM:
80 return CONSTANT_ADDRESS_P (XEXP (op, 0));
81
82 /* We never need to negate or complement constants. */
83 case NEG:
84 return (mode != SFmode && mode != DFmode);
85 case NOT:
86 case ZERO_EXTEND:
87 return 1;
88
89 case EQ:
90 case NE:
91 case LT:
92 case GT:
93 case LE:
94 case GE:
95 case LTU:
96 case GTU:
97 case LEU:
98 case GEU:
99 case MINUS:
100 case PLUS:
101 return (mode != SFmode && mode != DFmode);
102 case AND:
103 case IOR:
104 case XOR:
105 case ASHIFT:
106 case ASHIFTRT:
107 case LSHIFTRT:
108 if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0)))
109 || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1))))
110 return 0;
111 return 1;
112
113 default:
114 return 0;
115 }
116 }
117
118 /* Return 1 if REG is clobbered in IN.
119 Return 2 if REG is used in IN.
120 Return 3 if REG is both used and clobbered in IN.
121 Return 0 if neither. */
122
123 static int
124 reg_clobbered_p (reg, in)
125 rtx reg;
126 rtx in;
127 {
128 register enum rtx_code code;
129
130 if (in == 0)
131 return 0;
132
133 code = GET_CODE (in);
134
135 if (code == SET || code == CLOBBER)
136 {
137 rtx dest = SET_DEST (in);
138 int set = 0;
139 int used = 0;
140
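      /* Strip any STRICT_LOW_PART, SUBREG, or bit-field wrappers so we
         are looking at the register actually being stored into.  */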
141 while (GET_CODE (dest) == STRICT_LOW_PART
142 || GET_CODE (dest) == SUBREG
143 || GET_CODE (dest) == SIGN_EXTRACT
144 || GET_CODE (dest) == ZERO_EXTRACT)
145 dest = XEXP (dest, 0);
146
147 if (dest == reg)
148 set = 1;
149 else if (GET_CODE (dest) == REG
150 && refers_to_regno_p (REGNO (reg),
151 REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
152 SET_DEST (in), 0))
153 {
154 set = 1;
155 /* Anything that sets just part of the register
156 is considered using as well as setting it.
157 But note that a straight SUBREG of a single-word value
158 clobbers the entire value. */
159 if (dest != SET_DEST (in)
160 && ! (GET_CODE (SET_DEST (in)) == SUBREG
161 || UNITS_PER_WORD >= GET_MODE_SIZE (GET_MODE (dest))))
162 used = 1;
163 }
164
165 if (code == SET)
166 {
167 if (set)
168 used = refers_to_regno_p (REGNO (reg),
169 REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
170 SET_SRC (in), 0);
171 else
172 used = refers_to_regno_p (REGNO (reg),
173 REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
174 in, 0);
175 }
176
177 return set + used * 2;
178 }
179
180 if (refers_to_regno_p (REGNO (reg),
181 REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
182 in, 0))
183 return 2;
184 return 0;
185 }
186
187 /* Return non-zero if OP can be written to without screwing up
188 GCC's model of what's going on. It is assumed that this operand
189 appears in the dest position of a SET insn in a conditional
190 branch's delay slot. AFTER is the label to start looking from. */
191 int
192 operand_clobbered_before_used_after (op, after)
193 rtx op;
194 rtx after;
195 {
196 /* Just experimenting. */
197 if (GET_CODE (op) == CC0)
198 return 1;
199 if (GET_CODE (op) == REG)
200 {
201 rtx insn;
202
203 if (op == stack_pointer_rtx)
204 return 0;
205
206 /* Scan forward from the label, to see if the value of OP
207 is clobbered before the first use. */
208
209 for (insn = NEXT_INSN (after); insn; insn = NEXT_INSN (insn))
210 {
211 if (GET_CODE (insn) == NOTE)
212 continue;
213 if (GET_CODE (insn) == INSN
214 || GET_CODE (insn) == JUMP_INSN
215 || GET_CODE (insn) == CALL_INSN)
216 {
217 switch (reg_clobbered_p (op, PATTERN (insn)))
218 {
219 default:
220 return 0;
221 case 1:
222 return 1;
223 case 0:
224 break;
225 }
226 }
227 /* If we reach another label without clobbering OP,
228 then we cannot safely write it here. */
229 else if (GET_CODE (insn) == CODE_LABEL)
230 return 0;
231 if (GET_CODE (insn) == JUMP_INSN)
232 {
233 if (condjump_p (insn))
234 return 0;
235 /* This is a jump insn which has already
236 been mangled. We can't tell what it does. */
237 if (GET_CODE (PATTERN (insn)) == PARALLEL)
238 return 0;
239 if (! JUMP_LABEL (insn))
240 return 0;
241 /* Keep following jumps. */
242 insn = JUMP_LABEL (insn);
243 }
244 }
245 return 1;
246 }
247
248 /* In both of these cases, the first insn executed
249       for this op will be an orh whatever%h,%?r0,%?r31,
250 which is tolerable. */
251 if (GET_CODE (op) == MEM)
252 return (CONSTANT_ADDRESS_P (XEXP (op, 0)));
253
254 return 0;
255 }
256
257 /* Return non-zero if this pattern, as a source to a "SET",
258 is known to yield an instruction of unit size. */
259 int
260 single_insn_src_p (op, mode)
261 rtx op;
262 enum machine_mode mode;
263 {
264 switch (GET_CODE (op))
265 {
266 case CONST_INT:
267 /* This is not always a single insn src, technically,
268 but output_delayed_branch knows how to deal with it. */
269 return 1;
270
271 case SYMBOL_REF:
272 case CONST:
273 /* This is not a single insn src, technically,
274 but output_delayed_branch knows how to deal with it. */
275 return 1;
276
277 case REG:
278 return 1;
279
280 case MEM:
281 return 1;
282
283 /* We never need to negate or complement constants. */
284 case NEG:
285 return (mode != DFmode);
286 case NOT:
287 case ZERO_EXTEND:
288 return 1;
289
290 case PLUS:
291 case MINUS:
292 /* Detect cases that require multiple instructions. */
293 if (CONSTANT_P (XEXP (op, 1))
294 && !(GET_CODE (XEXP (op, 1)) == CONST_INT
295 && SMALL_INT (XEXP (op, 1))))
296 return 0;
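        /* Fall through: an add/subtract that passes the test above costs
           one insn, subject to the same mode check as the comparisons.  */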
297 case EQ:
298 case NE:
299 case LT:
300 case GT:
301 case LE:
302 case GE:
303 case LTU:
304 case GTU:
305 case LEU:
306 case GEU:
307 /* Not doing floating point, since they probably
308 take longer than the branch slot they might fill. */
309 return (mode != SFmode && mode != DFmode);
310
311 case AND:
312 if (GET_CODE (XEXP (op, 1)) == NOT)
313 {
314 rtx arg = XEXP (XEXP (op, 1), 0);
315 if (CONSTANT_P (arg)
316 && !(GET_CODE (arg) == CONST_INT
317 && (SMALL_INT (arg)
318 || (INTVAL (arg) & 0xffff) == 0)))
319 return 0;
320 }
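        /* Fall through: a plain AND is subject to the same small-or-round
           constant test as IOR and XOR below.  */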
321 case IOR:
322 case XOR:
323 /* Both small and round numbers take one instruction;
324 others take two. */
325 if (CONSTANT_P (XEXP (op, 1))
326 && !(GET_CODE (XEXP (op, 1)) == CONST_INT
327 && (SMALL_INT (XEXP (op, 1))
328 || (INTVAL (XEXP (op, 1)) & 0xffff) == 0)))
329 return 0;
330
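      /* Fall through: operands that passed the constant tests above, and
         all shifts, take a single instruction.  */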
331 case ASHIFT:
332 case ASHIFTRT:
333 case LSHIFTRT:
334 return 1;
335
336 case SUBREG:
337 if (SUBREG_WORD (op) != 0)
338 return 0;
339 return single_insn_src_p (SUBREG_REG (op), mode);
340
341 /* Not doing floating point, since they probably
342 take longer than the branch slot they might fill. */
343 case FLOAT_EXTEND:
344 case FLOAT_TRUNCATE:
345 case FLOAT:
346 case FIX:
347 case UNSIGNED_FLOAT:
348 case UNSIGNED_FIX:
349 return 0;
350
351 default:
352 return 0;
353 }
354 }
355 \f
356 /* Return non-zero only if OP is a register of mode MODE,
357 or const0_rtx. */
358 int
359 reg_or_0_operand (op, mode)
360 rtx op;
361 enum machine_mode mode;
362 {
363 return (op == const0_rtx || register_operand (op, mode)
364 || op == CONST0_RTX (mode));
365 }
366
367 /* Return truth value of whether OP can be used as an operand in a three
368 address add/subtract insn (such as add %o1,7,%l2) of mode MODE. */
369
370 int
371 arith_operand (op, mode)
372 rtx op;
373 enum machine_mode mode;
374 {
375 return (register_operand (op, mode)
376 || (GET_CODE (op) == CONST_INT && SMALL_INT (op)));
377 }
378
379 /* Return 1 if OP is a valid first operand for a logical insn of mode MODE. */
380
381 int
382 logic_operand (op, mode)
383 rtx op;
384 enum machine_mode mode;
385 {
386 return (register_operand (op, mode)
387 || (GET_CODE (op) == CONST_INT && LOGIC_INT (op)));
388 }
389
390 /* Return 1 if OP is a valid first operand for a shift insn of mode MODE. */
391
392 int
393 shift_operand (op, mode)
394 rtx op;
395 enum machine_mode mode;
396 {
397 return (register_operand (op, mode)
398 || (GET_CODE (op) == CONST_INT));
399 }
400
401 /* Return 1 if OP is a valid first operand for either a logical insn
402 or an add insn of mode MODE. */
403
404 int
405 compare_operand (op, mode)
406 rtx op;
407 enum machine_mode mode;
408 {
409 return (register_operand (op, mode)
410 || (GET_CODE (op) == CONST_INT && SMALL_INT (op) && LOGIC_INT (op)));
411 }
412
413 /* Return truth value of whether OP can be used as the 5-bit immediate
414 operand of a bte or btne insn. */
415
416 int
417 bte_operand (op, mode)
418 rtx op;
419 enum machine_mode mode;
420 {
421 return (register_operand (op, mode)
422 || (GET_CODE (op) == CONST_INT
423 && (unsigned) INTVAL (op) < 0x20));
424 }
425
426 /* Return 1 if OP is an indexed memory reference of mode MODE. */
427
428 int
429 indexed_operand (op, mode)
430 rtx op;
431 enum machine_mode mode;
432 {
433 return (GET_CODE (op) == MEM && GET_MODE (op) == mode
434 && GET_CODE (XEXP (op, 0)) == PLUS
435 && GET_MODE (XEXP (op, 0)) == SImode
436 && register_operand (XEXP (XEXP (op, 0), 0), SImode)
437 && register_operand (XEXP (XEXP (op, 0), 1), SImode));
438 }
439
440 /* Return 1 if OP is a suitable source operand for a load insn
441 with mode MODE. */
442
443 int
444 load_operand (op, mode)
445 rtx op;
446 enum machine_mode mode;
447 {
448 return (memory_operand (op, mode) || indexed_operand (op, mode));
449 }
450
451 /* Return truth value of whether OP is an integer which fits the
452 range constraining immediate operands in add/subtract insns. */
453
454 int
455 small_int (op, mode)
456 rtx op;
457 enum machine_mode mode;
458 {
459 return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
460 }
461
462 /* Return truth value of whether OP is an integer which fits the
463 range constraining immediate operands in logic insns. */
464
465 int
466 logic_int (op, mode)
467 rtx op;
468 enum machine_mode mode;
469 {
470 return (GET_CODE (op) == CONST_INT && LOGIC_INT (op));
471 }
472
473 /* Test for a valid operand for a call instruction.
474 Don't allow the arg pointer register or virtual regs
475 since they may change into reg + const, which the patterns
476 can't handle yet. */
477
478 int
479 call_insn_operand (op, mode)
480 rtx op;
481 enum machine_mode mode;
482 {
483 if (GET_CODE (op) == MEM
484 && (CONSTANT_ADDRESS_P (XEXP (op, 0))
485 || (GET_CODE (XEXP (op, 0)) == REG
486 && XEXP (op, 0) != arg_pointer_rtx
487 && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
488 && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
489 return 1;
490 return 0;
491 }
492 \f
493 /* Return the best assembler insn template
494 for moving operands[1] into operands[0] as a fullword. */
495
496 static char *
497 singlemove_string (operands)
498 rtx *operands;
499 {
500 if (GET_CODE (operands[0]) == MEM)
501 {
502 if (GET_CODE (operands[1]) != MEM)
503 if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
504 {
505 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
506 && (cc_prev_status.flags & CC_HI_R31_ADJ)
507 && cc_prev_status.mdep == XEXP (operands[0], 0)))
508 {
509 CC_STATUS_INIT;
510 output_asm_insn ("orh %h0,%?r0,%?r31", operands);
511 }
512 cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
513 cc_status.mdep = XEXP (operands[0], 0);
514 return "st.l %r1,%L0(%?r31)";
515 }
516 else
517 return "st.l %r1,%0";
518 else
519 abort ();
520 #if 0
521 {
522 rtx xoperands[2];
523
524 cc_status.flags &= ~CC_F0_IS_0;
525 xoperands[0] = gen_rtx (REG, SFmode, 32);
526 xoperands[1] = operands[1];
527 output_asm_insn (singlemove_string (xoperands), xoperands);
528 xoperands[1] = xoperands[0];
529 xoperands[0] = operands[0];
530 output_asm_insn (singlemove_string (xoperands), xoperands);
531 return "";
532 }
533 #endif
534 }
535 if (GET_CODE (operands[1]) == MEM)
536 {
537 if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
538 {
539 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
540 && (cc_prev_status.flags & CC_HI_R31_ADJ)
541 && cc_prev_status.mdep == XEXP (operands[1], 0)))
542 {
543 CC_STATUS_INIT;
544 output_asm_insn ("orh %h1,%?r0,%?r31", operands);
545 }
546 cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
547 cc_status.mdep = XEXP (operands[1], 0);
548 return "ld.l %L1(%?r31),%0";
549 }
550 return "ld.l %m1,%0";
551 }
552 if (GET_CODE (operands[1]) == CONST_INT)
553 {
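      /* Zero, a 16-bit unsigned value, a sign-extended 16-bit value, or a
         value with only its upper 16 bits set can each be loaded with one
         instruction; pick the matching form below.  */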
554 if (operands[1] == const0_rtx)
555 return "mov %?r0,%0";
556       if ((INTVAL (operands[1]) & 0xffff0000) == 0)
557         return "or %L1,%?r0,%0";
558       if ((INTVAL (operands[1]) & 0xffff8000) == 0xffff8000)
559         return "adds %1,%?r0,%0";
560       if ((INTVAL (operands[1]) & 0x0000ffff) == 0)
561         return "orh %H1,%?r0,%0";
562 }
563 return "mov %1,%0";
564 }
565 \f
566 /* Output assembler code to perform a doubleword move insn
567 with operands OPERANDS. */
568
569 char *
570 output_move_double (operands)
571 rtx *operands;
572 {
573 enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1;
574 rtx latehalf[2];
575 rtx addreg0 = 0, addreg1 = 0;
576 int highest_first = 0;
577 int no_addreg1_decrement = 0;
578
579 /* First classify both operands. */
580
581 if (REG_P (operands[0]))
582 optype0 = REGOP;
583 else if (offsettable_memref_p (operands[0]))
584 optype0 = OFFSOP;
585 else if (GET_CODE (operands[0]) == MEM)
586 optype0 = MEMOP;
587 else
588 optype0 = RNDOP;
589
590 if (REG_P (operands[1]))
591 optype1 = REGOP;
592 else if (CONSTANT_P (operands[1]))
593 optype1 = CNSTOP;
594 else if (offsettable_memref_p (operands[1]))
595 optype1 = OFFSOP;
596 else if (GET_CODE (operands[1]) == MEM)
597 optype1 = MEMOP;
598 else
599 optype1 = RNDOP;
600
601 /* Check for the cases that the operand constraints are not
602      supposed to allow.  Abort if we get one,
603 because generating code for these cases is painful. */
604
605 if (optype0 == RNDOP || optype1 == RNDOP)
606 abort ();
607
608 /* If an operand is an unoffsettable memory ref, find a register
609 we can increment temporarily to make it refer to the second word. */
610
611 if (optype0 == MEMOP)
612 addreg0 = find_addr_reg (XEXP (operands[0], 0));
613
614 if (optype1 == MEMOP)
615 addreg1 = find_addr_reg (XEXP (operands[1], 0));
616
617 /* ??? Perhaps in some cases move double words
618 if there is a spare pair of floating regs. */
619
620 /* Ok, we can do one word at a time.
621 Normally we do the low-numbered word first,
622 but if either operand is autodecrementing then we
623 do the high-numbered word first.
624
625 In either case, set up in LATEHALF the operands to use
626 for the high-numbered word and in some cases alter the
627 operands in OPERANDS to be suitable for the low-numbered word. */
628
629 if (optype0 == REGOP)
630 latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
631 else if (optype0 == OFFSOP)
632 latehalf[0] = adj_offsettable_operand (operands[0], 4);
633 else
634 latehalf[0] = operands[0];
635
636 if (optype1 == REGOP)
637 latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
638 else if (optype1 == OFFSOP)
639 latehalf[1] = adj_offsettable_operand (operands[1], 4);
640 else if (optype1 == CNSTOP)
641 {
642 if (GET_CODE (operands[1]) == CONST_DOUBLE)
643 split_double (operands[1], &operands[1], &latehalf[1]);
644 else if (CONSTANT_P (operands[1]))
645 latehalf[1] = const0_rtx;
646 }
647 else
648 latehalf[1] = operands[1];
649
650 /* If the first move would clobber the source of the second one,
651 do them in the other order.
652
653 RMS says "This happens only for registers;
654 such overlap can't happen in memory unless the user explicitly
655 sets it up, and that is an undefined circumstance."
656
657 but it happens on the sparc when loading parameter registers,
658 so I am going to define that circumstance, and make it work
659 as expected. */
660
661 if (optype0 == REGOP && optype1 == REGOP
662 && REGNO (operands[0]) == REGNO (latehalf[1]))
663 {
664 CC_STATUS_PARTIAL_INIT;
665 /* Make any unoffsettable addresses point at high-numbered word. */
666 if (addreg0)
667 output_asm_insn ("adds 0x4,%0,%0", &addreg0);
668 if (addreg1)
669 output_asm_insn ("adds 0x4,%0,%0", &addreg1);
670
671 /* Do that word. */
672 output_asm_insn (singlemove_string (latehalf), latehalf);
673
674 /* Undo the adds we just did. */
675 if (addreg0)
676 output_asm_insn ("adds -0x4,%0,%0", &addreg0);
677 if (addreg1)
678 output_asm_insn ("adds -0x4,%0,%0", &addreg1);
679
680 /* Do low-numbered word. */
681 return singlemove_string (operands);
682 }
683 else if (optype0 == REGOP && optype1 != REGOP
684 && reg_overlap_mentioned_p (operands[0], operands[1]))
685 {
686 /* If both halves of dest are used in the src memory address,
687 add the two regs and put them in the low reg (operands[0]).
688 Then it works to load latehalf first. */
689 if (reg_mentioned_p (operands[0], XEXP (operands[1], 0))
690 && reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
691 {
692 rtx xops[2];
693 xops[0] = latehalf[0];
694 xops[1] = operands[0];
695 output_asm_insn ("adds %1,%0,%1", xops);
696 operands[1] = gen_rtx (MEM, DImode, operands[0]);
697 latehalf[1] = adj_offsettable_operand (operands[1], 4);
698 addreg1 = 0;
699 highest_first = 1;
700 }
701 /* Only one register in the dest is used in the src memory address,
702 and this is the first register of the dest, so we want to do
703 the late half first here also. */
704 else if (! reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
705 highest_first = 1;
706 /* Only one register in the dest is used in the src memory address,
707 and this is the second register of the dest, so we want to do
708 the late half last. If addreg1 is set, and addreg1 is the same
709 register as latehalf, then we must suppress the trailing decrement,
710 because it would clobber the value just loaded. */
711 else if (addreg1 && reg_mentioned_p (addreg1, latehalf[0]))
712 no_addreg1_decrement = 1;
713 }
714
715 /* Normal case: do the two words, low-numbered first.
716 Overlap case (highest_first set): do high-numbered word first. */
717
718 if (! highest_first)
719 output_asm_insn (singlemove_string (operands), operands);
720
721 CC_STATUS_PARTIAL_INIT;
722 /* Make any unoffsettable addresses point at high-numbered word. */
723 if (addreg0)
724 output_asm_insn ("adds 0x4,%0,%0", &addreg0);
725 if (addreg1)
726 output_asm_insn ("adds 0x4,%0,%0", &addreg1);
727
728 /* Do that word. */
729 output_asm_insn (singlemove_string (latehalf), latehalf);
730
731 /* Undo the adds we just did. */
732 if (addreg0)
733 output_asm_insn ("adds -0x4,%0,%0", &addreg0);
734 if (addreg1 && !no_addreg1_decrement)
735 output_asm_insn ("adds -0x4,%0,%0", &addreg1);
736
737 if (highest_first)
738 output_asm_insn (singlemove_string (operands), operands);
739
740 return "";
741 }
742 \f
743 char *
744 output_fp_move_double (operands)
745 rtx *operands;
746 {
747 /* If the source operand is any sort of zero, use f0 instead. */
748
749 if (operands[1] == CONST0_RTX (GET_MODE (operands[1])))
750 operands[1] = gen_rtx (REG, DFmode, F0_REGNUM);
751
752 if (FP_REG_P (operands[0]))
753 {
754 if (FP_REG_P (operands[1]))
755 return "fmov.dd %1,%0";
756 if (GET_CODE (operands[1]) == REG)
757 {
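          /* Transfer the double as two 32-bit halves: emit the ixfr for the
             low word here, then bump both register numbers and return the
             template for the high word.  */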
758 output_asm_insn ("ixfr %1,%0", operands);
759 operands[0] = gen_rtx (REG, VOIDmode, REGNO (operands[0]) + 1);
760 operands[1] = gen_rtx (REG, VOIDmode, REGNO (operands[1]) + 1);
761 return "ixfr %1,%0";
762 }
763 if (operands[1] == CONST0_RTX (DFmode))
764 return "fmov.dd f0,%0";
765 if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
766 {
767 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
768 && (cc_prev_status.flags & CC_HI_R31_ADJ)
769 && cc_prev_status.mdep == XEXP (operands[1], 0)))
770 {
771 CC_STATUS_INIT;
772 output_asm_insn ("orh %h1,%?r0,%?r31", operands);
773 }
774 cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
775 cc_status.mdep = XEXP (operands[1], 0);
776 return "fld.d %L1(%?r31),%0";
777 }
778 return "fld.d %1,%0";
779 }
780 else if (FP_REG_P (operands[1]))
781 {
782 if (GET_CODE (operands[0]) == REG)
783 {
784 output_asm_insn ("fxfr %1,%0", operands);
785 operands[0] = gen_rtx (REG, VOIDmode, REGNO (operands[0]) + 1);
786 operands[1] = gen_rtx (REG, VOIDmode, REGNO (operands[1]) + 1);
787 return "fxfr %1,%0";
788 }
789 if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
790 {
791 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
792 && (cc_prev_status.flags & CC_HI_R31_ADJ)
793 && cc_prev_status.mdep == XEXP (operands[0], 0)))
794 {
795 CC_STATUS_INIT;
796 output_asm_insn ("orh %h0,%?r0,%?r31", operands);
797 }
798 cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
799 cc_status.mdep = XEXP (operands[0], 0);
800 return "fst.d %1,%L0(%?r31)";
801 }
802 return "fst.d %1,%0";
803 }
804 else
805 abort ();
806 /* NOTREACHED */
807 return NULL;
808 }
809 \f
810 /* Return a REG that occurs in ADDR with coefficient 1.
811 ADDR can be effectively incremented by incrementing REG. */
812
813 static rtx
814 find_addr_reg (addr)
815 rtx addr;
816 {
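  /* Step through nested PLUS terms, following a register operand and
     discarding constant terms, until a bare REG remains.  */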
817 while (GET_CODE (addr) == PLUS)
818 {
819 if (GET_CODE (XEXP (addr, 0)) == REG)
820 addr = XEXP (addr, 0);
821 else if (GET_CODE (XEXP (addr, 1)) == REG)
822 addr = XEXP (addr, 1);
823 else if (CONSTANT_P (XEXP (addr, 0)))
824 addr = XEXP (addr, 1);
825 else if (CONSTANT_P (XEXP (addr, 1)))
826 addr = XEXP (addr, 0);
827 else
828 abort ();
829 }
830 if (GET_CODE (addr) == REG)
831 return addr;
832 abort ();
833 /* NOTREACHED */
834 return NULL;
835 }
836
837 /* Return a template for a load instruction with mode MODE and
838 arguments from the string ARGS.
839
840    The returned template string is in static storage.  */
841
842 static char *
843 load_opcode (mode, args, reg)
844 enum machine_mode mode;
845 char *args;
846 rtx reg;
847 {
848 static char buf[30];
849 char *opcode;
850
851 switch (mode)
852 {
853 case QImode:
854 opcode = "ld.b";
855 break;
856
857 case HImode:
858 opcode = "ld.s";
859 break;
860
861 case SImode:
862 case SFmode:
863 if (FP_REG_P (reg))
864 opcode = "fld.l";
865 else
866 opcode = "ld.l";
867 break;
868
869 case DImode:
870 if (!FP_REG_P (reg))
871 abort ();
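      /* Fall through: DImode loads are only done through the FP registers
         and use the same fld.d as DFmode.  */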
872 case DFmode:
873 opcode = "fld.d";
874 break;
875
876 default:
877 abort ();
878 }
879
880 sprintf (buf, "%s %s", opcode, args);
881 return buf;
882 }
883
884 /* Return a template for a store instruction with mode MODE and
885 arguments from the string ARGS.
886
887    The returned template string is in static storage.  */
888
889 static char *
890 store_opcode (mode, args, reg)
891 enum machine_mode mode;
892 char *args;
893 rtx reg;
894 {
895 static char buf[30];
896 char *opcode;
897
898 switch (mode)
899 {
900 case QImode:
901 opcode = "st.b";
902 break;
903
904 case HImode:
905 opcode = "st.s";
906 break;
907
908 case SImode:
909 case SFmode:
910 if (FP_REG_P (reg))
911 opcode = "fst.l";
912 else
913 opcode = "st.l";
914 break;
915
916 case DImode:
917 if (!FP_REG_P (reg))
918 abort ();
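      /* Fall through: DImode stores go through the FP registers and use
         the same fst.d as DFmode.  */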
919 case DFmode:
920 opcode = "fst.d";
921 break;
922
923 default:
924 abort ();
925 }
926
927 sprintf (buf, "%s %s", opcode, args);
928 return buf;
929 }
930 \f
931 /* Output a store-in-memory whose operands are OPERANDS[0,1].
932 OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.
933
934 This function returns a template for an insn.
935 This is in static storage.
936
937 It may also output some insns directly.
938 It may alter the values of operands[0] and operands[1]. */
939
940 char *
941 output_store (operands)
942 rtx *operands;
943 {
944 enum machine_mode mode = GET_MODE (operands[0]);
945 rtx address = XEXP (operands[0], 0);
946 char *string;
947
948 cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
949 cc_status.mdep = address;
950
951 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
952 && (cc_prev_status.flags & CC_HI_R31_ADJ)
953 && address == cc_prev_status.mdep))
954 {
955 CC_STATUS_INIT;
956 output_asm_insn ("orh %h0,%?r0,%?r31", operands);
957 cc_prev_status.mdep = address;
958 }
959
960 /* Store zero in two parts when appropriate. */
961 if (mode == DFmode && operands[1] == CONST0_RTX (DFmode))
962 return store_opcode (DFmode, "%r1,%L0(%?r31)", operands[1]);
963
964 /* Code below isn't smart enough to move a doubleword in two parts,
965 so use output_move_double to do that in the cases that require it. */
966 if ((mode == DImode || mode == DFmode)
967 && ! FP_REG_P (operands[1]))
968 return output_move_double (operands);
969
970 return store_opcode (mode, "%r1,%L0(%?r31)", operands[1]);
971 }
972
973 /* Output a load-from-memory whose operands are OPERANDS[0,1].
974 OPERANDS[0] is a reg, and OPERANDS[1] is a mem.
975
976 This function returns a template for an insn.
977 This is in static storage.
978
979 It may also output some insns directly.
980 It may alter the values of operands[0] and operands[1]. */
981
982 char *
983 output_load (operands)
984 rtx *operands;
985 {
986 enum machine_mode mode = GET_MODE (operands[0]);
987 rtx address = XEXP (operands[1], 0);
988
989 /* We don't bother trying to see if we know %hi(address).
990 This is because we are doing a load, and if we know the
991 %hi value, we probably also know that value in memory. */
992 cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
993 cc_status.mdep = address;
994
995 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
996 && (cc_prev_status.flags & CC_HI_R31_ADJ)
997 && address == cc_prev_status.mdep
998 && cc_prev_status.mdep == cc_status.mdep))
999 {
1000 CC_STATUS_INIT;
1001 output_asm_insn ("orh %h1,%?r0,%?r31", operands);
1002 cc_prev_status.mdep = address;
1003 }
1004
1005 /* Code below isn't smart enough to move a doubleword in two parts,
1006 so use output_move_double to do that in the cases that require it. */
1007 if ((mode == DImode || mode == DFmode)
1008 && ! FP_REG_P (operands[0]))
1009 return output_move_double (operands);
1010
1011 return load_opcode (mode, "%L1(%?r31),%0", operands[0]);
1012 }
1013 \f
1014 #if 0
1015 /* Load the address specified by OPERANDS[3] into the register
1016 specified by OPERANDS[0].
1017
1018 OPERANDS[3] may be the result of a sum, hence it could either be:
1019
1020    (1) CONST
1021    (2) REG
1022    (3) REG + CONST_INT
1023    (4) REG + REG + CONST_INT
1024    (5) REG + REG  (special case of 4).
1025
1026    Note that (4) is not a legitimate address.
1027 All cases are handled here. */
1028
1029 void
1030 output_load_address (operands)
1031 rtx *operands;
1032 {
1033 rtx base, offset;
1034
1035 if (CONSTANT_P (operands[3]))
1036 {
1037 output_asm_insn ("mov %3,%0", operands);
1038 return;
1039 }
1040
1041 if (REG_P (operands[3]))
1042 {
1043 if (REGNO (operands[0]) != REGNO (operands[3]))
1044 output_asm_insn ("shl %?r0,%3,%0", operands);
1045 return;
1046 }
1047
1048 if (GET_CODE (operands[3]) != PLUS)
1049 abort ();
1050
1051 base = XEXP (operands[3], 0);
1052 offset = XEXP (operands[3], 1);
1053
1054 if (GET_CODE (base) == CONST_INT)
1055 {
1056 rtx tmp = base;
1057 base = offset;
1058 offset = tmp;
1059 }
1060
1061 if (GET_CODE (offset) != CONST_INT)
1062 {
1063 /* Operand is (PLUS (REG) (REG)). */
1064 base = operands[3];
1065 offset = const0_rtx;
1066 }
1067
1068 if (REG_P (base))
1069 {
1070 operands[6] = base;
1071 operands[7] = offset;
1072 CC_STATUS_PARTIAL_INIT;
1073 if (SMALL_INT (offset))
1074 output_asm_insn ("adds %7,%6,%0", operands);
1075 else
1076 output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands);
1077 }
1078 else if (GET_CODE (base) == PLUS)
1079 {
1080 operands[6] = XEXP (base, 0);
1081 operands[7] = XEXP (base, 1);
1082 operands[8] = offset;
1083
1084 CC_STATUS_PARTIAL_INIT;
1085 if (SMALL_INT (offset))
1086 output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands);
1087 else
1088 output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands);
1089 }
1090 else
1091 abort ();
1092 }
1093 #endif
1094
1095 /* Output code to place a size count SIZE in register REG.
1096 Because block moves are pipelined, we don't include the
1097 first element in the transfer of SIZE to REG.
1098 For this, we subtract ALIGN. (Actually, I think it is not
1099 right to subtract on this machine, so right now we don't.) */
1100
1101 static void
1102 output_size_for_block_move (size, reg, align)
1103 rtx size, reg, align;
1104 {
1105 rtx xoperands[3];
1106
1107 xoperands[0] = reg;
1108 xoperands[1] = size;
1109 xoperands[2] = align;
1110
1111 #if 1
1112 cc_status.flags &= ~ CC_KNOW_HI_R31;
1113 output_asm_insn (singlemove_string (xoperands), xoperands);
1114 #else
1115 if (GET_CODE (size) == REG)
1116 output_asm_insn ("sub %2,%1,%0", xoperands);
1117 else
1118 {
1119 xoperands[1]
1120 = GEN_INT (INTVAL (size) - INTVAL (align));
1121 cc_status.flags &= ~ CC_KNOW_HI_R31;
1122 output_asm_insn ("mov %1,%0", xoperands);
1123 }
1124 #endif
1125 }
1126
1127 /* Emit code to perform a block move.
1128
1129 OPERANDS[0] is the destination.
1130 OPERANDS[1] is the source.
1131 OPERANDS[2] is the size.
1132 OPERANDS[3] is the known safe alignment.
1133 OPERANDS[4..6] are pseudos we can safely clobber as temps. */
1134
1135 char *
1136 output_block_move (operands)
1137 rtx *operands;
1138 {
1139   /* A vector for our computed operands.  Note that output_load_address
1140 makes use of (and can clobber) up to the 8th element of this vector. */
1141 rtx xoperands[10];
1142 rtx zoperands[10];
1143 static int movstrsi_label = 0;
1144 int i, j;
1145 rtx temp1 = operands[4];
1146 rtx alignrtx = operands[3];
1147 int align = INTVAL (alignrtx);
1148 int chunk_size;
1149
1150 xoperands[0] = operands[0];
1151 xoperands[1] = operands[1];
1152 xoperands[2] = temp1;
1153
1154 /* We can't move more than four bytes at a time
1155 because we have only one register to move them through. */
1156 if (align > 4)
1157 {
1158 align = 4;
1159 alignrtx = GEN_INT (4);
1160 }
1161
1162 /* Recognize special cases of block moves. These occur
1163 when GNU C++ is forced to treat something as BLKmode
1164 to keep it in memory, when its mode could be represented
1165 with something smaller.
1166
1167 We cannot do this for global variables, since we don't know
1168 what pages they don't cross. Sigh. */
1169 if (GET_CODE (operands[2]) == CONST_INT
1170 && ! CONSTANT_ADDRESS_P (operands[0])
1171 && ! CONSTANT_ADDRESS_P (operands[1]))
1172 {
1173 int size = INTVAL (operands[2]);
1174 rtx op0 = xoperands[0];
1175 rtx op1 = xoperands[1];
1176
1177 if ((align & 3) == 0 && (size & 3) == 0 && (size >> 2) <= 16)
1178 {
1179 if (memory_address_p (SImode, plus_constant (op0, size))
1180 && memory_address_p (SImode, plus_constant (op1, size)))
1181 {
1182 cc_status.flags &= ~CC_KNOW_HI_R31;
1183 for (i = (size>>2)-1; i >= 0; i--)
1184 {
1185 xoperands[0] = plus_constant (op0, i * 4);
1186 xoperands[1] = plus_constant (op1, i * 4);
1187 output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
1188 xoperands);
1189 }
1190 return "";
1191 }
1192 }
1193 else if ((align & 1) == 0 && (size & 1) == 0 && (size >> 1) <= 16)
1194 {
1195 if (memory_address_p (HImode, plus_constant (op0, size))
1196 && memory_address_p (HImode, plus_constant (op1, size)))
1197 {
1198 cc_status.flags &= ~CC_KNOW_HI_R31;
1199 for (i = (size>>1)-1; i >= 0; i--)
1200 {
1201 xoperands[0] = plus_constant (op0, i * 2);
1202 xoperands[1] = plus_constant (op1, i * 2);
1203 output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
1204 xoperands);
1205 }
1206 return "";
1207 }
1208 }
1209 else if (size <= 16)
1210 {
1211 if (memory_address_p (QImode, plus_constant (op0, size))
1212 && memory_address_p (QImode, plus_constant (op1, size)))
1213 {
1214 cc_status.flags &= ~CC_KNOW_HI_R31;
1215 for (i = size-1; i >= 0; i--)
1216 {
1217 xoperands[0] = plus_constant (op0, i);
1218 xoperands[1] = plus_constant (op1, i);
1219 output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
1220 xoperands);
1221 }
1222 return "";
1223 }
1224 }
1225 }
1226
1227 /* Since we clobber untold things, nix the condition codes. */
1228 CC_STATUS_INIT;
1229
1230 /* This is the size of the transfer.
1231 Either use the register which already contains the size,
1232 or use a free register (used by no operands). */
1233 output_size_for_block_move (operands[2], operands[4], alignrtx);
1234
1235 #if 0
1236 /* Also emit code to decrement the size value by ALIGN. */
1237 zoperands[0] = operands[0];
1238 zoperands[3] = plus_constant (operands[0], align);
1239 output_load_address (zoperands);
1240 #endif
1241
1242 /* Generate number for unique label. */
1243
1244 xoperands[3] = GEN_INT (movstrsi_label++);
1245
1246 /* Calculate the size of the chunks we will be trying to move first. */
1247
1248 #if 0
1249 if ((align & 3) == 0)
1250 chunk_size = 4;
1251 else if ((align & 1) == 0)
1252 chunk_size = 2;
1253 else
1254 #endif
1255 chunk_size = 1;
1256
1257 /* Copy the increment (negative) to a register for bla insn. */
1258
1259 xoperands[4] = GEN_INT (- chunk_size);
1260 xoperands[5] = operands[5];
1261 output_asm_insn ("adds %4,%?r0,%5", xoperands);
1262
1263 /* Predecrement the loop counter. This happens again also in the `bla'
1264 instruction which precedes the loop, but we need to have it done
1265 two times before we enter the loop because of the bizarre semantics
1266 of the bla instruction. */
1267
1268 output_asm_insn ("adds %5,%2,%2", xoperands);
1269
1270 /* Check for the case where the original count was less than or equal to
1271 zero. Avoid going through the loop at all if the original count was
1272 indeed less than or equal to zero. Note that we treat the count as
1273 if it were a signed 32-bit quantity here, rather than an unsigned one,
1274 even though we really shouldn't. We have to do this because of the
1275 semantics of the `ble' instruction, which assume that the count is
1276 a signed 32-bit value. Anyway, in practice it won't matter because
1277 nobody is going to try to do a memcpy() of more than half of the
1278 entire address space (i.e. 2 gigabytes) anyway. */
1279
1280 output_asm_insn ("bc .Le%3", xoperands);
1281
1282 /* Make available a register which is a temporary. */
1283
1284 xoperands[6] = operands[6];
1285
1286 /* Now the actual loop.
1287 In xoperands, elements 1 and 0 are the input and output vectors.
1288 Element 2 is the loop index. Element 5 is the increment. */
1289
1290 output_asm_insn ("subs %1,%5,%1", xoperands);
1291 output_asm_insn ("bla %5,%2,.Lm%3", xoperands);
1292 output_asm_insn ("adds %0,%2,%6", xoperands);
1293 output_asm_insn ("\n.Lm%3:", xoperands); /* Label for bla above. */
1294 output_asm_insn ("\n.Ls%3:", xoperands); /* Loop start label. */
1295 output_asm_insn ("adds %5,%6,%6", xoperands);
1296
1297   /* NOTE: The code here which is supposed to handle the cases where the
1298      sources and destinations are known to start on a 4 or 2 byte boundary
1299      is currently broken.  It fails to do anything about the overflow
1300      bytes which might still need to be copied even after we have copied
1301      some number of words or halfwords.  Thus, for now we use the lowest
1302      common denominator, i.e. the code which just copies some number of
1303      totally unaligned individual bytes.  (See the calculation of
1304      chunk_size above.)  */
1305
1306 if (chunk_size == 4)
1307 {
1308 output_asm_insn ("ld.l %2(%1),%?r31", xoperands);
1309 output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
1310 output_asm_insn ("st.l %?r31,8(%6)", xoperands);
1311 }
1312 else if (chunk_size == 2)
1313 {
1314 output_asm_insn ("ld.s %2(%1),%?r31", xoperands);
1315 output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
1316 output_asm_insn ("st.s %?r31,4(%6)", xoperands);
1317 }
1318 else /* chunk_size == 1 */
1319 {
1320 output_asm_insn ("ld.b %2(%1),%?r31", xoperands);
1321 output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
1322 output_asm_insn ("st.b %?r31,2(%6)", xoperands);
1323 }
1324 output_asm_insn ("\n.Le%3:", xoperands); /* Here if count <= 0. */
1325
1326 return "";
1327 }
1328 \f
1329 #if 0
1330 /* Output a delayed branch insn with the delay insn in its
1331 branch slot. The delayed branch insn template is in TEMPLATE,
1332 with operands OPERANDS. The insn in its delay slot is INSN.
1333
1334 As a special case, since we know that all memory transfers are via
1335 ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
1336 reference around the branch as
1337
1338 orh ha%x,%?r0,%?r31
1339 b ...
1340 ld/st l%x(%?r31),...
1341
1342 As another special case, we handle loading (SYMBOL_REF ...) and
1343 other large constants around branches as well:
1344
1345 orh h%x,%?r0,%0
1346 b ...
1347 or l%x,%0,%1
1348
1349 */
1350 /* ??? Disabled because this re-recognition is incomplete and causes
1351 constrain_operands to segfault. Anyone who cares should fix up
1352 the code to use the DBR pass. */
1353
1354 char *
1355 output_delayed_branch (template, operands, insn)
1356 char *template;
1357 rtx *operands;
1358 rtx insn;
1359 {
1360 rtx src = XVECEXP (PATTERN (insn), 0, 1);
1361 rtx dest = XVECEXP (PATTERN (insn), 0, 0);
1362
1363 /* See if we are doing some branch together with setting some register
1364 to some 32-bit value which does (or may) have some of the high-order
1365 16 bits set. If so, we need to set the register in two stages. One
1366 stage must be done before the branch, and the other one can be done
1367 in the delay slot. */
1368
1369 if ( (GET_CODE (src) == CONST_INT
1370 && ((unsigned) INTVAL (src) & (unsigned) 0xffff0000) != (unsigned) 0)
1371 || (GET_CODE (src) == SYMBOL_REF)
1372 || (GET_CODE (src) == LABEL_REF)
1373 || (GET_CODE (src) == CONST))
1374 {
1375 rtx xoperands[2];
1376 xoperands[0] = dest;
1377 xoperands[1] = src;
1378
1379 CC_STATUS_PARTIAL_INIT;
1380 /* Output the `orh' insn. */
1381 output_asm_insn ("orh %H1,%?r0,%0", xoperands);
1382
1383 /* Output the branch instruction next. */
1384 output_asm_insn (template, operands);
1385
1386 /* Now output the `or' insn. */
1387 output_asm_insn ("or %L1,%0,%0", xoperands);
1388 }
1389 else if ((GET_CODE (src) == MEM
1390 && CONSTANT_ADDRESS_P (XEXP (src, 0)))
1391 || (GET_CODE (dest) == MEM
1392 && CONSTANT_ADDRESS_P (XEXP (dest, 0))))
1393 {
1394 rtx xoperands[2];
1395 char *split_template;
1396 xoperands[0] = dest;
1397 xoperands[1] = src;
1398
1399 /* Output the `orh' insn. */
1400 if (GET_CODE (src) == MEM)
1401 {
1402 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
1403 && (cc_prev_status.flags & CC_HI_R31_ADJ)
1404 && cc_prev_status.mdep == XEXP (operands[1], 0)))
1405 {
1406 CC_STATUS_INIT;
1407 output_asm_insn ("orh %h1,%?r0,%?r31", xoperands);
1408 }
1409 split_template = load_opcode (GET_MODE (dest),
1410 "%L1(%?r31),%0", dest);
1411 }
1412 else
1413 {
1414 if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
1415 && (cc_prev_status.flags & CC_HI_R31_ADJ)
1416 && cc_prev_status.mdep == XEXP (operands[0], 0)))
1417 {
1418 CC_STATUS_INIT;
1419 output_asm_insn ("orh %h0,%?r0,%?r31", xoperands);
1420 }
1421 split_template = store_opcode (GET_MODE (dest),
1422 "%r1,%L0(%?r31)", src);
1423 }
1424
1425 /* Output the branch instruction next. */
1426 output_asm_insn (template, operands);
1427
1428 /* Now output the load or store.
1429 No need to do a CC_STATUS_INIT, because we are branching anyway. */
1430 output_asm_insn (split_template, xoperands);
1431 }
1432 else
1433 {
1434 int insn_code_number;
1435 rtx pat = gen_rtx (SET, VOIDmode, dest, src);
1436 rtx delay_insn = gen_rtx (INSN, VOIDmode, 0, 0, 0, pat, -1, 0, 0);
1437 int i;
1438
1439 /* Output the branch instruction first. */
1440 output_asm_insn (template, operands);
1441
1442 /* Now recognize the insn which we put in its delay slot.
1443 We must do this after outputting the branch insn,
1444 since operands may just be a pointer to `recog_operand'. */
1445 INSN_CODE (delay_insn) = insn_code_number
1446 = recog (pat, delay_insn, NULL_PTR);
1447 if (insn_code_number == -1)
1448 abort ();
1449
1450 for (i = 0; i < insn_n_operands[insn_code_number]; i++)
1451 {
1452 if (GET_CODE (recog_operand[i]) == SUBREG)
1453 recog_operand[i] = alter_subreg (recog_operand[i]);
1454 }
1455
1456 insn_extract (delay_insn);
1457 if (! constrain_operands (1))
1458 fatal_insn_not_found (delay_insn);
1459
1460 template = insn_template[insn_code_number];
1461 if (template == 0)
1462 template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn);
1463 output_asm_insn (template, recog_operand);
1464 }
1465 CC_STATUS_INIT;
1466 return "";
1467 }
1468
1469 /* Output a newly constructed insn DELAY_INSN. */
1470 char *
1471 output_delay_insn (delay_insn)
1472 rtx delay_insn;
1473 {
1474 char *template;
1475 int insn_code_number;
1476 int i;
1477
1478 /* Now recognize the insn which we put in its delay slot.
1479 We must do this after outputting the branch insn,
1480 since operands may just be a pointer to `recog_operand'. */
1481 insn_code_number = recog_memoized (delay_insn);
1482 if (insn_code_number == -1)
1483 abort ();
1484
1485 /* Extract the operands of this delay insn. */
1486 INSN_CODE (delay_insn) = insn_code_number;
1487 insn_extract (delay_insn);
1488
1489 /* It is possible that this insn has not been properly scanned by final
1490 yet. If this insn's operands don't appear in the peephole's
1491 actual operands, then they won't be fixed up by final, so we
1492 make sure they get fixed up here. -- This is a kludge. */
1493 for (i = 0; i < insn_n_operands[insn_code_number]; i++)
1494 {
1495 if (GET_CODE (recog_operand[i]) == SUBREG)
1496 recog_operand[i] = alter_subreg (recog_operand[i]);
1497 }
1498
1499 #ifdef REGISTER_CONSTRAINTS
1500 if (! constrain_operands (1))
1501 abort ();
1502 #endif
1503
1504 cc_prev_status = cc_status;
1505
1506 /* Update `cc_status' for this instruction.
1507 The instruction's output routine may change it further.
1508 If the output routine for a jump insn needs to depend
1509 on the cc status, it should look at cc_prev_status. */
1510
1511 NOTICE_UPDATE_CC (PATTERN (delay_insn), delay_insn);
1512
1513 /* Now get the template for what this insn would
1514 have been, without the branch. */
1515
1516 template = insn_template[insn_code_number];
1517 if (template == 0)
1518 template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn);
1519 output_asm_insn (template, recog_operand);
1520 return "";
1521 }
1522 #endif
1523 \f
1524 /* Special routine to convert an SFmode value represented as a
1525 CONST_DOUBLE into its equivalent unsigned long bit pattern.
1526 We convert the value from a double precision floating-point
1527 value to single precision first, and thence to a bit-wise
1528 equivalent unsigned long value. This routine is used when
1529 generating an immediate move of an SFmode value directly
1530 into a general register because the svr4 assembler doesn't
1531 grok floating literals in instruction operand contexts. */
1532
1533 unsigned long
1534 sfmode_constant_to_ulong (x)
1535 rtx x;
1536 {
1537 REAL_VALUE_TYPE d;
1538 union { float f; unsigned long i; } u2;
1539
1540 if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != SFmode)
1541 abort ();
1542
1543 #if TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT
1544 #error IEEE emulation needed
1545 #endif
1546 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
1547 u2.f = d;
1548 return u2.i;
1549 }
1550 \f
1551 /* This function generates the assembly code for function entry.
1552 The macro FUNCTION_PROLOGUE in i860.h is defined to call this function.
1553
1554 ASM_FILE is a stdio stream to output the code to.
1555 SIZE is an int: how many units of temporary storage to allocate.
1556
1557 Refer to the array `regs_ever_live' to determine which registers
1558 to save; `regs_ever_live[I]' is nonzero if register number I
1559 is ever used in the function. This macro is responsible for
1560 knowing which registers should not be saved even if used.
1561
1562 NOTE: `frame_lower_bytes' is the count of bytes which will lie
1563 between the new `fp' value and the new `sp' value after the
1564 prologue is done. `frame_upper_bytes' is the count of bytes
1565 that will lie between the new `fp' and the *old* `sp' value
1566 after the new `fp' is setup (in the prologue). The upper
1567 part of each frame always includes at least 2 words (8 bytes)
1568 to hold the saved frame pointer and the saved return address.
1569
1570 The svr4 ABI for the i860 now requires that the values of the
1571 stack pointer and frame pointer registers be kept aligned to
1572 16-byte boundaries at all times. We obey that restriction here.
1573
1574 The svr4 ABI for the i860 is entirely vague when it comes to specifying
1575 exactly where the "preserved" registers should be saved. The native
1576 svr4 C compiler I now have doesn't help to clarify the requirements
1577 very much because it is plainly out-of-date and non-ABI-compliant
1578 (in at least one important way, i.e. how it generates function
1579 epilogues).
1580
1581 The native svr4 C compiler saves the "preserved" registers (i.e.
1582 r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative
1583 offsets from the frame pointer).
1584
1585 Previous versions of GCC also saved the "preserved" registers in the
1586 "negative" part of the frame, but they saved them using positive
1587 offsets from the (adjusted) stack pointer (after it had been adjusted
1588 to allocate space for the new frame). That's just plain wrong
1589 because if the current function calls alloca(), the stack pointer
1590 will get moved, and it will be impossible to restore the registers
1591 properly again after that.
1592
1593 Both compilers handled parameter registers (i.e. r16-r27 and f8-f15)
1594 by copying their values either into various "preserved" registers or
1595 into stack slots in the lower part of the current frame (as seemed
1596 appropriate, depending upon subsequent usage of these values).
1597
1598 Here we want to save the preserved registers at some offset from the
1599 frame pointer register so as to avoid any possible problems arising
1600 from calls to alloca(). We can either save them at small positive
1601 offsets from the frame pointer, or at small negative offsets from
1602 the frame pointer. If we save them at small negative offsets from
1603 the frame pointer (i.e. in the lower part of the frame) then we
1604 must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how
1605 many bytes of space we plan to use in the lower part of the frame
1606 for this purpose. Since other parts of the compiler reference the
1607 value of STARTING_FRAME_OFFSET long before final() calls this function,
1608 we would have to go ahead and assume the worst-case storage requirements
1609 for saving all of the "preserved" registers (and use that number, i.e.
1610 `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in
1611 the lower part of the frame. That could potentially be very wasteful,
1612 and that wastefulness could really hamper people compiling for embedded
1613 i860 targets with very tight limits on stack space. Thus, we choose
1614 here to save the preserved registers in the upper part of the
1615 frame, so that we can decide at the very last minute how much (or how
1616 little) space we must allocate for this purpose.
1617
1618 To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved
1619 registers must always be saved so that the saved values of registers
1620 with higher numbers are at higher addresses. We obey that restriction
1621 here.
1622
1623 There are two somewhat different ways that you can generate prologues
1624 here... i.e. pedantically ABI-compliant, and the "other" way. The
1625 "other" way is more consistent with what is currently generated by the
1626 "native" svr4 C compiler for the i860. That's important if you want
1627 to use the current (as of 8/91) incarnation of svr4 SDB for the i860.
1628 The SVR4 SDB for the i860 insists on having function prologues be
1629 non-ABI-compliant!
1630
1631 To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES
1632 in the i860svr4.h file. (By default this is *not* defined).
1633
1634 The differences between the ABI-compliant and non-ABI-compliant prologues
1635 are that (a) the ABI version seems to require the use of *signed*
1636 (rather than unsigned) adds and subtracts, and (b) the ordering of
1637 the various steps (e.g. saving preserved registers, saving the
1638 return address, setting up the new frame pointer value) is different.
1639
1640 For strict ABI compliance, it seems to be the case that the very last
1641 thing that is supposed to happen in the prologue is getting the frame
1642 pointer set to its new value (but only after everything else has
1643 already been properly setup). We do that here, but only if the symbol
1644 I860_STRICT_ABI_PROLOGUES is defined.
1645 */
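/* A rough sketch (not authoritative) of the frame layout produced by the
   common small-frame, non-strict-ABI prologue below, with higher addresses
   toward the top:

        caller's frame                     <- old %sp
        [padding to a 16-byte multiple]
        saved "preserved" registers        from must_preserve_bytes(%fp) upward
        saved %r1 (return address)         at 4(%fp), when it must be saved
        saved caller's %fp                 at 0(%fp)  <- new %fp
        locals and temporaries             (frame_lower_bytes of space)
                                           <- new %sp                       */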
1646
1647 #ifndef STACK_ALIGNMENT
1648 #define STACK_ALIGNMENT 16
1649 #endif
1650
1651 extern char call_used_regs[];
1652 extern int leaf_function_p ();
1653
1654 char *current_function_original_name;
1655
1656 static int must_preserve_r1;
1657 static unsigned must_preserve_bytes;
1658
1659 void
1660 function_prologue (asm_file, local_bytes)
1661 register FILE *asm_file;
1662 register unsigned local_bytes;
1663 {
1664 register unsigned frame_lower_bytes;
1665 register unsigned frame_upper_bytes;
1666 register unsigned total_fsize;
1667 register unsigned preserved_reg_bytes = 0;
1668 register unsigned i;
1669 register unsigned preserved_so_far = 0;
1670
1671 must_preserve_r1 = (optimize < 2 || ! leaf_function_p ());
1672 must_preserve_bytes = 4 + (must_preserve_r1 ? 4 : 0);
1673
1674 /* Count registers that need preserving. Ignore r0. It never needs
1675 preserving. */
1676
1677 for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
1678 {
1679 if (regs_ever_live[i] && ! call_used_regs[i])
1680 preserved_reg_bytes += 4;
1681 }
1682
1683 /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
1684
1685 frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
1686
1687 /* The upper part of each frame will contain the saved fp,
1688 the saved r1, and stack slots for all of the other "preserved"
1689 registers that we find we will need to save & restore. */
1690
1691 frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;
1692
1693 /* Round-up the frame_upper_bytes so that it's a multiple of 16. */
1694
1695 frame_upper_bytes
1696 = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
1697
1698 total_fsize = frame_upper_bytes + frame_lower_bytes;
1699
1700 #ifndef I860_STRICT_ABI_PROLOGUES
1701
1702 /* There are two kinds of function prologues.
1703 You use the "small" version if the total frame size is
1704 small enough so that it can fit into an immediate 16-bit
1705 value in one instruction. Otherwise, you use the "large"
1706 version of the function prologue. */
1707
1708 if (total_fsize > 0x7fff)
1709 {
1710 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1711 but the native C compiler on svr4 uses `addu'. */
1712
1713 fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
1714 frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
1715
1716 /* Save the old frame pointer. */
1717
1718 fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
1719 i860_reg_prefix, i860_reg_prefix);
1720
1721 /* Setup the new frame pointer. The ABI sez to do this after
1722 preserving registers (using adds), but that's not what the
1723 native C compiler on svr4 does. */
1724
1725 fprintf (asm_file, "\taddu 0,%ssp,%sfp\n",
1726 i860_reg_prefix, i860_reg_prefix);
1727
1728 /* Get the value of frame_lower_bytes into r31. */
1729
1730 fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
1731 frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
1732 fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
1733 frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);
1734
1735 /* Now re-adjust the stack pointer using the value in r31.
1736 The ABI sez to do this with `subs' but SDB may prefer `subu'. */
1737
1738 fprintf (asm_file, "\tsubu %ssp,%sr31,%ssp\n",
1739 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
1740
1741 /* Preserve registers. The ABI sez to do this before setting
1742 up the new frame pointer, but that's not what the native
1743 C compiler on svr4 does. */
1744
1745 for (i = 1; i < 32; i++)
1746 if (regs_ever_live[i] && ! call_used_regs[i])
1747 fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
1748 i860_reg_prefix, reg_names[i],
1749 must_preserve_bytes + (4 * preserved_so_far++),
1750 i860_reg_prefix);
1751
1752 for (i = 32; i < 64; i++)
1753 if (regs_ever_live[i] && ! call_used_regs[i])
1754 fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
1755 i860_reg_prefix, reg_names[i],
1756 must_preserve_bytes + (4 * preserved_so_far++),
1757 i860_reg_prefix);
1758
1759 /* Save the return address. */
1760
1761 if (must_preserve_r1)
1762 fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
1763 i860_reg_prefix, i860_reg_prefix);
1764 }
1765 else
1766 {
1767 /* Adjust the stack pointer. The ABI sez to do this using `adds',
1768 but the native C compiler on svr4 uses `addu'. */
1769
1770 fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
1771 total_fsize, i860_reg_prefix, i860_reg_prefix);
1772
1773 /* Save the old frame pointer. */
1774
1775 fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
1776 i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);
1777
1778 /* Setup the new frame pointer. The ABI sez to do this after
1779 preserving registers and after saving the return address,
1780      (and it sez to do this using adds), but that's not what the
1781 native C compiler on svr4 does. */
1782
1783 fprintf (asm_file, "\taddu %d,%ssp,%sfp\n",
1784 frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
1785
1786 /* Preserve registers. The ABI sez to do this before setting
1787 up the new frame pointer, but that's not what the native
1788 compiler on svr4 does. */
1789
1790 for (i = 1; i < 32; i++)
1791 if (regs_ever_live[i] && ! call_used_regs[i])
1792 fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
1793 i860_reg_prefix, reg_names[i],
1794 must_preserve_bytes + (4 * preserved_so_far++),
1795 i860_reg_prefix);
1796
1797 for (i = 32; i < 64; i++)
1798 if (regs_ever_live[i] && ! call_used_regs[i])
1799 fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
1800 i860_reg_prefix, reg_names[i],
1801 must_preserve_bytes + (4 * preserved_so_far++),
1802 i860_reg_prefix);
1803
1804 /* Save the return address. The ABI sez to do this earlier,
1805 and also via an offset from %sp, but the native C compiler
1806 on svr4 does it later (i.e. now) and uses an offset from
1807 %fp. */
1808
1809 if (must_preserve_r1)
1810 fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
1811 i860_reg_prefix, i860_reg_prefix);
1812 }
1813
1814 #else /* defined(I860_STRICT_ABI_PROLOGUES) */
1815
1816 /* There are two kinds of function prologues.
1817 You use the "small" version if the total frame size is
1818 small enough so that it can fit into an immediate 16-bit
1819 value in one instruction. Otherwise, you use the "large"
1820 version of the function prologue. */
1821
1822 if (total_fsize > 0x7fff)
1823 {
1824 /* Adjust the stack pointer (thereby allocating a new frame). */
1825
1826 fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
1827 frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
1828
1829 /* Save the caller's frame pointer. */
1830
1831 fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
1832 i860_reg_prefix, i860_reg_prefix);
1833
1834 /* Save return address. */
1835
1836 if (must_preserve_r1)
1837 fprintf (asm_file, "\tst.l %sr1,4(%ssp)\n",
1838 i860_reg_prefix, i860_reg_prefix);
1839
1840 /* Get the value of frame_lower_bytes into r31 for later use. */
1841
1842 fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
1843 frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
1844 fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
1845 frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);
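      /* A sketch of what the two instructions above produce: if
         frame_lower_bytes were 0x12340, the output (with an empty
         i860_reg_prefix; the %d formats print decimal) would be roughly

                orh 1,r0,r31            r31 = 0x00010000
                or  9024,r31,r31        r31 = 0x00012340

         i.e. the high and low 16-bit halves of the constant are combined
         in r31 because the value is too large for one 16-bit immediate.  */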
1846
1847 /* Now re-adjust the stack pointer using the value in r31. */
1848
1849 fprintf (asm_file, "\tsubs %ssp,%sr31,%ssp\n",
1850 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
1851
1852 /* Pre-compute value to be used as the new frame pointer. */
1853
1854 fprintf (asm_file, "\tadds %ssp,%sr31,%sr31\n",
1855 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
1856
1857 /* Preserve registers. */
1858
1859 for (i = 1; i < 32; i++)
1860 if (regs_ever_live[i] && ! call_used_regs[i])
1861 fprintf (asm_file, "\tst.l %s%s,%d(%sr31)\n",
1862 i860_reg_prefix, reg_names[i],
1863 must_preserve_bytes + (4 * preserved_so_far++),
1864 i860_reg_prefix);
1865
1866 for (i = 32; i < 64; i++)
1867 if (regs_ever_live[i] && ! call_used_regs[i])
1868 fprintf (asm_file, "\tfst.l %s%s,%d(%sr31)\n",
1869 i860_reg_prefix, reg_names[i],
1870 must_preserve_bytes + (4 * preserved_so_far++),
1871 i860_reg_prefix);
1872
1873 /* Actually set the new value of the frame pointer. */
1874
1875 fprintf (asm_file, "\tmov %sr31,%sfp\n",
1876 i860_reg_prefix, i860_reg_prefix);
1877 }
1878 else
1879 {
1880 /* Adjust the stack pointer. */
1881
1882 fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
1883 total_fsize, i860_reg_prefix, i860_reg_prefix);
1884
1885 /* Save the caller's frame pointer. */
1886
1887 fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
1888 i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);
1889
1890 /* Save the return address. */
1891
1892 if (must_preserve_r1)
1893 fprintf (asm_file, "\tst.l %sr1,%d(%ssp)\n",
1894 i860_reg_prefix, frame_lower_bytes + 4, i860_reg_prefix);
1895
1896 /* Preserve registers. */
1897
1898 for (i = 1; i < 32; i++)
1899 if (regs_ever_live[i] && ! call_used_regs[i])
1900 fprintf (asm_file, "\tst.l %s%s,%d(%ssp)\n",
1901 i860_reg_prefix, reg_names[i],
1902 frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
1903 i860_reg_prefix);
1904
1905 for (i = 32; i < 64; i++)
1906 if (regs_ever_live[i] && ! call_used_regs[i])
1907 fprintf (asm_file, "\tfst.l %s%s,%d(%ssp)\n",
1908 i860_reg_prefix, reg_names[i],
1909 frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
1910 i860_reg_prefix);
1911
1912       /* Set up the new frame pointer.  */
1913
1914 fprintf (asm_file, "\tadds %d,%ssp,%sfp\n",
1915 frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
1916 }
1917 #endif /* defined(I860_STRICT_ABI_PROLOGUES) */
1918
1919 #ifdef ASM_OUTPUT_PROLOGUE_SUFFIX
1920 ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file);
1921 #endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */
1922 }
1923 \f
1924 /* This function generates the assembly code for function exit.
1925 The macro FUNCTION_EPILOGUE in i860.h is defined to call this function.
1926
1927 ASM_FILE is a stdio stream to output the code to.
1928    LOCAL_BYTES is an unsigned int: how many bytes of local storage the function allocated.
1929
1930 The function epilogue should not depend on the current stack pointer!
1931 It should use the frame pointer only. This is mandatory because
1932 of alloca; we also take advantage of it to omit stack adjustments
1933 before returning.
1934
1935 Note that when we go to restore the preserved register values we must
1936 not try to address their slots by using offsets from the stack pointer.
1937 That's because the stack pointer may have been moved during the function
1938 execution due to a call to alloca(). Rather, we must restore all
1939 preserved registers via offsets from the frame pointer value.
1940
1941 Note also that when the current frame is being "popped" (by adjusting
1942 the value of the stack pointer) on function exit, we must (for the
1943 sake of alloca) set the new value of the stack pointer based upon
1944 the current value of the frame pointer. We can't just add what we
1945 believe to be the (static) frame size to the stack pointer because
1946 if we did that, and alloca() had been called during this function,
1947 we would end up returning *without* having fully deallocated all of
1948 the space grabbed by alloca. If that happened, and a function
1949 containing one or more alloca() calls was called over and over again,
1950 then the stack would grow without limit!
1951
1952 Finally note that the epilogues generated here are completely ABI
1953    compliant.  They go out of their way to ensure that the value in
1954 the frame pointer register is never less than the value in the stack
1955 pointer register. It's not clear why this relationship needs to be
1956 maintained at all times, but maintaining it only costs one extra
1957 instruction, so what the hell.
1958 */
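/* For illustration only, an epilogue as emitted by the code below for a
   function with no preserved registers beyond fp/r1 and a hypothetical
   frame_upper_bytes of 16 would look roughly like this (empty
   i860_reg_prefix assumed):

        adds 16,fp,r31          caller's sp, computed from fp (alloca-safe)
        ld.l 4(fp),r1           reload the return address
        ld.l 0(fp),fp           reload the caller's frame pointer
        bri r1                  return ...
        mov r31,sp              ... restoring sp in the branch delay slot  */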
1959
1960 /* This corresponds to a version 4 TDESC structure. Lower numbered
1961 versions successively omit the last word of the structure. We
1962 don't try to handle version 5 here. */
1963
1964 typedef struct TDESC_flags {
1965 int version:4;
1966 int reg_packing:1;
1967 int callable_block:1;
1968 int reserved:4;
1969 int fregs:6; /* fp regs 2-7 */
1970 int iregs:16; /* regs 0-15 */
1971 } TDESC_flags;
1972
1973 typedef struct TDESC {
1974 TDESC_flags flags;
1975 int integer_reg_offset; /* same as must_preserve_bytes */
1976 int floating_point_reg_offset;
1977 unsigned int positive_frame_size; /* same as frame_upper_bytes */
1978 unsigned int negative_frame_size; /* same as frame_lower_bytes */
1979 } TDESC;
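/* For reference, the OUTPUT_TDESC code at the end of function_epilogue
   lays a version-4 TDESC down in the assembly output roughly like this
   (illustrative; "foo" is a made-up function name and ".long" stands in
   for whatever ASM_LONG expands to):

        foo.TDESC:
                .long 0x...                     packed TDESC_flags word
                .long <integer_reg_offset>
                .long <floating_point_reg_offset>
                .long <positive_frame_size>
                .long <negative_frame_size>

   followed, in the tdesc section, by a pointer to foo and a pointer to
   foo.TDESC.  Lower-numbered versions simply stop after fewer words.  */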
1980
1981 void
1982 function_epilogue (asm_file, local_bytes)
1983 register FILE *asm_file;
1984 register unsigned local_bytes;
1985 {
1986 register unsigned frame_upper_bytes;
1987 register unsigned frame_lower_bytes;
1988 register unsigned preserved_reg_bytes = 0;
1989 register unsigned i;
1990 register unsigned restored_so_far = 0;
1991 register unsigned int_restored;
1992 register unsigned mask;
1993 unsigned intflags=0;
1994 register TDESC_flags *flags = (TDESC_flags *) &intflags;
1995
1996 flags->version = 4;
1997 flags->reg_packing = 1;
1998 flags->iregs = 8; /* old fp always gets saved */
1999
2000   /* Round up frame_lower_bytes so that it's a multiple of 16.  */
2001
2002 frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
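  /* For example, with STACK_ALIGNMENT == 16 and local_bytes == 20,
     (20 + 15) & -16 yields 32.  */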
2003
2004 /* Count the number of registers that were preserved in the prologue.
2005 Ignore r0. It is never preserved. */
2006
2007 for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
2008 {
2009 if (regs_ever_live[i] && ! call_used_regs[i])
2010 preserved_reg_bytes += 4;
2011 }
2012
2013 /* The upper part of each frame will contain only saved fp,
2014 the saved r1, and stack slots for all of the other "preserved"
2015 registers that we find we will need to save & restore. */
2016
2017 frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;
2018
2019   /* Round up frame_upper_bytes so that it is a multiple of 16.  */
2020
2021 frame_upper_bytes
2022 = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
2023
2024 /* Restore all of the "preserved" registers that need restoring. */
2025
2026 mask = 2;
2027
2028 for (i = 1; i < 32; i++, mask<<=1)
2029 if (regs_ever_live[i] && ! call_used_regs[i]) {
2030 fprintf (asm_file, "\tld.l %d(%sfp),%s%s\n",
2031 must_preserve_bytes + (4 * restored_so_far++),
2032 i860_reg_prefix, i860_reg_prefix, reg_names[i]);
2033 if (i > 3 && i < 16)
2034 flags->iregs |= mask;
2035 }
2036
2037 int_restored = restored_so_far;
2038 mask = 1;
2039
2040 for (i = 32; i < 64; i++) {
2041 if (regs_ever_live[i] && ! call_used_regs[i]) {
2042 fprintf (asm_file, "\tfld.l %d(%sfp),%s%s\n",
2043 must_preserve_bytes + (4 * restored_so_far++),
2044 i860_reg_prefix, i860_reg_prefix, reg_names[i]);
2045       if (i > 33 && i < 40)
2046 flags->fregs |= mask;
2047 }
2048 if (i > 33 && i < 40)
2049 mask<<=1;
2050 }
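  /* The net effect of the two loops above, for the TDESC record: bit N
     of flags->iregs marks integer register rN as restored (only r4-r15
     are tracked here; the frame pointer's bit is preset above and r1's
     bit is set below), and bit K of flags->fregs marks FP register
     f(K+2), i.e. f2-f7, as restored.  */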
2051
2052 /* Get the value we plan to use to restore the stack pointer into r31. */
2053
2054 fprintf (asm_file, "\tadds %d,%sfp,%sr31\n",
2055 frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
2056
2057 /* Restore the return address and the old frame pointer. */
2058
2059 if (must_preserve_r1) {
2060 fprintf (asm_file, "\tld.l 4(%sfp),%sr1\n",
2061 i860_reg_prefix, i860_reg_prefix);
2062 flags->iregs |= 2;
2063 }
2064
2065 fprintf (asm_file, "\tld.l 0(%sfp),%sfp\n",
2066 i860_reg_prefix, i860_reg_prefix);
2067
2068 /* Return and restore the old stack pointer value. */
2069
2070 fprintf (asm_file, "\tbri %sr1\n\tmov %sr31,%ssp\n",
2071 i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
2072
2073 #ifdef OUTPUT_TDESC /* Output an ABI-compliant TDESC entry */
2074 if (! frame_lower_bytes) {
2075 flags->version--;
2076 if (! frame_upper_bytes) {
2077 flags->version--;
2078 if (restored_so_far == int_restored) /* No FP saves */
2079 flags->version--;
2080 }
2081 }
2082 assemble_name(asm_file,current_function_original_name);
2083 fputs(".TDESC:\n", asm_file);
2084 fprintf(asm_file, "%s 0x%0x\n", ASM_LONG, intflags);
2085 fprintf(asm_file, "%s %d\n", ASM_LONG,
2086 int_restored ? must_preserve_bytes : 0);
2087 if (flags->version > 1) {
2088 fprintf(asm_file, "%s %d\n", ASM_LONG,
2089 (restored_so_far == int_restored) ? 0 : must_preserve_bytes +
2090 (4 * int_restored));
2091 if (flags->version > 2) {
2092 fprintf(asm_file, "%s %d\n", ASM_LONG, frame_upper_bytes);
2093 if (flags->version > 3)
2094 fprintf(asm_file, "%s %d\n", ASM_LONG, frame_lower_bytes);
2095 }
2096 }
2097 tdesc_section();
2098 fprintf(asm_file, "%s ", ASM_LONG);
2099 assemble_name(asm_file, current_function_original_name);
2100 fprintf(asm_file, "\n%s ", ASM_LONG);
2101 assemble_name(asm_file, current_function_original_name);
2102 fputs(".TDESC\n", asm_file);
2103 text_section();
2104 #endif
2105 }
2106 \f
2107
2108 /* Expand a library call to __builtin_saveregs. */
2109 rtx
2110 i860_saveregs ()
2111 {
2112 rtx fn = gen_rtx_SYMBOL_REF (Pmode, "__builtin_saveregs");
2113 rtx save = gen_reg_rtx (Pmode);
2114 rtx valreg = LIBCALL_VALUE (Pmode);
2115 rtx ret;
2116
2117 /* The return value register overlaps the first argument register.
2118 Save and restore it around the call. */
2119 emit_move_insn (save, valreg);
2120 ret = emit_library_call_value (fn, NULL_RTX, 1, Pmode, 0);
2121 if (GET_CODE (ret) != REG || REGNO (ret) < FIRST_PSEUDO_REGISTER)
2122 ret = copy_to_reg (ret);
2123 emit_move_insn (valreg, save);
2124
2125 return ret;
2126 }
2127
2128 tree
2129 i860_build_va_list ()
2130 {
2131 tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
2132 tree record;
2133
2134 record = make_node (RECORD_TYPE);
2135
2136 field_ireg_used = build_decl (FIELD_DECL, get_identifier ("__ireg_used"),
2137 unsigned_type_node);
2138 field_freg_used = build_decl (FIELD_DECL, get_identifier ("__freg_used"),
2139 unsigned_type_node);
2140 field_reg_base = build_decl (FIELD_DECL, get_identifier ("__reg_base"),
2141 ptr_type_node);
2142 field_mem_ptr = build_decl (FIELD_DECL, get_identifier ("__mem_ptr"),
2143 ptr_type_node);
2144
2145 DECL_FIELD_CONTEXT (field_ireg_used) = record;
2146 DECL_FIELD_CONTEXT (field_freg_used) = record;
2147 DECL_FIELD_CONTEXT (field_reg_base) = record;
2148 DECL_FIELD_CONTEXT (field_mem_ptr) = record;
2149
2150 #ifdef I860_SVR4_VA_LIST
2151 TYPE_FIELDS (record) = field_ireg_used;
2152 TREE_CHAIN (field_ireg_used) = field_freg_used;
2153 TREE_CHAIN (field_freg_used) = field_reg_base;
2154 TREE_CHAIN (field_reg_base) = field_mem_ptr;
2155 #else
2156 TYPE_FIELDS (record) = field_reg_base;
2157 TREE_CHAIN (field_reg_base) = field_mem_ptr;
2158 TREE_CHAIN (field_mem_ptr) = field_ireg_used;
2159 TREE_CHAIN (field_ireg_used) = field_freg_used;
2160 #endif
2161
2162 layout_type (record);
2163 return record;
2164 }
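/* For illustration, the record type built above corresponds roughly to
   the following C declaration (the typedef name is made up; the field
   names and the #ifdef-dependent ordering are the real ones):

        typedef struct {
        #ifdef I860_SVR4_VA_LIST
          unsigned __ireg_used;         how much of the int arg regs is used
          unsigned __freg_used;         how much of the FP arg regs is used
          void *__reg_base;             base of the register save area
          void *__mem_ptr;              next overflow (stack) argument
        #else
          void *__reg_base;
          void *__mem_ptr;
          unsigned __ireg_used;
          unsigned __freg_used;
        #endif
        } i860_va_list;  */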
2165
2166 void
2167 i860_va_start (stdarg_p, valist, nextarg)
2168 int stdarg_p;
2169 tree valist;
2170 rtx nextarg;
2171 {
2172 tree saveregs, t;
2173
2174 saveregs = make_tree (build_pointer_type (va_list_type_node),
2175 expand_builtin_saveregs ());
2176 saveregs = build1 (INDIRECT_REF, va_list_type_node, saveregs);
2177
2178 if (stdarg_p)
2179 {
2180 tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
2181 tree ireg_used, freg_used, reg_base, mem_ptr;
2182
2183 #ifdef I860_SVR4_VA_LIST
2184 field_ireg_used = TYPE_FIELDS (va_list_type_node);
2185 field_freg_used = TREE_CHAIN (field_ireg_used);
2186 field_reg_base = TREE_CHAIN (field_freg_used);
2187 field_mem_ptr = TREE_CHAIN (field_reg_base);
2188 #else
2189 field_reg_base = TYPE_FIELDS (va_list_type_node);
2190 field_mem_ptr = TREE_CHAIN (field_reg_base);
2191 field_ireg_used = TREE_CHAIN (field_mem_ptr);
2192 field_freg_used = TREE_CHAIN (field_ireg_used);
2193 #endif
2194
2195 ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
2196 valist, field_ireg_used);
2197 freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
2198 valist, field_freg_used);
2199 reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
2200 valist, field_reg_base);
2201 mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
2202 valist, field_mem_ptr);
2203
2204 t = build_int_2 (current_function_args_info.ints, 0);
2205 t = build (MODIFY_EXPR, TREE_TYPE (ireg_used), ireg_used, t);
2206 TREE_SIDE_EFFECTS (t) = 1;
2207 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2208
2209 t = build_int_2 (ROUNDUP (current_function_args_info.floats, 8), 0);
2210 t = build (MODIFY_EXPR, TREE_TYPE (freg_used), freg_used, t);
2211 TREE_SIDE_EFFECTS (t) = 1;
2212 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2213
2214 t = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
2215 saveregs, field_reg_base);
2216 t = build (MODIFY_EXPR, TREE_TYPE (reg_base), reg_base, t);
2217 TREE_SIDE_EFFECTS (t) = 1;
2218 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2219
2220 t = make_tree (ptr_type_node, nextarg);
2221 t = build (MODIFY_EXPR, TREE_TYPE (mem_ptr), mem_ptr, t);
2222 TREE_SIDE_EFFECTS (t) = 1;
2223 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2224 }
2225 else
2226 {
2227 t = build (MODIFY_EXPR, va_list_type_node, valist, saveregs);
2228 TREE_SIDE_EFFECTS (t) = 1;
2229 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2230 }
2231 }
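/* In rough C terms (using the illustrative i860_va_list layout sketched
   following i860_build_va_list), the stdarg case above amounts to:

        valist.__ireg_used = current_function_args_info.ints;
        valist.__freg_used = ROUNDUP (current_function_args_info.floats, 8);
        valist.__reg_base  = __builtin_saveregs ()->__reg_base;
        valist.__mem_ptr   = nextarg;

   while the non-stdarg (varargs.h) case simply copies the whole record
   returned by __builtin_saveregs into *valist.  */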
2232
2233 #define NUM_PARM_FREGS 8
2234 #define NUM_PARM_IREGS 12
2235 #ifdef I860_SVR4_VARARGS
2236 #define FREG_OFFSET 0
2237 #define IREG_OFFSET (NUM_PARM_FREGS * UNITS_PER_WORD)
2238 #else
2239 #define FREG_OFFSET (NUM_PARM_IREGS * UNITS_PER_WORD)
2240 #define IREG_OFFSET 0
2241 #endif
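/* The offsets above describe the layout of the register save area that
   __reg_base points at (a sketch; block sizes are in words):

        with I860_SVR4_VARARGS:   [ 8 FP arg reg words ][ 12 int arg reg words ]
        otherwise:                [ 12 int arg reg words ][ 8 FP arg reg words ]

   FREG_OFFSET and IREG_OFFSET are the byte offsets of the FP and integer
   blocks from __reg_base.  */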
2242
2243 rtx
2244 i860_va_arg (valist, type)
2245 tree valist, type;
2246 {
2247 tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
2248 tree type_ptr_node, t;
2249 rtx lab_over = NULL_RTX;
2250 rtx ret, val;
2251 HOST_WIDE_INT align;
2252
2253 #ifdef I860_SVR4_VA_LIST
2254 field_ireg_used = TYPE_FIELDS (va_list_type_node);
2255 field_freg_used = TREE_CHAIN (field_ireg_used);
2256 field_reg_base = TREE_CHAIN (field_freg_used);
2257 field_mem_ptr = TREE_CHAIN (field_reg_base);
2258 #else
2259 field_reg_base = TYPE_FIELDS (va_list_type_node);
2260 field_mem_ptr = TREE_CHAIN (field_reg_base);
2261 field_ireg_used = TREE_CHAIN (field_mem_ptr);
2262 field_freg_used = TREE_CHAIN (field_ireg_used);
2263 #endif
2264
2265 field_ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
2266 valist, field_ireg_used);
2267 field_freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
2268 valist, field_freg_used);
2269 field_reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
2270 valist, field_reg_base);
2271 field_mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
2272 valist, field_mem_ptr);
2273
2274 ret = gen_reg_rtx (Pmode);
2275 type_ptr_node = build_pointer_type (type);
2276
2277 if (! AGGREGATE_TYPE_P (type))
2278 {
2279 int nparm, incr, ofs;
2280 tree field;
2281 rtx lab_false;
2282
2283 if (FLOAT_TYPE_P (type))
2284 {
2285 field = field_freg_used;
2286 nparm = NUM_PARM_FREGS;
2287 incr = 2;
2288 ofs = FREG_OFFSET;
2289 }
2290 else
2291 {
2292 field = field_ireg_used;
2293 nparm = NUM_PARM_IREGS;
2294 incr = int_size_in_bytes (type) / UNITS_PER_WORD;
2295 ofs = IREG_OFFSET;
2296 }
2297
2298 lab_false = gen_label_rtx ();
2299 lab_over = gen_label_rtx ();
2300
2301 emit_cmp_and_jump_insns (expand_expr (field, NULL_RTX, 0, 0),
2302 GEN_INT (nparm - incr), GT, const0_rtx,
2303 TYPE_MODE (TREE_TYPE (field)),
2304 TREE_UNSIGNED (field), 0, lab_false);
2305
2306 t = fold (build (POSTINCREMENT_EXPR, TREE_TYPE (field), field,
2307 build_int_2 (incr, 0)));
2308 TREE_SIDE_EFFECTS (t) = 1;
2309
2310          t = fold (build (MULT_EXPR, TREE_TYPE (field), t,
2311 build_int_2 (UNITS_PER_WORD, 0)));
2312 TREE_SIDE_EFFECTS (t) = 1;
2313
2314 t = fold (build (PLUS_EXPR, ptr_type_node, field_reg_base,
2315 fold (build (PLUS_EXPR, TREE_TYPE (field), t,
2316 build_int_2 (ofs, 0)))));
2317 TREE_SIDE_EFFECTS (t) = 1;
2318
2319 val = expand_expr (t, ret, VOIDmode, EXPAND_NORMAL);
2320 if (val != ret)
2321 emit_move_insn (ret, val);
2322          emit_queue ();	/* flush the queued post-increment */
2323 emit_jump_insn (gen_jump (lab_over));
2324 emit_barrier ();
2325 emit_label (lab_false);
2326 }
2327
2328 align = TYPE_ALIGN (type);
2329 if (align < BITS_PER_WORD)
2330 align = BITS_PER_WORD;
2331 align /= BITS_PER_UNIT;
2332
2333 t = build (PLUS_EXPR, ptr_type_node, field_mem_ptr,
2334 build_int_2 (align - 1, 0));
2335 t = build (BIT_AND_EXPR, ptr_type_node, t, build_int_2 (-align, -1));
2336
2337 val = expand_expr (t, ret, VOIDmode, EXPAND_NORMAL);
2338 if (val != ret)
2339 emit_move_insn (ret, val);
2340
2341 t = fold (build (PLUS_EXPR, ptr_type_node,
2342 make_tree (ptr_type_node, ret),
2343 build_int_2 (int_size_in_bytes (type), 0)));
2344 t = build (MODIFY_EXPR, ptr_type_node, field_mem_ptr, t);
2345 TREE_SIDE_EFFECTS (t) = 1;
2346 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2347
2348 if (lab_over)
2349 emit_label (lab_over);
2350
2351 return ret;
2352 }
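/* In rough C terms, the RTL generated above for a non-aggregate TYPE
   behaves like the following sketch (illustrative only; field names
   follow the i860_va_list sketch following i860_build_va_list):

        used = FLOAT_TYPE_P (type) ? &ap->__freg_used : &ap->__ireg_used;
        if (*used <= nparm - incr) {
            addr = ap->__reg_base + ofs + *used * UNITS_PER_WORD;
            *used += incr;
        } else {
            addr = (ap->__mem_ptr + align - 1) & -align;
            ap->__mem_ptr = addr + sizeof (type);
        }
        return addr;

   Aggregates skip the register path and always come from __mem_ptr.  */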