1 /* Subroutines for insn-output.c for Tahoe.
2 Copyright (C) 1989, 1991, 1997 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
26 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
33 #include "insn-attr.h"
36 * File: output-tahoe.c
38 * Original port made at the University of Buffalo by Devon Bowen,
39 * Dale Wiles and Kevin Zachmann.
41 * Changes for HCX by Piet van Oostrum,
42 * University of Utrecht, The Netherlands (piet@cs.ruu.nl)
44 * Speed tweaks by Michael Tiemann (tiemann@lurch.stanford.edu).
46 * Mail bugs reports or fixes to: gcc@cs.buffalo.edu
50 /* On tahoe, you have to go to memory to convert a register
51 from sub-word to word. */
/* Stack slot used for that sub-word -> word conversion.  It starts out
   null and is lazily allocated (see extensible_operand, which calls
   assign_stack_local the first time a REG/SUBREG operand is seen).  */
53 rtx tahoe_reg_conversion_loc
;
56 extensible_operand (op
, mode
)
58 enum machine_mode mode
;
60 if ((GET_CODE (op
) == REG
61 || (GET_CODE (op
) == SUBREG
62 && GET_CODE (SUBREG_REG (op
)) == REG
))
63 && tahoe_reg_conversion_loc
== 0)
64 tahoe_reg_conversion_loc
= assign_stack_local (SImode
, GET_MODE_SIZE (SImode
));
65 return general_operand (op
, mode
);
68 /* most of the print_operand_address function was taken from the vax */
69 /* since the modes are basically the same. I had to add a special case, */
70 /* though, for symbol references with offsets. */
/* NOTE(review): this extract has lost a number of original lines of
   print_operand_address (the parameter declarations, braces, and the
   switch's case labels among them), so the surviving fragments are
   annotated in place rather than reformatted.  */
72 print_operand_address (file
, addr
)
/* Scratch pieces of the address being decomposed: reg1/reg2 hold
   candidate base/index terms, breg the chosen base register, ireg the
   chosen index register.  */
76 register rtx reg1
, reg2
, breg
, ireg
;
78 static char *reg_name
[] = REGISTER_NAMES
;
/* Dispatch on the outermost rtx code of the address.  The case labels
   (presumably REG, PRE_DEC, POST_INC, MEM, PLUS, ...) are missing from
   this extract.  */
81 switch (GET_CODE (addr
))
/* MEM case: indirect through the contained address.  */
85 addr
= XEXP (addr
, 0);
/* Plain register: "(rN)".  */
89 fprintf (file
, "(%s)", reg_name
[REGNO (addr
)]);
/* Pre-decrement: "-(rN)".  */
93 fprintf (file
, "-(%s)", reg_name
[REGNO (XEXP (addr
, 0))]);
/* Post-increment: "(rN)+".  */
97 fprintf (file
, "(%s)+", reg_name
[REGNO (XEXP (addr
, 0))]);
/* PLUS case, special forms first: a symbolic address plus a constant
   integer (either operand order) is printed as a single constant.  */
105 if (CONSTANT_ADDRESS_P (XEXP (addr
, 0))
106 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
107 output_addr_const (file
, addr
);
109 if (CONSTANT_ADDRESS_P (XEXP (addr
, 1))
110 && GET_CODE (XEXP (addr
, 0)) == CONST_INT
)
111 output_addr_const (file
, addr
);
/* Otherwise split the sum: peel off a constant or MEM term as the
   displacement ("offset"), keeping the rest in addr.  */
113 if (CONSTANT_ADDRESS_P (XEXP (addr
, 0))
114 || GET_CODE (XEXP (addr
, 0)) == MEM
)
116 offset
= XEXP (addr
, 0);
117 addr
= XEXP (addr
, 1);
119 else if (CONSTANT_ADDRESS_P (XEXP (addr
, 1))
120 || GET_CODE (XEXP (addr
, 1)) == MEM
)
122 offset
= XEXP (addr
, 1);
123 addr
= XEXP (addr
, 0);
/* If what remains is no longer a PLUS there is at most one more term;
   otherwise pull one register or scaled (MULT) term into reg1.  */
125 if (GET_CODE (addr
) != PLUS
)
127 else if (GET_CODE (XEXP (addr
, 0)) == MULT
)
129 reg1
= XEXP (addr
, 0);
130 addr
= XEXP (addr
, 1);
132 else if (GET_CODE (XEXP (addr
, 1)) == MULT
)
134 reg1
= XEXP (addr
, 1);
135 addr
= XEXP (addr
, 0);
137 else if (GET_CODE (XEXP (addr
, 0)) == REG
)
139 reg1
= XEXP (addr
, 0);
140 addr
= XEXP (addr
, 1);
142 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
144 reg1
= XEXP (addr
, 1);
145 addr
= XEXP (addr
, 0);
/* The leftover term must itself be a register or a scaled index
   (handling code missing from this extract); anything else left in
   addr at this point is a malformed address.  */
147 if (GET_CODE (addr
) == REG
|| GET_CODE (addr
) == MULT
)
157 if (addr
!= 0) abort ();
/* Decide which collected term is the index (a MULT is always an
   index) and which the base, then print displacement, "(base)" and
   "[index]" parts in assembler syntax.  */
160 if (reg1
!= 0 && GET_CODE (reg1
) == MULT
)
165 else if (reg2
!= 0 && GET_CODE (reg2
) == MULT
)
170 else if (reg2
!= 0 || GET_CODE (addr
) == MEM
)
181 output_address (offset
);
184 if (GET_CODE (breg
) != REG
)
186 fprintf (file
, "(%s)", reg_name
[REGNO (breg
)]);
/* An index wrapped in a MULT: print only the register part; the scale
   is implied by the operand size in tahoe indexed syntax.  */
190 if (GET_CODE (ireg
) == MULT
)
191 ireg
= XEXP (ireg
, 0);
192 if (GET_CODE (ireg
) != REG
)
194 fprintf (file
, "[%s]", reg_name
[REGNO (ireg
)]);
/* Default case: any other address is assumed constant and printed
   symbolically.  */
199 output_addr_const (file
, addr
);
203 /* Do a quick check and find out what the best way to do the */
204 /* mini-move is. Could be a push or a move..... */
/* NOTE(review): most of singlemove_string's body is missing from this
   extract -- only the two tests below survive; the template strings it
   returns ("movl"/"clrl"/"pushl" variants, presumably) are gone.
   TODO confirm against the full source.  */
207 singlemove_string (operands
)
/* Moving a zero source: a clear instruction is cheaper than a move.  */
210 if (operands
[1] == const0_rtx
)
/* A push destination selects the push form of the instruction.  */
212 if (push_operand (operands
[0], SImode
))
217 /* given the rtx for an address, return true if the given */
218 /* register number is used in the address somewhere. */
220 regisused(addr
,regnum
)
224 if (GET_CODE(addr
) == REG
)
225 if (REGNO(addr
) == regnum
)
230 if (GET_CODE(addr
) == MEM
)
231 return regisused(XEXP(addr
,0),regnum
);
233 if ((GET_CODE(addr
) == MULT
) || (GET_CODE(addr
) == PLUS
))
234 return ((regisused(XEXP(addr
,0),regnum
)) ||
235 (regisused(XEXP(addr
,1),regnum
)));
241 /* Given some rtx, traverse it and return the register used in a */
242 /* index. If no index is found, return 0. */
/* NOTE(review): the header of index_reg (return type, parameter and
   local declarations, opening brace) is missing from this extract;
   what follows is its body, taking an rtx "addr".  */
/* A memory reference: search inside its contained address.  */
250 if (GET_CODE(addr
) == MEM
)
251 return index_reg(XEXP(addr
,0));
/* A scaled term (reg * scale): the register operand is the index.
   (The return statement for this branch is missing from the extract.)  */
253 if (GET_CODE(addr
) == MULT
)
254 if (GET_CODE(XEXP(addr
,0)) == REG
)
/* A sum: search the first arm, and only if that yields nothing fall
   back to the second arm.  */
259 if (GET_CODE(addr
) == PLUS
)
260 if (temp
= index_reg(XEXP(addr
,0)))
263 return index_reg(XEXP(addr
,1));
269 /* simulate the move double by generating two movl's. You have */
270 /* to be careful about mixing modes here. */
/* NOTE(review): a large number of original lines of output_move_double
   (braces, switch statements, case bodies, abort calls, the classification
   assignments and several output_asm_insn calls) are missing from this
   extract; the surviving fragments are annotated in place.  Emits two
   32-bit "movl"s for a 64-bit move described by operands[0] (dest) and
   operands[1] (src), returning/printing the assembler text.  */
273 output_move_double (operands
)
/* Classification codes for each operand's addressing form: plain
   register, offsettable memory, register-indirect memory, push,
   pop, indexed, constant, or unrecognized.  */
276 enum { REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, INDOP
, CNSTOP
, RNDOP
}
/* shftreg0/1: index registers that must be temporarily doubled for the
   high-word access of an indexed operand; temp0/1: scratch rtx during
   INDOP analysis; addreg0/1: pointer registers that must be bumped by
   4 between the two moves.  */
279 rtx shftreg0
= 0, shftreg1
= 0;
280 rtx temp0
= 0, temp1
= 0;
281 rtx addreg0
= 0, addreg1
= 0;
284 /* First classify both operands. */
286 if (REG_P (operands
[0]))
288 else if ((GET_CODE(operands
[0])==MEM
) && (shftreg0
=index_reg(operands
[0])))
290 else if (offsettable_memref_p (operands
[0]))
292 else if (GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
) {
295 } else if (GET_CODE (operands
[0]) == MEM
)
/* Same classification for the source operand, with POST_INC (pop)
   taking the place of PRE_DEC, plus a constant case.  */
300 if (REG_P (operands
[1]))
302 else if ((GET_CODE(operands
[1])==MEM
) && (shftreg1
=index_reg(operands
[1])))
304 else if (offsettable_memref_p (operands
[1]))
306 else if (GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
308 else if (GET_CODE (operands
[1]) == MEM
)
310 else if (CONSTANT_P (operands
[1]))
315 /* set up for the high byte move for operand zero */
319 /* if it's a register, just use the next highest in the */
320 /* high address move. */
322 case REGOP
: latehalf
[0] = gen_rtx (REG
,SImode
,REGNO(operands
[0])+1);
325 /* for an offsettable address, use the gcc function to */
326 /* modify the operand to get an offset of 4 higher for */
327 /* the second move. */
329 case OFFSOP
: latehalf
[0] = adj_offsettable_operand (operands
[0], 4);
332 /* if the operand is MEMOP type, it must be a pointer */
333 /* to a pointer. So just remember to increase the mem */
334 /* location and use the same operand. */
336 case MEMOP
: latehalf
[0] = operands
[0];
337 addreg0
= XEXP(operands
[0],0);
340 /* if we're dealing with a push instruction, just leave */
341 /* the operand alone since it auto-increments. */
343 case PUSHOP
: latehalf
[0] = operands
[0];
346 /* YUCK! Indexed addressing!! If the address is considered */
347 /* offsettable, go use the offset in the high part. Otherwise */
348 /* find what exactly is being added to the multiplication. If */
349 /* it's a mem reference, increment that with the high part */
350 /* being unchanged to cause the shift. If it's a reg, do the */
351 /* same. If you can't identify it, abort. Remember that the */
352 /* shift register was already set during identification. */
354 case INDOP
: if (offsettable_memref_p(operands
[0])) {
355 latehalf
[0] = adj_offsettable_operand(operands
[0],4);
359 latehalf
[0] = operands
[0];
/* Find the non-MULT arm of the indexed PLUS: that is the part that
   gets bumped (addreg) for the high-word access.  */
361 temp0
= XEXP(XEXP(operands
[0],0),0);
362 if (GET_CODE(temp0
) == MULT
) {
364 temp0
= XEXP(XEXP(operands
[0],0),1);
366 temp1
= XEXP(XEXP(operands
[0],0),1);
367 if (GET_CODE(temp1
) != MULT
)
371 if (GET_CODE(temp0
) == MEM
)
373 else if (GET_CODE(temp0
) == REG
)
380 /* if we don't know the operand type, print a friendly */
381 /* little error message... 8-) */
387 /* do the same setup for operand one */
391 case REGOP
: latehalf
[1] = gen_rtx(REG
,SImode
,REGNO(operands
[1])+1);
394 case OFFSOP
: latehalf
[1] = adj_offsettable_operand (operands
[1], 4);
397 case MEMOP
: latehalf
[1] = operands
[1];
398 addreg1
= XEXP(operands
[1],0);
401 case POPOP
: latehalf
[1] = operands
[1];
404 case INDOP
: if (offsettable_memref_p(operands
[1])) {
405 latehalf
[1] = adj_offsettable_operand(operands
[1],4);
409 latehalf
[1] = operands
[1];
411 temp0
= XEXP(XEXP(operands
[1],0),0);
412 if (GET_CODE(temp0
) == MULT
) {
414 temp0
= XEXP(XEXP(operands
[1],0),1);
416 temp1
= XEXP(XEXP(operands
[1],0),1);
417 if (GET_CODE(temp1
) != MULT
)
421 if (GET_CODE(temp0
) == MEM
)
423 else if (GET_CODE(temp0
) == REG
)
/* Constant source: split a CONST_DOUBLE into its two word halves;
   any other constant fitting in one word gets a zero high half.  */
431 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
432 split_double (operands
[1], &operands
[1], &latehalf
[1]);
433 else if (CONSTANT_P (operands
[1]))
434 latehalf
[1] = const0_rtx
;
443 /* double the register used for shifting in both of the operands */
444 /* but make sure the same register isn't doubled twice! */
446 if (shftreg0
&& shftreg1
&& (rtx_equal_p(shftreg0
,shftreg1
)))
447 output_asm_insn("addl2 %0,%0", &shftreg0
);
450 output_asm_insn("addl2 %0,%0", &shftreg0
);
452 output_asm_insn("addl2 %0,%0", &shftreg1
);
455 /* if the destination is a register and that register is needed in */
456 /* the source addressing mode, swap the order of the moves since we */
457 /* don't want this destroyed til last. If both regs are used, not */
458 /* much we can do, so abort. If these becomes a problem, maybe we */
459 /* can do it on the stack? */
461 if (GET_CODE(operands
[0])==REG
&& regisused(operands
[1],REGNO(operands
[0])))
462 if (regisused(latehalf
[1],REGNO(latehalf
[0])))
467 /* if we're pushing, do the high address part first. */
/* High-half first: bump the shared (or separate) pointer registers by
   4, emit the high-word move, then restore them before the low move.  */
471 if (addreg0
&& addreg1
&& (rtx_equal_p(addreg0
,addreg1
)))
472 output_asm_insn("addl2 $4,%0", &addreg0
);
475 output_asm_insn("addl2 $4,%0", &addreg0
);
477 output_asm_insn("addl2 $4,%0", &addreg1
);
480 output_asm_insn(singlemove_string(latehalf
), latehalf
);
482 if (addreg0
&& addreg1
&& (rtx_equal_p(addreg0
,addreg1
)))
483 output_asm_insn("subl2 $4,%0", &addreg0
);
486 output_asm_insn("subl2 $4,%0", &addreg0
);
488 output_asm_insn("subl2 $4,%0", &addreg1
);
/* The remaining low-half move is returned as this insn's template.  */
491 return singlemove_string(operands
);
/* Normal order: low half first, then bump pointers, move the high
   half, and restore the pointers.  */
494 output_asm_insn(singlemove_string(operands
), operands
);
496 if (addreg0
&& addreg1
&& (rtx_equal_p(addreg0
,addreg1
)))
497 output_asm_insn("addl2 $4,%0", &addreg0
);
500 output_asm_insn("addl2 $4,%0", &addreg0
);
502 output_asm_insn("addl2 $4,%0", &addreg1
);
505 output_asm_insn(singlemove_string(latehalf
), latehalf
);
507 if (addreg0
&& addreg1
&& (rtx_equal_p(addreg0
,addreg1
)))
508 output_asm_insn("subl2 $4,%0", &addreg0
);
511 output_asm_insn("subl2 $4,%0", &addreg0
);
513 output_asm_insn("subl2 $4,%0", &addreg1
);
/* Undo the earlier doubling of the index registers by shifting them
   right one bit ("shar" is an arithmetic right shift).  */
516 if (shftreg0
&& shftreg1
&& (rtx_equal_p(shftreg0
,shftreg1
)))
517 output_asm_insn("shar $1,%0,%0", &shftreg0
);
520 output_asm_insn("shar $1,%0,%0", &shftreg0
);
522 output_asm_insn("shar $1,%0,%0", &shftreg1
);
529 /* This checks if a zero_extended cmp[bw] can be replaced by a sign_extended
530 cmp[bw]. This can be done if the operand is a constant that fits in a
531 byte/word or a memory operand. Besides that the next instruction must be an
532 unsigned compare. Some of these tests are done by the machine description */
535 tahoe_cmp_check (insn
, op
, max
)
536 rtx insn
, op
; int max
;
538 if (GET_CODE (op
) == CONST_INT
539 && ( INTVAL (op
) < 0 || INTVAL (op
) > max
))
542 register rtx next
= NEXT_INSN (insn
);
544 if ((GET_CODE (next
) == JUMP_INSN
545 || GET_CODE (next
) == INSN
546 || GET_CODE (next
) == CALL_INSN
))
548 next
= PATTERN (next
);
549 if (GET_CODE (next
) == SET
550 && SET_DEST (next
) == pc_rtx
551 && GET_CODE (SET_SRC (next
)) == IF_THEN_ELSE
)
552 switch (GET_CODE (XEXP (SET_SRC (next
), 0)))