/* Subroutines for insn-output.c for Pyramid 90x, 9000, and MIServer Series.
   Copyright (C) 1989, 1991, 1997, 1998, 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Some output-actions in pyr.md need these.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "output.h"
#include "insn-attr.h"
#include "tree.h"
#include "function.h"

/* Do FUNCTION_ARG.
   This cannot be defined as a macro on Pyramids, because Pyramid Technology's
   C compiler dies on (several equivalent definitions of) this macro.
   The only way around this cc bug was to make this a function.
   While it would be possible to use a macro version for gcc, it seems
   more reliable to have a single version of the code.  */

void *
pyr_function_arg (cum, mode, type, named)
     CUMULATIVE_ARGS cum;
     enum machine_mode mode;
     tree type;
     int named;
{
  return (void *) (FUNCTION_ARG_HELPER (cum, mode, type, named));
}
\f
/* Do the hard part of PARAM_SAFE_FOR_REG_P.
   This cannot be defined as a macro on Pyramids, because Pyramid Technology's
   C compiler dies on (several equivalent definitions of) this macro.
   The only way around this cc bug was to make this a function.  */

int
inner_param_safe_helper (type)
     tree type;
{
  return (INNER_PARAM_SAFE_HELPER (type));
}
\f

/* Return 1 if OP is a non-indexed operand of mode MODE.
   This is either a register reference, a memory reference,
   or a constant.  In the case of a memory reference, the address
   is checked to make sure it isn't indexed.

   Register and memory references must have mode MODE in order to be valid,
   but some constants have no machine mode and are valid for any mode.

   If MODE is VOIDmode, OP is checked for validity for whatever mode
   it has.

   The main use of this function is as a predicate in match_operand
   expressions in the machine description.

   It is useful to compare this with general_operand ().  They should
   be identical except for one line.

   This function seems necessary because of the non-orthogonality of
   Pyramid insns.
   For any 2-operand insn, and any combination of operand modes,
   if indexing is valid for the insn's second operand, it is invalid
   for the first operand to be indexed.  */

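/* Non-zero means volatile memory references are acceptable as operands;
   defined in recog.c.  */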
extern int volatile_ok;

int
nonindexed_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  register RTX_CODE code = GET_CODE (op);
  int mode_altering_drug = 0;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  /* Don't accept CONST_INT or anything similar
     if the caller wants something floating.  */
  if (GET_MODE (op) == VOIDmode && mode != VOIDmode
      && GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  if (CONSTANT_P (op))
    return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode)
            && LEGITIMATE_CONSTANT_P (op));

  /* Except for certain constants with VOIDmode, already checked for,
     OP's mode must match MODE if MODE specifies a mode.  */

  if (GET_MODE (op) != mode)
    return 0;

  while (code == SUBREG)
    {
      op = SUBREG_REG (op);
      code = GET_CODE (op);
#if 0
      /* No longer needed, since (SUBREG (MEM...))
         will load the MEM into a reload reg in the MEM's own mode.  */
      mode_altering_drug = 1;
#endif
    }
  if (code == REG)
    return 1;
  if (code == CONST_DOUBLE)
    return LEGITIMATE_CONSTANT_P (op);
  if (code == MEM)
    {
      register rtx y = XEXP (op, 0);

      if (! volatile_ok && MEM_VOLATILE_P (op))
        return 0;
      GO_IF_NONINDEXED_ADDRESS (y, win);
    }
  return 0;

 win:
  if (mode_altering_drug)
    return ! mode_dependent_address_p (XEXP (op, 0));
  return 1;
}

/* Return non-zero if the rtx OP has an immediate component.  An
   immediate component or additive term equal to zero is rejected
   due to assembler problems.  */

int
has_direct_base (op)
     rtx op;
{
  if ((CONSTANT_ADDRESS_P (op)
       && op != const0_rtx)
      || (GET_CODE (op) == PLUS
          && ((CONSTANT_ADDRESS_P (XEXP (op, 1))
               && XEXP (op, 1) != const0_rtx)
              || (CONSTANT_ADDRESS_P (XEXP (op, 0))
                  && XEXP (op, 0) != const0_rtx))))
    return 1;

  return 0;
}

/* Return non-zero if the rtx OP has a (scaled) index.  */

int
has_index (op)
     rtx op;
{
  if (GET_CODE (op) == PLUS
      && (GET_CODE (XEXP (op, 0)) == MULT
          || GET_CODE (XEXP (op, 1)) == MULT))
    return 1;
  else
    return 0;
}

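/* Set by weird_memory_memory and movdi_possible when the operands they
   were given must be swapped to form a valid insn.  */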
int swap_operands;

/* weird_memory_memory -- return 1 if OP0 and OP1 can be compared (or
   exchanged with xchw) with one instruction.  If the operands need to
   be swapped, set the global variable SWAP_OPERANDS.  This function
   silently assumes that both OP0 and OP1 are valid memory references.  */

int
weird_memory_memory (op0, op1)
     rtx op0, op1;
{
  RTX_CODE code0, code1;

  op0 = XEXP (op0, 0);
  op1 = XEXP (op1, 0);
  code0 = GET_CODE (op0);
  code1 = GET_CODE (op1);

  swap_operands = 0;

  if (code1 == REG || code1 == SUBREG)
    return 1;

  if (code0 == REG || code0 == SUBREG)
    {
      swap_operands = 1;
      return 1;
    }
  if (has_direct_base (op0) && has_direct_base (op1))
    {
      if (has_index (op1))
        {
          if (has_index (op0))
            return 0;
          swap_operands = 1;
        }

      return 1;
    }
  return 0;
}

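/* Return non-zero if X is a signed (rather than unsigned) comparison
   operator.  MODE is unused.  */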
int
signed_comparison (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return ! TRULY_UNSIGNED_COMPARE_P (GET_CODE (x));
}

extern rtx force_reg ();
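
/* The operands and mode of a deferred compare insn, emitted later by
   extend_and_branch.  */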
rtx test_op0, test_op1;
enum machine_mode test_mode;

/* Sign-extend or zero-extend constant X from FROM_MODE to TO_MODE.  */

rtx
extend_const (x, extop, from_mode, to_mode)
     rtx x;
     RTX_CODE extop;
     enum machine_mode from_mode, to_mode;
{
  int val;
  int negative;

  if (from_mode == to_mode)
    return x;
  if (GET_CODE (x) != CONST_INT)
    abort ();
  val = INTVAL (x);
  negative = val & (1 << (GET_MODE_BITSIZE (from_mode) - 1));
  if (GET_MODE_BITSIZE (from_mode) == HOST_BITS_PER_INT)
    abort ();
  if (negative && extop == SIGN_EXTEND)
    val = val | ((-1) << GET_MODE_BITSIZE (from_mode));
  else
    val = val & ~((-1) << GET_MODE_BITSIZE (from_mode));
  if (GET_MODE_BITSIZE (to_mode) == HOST_BITS_PER_INT)
    return GEN_INT (val);

  return GEN_INT (val & ~((-1) << GET_MODE_BITSIZE (to_mode)));
}

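/* Return OP extended from FROM_MODE to SImode: at compile time if OP is
   a CONST_INT, otherwise by forcing an EXTOP rtx into a register.  */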
rtx
ensure_extended (op, extop, from_mode)
     rtx op;
     RTX_CODE extop;
     enum machine_mode from_mode;
{
  if (GET_CODE (op) == CONST_INT)
    return extend_const (op, extop, from_mode, SImode);
  else
    return force_reg (SImode, gen_rtx (extop, SImode, op));
}

/* Emit rtl for a branch, as well as any delayed (integer) compare insns.
   The compare insn to perform is determined by the global variables
   test_op0 and test_op1.  */

void
extend_and_branch (extop)
     RTX_CODE extop;
{
  rtx op0, op1;
  RTX_CODE code0, code1;

  op0 = test_op0, op1 = test_op1;
  if (op0 == 0)
    return;

  code0 = GET_CODE (op0);
  if (op1 != 0)
    code1 = GET_CODE (op1);
  test_op0 = test_op1 = 0;

  if (op1 == 0)
    {
      op0 = ensure_extended (op0, extop, test_mode);
      emit_insn (gen_rtx_SET (VOIDmode, cc0_rtx, op0));
    }
  else
    {
      if (CONSTANT_P (op0) && CONSTANT_P (op1))
        {
          op0 = ensure_extended (op0, extop, test_mode);
          op1 = ensure_extended (op1, extop, test_mode);
        }
      else if (extop == ZERO_EXTEND && test_mode == HImode)
        {
          /* Pyramids have no unsigned "cmphi" instructions.  We need to
             zero extend unsigned halfwords into temporary registers.  */
          op0 = ensure_extended (op0, extop, test_mode);
          op1 = ensure_extended (op1, extop, test_mode);
        }
      else if (CONSTANT_P (op0))
        {
          op0 = ensure_extended (op0, extop, test_mode);
          op1 = ensure_extended (op1, extop, test_mode);
        }
      else if (CONSTANT_P (op1))
        {
          op1 = ensure_extended (op1, extop, test_mode);
          op0 = ensure_extended (op0, extop, test_mode);
        }
      else if ((code0 == REG || code0 == SUBREG)
               && (code1 == REG || code1 == SUBREG))
        {
          /* I could do this case without extension, by using the virtual
             register address (but that would lose for global regs).  */
          op0 = ensure_extended (op0, extop, test_mode);
          op1 = ensure_extended (op1, extop, test_mode);
        }
      else if (code0 == MEM && code1 == MEM)
        {
          /* Load into a reg if the address combination can't be handled
             directly.  */
          if (! weird_memory_memory (op0, op1))
            op0 = force_reg (test_mode, op0);
        }

      emit_insn (gen_rtx_SET (VOIDmode, cc0_rtx,
                              gen_rtx_COMPARE (VOIDmode, op0, op1)));
    }
}

/* Return non-zero if the two single-word moves with operands[0]
   and operands[1] for the first single-word move, and operands[2]
   and operands[3] for the second single-word move, can be combined
   into a single double-word move.

   The criterion is whether the operands are in consecutive memory cells,
   registers, etc.  */

int
movdi_possible (operands)
     rtx operands[];
{
  int cnst_diff0, cnst_diff1;
  RTX_CODE code0 = GET_CODE (operands[0]);
  RTX_CODE code1 = GET_CODE (operands[1]);

  /* Don't dare to combine (possibly overlapping) memory -> memory moves.  */
  /* It would be possible to detect the cases where we dare, by using
     constant_diff (operands[0], operands[1])!!!  */
  if (code0 == MEM && code1 == MEM)
    return 0;

  cnst_diff0 = consecutive_operands (operands[0], operands[2]);
  if (cnst_diff0 == 0)
    return 0;

  cnst_diff1 = consecutive_operands (operands[1], operands[3]);
  if (cnst_diff1 == 0)
    return 0;

  if (cnst_diff0 & cnst_diff1)
    {
      /* The source and destination operands are consecutive.  */

      /* If the first move writes into the source of the second move,
         we cannot combine.  */
      if ((code0 == REG
           && reg_overlap_mentioned_p (operands[0], operands[3]))
          || (code0 == SUBREG
              && subreg_overlap_mentioned_p (operands[0], operands[3])))
        return 0;

      if (cnst_diff0 & 1)
        /* operands[0],[1] have higher addresses than operands[2],[3].  */
        swap_operands = 0;
      else
        /* operands[0],[1] have lower addresses than operands[2],[3].  */
        swap_operands = 1;
      return 1;
    }
  return 0;
}

/* Like reg_overlap_mentioned_p, but accepts a subreg rtx instead
   of a reg.  */

int
subreg_overlap_mentioned_p (subreg, x)
     rtx subreg, x;
{
  rtx reg = SUBREG_REG (subreg);
  int regno = REGNO (reg) + SUBREG_WORD (subreg);
  int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (subreg));

  return refers_to_regno_p (regno, endregno, x, 0);
}

/* Return 1 if OP0 is a consecutive operand to OP1, 2 if OP1 is a
   consecutive operand to OP0, and 3 if either ordering is valid
   (the return value is used as a bit mask by movdi_possible).

   This function is used to determine if addresses are consecutive,
   and therefore possible to combine to fewer instructions.  */

int
consecutive_operands (op0, op1)
     rtx op0, op1;
{
  RTX_CODE code0, code1;
  int cnst_diff;
  int regno_off0, regno_off1;

  code0 = GET_CODE (op0);
  code1 = GET_CODE (op1);

  regno_off0 = 0;
  if (code0 == SUBREG)
    {
      if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))) <= UNITS_PER_WORD)
        return 0;
      regno_off0 = SUBREG_WORD (op0);
      op0 = SUBREG_REG (op0);
      code0 = REG;
    }

  regno_off1 = 0;
  if (code1 == SUBREG)
    {
      if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))) <= UNITS_PER_WORD)
        return 0;
      regno_off1 = SUBREG_WORD (op1);
      op1 = SUBREG_REG (op1);
      code1 = REG;
    }

  if (code0 != code1)
    return 0;

  switch (code0)
    {
    case CONST_INT:
      /* Cannot permit any symbolic constants, even if the consecutive
         operand is 0, since a movl really performs sign extension.  */
      if (code1 != CONST_INT)
        return 0;
      if ((INTVAL (op0) == 0 && INTVAL (op1) == 0)
          || (INTVAL (op0) == -1 && INTVAL (op1) == -1))
        return 3;
      if ((INTVAL (op0) == 0 && INTVAL (op1) > 0)
          || (INTVAL (op0) == -1 && INTVAL (op1) < 0))
        return 2;
      if ((INTVAL (op1) == 0 && INTVAL (op0) > 0)
          || (INTVAL (op1) == -1 && INTVAL (op0) < 0))
        return 1;
      break;

    case REG:
      regno_off0 = REGNO (op0) + regno_off0;
      regno_off1 = REGNO (op1) + regno_off1;

      cnst_diff = regno_off0 - regno_off1;
      if (cnst_diff == 1)
        {
          /* movl with the highest numbered parameter (local) register as
             source or destination doesn't wrap to the lowest numbered local
             (temporary) register.  */

          if (regno_off0 % 16 != 0)
            return 1;
          else
            return 0;
        }
      else if (cnst_diff == -1)
        {
          if (regno_off1 % 16 != 0)
            return 2;
          else
            return 0;
        }
      break;

    case MEM:
      op0 = XEXP (op0, 0);
      op1 = XEXP (op1, 0);
      if (GET_CODE (op0) == CONST)
        op0 = XEXP (op0, 0);
      if (GET_CODE (op1) == CONST)
        op1 = XEXP (op1, 0);

      cnst_diff = constant_diff (op0, op1);
      if (cnst_diff)
        {
          if (cnst_diff == 4)
            return 1;
          else if (cnst_diff == -4)
            return 2;
        }
      break;
    }
  return 0;
}

/* Return the constant difference of the rtx expressions OP0 and OP1,
   or 0 if they don't have a constant difference.

   This function is used to determine if addresses are consecutive,
   and therefore possible to combine to fewer instructions.  */

int
constant_diff (op0, op1)
     rtx op0, op1;
{
  RTX_CODE code0, code1;
  int cnst_diff;

  code0 = GET_CODE (op0);
  code1 = GET_CODE (op1);

  if (code0 != code1)
    {
      if (code0 == PLUS)
        {
          if (GET_CODE (XEXP (op0, 1)) == CONST_INT
              && rtx_equal_p (op1, XEXP (op0, 0)))
            return INTVAL (XEXP (op0, 1));
        }
      else if (code1 == PLUS)
        {
          if (GET_CODE (XEXP (op1, 1)) == CONST_INT
              && rtx_equal_p (op0, XEXP (op1, 0)))
            return -INTVAL (XEXP (op1, 1));
        }
      return 0;
    }

  if (code0 == CONST_INT)
    return INTVAL (op0) - INTVAL (op1);

  if (code0 == PLUS)
    {
      cnst_diff = constant_diff (XEXP (op0, 0), XEXP (op1, 0));
      if (cnst_diff)
        return (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1)))
          ? cnst_diff : 0;
      cnst_diff = constant_diff (XEXP (op0, 1), XEXP (op1, 1));
      if (cnst_diff)
        return (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)))
          ? cnst_diff : 0;
    }

  return 0;
}

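/* Return non-zero if register OP is known to be already sign-extended
   from FROM_MODE when execution reaches INSN, judging by the insn that
   most recently set OP before INSN.  */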
int
already_sign_extended (insn, from_mode, op)
     rtx insn;
     enum machine_mode from_mode;
     rtx op;
{
  rtx xinsn, xdest, xsrc;

  for (;;)
    {
      insn = PREV_INSN (insn);
      if (insn == 0)
        return 0;
      if (GET_CODE (insn) == NOTE || GET_CODE (insn) == JUMP_INSN)
        continue;
      if (GET_CODE (insn) == CALL_INSN && ! call_used_regs[REGNO (op)])
        continue;
      if (GET_CODE (insn) != INSN)
        return 0;
      xinsn = PATTERN (insn);

      if (GET_CODE (xinsn) != SET)
        return 0;

      xdest = SET_DEST (xinsn);
      xsrc = SET_SRC (xinsn);

      if (GET_CODE (xdest) == SUBREG)
        abort ();

      if (! REG_P (xdest))
        continue;

      if (REGNO (op) == REGNO (xdest)
          && ((GET_CODE (xsrc) == SIGN_EXTEND
               && GET_MODE (XEXP (xsrc, 0)) == from_mode)
              || (GET_CODE (xsrc) == MEM
                  && GET_MODE (xsrc) == from_mode)))
        return 1;

      /* The register is modified by another operation.  */
      if (reg_overlap_mentioned_p (xdest, op))
        return 0;
    }
}

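/* Return the assembler template for a doubleword move.  Most operands
   can be moved with a single "movl"; CONST_DOUBLE operands that a
   "movl" immediate cannot represent are moved as two "movw" insns.  */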
char *
output_move_double (operands)
     rtx *operands;
{
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT)
        {
          /* In an integer, the low-order word is in CONST_DOUBLE_LOW.  */
          rtx const_op = operands[1];

          if ((CONST_DOUBLE_HIGH (const_op) == 0
               && CONST_DOUBLE_LOW (const_op) >= 0)
              || (CONST_DOUBLE_HIGH (const_op) == -1
                  && CONST_DOUBLE_LOW (const_op) < 0))
            {
              operands[1] = GEN_INT (CONST_DOUBLE_LOW (const_op));
              return "movl %1,%0";
            }
          operands[1] = GEN_INT (CONST_DOUBLE_HIGH (const_op));
          output_asm_insn ("movw %1,%0", operands);
          operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
          operands[1] = GEN_INT (CONST_DOUBLE_LOW (const_op));
          return "movw %1,%0";
        }
      else
        {
          /* In a real, the low-address word is in CONST_DOUBLE_LOW.  */
          rtx const_op = operands[1];

          if ((CONST_DOUBLE_LOW (const_op) == 0
               && CONST_DOUBLE_HIGH (const_op) >= 0)
              || (CONST_DOUBLE_LOW (const_op) == -1
                  && CONST_DOUBLE_HIGH (const_op) < 0))
            {
              operands[1] = GEN_INT (CONST_DOUBLE_HIGH (const_op));
              return "movl %1,%0";
            }
          operands[1] = GEN_INT (CONST_DOUBLE_LOW (const_op));
          output_asm_insn ("movw %1,%0", operands);
          operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
          operands[1] = GEN_INT (CONST_DOUBLE_HIGH (const_op));
          return "movw %1,%0";
        }
    }

  return "movl %1,%0";
}

/* Output a shift insn, after having reduced an integer shift count
   modulo MOD to avoid assembler warnings.  */

char *
output_shift (pattern, op2, mod)
     char *pattern;
     rtx op2;
     int mod;
{
  if (GET_CODE (op2) == CONST_INT)
    {
      int cnt = INTVAL (op2) % mod;

      if (cnt == 0)
        {
          cc_status = cc_prev_status;
          return "";
        }
      /* ??? This replaces only the local copy of OP2, so the reduced
         count never reaches the operand that PATTERN prints.  */
      op2 = GEN_INT (cnt);
    }
  return pattern;
}

/* Return non-zero if the code of this rtx pattern is a relop.  */

int
relop (op, mode)
     rtx op;
     enum machine_mode mode;
{
  switch (GET_CODE (op))
    {
    case EQ:
    case NE:
    case LT:
    case LE:
    case GE:
    case GT:
    case LTU:
    case LEU:
    case GEU:
    case GTU:
      return 1;
    default:
      break;
    }
  return 0;
}

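/* Update CC_STATUS to reflect the effect of INSN, whose body is EXP.
   This is the worker for the NOTICE_UPDATE_CC macro.  */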
void
notice_update_cc (EXP, INSN)
     rtx EXP, INSN;
{
  switch (GET_CODE (EXP))
    {
    case SET:
      switch (GET_CODE (SET_DEST (EXP)))
        {
        case CC0:
          cc_status.mdep = 0;
          cc_status.flags = 0;
          cc_status.value1 = 0;
          cc_status.value2 = SET_SRC (EXP);
          break;

        case PC:
          break;

        case REG:
          switch (GET_CODE (SET_SRC (EXP)))
            {
            case CALL:
              goto call;
            case MEM:
              if (GET_MODE (SET_SRC (EXP)) == QImode
                  || GET_MODE (SET_SRC (EXP)) == HImode)
                {
                  cc_status.mdep = 0;
                  cc_status.flags = CC_NO_OVERFLOW;
                  cc_status.value1 = SET_DEST (EXP);
                  cc_status.value2 = SET_SRC (EXP);
                  break;
                }
              /* else: Fall through.  */
            case CONST_INT:
            case SYMBOL_REF:
            case LABEL_REF:
            case CONST:
            case CONST_DOUBLE:
            case REG:
              if (cc_status.value1
                  && reg_overlap_mentioned_p (SET_DEST (EXP),
                                              cc_status.value1))
                cc_status.value1 = 0;
              if (cc_status.value2
                  && reg_overlap_mentioned_p (SET_DEST (EXP),
                                              cc_status.value2))
                cc_status.value2 = 0;
              break;

            case UDIV:
            case UMOD:
              cc_status.mdep = CC_VALID_FOR_UNSIGNED;
              cc_status.flags = CC_NO_OVERFLOW;
              cc_status.value1 = SET_DEST (EXP);
              cc_status.value2 = SET_SRC (EXP);
              break;
            default:
              cc_status.mdep = 0;
              cc_status.flags = CC_NO_OVERFLOW;
              cc_status.value1 = SET_DEST (EXP);
              cc_status.value2 = SET_SRC (EXP);
              break;
            }
          break;

        case MEM:
          switch (GET_CODE (SET_SRC (EXP)))
            {
            case REG:
              if (GET_MODE (SET_SRC (EXP)) == QImode
                  || GET_MODE (SET_SRC (EXP)) == HImode)
                {
                  cc_status.flags = CC_NO_OVERFLOW;
                  cc_status.value1 = SET_DEST (EXP);
                  cc_status.value2 = SET_SRC (EXP);
                  cc_status.mdep = 0;
                  break;
                }
              /* else: Fall through.  */
            case CONST_INT:
            case SYMBOL_REF:
            case LABEL_REF:
            case CONST:
            case CONST_DOUBLE:
            case MEM:
              /* Need to forget cc_status about memory positions each
                 time a memory store is made, even if the memory store
                 insn in question doesn't modify the condition codes.  */
              if (cc_status.value1
                  && GET_CODE (cc_status.value1) == MEM)
                cc_status.value1 = 0;
              if (cc_status.value2
                  && GET_CODE (cc_status.value2) == MEM)
                cc_status.value2 = 0;
              break;
            case SIGN_EXTEND:
            case FLOAT_EXTEND:
            case FLOAT_TRUNCATE:
            case FLOAT:
            case FIX:
              cc_status.flags = CC_NO_OVERFLOW;
              cc_status.value1 = SET_DEST (EXP);
              cc_status.value2 = SET_SRC (EXP);
              cc_status.mdep = 0;
              break;

            default:
              abort ();
            }
          break;

        default:
          abort ();
        }
      break;

    case CALL:
    call:
      CC_STATUS_INIT;
      break;
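      /* NOTE: everything past the break above is unreachable; the
         following only documents what call handling might do.  */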
      /* Do calls preserve the condition codes?  (At least forget
         cc_status expressions if they refer to registers
         not preserved across calls.  Also forget expressions
         about memory contents.)  */
      if (cc_status.value1
          && (refers_to_regno_p (PYR_TREG (0), PYR_TREG (15),
                                 cc_status.value1, 0)
              || GET_CODE (cc_status.value1) == MEM))
        cc_status.value1 = 0;
      if (cc_status.value2
          && (refers_to_regno_p (PYR_TREG (0), PYR_TREG (15),
                                 cc_status.value2, 0)
              || GET_CODE (cc_status.value2) == MEM))
        cc_status.value2 = 0;
      break;

    default:
      CC_STATUS_INIT;
    }
}

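/* Restore the previous cc_status, but forget any part of it that
   mentions rtx OP.  */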
void
forget_cc_if_dependent (op)
     rtx op;
{
  cc_status = cc_prev_status;
  if (cc_status.value1 && reg_overlap_mentioned_p (op, cc_status.value1))
    cc_status.value1 = 0;
  if (cc_status.value2 && reg_overlap_mentioned_p (op, cc_status.value2))
    cc_status.value2 = 0;
}
\f
/* ??? None of the original definitions ever worked for stdarg.h, or
   even for structs or float arguments.  Quoting bits of the old
   va-pyr.h for historical interest.  */

/**
 *
 * Varargs for PYR/GNU CC
 *
 * WARNING -- WARNING -- DANGER
 *
 * The code in this file implements varargs for gcc on a pyr in
 * a way that is compatible with code compiled by the Pyramid Technology
 * C compiler.
 * As such, it depends strongly on the Pyramid conventions for
 * parameter passing; it is not a distinct and independent implementation.
 * These (somewhat bizarre) parameter-passing conventions are described
 * in the ``OSx Operating System Porting Guide''.
 *
 * A quick summary is useful:
 * 12 of the 48 register-windowed regs are available for
 * parameter passing.  Parameters of a function call that are eligible
 * to be passed in registers are assigned registers from TR0/PR0 onwards;
 * all other arguments are passed on the stack.
 * Structure and union parameters are *never* passed in registers,
 * even if they are small enough to fit.  They are always passed on
 * the stack.
 *
 * Double-sized parameters cannot be passed in TR11, because
 * TR12 is not used for passing parameters.  If, in the absence of this
 * rule, a double-sized param would have been passed in TR11,
 * that parameter is passed on the stack and no parameters are
 * passed in TR11.
 *
 * It is only known to work for passing 32-bit integer quantities
 * (i.e. chars, shorts, ints/enums, longs), doubles, or pointers.
 * Passing structures on a Pyramid via varargs is a loser.
 * Passing an object larger than 8 bytes on a pyramid via varargs may
 * also be a loser.
 *
 */

tree
pyr_build_va_list ()
{
  typedef struct __va_regs
  {
    __voidptr __stackp, __regp, __count;
    __voidptr __pr0, __pr1, __pr2, __pr3, __pr4, __pr5, __pr6, __pr7,
              __pr8, __pr9, __pr10, __pr11;
  } __va_regs;

  typedef __va_regs __va_buf;
  typedef __va_buf __gnuc_va_list;
}

void
pyr_va_start (stdarg_p, valist, nextarg)
     int stdarg_p;
     tree valist;
     rtx nextarg ATTRIBUTE_UNUSED;
{
#define va_alist \
  __va0,__va1,__va2,__va3,__va4,__va5,__va6,__va7,__va8,__va9,__va10,__va11, \
  __builtin_va_alist

/* The ... causes current_function_varargs to be set in cc1.  */
#define va_dcl __voidptr va_alist; __va_ellipsis

/* __asm ("rcsp %0" : "=r" ( _AP [0])); */

#define va_start(_AP) \
  _AP = ((struct __va_regs) { \
    &(_AP.__pr0), (void *)&__builtin_va_alist, (void *)0, \
    __va0,__va1,__va2,__va3,__va4,__va5, \
    __va6,__va7,__va8,__va9,__va10,__va11})

}

rtx
pyr_va_arg (valist, type)
     tree valist, type;
{
#define va_arg(_AP, _MODE) \
__extension__ \
(*({__voidptr *__ap = (__voidptr *)&_AP; \
    register int __size = sizeof (_MODE); \
    register int __onstack = \
      (__size > 8 || ((int)(__ap[2]) > 11) || \
       (__size == 8 && (int)(__ap[2]) == 11)); \
    register int *__param_addr = ((int *)((__ap)[__onstack])); \
 \
    ((void *)__ap[__onstack]) += __size; \
    if (__onstack == 0 || (int)(__ap[2]) == 11) \
      __ap[2] += (__size >> 2); \
    ((_MODE *)(void *)__param_addr); \
}))
}