]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/vax/vax.c
4a1ecfa0da35bc03a372e26af52c58a904813567
[thirdparty/gcc.git] / gcc / config / vax / vax.c
1 /* Subroutines for insn-output.c for VAX.
2 Copyright (C) 1987-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #define IN_TARGET_CODE 1
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "attribs.h"
31 #include "df.h"
32 #include "memmodel.h"
33 #include "tm_p.h"
34 #include "optabs.h"
35 #include "regs.h"
36 #include "emit-rtl.h"
37 #include "calls.h"
38 #include "varasm.h"
39 #include "conditions.h"
40 #include "output.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "builtins.h"
44
45 /* This file should be included last. */
46 #include "target-def.h"
47
/* Forward declarations for the static helpers and target-hook
   implementations defined later in this file.  */
static void vax_option_override (void);
static bool vax_legitimate_address_p (machine_mode, rtx, bool);
static void vax_file_start (void);
static void vax_init_libfuncs (void);
static void vax_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static int vax_address_cost_1 (rtx);
static int vax_address_cost (rtx, machine_mode, addr_space_t, bool);
static bool vax_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static rtx vax_function_arg (cumulative_args_t, const function_arg_info &);
static void vax_function_arg_advance (cumulative_args_t,
				      const function_arg_info &);
static rtx vax_struct_value_rtx (tree, int);
static void vax_asm_trampoline_template (FILE *);
static void vax_trampoline_init (rtx, tree, rtx);
static poly_int64 vax_return_pops_args (tree, tree, poly_int64);
static bool vax_mode_dependent_address_p (const_rtx, addr_space_t);
static HOST_WIDE_INT vax_starting_frame_offset (void);
66 \f
/* Initialize the GCC target structure.  Each #undef/#define pair below
   overrides the default implementation of one target hook with the VAX
   version declared above.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START vax_file_start
#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS vax_init_libfuncs

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK vax_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS vax_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST vax_address_cost

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG vax_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE vax_function_arg_advance

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX vax_struct_value_rtx

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P vax_legitimate_address_p
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P vax_mode_dependent_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE vax_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT vax_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS vax_return_pops_args

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE vax_option_override

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET vax_starting_frame_offset

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

/* The one instance of the target hook vector for this back end.  */
struct gcc_target targetm = TARGET_INITIALIZER;
128 \f
/* Implement TARGET_OPTION_OVERRIDE.  Set global variables as needed
   for the options enabled.  */

static void
vax_option_override (void)
{
  /* We're VAX floating point, not IEEE floating point.  With -mg,
     use the VAX G_float representation for DFmode.  */
  if (TARGET_G_FLOAT)
    REAL_MODE_FORMAT (DFmode) = &vax_g_format;

  /* Let the subtarget make its own final adjustments, if any.  */
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
}
142
143 static void
144 vax_add_reg_cfa_offset (rtx insn, int offset, rtx src)
145 {
146 rtx x;
147
148 x = plus_constant (Pmode, frame_pointer_rtx, offset);
149 x = gen_rtx_MEM (SImode, x);
150 x = gen_rtx_SET (x, src);
151 add_reg_note (insn, REG_CFA_OFFSET, x);
152 }
153
/* Generate RTL for function entry.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

void
vax_expand_prologue (void)
{
  int regno, offset;
  int mask = 0;
  HOST_WIDE_INT size;
  rtx insn;

  /* Build the procedure entry mask: one bit for each call-saved
     register this function actually uses.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (df_regs_ever_live_p (regno) && !call_used_or_fixed_reg_p (regno))
      mask |= 1 << regno;

  insn = emit_insn (gen_procedure_entry_mask (GEN_INT (mask)));
  RTX_FRAME_RELATED_P (insn) = 1;

  /* The layout of the CALLG/S stack frame is follows:

		<- CFA, AP
	r11
	r10
	...	Registers saved as specified by MASK
	r3
	r2
	return-addr
	old fp
	old ap
	old psw
	zero
		<- FP, SP

     The rest of the prologue will adjust the SP for the local frame.  */

  /* Record CFA-relative save slots for the state pushed on entry...  */
  vax_add_reg_cfa_offset (insn, 4, arg_pointer_rtx);
  vax_add_reg_cfa_offset (insn, 8, frame_pointer_rtx);
  vax_add_reg_cfa_offset (insn, 12, pc_rtx);

  /* ...and for each register saved via the entry mask, 4 bytes apart.  */
  offset = 16;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (mask & (1 << regno))
      {
	vax_add_reg_cfa_offset (insn, offset, gen_rtx_REG (SImode, regno));
	offset += 4;
      }

  /* Because add_reg_note pushes the notes, adding this last means that
     it will be processed first.  This is required to allow the other
     notes be interpreted properly.  */
  add_reg_note (insn, REG_CFA_DEF_CFA,
		plus_constant (Pmode, frame_pointer_rtx, offset));

  /* Allocate the local stack frame.  */
  size = get_frame_size ();
  size -= vax_starting_frame_offset ();
  emit_insn (gen_addsi3 (stack_pointer_rtx,
			 stack_pointer_rtx, GEN_INT (-size)));

  /* Do not allow instructions referencing local stack memory to be
     scheduled before the frame is allocated.  This is more pedantic
     than anything else, given that VAX does not currently have a
     scheduling description.  */
  emit_insn (gen_blockage ());
}
225
226 /* When debugging with stabs, we want to output an extra dummy label
227 so that gas can distinguish between D_float and G_float prior to
228 processing the .stabs directive identifying type double. */
229 static void
230 vax_file_start (void)
231 {
232 default_file_start ();
233
234 if (write_symbols == DBX_DEBUG)
235 fprintf (asm_out_file, "___vax_%c_doubles:\n", ASM_DOUBLE_CHAR);
236 }
237
238 /* We can use the BSD C library routines for the libgcc calls that are
239 still generated, since that's what they boil down to anyways. When
240 ELF, avoid the user's namespace. */
241
242 static void
243 vax_init_libfuncs (void)
244 {
245 if (TARGET_BSD_DIVMOD)
246 {
247 set_optab_libfunc (udiv_optab, SImode, TARGET_ELF ? "*__udiv" : "*udiv");
248 set_optab_libfunc (umod_optab, SImode, TARGET_ELF ? "*__urem" : "*urem");
249 }
250 }
251
/* Split the quadword operands OPERANDS[0..N-1] of INSN into SImode
   halves: on return OPERANDS[i] holds one word and LOW[i] the other
   (per the LOW name, LOW[i] is subword 0).  CODE is the operation the
   insn performs.

   Autoincrement/autodecrement memory operands keep a single shared
   MEM for both halves so the side effect supplies the step; when
   optimizing for size, a register-indirect MEM whose base register
   dies in INSN is rewritten as (reg) / (reg)+ to avoid a displacement
   byte.  */

static void
split_quadword_operands (rtx insn, enum rtx_code code, rtx * operands,
			 rtx * low, int n)
{
  int i;

  for (i = 0; i < n; i++)
    low[i] = 0;

  for (i = 0; i < n; i++)
    {
      if (MEM_P (operands[i])
	  && (GET_CODE (XEXP (operands[i], 0)) == PRE_DEC
	      || GET_CODE (XEXP (operands[i], 0)) == POST_INC))
	{
	  /* Autoinc/autodec addressing: both halves alias the same
	     SImode MEM; the address side effect steps between them.  */
	  rtx addr = XEXP (operands[i], 0);
	  operands[i] = low[i] = gen_rtx_MEM (SImode, addr);
	}
      else if (optimize_size && MEM_P (operands[i])
	       && REG_P (XEXP (operands[i], 0))
	       && (code != MINUS || operands[1] != const0_rtx)
	       && find_regno_note (insn, REG_DEAD,
				   REGNO (XEXP (operands[i], 0))))
	{
	  /* The base register dies in INSN, so it may be clobbered:
	     use (reg)+ for the low word and plain (reg) for the high
	     word instead of a displaced address.  */
	  low[i] = gen_rtx_MEM (SImode,
				gen_rtx_POST_INC (Pmode,
						  XEXP (operands[i], 0)));
	  operands[i] = gen_rtx_MEM (SImode, XEXP (operands[i], 0));
	}
      else
	{
	  /* General case: let the middle end extract the subwords.  */
	  low[i] = operand_subword (operands[i], 0, 0, DImode);
	  operands[i] = operand_subword (operands[i], 1, 0, DImode);
	}
    }
}
290 \f
/* Print to FILE the VAX assembler syntax for the address expression
   ADDR.  Handles register indirect, autoincrement/autodecrement,
   indirection via '*', and the general base/index/offset forms of a
   PLUS address.  */

void
print_operand_address (FILE * file, rtx addr)
{
  rtx orig = addr;	/* kept only for diagnostics via debug_rtx */
  rtx reg1, breg, ireg;
  rtx offset;

 retry:
  switch (GET_CODE (addr))
    {
    case MEM:
      /* Indirect addressing is written with a '*' prefix.  */
      fprintf (file, "*");
      addr = XEXP (addr, 0);
      goto retry;

    case REG:
      fprintf (file, "(%s)", reg_names[REGNO (addr)]);
      break;

    case PRE_DEC:
      fprintf (file, "-(%s)", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    case POST_INC:
      fprintf (file, "(%s)+", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    case PLUS:
      /* There can be either two or three things added here.  One must be a
	 REG.  One can be either a REG or a MULT of a REG and an appropriate
	 constant, and the third can only be a constant or a MEM.

	 We get these two or three things and put the constant or MEM in
	 OFFSET, the MULT or REG in IREG, and the REG in BREG.  If we have
	 a register and can't tell yet if it is a base or index register,
	 put it into REG1.  */

      reg1 = 0; ireg = 0; breg = 0; offset = 0;

      /* Classify the first addend we can identify, leaving the rest
	 of the expression in ADDR.  */
      if (CONSTANT_ADDRESS_P (XEXP (addr, 0))
	  || MEM_P (XEXP (addr, 0)))
	{
	  offset = XEXP (addr, 0);
	  addr = XEXP (addr, 1);
	}
      else if (CONSTANT_ADDRESS_P (XEXP (addr, 1))
	       || MEM_P (XEXP (addr, 1)))
	{
	  offset = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}
      else if (GET_CODE (XEXP (addr, 1)) == MULT)
	{
	  ireg = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}
      else if (GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  ireg = XEXP (addr, 0);
	  addr = XEXP (addr, 1);
	}
      else if (REG_P (XEXP (addr, 1)))
	{
	  reg1 = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}
      else if (REG_P (XEXP (addr, 0)))
	{
	  reg1 = XEXP (addr, 0);
	  addr = XEXP (addr, 1);
	}
      else
	gcc_unreachable ();

      /* Now ADDR is either the remaining single addend or a nested
	 PLUS holding the remaining two.  */
      if (REG_P (addr))
	{
	  if (reg1)
	    ireg = addr;
	  else
	    reg1 = addr;
	}
      else if (GET_CODE (addr) == MULT)
	ireg = addr;
      else
	{
	  gcc_assert (GET_CODE (addr) == PLUS);
	  if (CONSTANT_ADDRESS_P (XEXP (addr, 0))
	      || MEM_P (XEXP (addr, 0)))
	    {
	      if (offset)
		{
		  if (CONST_INT_P (offset))
		    offset = plus_constant (Pmode, XEXP (addr, 0),
					    INTVAL (offset));
		  else
		    {
		      gcc_assert (CONST_INT_P (XEXP (addr, 0)));
		      offset = plus_constant (Pmode, offset,
					      INTVAL (XEXP (addr, 0)));
		    }
		}
	      /* NOTE(review): this assignment discards the combined
		 offset computed just above — looks suspicious, kept
		 as-is; confirm against upstream intent.  */
	      offset = XEXP (addr, 0);
	    }
	  else if (REG_P (XEXP (addr, 0)))
	    {
	      if (reg1)
		ireg = reg1, breg = XEXP (addr, 0), reg1 = 0;
	      else
		reg1 = XEXP (addr, 0);
	    }
	  else
	    {
	      gcc_assert (GET_CODE (XEXP (addr, 0)) == MULT);
	      gcc_assert (!ireg);
	      ireg = XEXP (addr, 0);
	    }

	  if (CONSTANT_ADDRESS_P (XEXP (addr, 1))
	      || MEM_P (XEXP (addr, 1)))
	    {
	      if (offset)
		{
		  if (CONST_INT_P (offset))
		    offset = plus_constant (Pmode, XEXP (addr, 1),
					    INTVAL (offset));
		  else
		    {
		      gcc_assert (CONST_INT_P (XEXP (addr, 1)));
		      offset = plus_constant (Pmode, offset,
					      INTVAL (XEXP (addr, 1)));
		    }
		}
	      /* NOTE(review): same clobbering pattern as above —
		 the plus_constant result is overwritten; verify.  */
	      offset = XEXP (addr, 1);
	    }
	  else if (REG_P (XEXP (addr, 1)))
	    {
	      if (reg1)
		ireg = reg1, breg = XEXP (addr, 1), reg1 = 0;
	      else
		reg1 = XEXP (addr, 1);
	    }
	  else
	    {
	      gcc_assert (GET_CODE (XEXP (addr, 1)) == MULT);
	      gcc_assert (!ireg);
	      ireg = XEXP (addr, 1);
	    }
	}

      /* If REG1 is nonzero, figure out if it is a base or index register.  */
      if (reg1)
	{
	  if (breg
	      || (flag_pic && GET_CODE (addr) == SYMBOL_REF)
	      || (offset
		  && (MEM_P (offset)
		      || (flag_pic && symbolic_operand (offset, SImode)))))
	    {
	      gcc_assert (!ireg);
	      ireg = reg1;
	    }
	  else
	    breg = reg1;
	}

      /* Emit the pieces in VAX order: offset, then (base), then [index].  */
      if (offset != 0)
	{
	  if (flag_pic && symbolic_operand (offset, SImode))
	    {
	      if (breg && ireg)
		{
		  debug_rtx (orig);
		  output_operand_lossage ("symbol used with both base and indexed registers");
		}

#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
	      if (flag_pic > 1 && GET_CODE (offset) == CONST
		  && GET_CODE (XEXP (XEXP (offset, 0), 0)) == SYMBOL_REF
		  && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (offset, 0), 0)))
		{
		  debug_rtx (orig);
		  output_operand_lossage ("symbol with offset used in PIC mode");
		}
#endif

	      /* symbol(reg) isn't PIC, but symbol[reg] is.  */
	      if (breg)
		{
		  ireg = breg;
		  breg = 0;
		}

	    }

	  output_address (VOIDmode, offset);
	}

      if (breg != 0)
	fprintf (file, "(%s)", reg_names[REGNO (breg)]);

      if (ireg != 0)
	{
	  /* An index of the form (MULT reg c) prints just the register;
	     the scale is implied by the operand size.  */
	  if (GET_CODE (ireg) == MULT)
	    ireg = XEXP (ireg, 0);
	  gcc_assert (REG_P (ireg));
	  fprintf (file, "[%s]", reg_names[REGNO (ireg)]);
	}
      break;

    default:
      output_addr_const (file, addr);
    }
}
504
/* Print to FILE the VAX assembler form of operand X under modifier
   letter CODE:

     '#'  the D/G double-precision suffix character
     '|'  the register prefix
     'c'  condition name for X, 'C' the reversed condition name
     'D'  negative CONST_INT printed in hexadecimal
     'P'  CONST_INT plus one
     'N'  one's complement of a CONST_INT
     'R'  32 minus a CONST_INT (rotate-left counts must be positive)
     'H'/'B'  complement truncated to 16/8 bits
     'h'/'b'  negation truncated to 16/8 bits
     'M'  complemented mask of the low INTVAL(X) bits
     'x'  CONST_INT in hexadecimal

   Without a matching modifier: registers print their name, MEMs print
   their address, float CONST_DOUBLEs print as $0f/$0d/$0g literals,
   and anything else prints as a '$' immediate.  */

void
print_operand (FILE *file, rtx x, int code)
{
  if (code == '#')
    fputc (ASM_DOUBLE_CHAR, file);
  else if (code == '|')
    fputs (REGISTER_PREFIX, file);
  else if (code == 'c')
    fputs (cond_name (x), file);
  else if (code == 'C')
    fputs (rev_cond_name (x), file);
  else if (code == 'D' && CONST_INT_P (x) && INTVAL (x) < 0)
    fprintf (file, "$" NEG_HWI_PRINT_HEX16, INTVAL (x));
  else if (code == 'P' && CONST_INT_P (x))
    fprintf (file, "$" HOST_WIDE_INT_PRINT_DEC, INTVAL (x) + 1);
  else if (code == 'N' && CONST_INT_P (x))
    fprintf (file, "$" HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
  /* rotl instruction cannot deal with negative arguments.  */
  else if (code == 'R' && CONST_INT_P (x))
    fprintf (file, "$" HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
  else if (code == 'H' && CONST_INT_P (x))
    fprintf (file, "$%d", (int) (0xffff & ~ INTVAL (x)));
  else if (code == 'h' && CONST_INT_P (x))
    fprintf (file, "$%d", (short) - INTVAL (x));
  else if (code == 'B' && CONST_INT_P (x))
    fprintf (file, "$%d", (int) (0xff & ~ INTVAL (x)));
  else if (code == 'b' && CONST_INT_P (x))
    fprintf (file, "$%d", (int) (0xff & - INTVAL (x)));
  else if (code == 'M' && CONST_INT_P (x))
    fprintf (file, "$%d", ~((1 << INTVAL (x)) - 1));
  else if (code == 'x' && CONST_INT_P (x))
    fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
  else if (REG_P (x))
    fprintf (file, "%s", reg_names[REGNO (x)]);
  else if (MEM_P (x))
    output_address (GET_MODE (x), XEXP (x, 0));
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
    {
      char dstr[30];
      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x),
		       sizeof (dstr), 0, 1);
      fprintf (file, "$0f%s", dstr);
    }
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
    {
      char dstr[30];
      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x),
		       sizeof (dstr), 0, 1);
      fprintf (file, "$0%c%s", ASM_DOUBLE_CHAR, dstr);
    }
  else
    {
      /* Immediate symbolic operands are not representable when
	 generating PIC with indirection (-fPIC).  */
      if (flag_pic > 1 && symbolic_operand (x, SImode))
	{
	  debug_rtx (x);
	  output_operand_lossage ("symbol used as immediate operand");
	}
      putc ('$', file);
      output_addr_const (file, x);
    }
}
566 \f
567 const char *
568 cond_name (rtx op)
569 {
570 switch (GET_CODE (op))
571 {
572 case NE:
573 return "neq";
574 case EQ:
575 return "eql";
576 case GE:
577 return "geq";
578 case GT:
579 return "gtr";
580 case LE:
581 return "leq";
582 case LT:
583 return "lss";
584 case GEU:
585 return "gequ";
586 case GTU:
587 return "gtru";
588 case LEU:
589 return "lequ";
590 case LTU:
591 return "lssu";
592
593 default:
594 gcc_unreachable ();
595 }
596 }
597
598 const char *
599 rev_cond_name (rtx op)
600 {
601 switch (GET_CODE (op))
602 {
603 case EQ:
604 return "neq";
605 case NE:
606 return "eql";
607 case LT:
608 return "geq";
609 case LE:
610 return "gtr";
611 case GT:
612 return "leq";
613 case GE:
614 return "lss";
615 case LTU:
616 return "gequ";
617 case LEU:
618 return "gtru";
619 case GTU:
620 return "lequ";
621 case GEU:
622 return "lssu";
623
624 default:
625 gcc_unreachable ();
626 }
627 }
628
629 static bool
630 vax_float_literal (rtx c)
631 {
632 machine_mode mode;
633 const REAL_VALUE_TYPE *r;
634 REAL_VALUE_TYPE s;
635 int i;
636
637 if (GET_CODE (c) != CONST_DOUBLE)
638 return false;
639
640 mode = GET_MODE (c);
641
642 if (c == const_tiny_rtx[(int) mode][0]
643 || c == const_tiny_rtx[(int) mode][1]
644 || c == const_tiny_rtx[(int) mode][2])
645 return true;
646
647 r = CONST_DOUBLE_REAL_VALUE (c);
648
649 for (i = 0; i < 7; i++)
650 {
651 int x = 1 << i;
652 bool ok;
653 real_from_integer (&s, mode, x, SIGNED);
654
655 if (real_equal (r, &s))
656 return true;
657 ok = exact_real_inverse (mode, &s);
658 gcc_assert (ok);
659 if (real_equal (r, &s))
660 return true;
661 }
662 return false;
663 }
664
665
/* Return the cost in cycles of a memory address, relative to register
   indirect.

   Each of the following adds the indicated number of cycles:

	1 - symbolic address
	1 - pre-decrement
	1 - indexing and/or offset(register)
	2 - indirect */


static int
vax_address_cost_1 (rtx addr)
{
  int reg = 0, indexed = 0, indir = 0, offset = 0, predec = 0;
  /* Small worklist: a PLUS may have up to three addends; the ones not
     yet examined are parked in plus_op0/plus_op1.  */
  rtx plus_op0 = 0, plus_op1 = 0;
 restart:
  switch (GET_CODE (addr))
    {
    case PRE_DEC:
      predec = 1;
      /* FALLTHRU */
    case REG:
    case SUBREG:
    case POST_INC:
      reg = 1;
      break;
    case MULT:
      indexed = 1;	/* 2 on VAX 2 */
      break;
    case CONST_INT:
      /* byte offsets cost nothing (on a VAX 2, they cost 1 cycle) */
      if (offset == 0)
	offset = (unsigned HOST_WIDE_INT)(INTVAL(addr)+128) > 256;
      break;
    case CONST:
    case SYMBOL_REF:
      offset = 1;	/* 2 on VAX 2 */
      break;
    case LABEL_REF:	/* this is probably a byte offset from the pc */
      if (offset == 0)
	offset = 1;
      break;
    case PLUS:
      if (plus_op0)
	plus_op1 = XEXP (addr, 0);
      else
	plus_op0 = XEXP (addr, 0);
      addr = XEXP (addr, 1);
      goto restart;
    case MEM:
      indir = 2;	/* 3 on VAX 2 */
      addr = XEXP (addr, 0);
      goto restart;
    default:
      break;
    }

  /* Up to 3 things can be added in an address.  They are stored in
     plus_op0, plus_op1, and addr.  */

  if (plus_op0)
    {
      addr = plus_op0;
      plus_op0 = 0;
      goto restart;
    }
  if (plus_op1)
    {
      addr = plus_op1;
      plus_op1 = 0;
      goto restart;
    }
  /* Indexing and register+offset can both be used (except on a VAX 2)
     without increasing execution time over either one alone.  */
  if (reg && indexed && offset)
    return reg + indir + offset + predec;
  return reg + indexed + indir + offset + predec;
}
745
746 static int
747 vax_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
748 addr_space_t as ATTRIBUTE_UNUSED,
749 bool speed ATTRIBUTE_UNUSED)
750 {
751 return (1 + (REG_P (x) ? 0 : vax_address_cost_1 (x)));
752 }
753
/* Implement TARGET_RTX_COSTS.  Cost of an expression on a VAX.  This
   version has costs tuned for the CVAX chip (found in the VAX 3 series)
   with comments for variations on other models.

   FIXME: The costs need review, particularly for TRUNCATE, FLOAT_EXTEND
   and FLOAT_TRUNCATE.  We need a -mcpu option to allow provision of
   costs on a per cpu basis.  */

static bool
vax_rtx_costs (rtx x, machine_mode mode, int outer_code,
	       int opno ATTRIBUTE_UNUSED,
	       int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);
  int i = 0;				   /* may be modified in switch */
  const char *fmt = GET_RTX_FORMAT (code); /* may be modified in switch */

  switch (code)
    {
      /* On a VAX, constants from 0..63 are cheap because they can use the
	 1 byte literal constant format.  Compare to -1 should be made cheap
	 so that decrement-and-branch insns can be formed more easily (if
	 the value -1 is copied to a register some decrement-and-branch
	 patterns will not match).  */
    case CONST_INT:
      if (INTVAL (x) == 0)
	{
	  *total = 0;
	  return true;
	}
      if (outer_code == AND)
	{
	  /* AND complements its constant operand, so the complement
	     must fit the 6-bit literal format to be cheap.  */
	  *total = ((unsigned HOST_WIDE_INT) ~INTVAL (x) <= 077) ? 1 : 2;
	  return true;
	}
      if ((unsigned HOST_WIDE_INT) INTVAL (x) <= 077
	  || (outer_code == COMPARE
	      && INTVAL (x) == -1)
	  || ((outer_code == PLUS || outer_code == MINUS)
	      && (unsigned HOST_WIDE_INT) -INTVAL (x) <= 077))
	{
	  *total = 1;
	  return true;
	}
      /* FALLTHRU */

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	*total = vax_float_literal (x) ? 5 : 8;
      else
	/* Integer CONST_DOUBLE: cheap if the value (or its negation,
	   under PLUS) fits the 6-bit short literal format.  */
	*total = ((CONST_DOUBLE_HIGH (x) == 0
		   && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (x) < 64)
		  || (outer_code == PLUS
		      && CONST_DOUBLE_HIGH (x) == -1
		      && (unsigned HOST_WIDE_INT)-CONST_DOUBLE_LOW (x) < 64))
		 ? 2 : 5;
      return true;

    case POST_INC:
      *total = 2;
      return true;		/* Implies register operand.  */

    case PRE_DEC:
      *total = 3;
      return true;		/* Implies register operand.  */

    case MULT:
      switch (mode)
	{
	case E_DFmode:
	  *total = 16;		/* 4 on VAX 9000 */
	  break;
	case E_SFmode:
	  *total = 9;		/* 4 on VAX 9000, 12 on VAX 2 */
	  break;
	case E_DImode:
	  *total = 16;		/* 6 on VAX 9000, 28 on VAX 2 */
	  break;
	case E_SImode:
	case E_HImode:
	case E_QImode:
	  *total = 10;		/* 3-4 on VAX 9000, 20-28 on VAX 2 */
	  break;
	default:
	  *total = MAX_COST;	/* Mode is not supported.  */
	  return true;
	}
      break;

    case UDIV:
      if (mode != SImode)
	{
	  *total = MAX_COST;	/* Mode is not supported.  */
	  return true;
	}
      *total = 17;
      break;

    case DIV:
      if (mode == DImode)
	*total = 30;		/* Highly variable.  */
      else if (mode == DFmode)
	/* divide takes 28 cycles if the result is not zero, 13 otherwise */
	*total = 24;
      else
	*total = 11;		/* 25 on VAX 2 */
      break;

    case MOD:
      *total = 23;
      break;

    case UMOD:
      if (mode != SImode)
	{
	  *total = MAX_COST;	/* Mode is not supported.  */
	  return true;
	}
      *total = 29;
      break;

    case FLOAT:
      /* A cycle extra for double results and for non-SImode sources.  */
      *total = (6		/* 4 on VAX 9000 */
		+ (mode == DFmode) + (GET_MODE (XEXP (x, 0)) != SImode));
      break;

    case FIX:
      *total = 7;		/* 17 on VAX 2 */
      break;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (mode == DImode)
	*total = 12;
      else
	*total = 10;		/* 6 on VAX 9000 */
      break;

    case ROTATE:
    case ROTATERT:
      *total = 6;		/* 5 on VAX 2, 4 on VAX 9000 */
      if (CONST_INT_P (XEXP (x, 1)))
	fmt = "e";		/* all constant rotate counts are short */
      break;

    case PLUS:
    case MINUS:
      *total = (mode == DFmode) ? 13 : 8;  /* 6/8 on VAX 9000, 16/15 on VAX 2 */
      /* Small integer operands can use subl2 and addl2.  */
      if ((CONST_INT_P (XEXP (x, 1)))
	  && (unsigned HOST_WIDE_INT)(INTVAL (XEXP (x, 1)) + 63) < 127)
	fmt = "e";
      break;

    case IOR:
    case XOR:
      *total = 3;
      break;

    case AND:
      /* AND is special because the first operand is complemented.  */
      *total = 3;
      if (CONST_INT_P (XEXP (x, 0)))
	{
	  if ((unsigned HOST_WIDE_INT)~INTVAL (XEXP (x, 0)) > 63)
	    *total = 4;
	  /* Skip the constant operand in the scan below: start at
	     operand 1 and stop after it.  */
	  fmt = "e";
	  i = 1;
	}
      break;

    case NEG:
      if (mode == DFmode)
	*total = 9;
      else if (mode == SFmode)
	*total = 6;
      else if (mode == DImode)
	*total = 4;
      else
	*total = 2;
      break;

    case NOT:
      *total = 2;
      break;

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      *total = 15;
      break;

    case MEM:
      if (mode == DImode || mode == DFmode)
	*total = 5;		/* 7 on VAX 2 */
      else
	*total = 3;		/* 4 on VAX 2 */
      x = XEXP (x, 0);
      if (!REG_P (x) && GET_CODE (x) != POST_INC)
	*total += vax_address_cost_1 (x);
      return true;

    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
    case TRUNCATE:
      *total = 3;		/* FIXME: Costs need to be checked */
      break;

    default:
      return false;
    }

  /* Now look inside the expression.  Operands which are not registers or
     short constants add to the cost.

     FMT and I may have been adjusted in the switch above for instructions
     which require special handling.  */

  while (*fmt++ == 'e')
    {
      rtx op = XEXP (x, i);

      i += 1;
      code = GET_CODE (op);

      /* A NOT is likely to be found as the first operand of an AND
	 (in which case the relevant cost is of the operand inside
	 the not) and not likely to be found anywhere else.  */
      if (code == NOT)
	op = XEXP (op, 0), code = GET_CODE (op);

      switch (code)
	{
	case CONST_INT:
	  if ((unsigned HOST_WIDE_INT)INTVAL (op) > 63
	      && mode != QImode)
	    *total += 1;	/* 2 on VAX 2 */
	  break;
	case CONST:
	case LABEL_REF:
	case SYMBOL_REF:
	  *total += 1;		/* 2 on VAX 2 */
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT)
	    {
	      /* Registers are faster than floating point constants -- even
		 those constants which can be encoded in a single byte.  */
	      if (vax_float_literal (op))
		*total += 1;
	      else
		*total += (GET_MODE (x) == DFmode) ? 3 : 2;
	    }
	  else
	    {
	      if (CONST_DOUBLE_HIGH (op) != 0
		  || (unsigned HOST_WIDE_INT)CONST_DOUBLE_LOW (op) > 63)
		*total += 2;
	    }
	  break;
	case MEM:
	  *total += 1;		/* 2 on VAX 2 */
	  if (!REG_P (XEXP (op, 0)))
	    *total += vax_address_cost_1 (XEXP (op, 0));
	  break;
	case REG:
	case SUBREG:
	  break;
	default:
	  *total += 1;
	  break;
	}
    }
  return true;
}
1035 \f
/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Output code to add DELTA to
   the first argument, and then jump to FUNCTION.  Used for C++
   multiple inheritance.
	.mask	^m<r2,r3,r4,r5,r6,r7,r8,r9,r10,r11>	#conservative entry mask
	addl2	$DELTA, 4(ap)	#adjust first argument
	jmp	FUNCTION+2	#jump beyond FUNCTION's entry mask
 */

static void
vax_output_mi_thunk (FILE * file,
		     tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  /* NOTE(review): THUNK is marked ATTRIBUTE_UNUSED yet used below;
     harmless, since the attribute merely suppresses a warning.  */
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));

  assemble_start_function (thunk, fnname);
  /* 0x0ffc is the conservative entry mask shown above (bits 2-11,
     i.e. save r2..r11).  */
  fprintf (file, "\t.word 0x0ffc\n\taddl2 $" HOST_WIDE_INT_PRINT_DEC, delta);
  asm_fprintf (file, ",4(%Rap)\n");
  fprintf (file, "\tjmp ");
  assemble_name (file,  XSTR (XEXP (DECL_RTL (function), 0), 0));
  /* "+2" skips over FUNCTION's own two-byte entry mask.  */
  fprintf (file, "+2\n");
  assemble_end_function (thunk, fnname);
}
1060 \f
1061 static rtx
1062 vax_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
1063 int incoming ATTRIBUTE_UNUSED)
1064 {
1065 return gen_rtx_REG (Pmode, VAX_STRUCT_VALUE_REGNUM);
1066 }
1067
/* Worker function for NOTICE_UPDATE_CC.  Update the global cc_status
   to describe how the body EXP of the current insn affects the
   condition codes.  */

void
vax_notice_update_cc (rtx exp, rtx insn ATTRIBUTE_UNUSED)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (GET_CODE (SET_DEST (exp)) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (exp)) != PC)
	{
	  cc_status.flags = 0;
	  /* The integer operations below don't set carry or
	     set it in an incompatible way.  That's ok though
	     as the Z bit is all we need when doing unsigned
	     comparisons on the result of these insns (since
	     they're always with 0).  Set CC_NO_OVERFLOW to
	     generate the correct unsigned branches.  */
	  switch (GET_CODE (SET_SRC (exp)))
	    {
	    case NEG:
	      /* Float negation does not get the CC_NO_OVERFLOW
		 treatment; fall through only for integer NEG.  */
	      if (GET_MODE_CLASS (GET_MODE (exp)) == MODE_FLOAT)
		break;
	      /* FALLTHRU */
	    case AND:
	    case IOR:
	    case XOR:
	    case NOT:
	    case MEM:
	    case REG:
	      cc_status.flags = CC_NO_OVERFLOW;
	      break;
	    default:
	      break;
	    }
	  /* Remember what the cc's now reflect.  */
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      if (GET_CODE (SET_SRC (XVECEXP (exp, 0, 0))) == CALL)
	CC_STATUS_INIT;
      else if (GET_CODE (SET_DEST (XVECEXP (exp, 0, 0))) != PC)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (XVECEXP (exp, 0, 0));
	  cc_status.value2 = SET_SRC (XVECEXP (exp, 0, 0));
	}
      else
	/* PARALLELs whose first element sets the PC are aob,
	   sob insns.  They do change the cc's.  */
	CC_STATUS_INIT;
    }
  else
    CC_STATUS_INIT;
  /* If the destination overlaps the source, the source expression is
     stale after the insn, so stop tracking it.  */
  if (cc_status.value1 && REG_P (cc_status.value1)
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Conservatively assume any two MEMs may alias.  */
  if (cc_status.value1 && MEM_P (cc_status.value1)
      && cc_status.value2
      && MEM_P (cc_status.value2))
    cc_status.value2 = 0;
  /* Actual condition, one line up, should be that value2's address
     depends on value1, but that is too much of a pain.  */
}
1137
1138 /* Output integer move instructions. */
1139
1140 const char *
1141 vax_output_int_move (rtx insn ATTRIBUTE_UNUSED, rtx *operands,
1142 machine_mode mode)
1143 {
1144 rtx hi[3], lo[3];
1145 const char *pattern_hi, *pattern_lo;
1146
1147 switch (mode)
1148 {
1149 case E_DImode:
1150 if (operands[1] == const0_rtx)
1151 return "clrq %0";
1152 if (TARGET_QMATH && optimize_size
1153 && (CONST_INT_P (operands[1])
1154 || GET_CODE (operands[1]) == CONST_DOUBLE))
1155 {
1156 unsigned HOST_WIDE_INT hval, lval;
1157 int n;
1158
1159 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1160 {
1161 gcc_assert (HOST_BITS_PER_WIDE_INT != 64);
1162
1163 /* Make sure only the low 32 bits are valid. */
1164 lval = CONST_DOUBLE_LOW (operands[1]) & 0xffffffff;
1165 hval = CONST_DOUBLE_HIGH (operands[1]) & 0xffffffff;
1166 }
1167 else
1168 {
1169 lval = INTVAL (operands[1]);
1170 hval = 0;
1171 }
1172
1173 /* Here we see if we are trying to see if the 64bit value is really
1174 a 6bit shifted some arbitrary amount. If so, we can use ashq to
1175 shift it to the correct value saving 7 bytes (1 addr-mode-byte +
1176 8 bytes - 1 shift byte - 1 short literal byte. */
1177 if (lval != 0
1178 && (n = exact_log2 (lval & (- lval))) != -1
1179 && (lval >> n) < 64)
1180 {
1181 lval >>= n;
1182
1183 /* On 32bit platforms, if the 6bits didn't overflow into the
1184 upper 32bit value that value better be 0. If we have
1185 overflowed, make sure it wasn't too much. */
1186 if (HOST_BITS_PER_WIDE_INT == 32 && hval != 0)
1187 {
1188 if (n <= 26 || hval >= ((unsigned)1 << (n - 26)))
1189 n = 0; /* failure */
1190 else
1191 lval |= hval << (32 - n);
1192 }
1193 /* If n is 0, then ashq is not the best way to emit this. */
1194 if (n > 0)
1195 {
1196 operands[1] = GEN_INT (lval);
1197 operands[2] = GEN_INT (n);
1198 return "ashq %2,%D1,%0";
1199 }
1200 #if HOST_BITS_PER_WIDE_INT == 32
1201 }
1202 /* On 32bit platforms, if the low 32bit value is 0, checkout the
1203 upper 32bit value. */
1204 else if (hval != 0
1205 && (n = exact_log2 (hval & (- hval)) - 1) != -1
1206 && (hval >> n) < 64)
1207 {
1208 operands[1] = GEN_INT (hval >> n);
1209 operands[2] = GEN_INT (n + 32);
1210 return "ashq %2,%D1,%0";
1211 #endif
1212 }
1213 }
1214
1215 if (TARGET_QMATH
1216 && (!MEM_P (operands[0])
1217 || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
1218 || GET_CODE (XEXP (operands[0], 0)) == POST_INC
1219 || !illegal_addsub_di_memory_operand (operands[0], DImode))
1220 && ((CONST_INT_P (operands[1])
1221 && (unsigned HOST_WIDE_INT) INTVAL (operands[1]) >= 64)
1222 || GET_CODE (operands[1]) == CONST_DOUBLE))
1223 {
1224 hi[0] = operands[0];
1225 hi[1] = operands[1];
1226
1227 split_quadword_operands (insn, SET, hi, lo, 2);
1228
1229 pattern_lo = vax_output_int_move (NULL, lo, SImode);
1230 pattern_hi = vax_output_int_move (NULL, hi, SImode);
1231
1232 /* The patterns are just movl/movl or pushl/pushl then a movq will
1233 be shorter (1 opcode byte + 1 addrmode byte + 8 immediate value
1234 bytes .vs. 2 opcode bytes + 2 addrmode bytes + 8 immediate value
1235 value bytes. */
1236 if ((!strncmp (pattern_lo, "movl", 4)
1237 && !strncmp (pattern_hi, "movl", 4))
1238 || (!strncmp (pattern_lo, "pushl", 5)
1239 && !strncmp (pattern_hi, "pushl", 5)))
1240 return "movq %1,%0";
1241
1242 if (MEM_P (operands[0])
1243 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
1244 {
1245 output_asm_insn (pattern_hi, hi);
1246 operands[0] = lo[0];
1247 operands[1] = lo[1];
1248 operands[2] = lo[2];
1249 return pattern_lo;
1250 }
1251 else
1252 {
1253 output_asm_insn (pattern_lo, lo);
1254 operands[0] = hi[0];
1255 operands[1] = hi[1];
1256 operands[2] = hi[2];
1257 return pattern_hi;
1258 }
1259 }
1260 return "movq %1,%0";
1261
1262 case E_SImode:
1263 if (symbolic_operand (operands[1], SImode))
1264 {
1265 if (push_operand (operands[0], SImode))
1266 return "pushab %a1";
1267 return "movab %a1,%0";
1268 }
1269
1270 if (operands[1] == const0_rtx)
1271 {
1272 if (push_operand (operands[1], SImode))
1273 return "pushl %1";
1274 return "clrl %0";
1275 }
1276
1277 if (CONST_INT_P (operands[1])
1278 && (unsigned HOST_WIDE_INT) INTVAL (operands[1]) >= 64)
1279 {
1280 HOST_WIDE_INT i = INTVAL (operands[1]);
1281 int n;
1282 if ((unsigned HOST_WIDE_INT)(~i) < 64)
1283 return "mcoml %N1,%0";
1284 if ((unsigned HOST_WIDE_INT)i < 0x100)
1285 return "movzbl %1,%0";
1286 if (i >= -0x80 && i < 0)
1287 return "cvtbl %1,%0";
1288 if (optimize_size
1289 && (n = exact_log2 (i & (-i))) != -1
1290 && ((unsigned HOST_WIDE_INT)i >> n) < 64)
1291 {
1292 operands[1] = GEN_INT ((unsigned HOST_WIDE_INT)i >> n);
1293 operands[2] = GEN_INT (n);
1294 return "ashl %2,%1,%0";
1295 }
1296 if ((unsigned HOST_WIDE_INT)i < 0x10000)
1297 return "movzwl %1,%0";
1298 if (i >= -0x8000 && i < 0)
1299 return "cvtwl %1,%0";
1300 }
1301 if (push_operand (operands[0], SImode))
1302 return "pushl %1";
1303 return "movl %1,%0";
1304
1305 case E_HImode:
1306 if (CONST_INT_P (operands[1]))
1307 {
1308 HOST_WIDE_INT i = INTVAL (operands[1]);
1309 if (i == 0)
1310 return "clrw %0";
1311 else if ((unsigned HOST_WIDE_INT)i < 64)
1312 return "movw %1,%0";
1313 else if ((unsigned HOST_WIDE_INT)~i < 64)
1314 return "mcomw %H1,%0";
1315 else if ((unsigned HOST_WIDE_INT)i < 256)
1316 return "movzbw %1,%0";
1317 else if (i >= -0x80 && i < 0)
1318 return "cvtbw %1,%0";
1319 }
1320 return "movw %1,%0";
1321
1322 case E_QImode:
1323 if (CONST_INT_P (operands[1]))
1324 {
1325 HOST_WIDE_INT i = INTVAL (operands[1]);
1326 if (i == 0)
1327 return "clrb %0";
1328 else if ((unsigned HOST_WIDE_INT)~i < 64)
1329 return "mcomb %B1,%0";
1330 }
1331 return "movb %1,%0";
1332
1333 default:
1334 gcc_unreachable ();
1335 }
1336 }
1337
1338 /* Output integer add instructions.
1339
1340 The space-time-opcode tradeoffs for addition vary by model of VAX.
1341
1342 On a VAX 3 "movab (r1)[r2],r3" is faster than "addl3 r1,r2,r3",
1343 but it not faster on other models.
1344
1345 "movab #(r1),r2" is usually shorter than "addl3 #,r1,r2", and is
1346 faster on a VAX 3, but some VAXen (e.g. VAX 9000) will stall if
1347 a register is used in an address too soon after it is set.
1348 Compromise by using movab only when it is shorter than the add
1349 or the base register in the address is one of sp, ap, and fp,
1350 which are not modified very often. */
1351
1352 const char *
1353 vax_output_int_add (rtx_insn *insn, rtx *operands, machine_mode mode)
1354 {
1355 switch (mode)
1356 {
1357 case E_DImode:
1358 {
1359 rtx low[3];
1360 const char *pattern;
1361 int carry = 1;
1362 bool sub;
1363
1364 if (TARGET_QMATH && 0)
1365 debug_rtx (insn);
1366
1367 split_quadword_operands (insn, PLUS, operands, low, 3);
1368
1369 if (TARGET_QMATH)
1370 {
1371 gcc_assert (rtx_equal_p (operands[0], operands[1]));
1372 #ifdef NO_EXTERNAL_INDIRECT_ADDRESSS
1373 gcc_assert (!flag_pic || !external_memory_operand (low[2], SImode));
1374 gcc_assert (!flag_pic || !external_memory_operand (low[0], SImode));
1375 #endif
1376
1377 /* No reason to add a 0 to the low part and thus no carry, so just
1378 emit the appropriate add/sub instruction. */
1379 if (low[2] == const0_rtx)
1380 return vax_output_int_add (NULL, operands, SImode);
1381
1382 /* Are we doing addition or subtraction? */
1383 sub = CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 0;
1384
1385 /* We can't use vax_output_int_add since some the patterns don't
1386 modify the carry bit. */
1387 if (sub)
1388 {
1389 if (low[2] == constm1_rtx)
1390 pattern = "decl %0";
1391 else
1392 pattern = "subl2 $%n2,%0";
1393 }
1394 else
1395 {
1396 if (low[2] == const1_rtx)
1397 pattern = "incl %0";
1398 else
1399 pattern = "addl2 %2,%0";
1400 }
1401 output_asm_insn (pattern, low);
1402
1403 /* In 2's complement, -n = ~n + 1. Since we are dealing with
1404 two 32bit parts, we complement each and then add one to
1405 low part. We know that the low part can't overflow since
1406 it's value can never be 0. */
1407 if (sub)
1408 return "sbwc %N2,%0";
1409 return "adwc %2,%0";
1410 }
1411
1412 /* Add low parts. */
1413 if (rtx_equal_p (operands[0], operands[1]))
1414 {
1415 if (low[2] == const0_rtx)
1416 /* Should examine operand, punt if not POST_INC. */
1417 pattern = "tstl %0", carry = 0;
1418 else if (low[2] == const1_rtx)
1419 pattern = "incl %0";
1420 else
1421 pattern = "addl2 %2,%0";
1422 }
1423 else
1424 {
1425 if (low[2] == const0_rtx)
1426 pattern = "movl %1,%0", carry = 0;
1427 else
1428 pattern = "addl3 %2,%1,%0";
1429 }
1430 if (pattern)
1431 output_asm_insn (pattern, low);
1432 if (!carry)
1433 /* If CARRY is 0, we don't have any carry value to worry about. */
1434 return get_insn_template (CODE_FOR_addsi3, insn);
1435 /* %0 = C + %1 + %2 */
1436 if (!rtx_equal_p (operands[0], operands[1]))
1437 output_asm_insn ((operands[1] == const0_rtx
1438 ? "clrl %0"
1439 : "movl %1,%0"), operands);
1440 return "adwc %2,%0";
1441 }
1442
1443 case E_SImode:
1444 if (rtx_equal_p (operands[0], operands[1]))
1445 {
1446 if (operands[2] == const1_rtx)
1447 return "incl %0";
1448 if (operands[2] == constm1_rtx)
1449 return "decl %0";
1450 if (CONST_INT_P (operands[2])
1451 && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
1452 return "subl2 $%n2,%0";
1453 if (CONST_INT_P (operands[2])
1454 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 64
1455 && REG_P (operands[1])
1456 && ((INTVAL (operands[2]) < 32767 && INTVAL (operands[2]) > -32768)
1457 || REGNO (operands[1]) > 11))
1458 return "movab %c2(%1),%0";
1459 if (REG_P (operands[0]) && symbolic_operand (operands[2], SImode))
1460 return "movab %a2[%0],%0";
1461 return "addl2 %2,%0";
1462 }
1463
1464 if (rtx_equal_p (operands[0], operands[2]))
1465 {
1466 if (REG_P (operands[0]) && symbolic_operand (operands[1], SImode))
1467 return "movab %a1[%0],%0";
1468 return "addl2 %1,%0";
1469 }
1470
1471 if (CONST_INT_P (operands[2])
1472 && INTVAL (operands[2]) < 32767
1473 && INTVAL (operands[2]) > -32768
1474 && REG_P (operands[1])
1475 && push_operand (operands[0], SImode))
1476 return "pushab %c2(%1)";
1477
1478 if (CONST_INT_P (operands[2])
1479 && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
1480 return "subl3 $%n2,%1,%0";
1481
1482 if (CONST_INT_P (operands[2])
1483 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 64
1484 && REG_P (operands[1])
1485 && ((INTVAL (operands[2]) < 32767 && INTVAL (operands[2]) > -32768)
1486 || REGNO (operands[1]) > 11))
1487 return "movab %c2(%1),%0";
1488
1489 /* Add this if using gcc on a VAX 3xxx:
1490 if (REG_P (operands[1]) && REG_P (operands[2]))
1491 return "movab (%1)[%2],%0";
1492 */
1493
1494 if (REG_P (operands[1]) && symbolic_operand (operands[2], SImode))
1495 {
1496 if (push_operand (operands[0], SImode))
1497 return "pushab %a2[%1]";
1498 return "movab %a2[%1],%0";
1499 }
1500
1501 if (REG_P (operands[2]) && symbolic_operand (operands[1], SImode))
1502 {
1503 if (push_operand (operands[0], SImode))
1504 return "pushab %a1[%2]";
1505 return "movab %a1[%2],%0";
1506 }
1507
1508 if (flag_pic && REG_P (operands[0])
1509 && symbolic_operand (operands[2], SImode))
1510 return "movab %a2,%0;addl2 %1,%0";
1511
1512 if (flag_pic
1513 && (symbolic_operand (operands[1], SImode)
1514 || symbolic_operand (operands[1], SImode)))
1515 debug_rtx (insn);
1516
1517 return "addl3 %1,%2,%0";
1518
1519 case E_HImode:
1520 if (rtx_equal_p (operands[0], operands[1]))
1521 {
1522 if (operands[2] == const1_rtx)
1523 return "incw %0";
1524 if (operands[2] == constm1_rtx)
1525 return "decw %0";
1526 if (CONST_INT_P (operands[2])
1527 && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
1528 return "subw2 $%n2,%0";
1529 return "addw2 %2,%0";
1530 }
1531 if (rtx_equal_p (operands[0], operands[2]))
1532 return "addw2 %1,%0";
1533 if (CONST_INT_P (operands[2])
1534 && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
1535 return "subw3 $%n2,%1,%0";
1536 return "addw3 %1,%2,%0";
1537
1538 case E_QImode:
1539 if (rtx_equal_p (operands[0], operands[1]))
1540 {
1541 if (operands[2] == const1_rtx)
1542 return "incb %0";
1543 if (operands[2] == constm1_rtx)
1544 return "decb %0";
1545 if (CONST_INT_P (operands[2])
1546 && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
1547 return "subb2 $%n2,%0";
1548 return "addb2 %2,%0";
1549 }
1550 if (rtx_equal_p (operands[0], operands[2]))
1551 return "addb2 %1,%0";
1552 if (CONST_INT_P (operands[2])
1553 && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
1554 return "subb3 $%n2,%1,%0";
1555 return "addb3 %1,%2,%0";
1556
1557 default:
1558 gcc_unreachable ();
1559 }
1560 }
1561
/* Output integer subtract instructions: %0 = %1 - %2.  Only DImode is
   handled here; narrower modes come straight from the machine
   description.  Returns the assembler template for the final (high
   part) instruction; the low-part instruction is emitted directly.  */

const char *
vax_output_int_subtract (rtx_insn *insn, rtx *operands, machine_mode mode)
{
  switch (mode)
    {
    case E_DImode:
      {
	rtx low[3];
	const char *pattern;
	int carry = 1;

	/* Debugging aid; compiled out by the "&& 0".  */
	if (TARGET_QMATH && 0)
	  debug_rtx (insn);

	/* After this, OPERANDS[] holds the high words and LOW[] the
	   low words of the three quadword operands.  */
	split_quadword_operands (insn, MINUS, operands, low, 3);

	if (TARGET_QMATH)
	  {
	    if (operands[1] == const0_rtx && low[1] == const0_rtx)
	      {
		/* Negation is tricky.  It's basically complement and increment.
		   Negate hi, then lo, and subtract the carry back.  */
		if ((MEM_P (low[0]) && GET_CODE (XEXP (low[0], 0)) == POST_INC)
		    || (MEM_P (operands[0])
			&& GET_CODE (XEXP (operands[0], 0)) == POST_INC))
		  fatal_insn ("illegal operand detected", insn);
		output_asm_insn ("mnegl %2,%0", operands);
		output_asm_insn ("mnegl %2,%0", low);
		return "sbwc $0,%0";
	      }
	    /* The with-carry patterns are two-operand: destination must
	       equal the first source in both halves.  */
	    gcc_assert (rtx_equal_p (operands[0], operands[1]));
	    gcc_assert (rtx_equal_p (low[0], low[1]));
	    if (low[2] == const1_rtx)
	      output_asm_insn ("decl %0", low);
	    else
	      output_asm_insn ("subl2 %2,%0", low);
	    /* High parts: subtract with the borrow from the low part.  */
	    return "sbwc %2,%0";
	  }

	/* Subtract low parts.  */
	if (rtx_equal_p (operands[0], operands[1]))
	  {
	    if (low[2] == const0_rtx)
	      /* Subtracting 0 produces no borrow; skip the low part.  */
	      pattern = 0, carry = 0;
	    else if (low[2] == constm1_rtx)
	      pattern = "decl %0";
	    else
	      pattern = "subl2 %2,%0";
	  }
	else
	  {
	    if (low[2] == constm1_rtx)
	      pattern = "decl %0";
	    else if (low[2] == const0_rtx)
	      /* Just copy the low word; no borrow is generated.  */
	      pattern = get_insn_template (CODE_FOR_movsi, insn), carry = 0;
	    else
	      pattern = "subl3 %2,%1,%0";
	  }
	if (pattern)
	  output_asm_insn (pattern, low);
	if (carry)
	  {
	    if (!rtx_equal_p (operands[0], operands[1]))
	      return "movl %1,%0;sbwc %2,%0";
	    return "sbwc %2,%0";
	    /* %0 = %2 - %1 - C */
	  }
	/* No borrow pending: a plain SImode subtract handles the high
	   parts.  */
	return get_insn_template (CODE_FOR_subsi3, insn);
      }

    default:
      gcc_unreachable ();
    }
}
1636
1637 /* True if X is an rtx for a constant that is a valid address. */
1638
1639 bool
1640 legitimate_constant_address_p (rtx x)
1641 {
1642 if (GET_CODE (x) == LABEL_REF || GET_CODE (x) == SYMBOL_REF
1643 || CONST_INT_P (x) || GET_CODE (x) == HIGH)
1644 return true;
1645 if (GET_CODE (x) != CONST)
1646 return false;
1647 #ifdef NO_EXTERNAL_INDIRECT_ADDRESS
1648 if (flag_pic
1649 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
1650 && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (x, 0), 0)))
1651 return false;
1652 #endif
1653 return true;
1654 }
1655
1656 /* The other macros defined here are used only in legitimate_address_p (). */
1657
1658 /* Nonzero if X is a hard reg that can be used as an index
1659 or, if not strict, if it is a pseudo reg. */
1660 #define INDEX_REGISTER_P(X, STRICT) \
1661 (REG_P (X) && (!(STRICT) || REGNO_OK_FOR_INDEX_P (REGNO (X))))
1662
1663 /* Nonzero if X is a hard reg that can be used as a base reg
1664 or, if not strict, if it is a pseudo reg. */
1665 #define BASE_REGISTER_P(X, STRICT) \
1666 (REG_P (X) && (!(STRICT) || REGNO_OK_FOR_BASE_P (REGNO (X))))
1667
1668 #ifdef NO_EXTERNAL_INDIRECT_ADDRESS
1669
1670 /* Re-definition of CONSTANT_ADDRESS_P, which is true only when there
1671 are no SYMBOL_REFs for external symbols present. */
1672
1673 static bool
1674 indirectable_constant_address_p (rtx x, bool indirect)
1675 {
1676 if (GET_CODE (x) == SYMBOL_REF)
1677 return !flag_pic || SYMBOL_REF_LOCAL_P (x) || !indirect;
1678
1679 if (GET_CODE (x) == CONST)
1680 return !flag_pic
1681 || GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
1682 || SYMBOL_REF_LOCAL_P (XEXP (XEXP (x, 0), 0));
1683
1684 return CONSTANT_ADDRESS_P (x);
1685 }
1686
1687 #else /* not NO_EXTERNAL_INDIRECT_ADDRESS */
1688
/* Without NO_EXTERNAL_INDIRECT_ADDRESS, any constant address may be
   indirected, so INDIRECT is irrelevant here.  */
static bool
indirectable_constant_address_p (rtx x, bool indirect ATTRIBUTE_UNUSED)
{
  return CONSTANT_ADDRESS_P (x);
}
1694
1695 #endif /* not NO_EXTERNAL_INDIRECT_ADDRESS */
1696
1697 /* True if X is an address which can be indirected. External symbols
1698 could be in a sharable image library, so we disallow those. */
1699
1700 static bool
1701 indirectable_address_p (rtx x, bool strict, bool indirect)
1702 {
1703 if (indirectable_constant_address_p (x, indirect)
1704 || BASE_REGISTER_P (x, strict))
1705 return true;
1706 if (GET_CODE (x) != PLUS
1707 || !BASE_REGISTER_P (XEXP (x, 0), strict)
1708 || (flag_pic && !CONST_INT_P (XEXP (x, 1))))
1709 return false;
1710 return indirectable_constant_address_p (XEXP (x, 1), indirect);
1711 }
1712
/* Return true if x is a valid address not using indexing.
   (This much is the easy part.)  */
static bool
nonindexed_address_p (rtx x, bool strict)
{
  rtx xfoo0;
  if (REG_P (x))
    {
      /* During reload a pseudo may stand for a stack/memory location;
	 accept it only when that equivalent memory's address is itself
	 indirectable.  */
      if (! reload_in_progress
	  || reg_equiv_mem (REGNO (x)) == 0
	  || indirectable_address_p (reg_equiv_mem (REGNO (x)), strict, false))
	return true;
    }
  if (indirectable_constant_address_p (x, false))
    return true;
  if (indirectable_address_p (x, strict, false))
    return true;
  /* NOTE(review): XEXP is taken before X's code is checked; this
     relies on every rtx reaching this point having at least one
     operand — confirm for all codes that can appear here.  */
  xfoo0 = XEXP (x, 0);
  /* (mem (addr)): memory indirection, if ADDR itself is indirectable.  */
  if (MEM_P (x) && indirectable_address_p (xfoo0, strict, true))
    return true;
  /* Autodecrement -(reg) and autoincrement (reg)+ of a base register.  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && BASE_REGISTER_P (xfoo0, strict))
    return true;
  return false;
}
1738
1739 /* True if PROD is either a reg times size of mode MODE and MODE is less
1740 than or equal 8 bytes, or just a reg if MODE is one byte. */
1741
1742 static bool
1743 index_term_p (rtx prod, machine_mode mode, bool strict)
1744 {
1745 rtx xfoo0, xfoo1;
1746
1747 if (GET_MODE_SIZE (mode) == 1)
1748 return BASE_REGISTER_P (prod, strict);
1749
1750 if (GET_CODE (prod) != MULT || GET_MODE_SIZE (mode) > 8)
1751 return false;
1752
1753 xfoo0 = XEXP (prod, 0);
1754 xfoo1 = XEXP (prod, 1);
1755
1756 if (CONST_INT_P (xfoo0)
1757 && INTVAL (xfoo0) == (int)GET_MODE_SIZE (mode)
1758 && INDEX_REGISTER_P (xfoo1, strict))
1759 return true;
1760
1761 if (CONST_INT_P (xfoo1)
1762 && INTVAL (xfoo1) == (int)GET_MODE_SIZE (mode)
1763 && INDEX_REGISTER_P (xfoo0, strict))
1764 return true;
1765
1766 return false;
1767 }
1768
1769 /* Return true if X is the sum of a register
1770 and a valid index term for mode MODE. */
1771 static bool
1772 reg_plus_index_p (rtx x, machine_mode mode, bool strict)
1773 {
1774 rtx xfoo0, xfoo1;
1775
1776 if (GET_CODE (x) != PLUS)
1777 return false;
1778
1779 xfoo0 = XEXP (x, 0);
1780 xfoo1 = XEXP (x, 1);
1781
1782 if (BASE_REGISTER_P (xfoo0, strict) && index_term_p (xfoo1, mode, strict))
1783 return true;
1784
1785 if (BASE_REGISTER_P (xfoo1, strict) && index_term_p (xfoo0, mode, strict))
1786 return true;
1787
1788 return false;
1789 }
1790
1791 /* Return true if xfoo0 and xfoo1 constitute a valid indexed address. */
1792 static bool
1793 indexable_address_p (rtx xfoo0, rtx xfoo1, machine_mode mode, bool strict)
1794 {
1795 if (!CONSTANT_ADDRESS_P (xfoo0))
1796 return false;
1797 if (BASE_REGISTER_P (xfoo1, strict))
1798 return !flag_pic || mode == QImode;
1799 if (flag_pic && symbolic_operand (xfoo0, SImode))
1800 return false;
1801 return reg_plus_index_p (xfoo1, mode, strict);
1802 }
1803
1804 /* legitimate_address_p returns true if it recognizes an RTL expression "x"
1805 that is a valid memory address for an instruction.
1806 The MODE argument is the machine mode for the MEM expression
1807 that wants to use this address. */
1808 bool
1809 vax_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1810 {
1811 rtx xfoo0, xfoo1;
1812
1813 if (nonindexed_address_p (x, strict))
1814 return true;
1815
1816 if (GET_CODE (x) != PLUS)
1817 return false;
1818
1819 /* Handle <address>[index] represented with index-sum outermost */
1820
1821 xfoo0 = XEXP (x, 0);
1822 xfoo1 = XEXP (x, 1);
1823
1824 if (index_term_p (xfoo0, mode, strict)
1825 && nonindexed_address_p (xfoo1, strict))
1826 return true;
1827
1828 if (index_term_p (xfoo1, mode, strict)
1829 && nonindexed_address_p (xfoo0, strict))
1830 return true;
1831
1832 /* Handle offset(reg)[index] with offset added outermost */
1833
1834 if (indexable_address_p (xfoo0, xfoo1, mode, strict)
1835 || indexable_address_p (xfoo1, xfoo0, mode, strict))
1836 return true;
1837
1838 return false;
1839 }
1840
1841 /* Return true if x (a legitimate address expression) has an effect that
1842 depends on the machine mode it is used for. On the VAX, the predecrement
1843 and postincrement address depend thus (the amount of decrement or
1844 increment being the length of the operand) and all indexed address depend
1845 thus (because the index scale factor is the length of the operand). */
1846
1847 static bool
1848 vax_mode_dependent_address_p (const_rtx x, addr_space_t as ATTRIBUTE_UNUSED)
1849 {
1850 rtx xfoo0, xfoo1;
1851
1852 /* Auto-increment cases are now dealt with generically in recog.c. */
1853 if (GET_CODE (x) != PLUS)
1854 return false;
1855
1856 xfoo0 = XEXP (x, 0);
1857 xfoo1 = XEXP (x, 1);
1858
1859 if (CONST_INT_P (xfoo0) && REG_P (xfoo1))
1860 return false;
1861 if (CONST_INT_P (xfoo1) && REG_P (xfoo0))
1862 return false;
1863 if (!flag_pic && CONSTANT_ADDRESS_P (xfoo0) && REG_P (xfoo1))
1864 return false;
1865 if (!flag_pic && CONSTANT_ADDRESS_P (xfoo1) && REG_P (xfoo0))
1866 return false;
1867
1868 return true;
1869 }
1870
/* If X is a DImode memory operand whose address the add/sub-with-carry
   patterns cannot use directly (see illegal_addsub_di_memory_operand),
   return a replacement MEM whose address has first been loaded into a
   fresh pseudo.  Otherwise return X unchanged.  Emits the address move
   into the current insn stream.  */
static rtx
fixup_mathdi_operand (rtx x, machine_mode mode)
{
  if (illegal_addsub_di_memory_operand (x, mode))
    {
      rtx addr = XEXP (x, 0);
      rtx temp = gen_reg_rtx (Pmode);
      rtx offset = 0;
#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
      /* Under PIC, split (const (plus sym off)) so only the symbol part
	 is moved into the register; the offset is re-added below.  */
      if (GET_CODE (addr) == CONST && flag_pic)
	{
	  offset = XEXP (XEXP (addr, 0), 1);
	  addr = XEXP (XEXP (addr, 0), 0);
	}
#endif
      emit_move_insn (temp, addr);
      if (offset)
	temp = gen_rtx_PLUS (Pmode, temp, offset);
      /* NOTE(review): the replacement MEM is always DImode even though
	 MODE is a parameter — confirm callers only pass DImode.  */
      x = gen_rtx_MEM (DImode, temp);
    }
  return x;
}
1893
1894 void
1895 vax_expand_addsub_di_operands (rtx * operands, enum rtx_code code)
1896 {
1897 int hi_only = operand_subword (operands[2], 0, 0, DImode) == const0_rtx;
1898 rtx temp;
1899
1900 rtx (*gen_old_insn)(rtx, rtx, rtx);
1901 rtx (*gen_si_insn)(rtx, rtx, rtx);
1902 rtx (*gen_insn)(rtx, rtx, rtx);
1903
1904 if (code == PLUS)
1905 {
1906 gen_old_insn = gen_adddi3_old;
1907 gen_si_insn = gen_addsi3;
1908 gen_insn = gen_adcdi3;
1909 }
1910 else if (code == MINUS)
1911 {
1912 gen_old_insn = gen_subdi3_old;
1913 gen_si_insn = gen_subsi3;
1914 gen_insn = gen_sbcdi3;
1915 }
1916 else
1917 gcc_unreachable ();
1918
1919 /* If this is addition (thus operands are commutative) and if there is one
1920 addend that duplicates the desination, we want that addend to be the
1921 first addend. */
1922 if (code == PLUS
1923 && rtx_equal_p (operands[0], operands[2])
1924 && !rtx_equal_p (operands[1], operands[2]))
1925 {
1926 temp = operands[2];
1927 operands[2] = operands[1];
1928 operands[1] = temp;
1929 }
1930
1931 if (!TARGET_QMATH)
1932 {
1933 emit_insn ((*gen_old_insn) (operands[0], operands[1], operands[2]));
1934 }
1935 else if (hi_only)
1936 {
1937 if (!rtx_equal_p (operands[0], operands[1])
1938 && (REG_P (operands[0]) && MEM_P (operands[1])))
1939 {
1940 emit_move_insn (operands[0], operands[1]);
1941 operands[1] = operands[0];
1942 }
1943
1944 operands[0] = fixup_mathdi_operand (operands[0], DImode);
1945 operands[1] = fixup_mathdi_operand (operands[1], DImode);
1946 operands[2] = fixup_mathdi_operand (operands[2], DImode);
1947
1948 if (!rtx_equal_p (operands[0], operands[1]))
1949 emit_move_insn (operand_subword (operands[0], 0, 0, DImode),
1950 operand_subword (operands[1], 0, 0, DImode));
1951
1952 emit_insn ((*gen_si_insn) (operand_subword (operands[0], 1, 0, DImode),
1953 operand_subword (operands[1], 1, 0, DImode),
1954 operand_subword (operands[2], 1, 0, DImode)));
1955 }
1956 else
1957 {
1958 /* If are adding the same value together, that's really a multiply by 2,
1959 and that's just a left shift of 1. */
1960 if (rtx_equal_p (operands[1], operands[2]))
1961 {
1962 gcc_assert (code != MINUS);
1963 emit_insn (gen_ashldi3 (operands[0], operands[1], const1_rtx));
1964 return;
1965 }
1966
1967 operands[0] = fixup_mathdi_operand (operands[0], DImode);
1968
1969 /* If an operand is the same as operand[0], use the operand[0] rtx
1970 because fixup will an equivalent rtx but not an equal one. */
1971
1972 if (rtx_equal_p (operands[0], operands[1]))
1973 operands[1] = operands[0];
1974 else
1975 operands[1] = fixup_mathdi_operand (operands[1], DImode);
1976
1977 if (rtx_equal_p (operands[0], operands[2]))
1978 operands[2] = operands[0];
1979 else
1980 operands[2] = fixup_mathdi_operand (operands[2], DImode);
1981
1982 /* If we are subtracting not from ourselves [d = a - b], and because the
1983 carry ops are two operand only, we would need to do a move prior to
1984 the subtract. And if d == b, we would need a temp otherwise
1985 [d = a, d -= d] and we end up with 0. Instead we rewrite d = a - b
1986 into d = -b, d += a. Since -b can never overflow, even if b == d,
1987 no temp is needed.
1988
1989 If we are doing addition, since the carry ops are two operand, if
1990 we aren't adding to ourselves, move the first addend to the
1991 destination first. */
1992
1993 gcc_assert (operands[1] != const0_rtx || code == MINUS);
1994 if (!rtx_equal_p (operands[0], operands[1]) && operands[1] != const0_rtx)
1995 {
1996 if (code == MINUS && CONSTANT_P (operands[1]))
1997 {
1998 temp = gen_reg_rtx (DImode);
1999 emit_insn (gen_sbcdi3 (operands[0], const0_rtx, operands[2]));
2000 code = PLUS;
2001 gen_insn = gen_adcdi3;
2002 operands[2] = operands[1];
2003 operands[1] = operands[0];
2004 }
2005 else
2006 emit_move_insn (operands[0], operands[1]);
2007 }
2008
2009 /* Subtracting a constant will have been rewritten to an addition of the
2010 negative of that constant before we get here. */
2011 gcc_assert (!CONSTANT_P (operands[2]) || code == PLUS);
2012 emit_insn ((*gen_insn) (operands[0], operands[1], operands[2]));
2013 }
2014 }
2015
2016 bool
2017 adjacent_operands_p (rtx lo, rtx hi, machine_mode mode)
2018 {
2019 HOST_WIDE_INT lo_offset;
2020 HOST_WIDE_INT hi_offset;
2021
2022 if (GET_CODE (lo) != GET_CODE (hi))
2023 return false;
2024
2025 if (REG_P (lo))
2026 return mode == SImode && REGNO (lo) + 1 == REGNO (hi);
2027 if (CONST_INT_P (lo))
2028 return INTVAL (hi) == 0 && UINTVAL (lo) < 64;
2029 if (CONST_INT_P (lo))
2030 return mode != SImode;
2031
2032 if (!MEM_P (lo))
2033 return false;
2034
2035 if (MEM_VOLATILE_P (lo) || MEM_VOLATILE_P (hi))
2036 return false;
2037
2038 lo = XEXP (lo, 0);
2039 hi = XEXP (hi, 0);
2040
2041 if (GET_CODE (lo) == POST_INC /* || GET_CODE (lo) == PRE_DEC */)
2042 return rtx_equal_p (lo, hi);
2043
2044 switch (GET_CODE (lo))
2045 {
2046 case REG:
2047 case SYMBOL_REF:
2048 lo_offset = 0;
2049 break;
2050 case CONST:
2051 lo = XEXP (lo, 0);
2052 /* FALLTHROUGH */
2053 case PLUS:
2054 if (!CONST_INT_P (XEXP (lo, 1)))
2055 return false;
2056 lo_offset = INTVAL (XEXP (lo, 1));
2057 lo = XEXP (lo, 0);
2058 break;
2059 default:
2060 return false;
2061 }
2062
2063 switch (GET_CODE (hi))
2064 {
2065 case REG:
2066 case SYMBOL_REF:
2067 hi_offset = 0;
2068 break;
2069 case CONST:
2070 hi = XEXP (hi, 0);
2071 /* FALLTHROUGH */
2072 case PLUS:
2073 if (!CONST_INT_P (XEXP (hi, 1)))
2074 return false;
2075 hi_offset = INTVAL (XEXP (hi, 1));
2076 hi = XEXP (hi, 0);
2077 break;
2078 default:
2079 return false;
2080 }
2081
2082 if (GET_CODE (lo) == MULT || GET_CODE (lo) == PLUS)
2083 return false;
2084
2085 return rtx_equal_p (lo, hi)
2086 && hi_offset - lo_offset == GET_MODE_SIZE (mode);
2087 }
2088
2089 /* Output assembler code for a block containing the constant parts
2090 of a trampoline, leaving space for the variable parts. */
2091
2092 /* On the VAX, the trampoline contains an entry mask and two instructions:
2093 .word NN
2094 movl $STATIC,r0 (store the functions static chain)
2095 jmp *$FUNCTION (jump to function code at address FUNCTION) */
2096
2097 static void
2098 vax_asm_trampoline_template (FILE *f ATTRIBUTE_UNUSED)
2099 {
2100 assemble_aligned_integer (2, const0_rtx);
2101 assemble_aligned_integer (2, GEN_INT (0x8fd0));
2102 assemble_aligned_integer (4, const0_rtx);
2103 assemble_aligned_integer (1, GEN_INT (0x50 + STATIC_CHAIN_REGNUM));
2104 assemble_aligned_integer (2, GEN_INT (0x9f17));
2105 assemble_aligned_integer (4, const0_rtx);
2106 }
2107
2108 /* We copy the register-mask from the function's pure code
2109 to the start of the trampoline. */
2110
2111 static void
2112 vax_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
2113 {
2114 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
2115 rtx mem;
2116
2117 emit_block_move (m_tramp, assemble_trampoline_template (),
2118 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
2119
2120 mem = adjust_address (m_tramp, HImode, 0);
2121 emit_move_insn (mem, gen_const_mem (HImode, fnaddr));
2122
2123 mem = adjust_address (m_tramp, SImode, 4);
2124 emit_move_insn (mem, cxt);
2125 mem = adjust_address (m_tramp, SImode, 11);
2126 emit_move_insn (mem, plus_constant (Pmode, fnaddr, 2));
2127 emit_insn (gen_sync_istream ());
2128 }
2129
2130 /* Value is the number of bytes of arguments automatically
2131 popped when returning from a subroutine call.
2132 FUNDECL is the declaration node of the function (as a tree),
2133 FUNTYPE is the data type of the function (as a tree),
2134 or for a library call it is an identifier node for the subroutine name.
2135 SIZE is the number of bytes of arguments passed on the stack.
2136
2137 On the VAX, the RET insn pops a maximum of 255 args for any function. */
2138
2139 static poly_int64
2140 vax_return_pops_args (tree fundecl ATTRIBUTE_UNUSED,
2141 tree funtype ATTRIBUTE_UNUSED, poly_int64 size)
2142 {
2143 return size > 255 * 4 ? 0 : (HOST_WIDE_INT) size;
2144 }
2145
/* Implement TARGET_FUNCTION_ARG.  On the VAX all args are pushed.  */

static rtx
vax_function_arg (cumulative_args_t, const function_arg_info &)
{
  /* NULL_RTX tells the middle end to pass every argument on the
     stack rather than in a register.  */
  return NULL_RTX;
}
2153
2154 /* Update the data in CUM to advance over argument ARG. */
2155
2156 static void
2157 vax_function_arg_advance (cumulative_args_t cum_v,
2158 const function_arg_info &arg)
2159 {
2160 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2161
2162 *cum += (arg.promoted_size_in_bytes () + 3) & ~3;
2163 }
2164
2165 static HOST_WIDE_INT
2166 vax_starting_frame_offset (void)
2167 {
2168 /* On ELF targets, reserve the top of the stack for exception handler
2169 stackadj value. */
2170 return TARGET_ELF ? -4 : 0;
2171 }
2172