1 /* Subroutines for insn-output.c for VAX.
2 Copyright (C) 1987-2020 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
24 #include "coretypes.h"
29 #include "stringpool.h"
39 #include "conditions.h"
45 /* This file should be included last. */
46 #include "target-def.h"
48 static void vax_option_override (void);
49 static bool vax_legitimate_address_p (machine_mode
, rtx
, bool);
50 static void vax_file_start (void);
51 static void vax_init_libfuncs (void);
52 static void vax_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
54 static int vax_address_cost_1 (rtx
);
55 static int vax_address_cost (rtx
, machine_mode
, addr_space_t
, bool);
56 static bool vax_rtx_costs (rtx
, machine_mode
, int, int, int *, bool);
57 static rtx
vax_function_arg (cumulative_args_t
, const function_arg_info
&);
58 static void vax_function_arg_advance (cumulative_args_t
,
59 const function_arg_info
&);
60 static rtx
vax_struct_value_rtx (tree
, int);
61 static void vax_asm_trampoline_template (FILE *);
62 static void vax_trampoline_init (rtx
, tree
, rtx
);
63 static poly_int64
vax_return_pops_args (tree
, tree
, poly_int64
);
64 static bool vax_mode_dependent_address_p (const_rtx
, addr_space_t
);
65 static HOST_WIDE_INT
vax_starting_frame_offset (void);
67 /* Initialize the GCC target structure. */
68 #undef TARGET_ASM_ALIGNED_HI_OP
69 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
71 #undef TARGET_ASM_FILE_START
72 #define TARGET_ASM_FILE_START vax_file_start
73 #undef TARGET_ASM_FILE_START_APP_OFF
74 #define TARGET_ASM_FILE_START_APP_OFF true
76 #undef TARGET_INIT_LIBFUNCS
77 #define TARGET_INIT_LIBFUNCS vax_init_libfuncs
79 #undef TARGET_ASM_OUTPUT_MI_THUNK
80 #define TARGET_ASM_OUTPUT_MI_THUNK vax_output_mi_thunk
81 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
82 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
84 #undef TARGET_RTX_COSTS
85 #define TARGET_RTX_COSTS vax_rtx_costs
86 #undef TARGET_ADDRESS_COST
87 #define TARGET_ADDRESS_COST vax_address_cost
89 #undef TARGET_PROMOTE_PROTOTYPES
90 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
92 #undef TARGET_FUNCTION_ARG
93 #define TARGET_FUNCTION_ARG vax_function_arg
94 #undef TARGET_FUNCTION_ARG_ADVANCE
95 #define TARGET_FUNCTION_ARG_ADVANCE vax_function_arg_advance
97 #undef TARGET_STRUCT_VALUE_RTX
98 #define TARGET_STRUCT_VALUE_RTX vax_struct_value_rtx
101 #define TARGET_LRA_P hook_bool_void_false
103 #undef TARGET_LEGITIMATE_ADDRESS_P
104 #define TARGET_LEGITIMATE_ADDRESS_P vax_legitimate_address_p
105 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
106 #define TARGET_MODE_DEPENDENT_ADDRESS_P vax_mode_dependent_address_p
108 #undef TARGET_FRAME_POINTER_REQUIRED
109 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
111 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
112 #define TARGET_ASM_TRAMPOLINE_TEMPLATE vax_asm_trampoline_template
113 #undef TARGET_TRAMPOLINE_INIT
114 #define TARGET_TRAMPOLINE_INIT vax_trampoline_init
115 #undef TARGET_RETURN_POPS_ARGS
116 #define TARGET_RETURN_POPS_ARGS vax_return_pops_args
118 #undef TARGET_OPTION_OVERRIDE
119 #define TARGET_OPTION_OVERRIDE vax_option_override
121 #undef TARGET_STARTING_FRAME_OFFSET
122 #define TARGET_STARTING_FRAME_OFFSET vax_starting_frame_offset
124 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
125 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
127 struct gcc_target targetm
= TARGET_INITIALIZER
;
129 /* Set global variables as needed for the options enabled. */
132 vax_option_override (void)
134 /* We're VAX floating point, not IEEE floating point. */
136 REAL_MODE_FORMAT (DFmode
) = &vax_g_format
;
138 #ifdef SUBTARGET_OVERRIDE_OPTIONS
139 SUBTARGET_OVERRIDE_OPTIONS
;
144 vax_add_reg_cfa_offset (rtx insn
, int offset
, rtx src
)
148 x
= plus_constant (Pmode
, frame_pointer_rtx
, offset
);
149 x
= gen_rtx_MEM (SImode
, x
);
150 x
= gen_rtx_SET (x
, src
);
151 add_reg_note (insn
, REG_CFA_OFFSET
, x
);
154 /* Generate the assembly code for function entry. FILE is a stdio
155 stream to output the code to. SIZE is an int: how many units of
156 temporary storage to allocate.
158 Refer to the array `regs_ever_live' to determine which registers to
159 save; `regs_ever_live[I]' is nonzero if register number I is ever
160 used in the function. This function is responsible for knowing
161 which registers should not be saved even if used. */
164 vax_expand_prologue (void)
171 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
172 if (df_regs_ever_live_p (regno
) && !call_used_or_fixed_reg_p (regno
))
175 insn
= emit_insn (gen_procedure_entry_mask (GEN_INT (mask
)));
176 RTX_FRAME_RELATED_P (insn
) = 1;
178 /* The layout of the CALLG/S stack frame is follows:
183 ... Registers saved as specified by MASK
193 The rest of the prologue will adjust the SP for the local frame. */
195 vax_add_reg_cfa_offset (insn
, 4, arg_pointer_rtx
);
196 vax_add_reg_cfa_offset (insn
, 8, frame_pointer_rtx
);
197 vax_add_reg_cfa_offset (insn
, 12, pc_rtx
);
200 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
201 if (mask
& (1 << regno
))
203 vax_add_reg_cfa_offset (insn
, offset
, gen_rtx_REG (SImode
, regno
));
207 /* Because add_reg_note pushes the notes, adding this last means that
208 it will be processed first. This is required to allow the other
209 notes be interpreted properly. */
210 add_reg_note (insn
, REG_CFA_DEF_CFA
,
211 plus_constant (Pmode
, frame_pointer_rtx
, offset
));
213 /* Allocate the local stack frame. */
214 size
= get_frame_size ();
215 size
-= vax_starting_frame_offset ();
216 emit_insn (gen_addsi3 (stack_pointer_rtx
,
217 stack_pointer_rtx
, GEN_INT (-size
)));
219 /* Do not allow instructions referencing local stack memory to be
220 scheduled before the frame is allocated. This is more pedantic
221 than anything else, given that VAX does not currently have a
222 scheduling description. */
223 emit_insn (gen_blockage ());
226 /* When debugging with stabs, we want to output an extra dummy label
227 so that gas can distinguish between D_float and G_float prior to
228 processing the .stabs directive identifying type double. */
230 vax_file_start (void)
232 default_file_start ();
234 if (write_symbols
== DBX_DEBUG
)
235 fprintf (asm_out_file
, "___vax_%c_doubles:\n", ASM_DOUBLE_CHAR
);
238 /* We can use the BSD C library routines for the libgcc calls that are
239 still generated, since that's what they boil down to anyways. When
240 ELF, avoid the user's namespace. */
243 vax_init_libfuncs (void)
245 if (TARGET_BSD_DIVMOD
)
247 set_optab_libfunc (udiv_optab
, SImode
, TARGET_ELF
? "*__udiv" : "*udiv");
248 set_optab_libfunc (umod_optab
, SImode
, TARGET_ELF
? "*__urem" : "*urem");
252 /* This is like nonimmediate_operand with a restriction on the type of MEM. */
255 split_quadword_operands (rtx insn
, enum rtx_code code
, rtx
* operands
,
260 for (i
= 0; i
< n
; i
++)
263 for (i
= 0; i
< n
; i
++)
265 if (MEM_P (operands
[i
])
266 && (GET_CODE (XEXP (operands
[i
], 0)) == PRE_DEC
267 || GET_CODE (XEXP (operands
[i
], 0)) == POST_INC
))
269 rtx addr
= XEXP (operands
[i
], 0);
270 operands
[i
] = low
[i
] = gen_rtx_MEM (SImode
, addr
);
272 else if (optimize_size
&& MEM_P (operands
[i
])
273 && REG_P (XEXP (operands
[i
], 0))
274 && (code
!= MINUS
|| operands
[1] != const0_rtx
)
275 && find_regno_note (insn
, REG_DEAD
,
276 REGNO (XEXP (operands
[i
], 0))))
278 low
[i
] = gen_rtx_MEM (SImode
,
279 gen_rtx_POST_INC (Pmode
,
280 XEXP (operands
[i
], 0)));
281 operands
[i
] = gen_rtx_MEM (SImode
, XEXP (operands
[i
], 0));
285 low
[i
] = operand_subword (operands
[i
], 0, 0, DImode
);
286 operands
[i
] = operand_subword (operands
[i
], 1, 0, DImode
);
292 print_operand_address (FILE * file
, rtx addr
)
295 rtx reg1
, breg
, ireg
;
299 switch (GET_CODE (addr
))
303 addr
= XEXP (addr
, 0);
307 fprintf (file
, "(%s)", reg_names
[REGNO (addr
)]);
311 fprintf (file
, "-(%s)", reg_names
[REGNO (XEXP (addr
, 0))]);
315 fprintf (file
, "(%s)+", reg_names
[REGNO (XEXP (addr
, 0))]);
319 /* There can be either two or three things added here. One must be a
320 REG. One can be either a REG or a MULT of a REG and an appropriate
321 constant, and the third can only be a constant or a MEM.
323 We get these two or three things and put the constant or MEM in
324 OFFSET, the MULT or REG in IREG, and the REG in BREG. If we have
325 a register and can't tell yet if it is a base or index register,
328 reg1
= 0; ireg
= 0; breg
= 0; offset
= 0;
330 if (CONSTANT_ADDRESS_P (XEXP (addr
, 0))
331 || MEM_P (XEXP (addr
, 0)))
333 offset
= XEXP (addr
, 0);
334 addr
= XEXP (addr
, 1);
336 else if (CONSTANT_ADDRESS_P (XEXP (addr
, 1))
337 || MEM_P (XEXP (addr
, 1)))
339 offset
= XEXP (addr
, 1);
340 addr
= XEXP (addr
, 0);
342 else if (GET_CODE (XEXP (addr
, 1)) == MULT
)
344 ireg
= XEXP (addr
, 1);
345 addr
= XEXP (addr
, 0);
347 else if (GET_CODE (XEXP (addr
, 0)) == MULT
)
349 ireg
= XEXP (addr
, 0);
350 addr
= XEXP (addr
, 1);
352 else if (REG_P (XEXP (addr
, 1)))
354 reg1
= XEXP (addr
, 1);
355 addr
= XEXP (addr
, 0);
357 else if (REG_P (XEXP (addr
, 0)))
359 reg1
= XEXP (addr
, 0);
360 addr
= XEXP (addr
, 1);
372 else if (GET_CODE (addr
) == MULT
)
376 gcc_assert (GET_CODE (addr
) == PLUS
);
377 if (CONSTANT_ADDRESS_P (XEXP (addr
, 0))
378 || MEM_P (XEXP (addr
, 0)))
382 if (CONST_INT_P (offset
))
383 offset
= plus_constant (Pmode
, XEXP (addr
, 0),
387 gcc_assert (CONST_INT_P (XEXP (addr
, 0)));
388 offset
= plus_constant (Pmode
, offset
,
389 INTVAL (XEXP (addr
, 0)));
392 offset
= XEXP (addr
, 0);
394 else if (REG_P (XEXP (addr
, 0)))
397 ireg
= reg1
, breg
= XEXP (addr
, 0), reg1
= 0;
399 reg1
= XEXP (addr
, 0);
403 gcc_assert (GET_CODE (XEXP (addr
, 0)) == MULT
);
405 ireg
= XEXP (addr
, 0);
408 if (CONSTANT_ADDRESS_P (XEXP (addr
, 1))
409 || MEM_P (XEXP (addr
, 1)))
413 if (CONST_INT_P (offset
))
414 offset
= plus_constant (Pmode
, XEXP (addr
, 1),
418 gcc_assert (CONST_INT_P (XEXP (addr
, 1)));
419 offset
= plus_constant (Pmode
, offset
,
420 INTVAL (XEXP (addr
, 1)));
423 offset
= XEXP (addr
, 1);
425 else if (REG_P (XEXP (addr
, 1)))
428 ireg
= reg1
, breg
= XEXP (addr
, 1), reg1
= 0;
430 reg1
= XEXP (addr
, 1);
434 gcc_assert (GET_CODE (XEXP (addr
, 1)) == MULT
);
436 ireg
= XEXP (addr
, 1);
440 /* If REG1 is nonzero, figure out if it is a base or index register. */
444 || (flag_pic
&& GET_CODE (addr
) == SYMBOL_REF
)
447 || (flag_pic
&& symbolic_operand (offset
, SImode
)))))
458 if (flag_pic
&& symbolic_operand (offset
, SImode
))
463 output_operand_lossage ("symbol used with both base and indexed registers");
466 #ifdef NO_EXTERNAL_INDIRECT_ADDRESS
467 if (flag_pic
> 1 && GET_CODE (offset
) == CONST
468 && GET_CODE (XEXP (XEXP (offset
, 0), 0)) == SYMBOL_REF
469 && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (offset
, 0), 0)))
472 output_operand_lossage ("symbol with offset used in PIC mode");
476 /* symbol(reg) isn't PIC, but symbol[reg] is. */
485 output_address (VOIDmode
, offset
);
489 fprintf (file
, "(%s)", reg_names
[REGNO (breg
)]);
493 if (GET_CODE (ireg
) == MULT
)
494 ireg
= XEXP (ireg
, 0);
495 gcc_assert (REG_P (ireg
));
496 fprintf (file
, "[%s]", reg_names
[REGNO (ireg
)]);
501 output_addr_const (file
, addr
);
506 print_operand (FILE *file
, rtx x
, int code
)
509 fputc (ASM_DOUBLE_CHAR
, file
);
510 else if (code
== '|')
511 fputs (REGISTER_PREFIX
, file
);
512 else if (code
== 'c')
513 fputs (cond_name (x
), file
);
514 else if (code
== 'C')
515 fputs (rev_cond_name (x
), file
);
516 else if (code
== 'D' && CONST_INT_P (x
) && INTVAL (x
) < 0)
517 fprintf (file
, "$" NEG_HWI_PRINT_HEX16
, INTVAL (x
));
518 else if (code
== 'P' && CONST_INT_P (x
))
519 fprintf (file
, "$" HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) + 1);
520 else if (code
== 'N' && CONST_INT_P (x
))
521 fprintf (file
, "$" HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
522 /* rotl instruction cannot deal with negative arguments. */
523 else if (code
== 'R' && CONST_INT_P (x
))
524 fprintf (file
, "$" HOST_WIDE_INT_PRINT_DEC
, 32 - INTVAL (x
));
525 else if (code
== 'H' && CONST_INT_P (x
))
526 fprintf (file
, "$%d", (int) (0xffff & ~ INTVAL (x
)));
527 else if (code
== 'h' && CONST_INT_P (x
))
528 fprintf (file
, "$%d", (short) - INTVAL (x
));
529 else if (code
== 'B' && CONST_INT_P (x
))
530 fprintf (file
, "$%d", (int) (0xff & ~ INTVAL (x
)));
531 else if (code
== 'b' && CONST_INT_P (x
))
532 fprintf (file
, "$%d", (int) (0xff & - INTVAL (x
)));
533 else if (code
== 'M' && CONST_INT_P (x
))
534 fprintf (file
, "$%d", ~((1 << INTVAL (x
)) - 1));
535 else if (code
== 'x' && CONST_INT_P (x
))
536 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (x
));
538 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
540 output_address (GET_MODE (x
), XEXP (x
, 0));
541 else if (GET_CODE (x
) == CONST_DOUBLE
&& GET_MODE (x
) == SFmode
)
544 real_to_decimal (dstr
, CONST_DOUBLE_REAL_VALUE (x
),
545 sizeof (dstr
), 0, 1);
546 fprintf (file
, "$0f%s", dstr
);
548 else if (GET_CODE (x
) == CONST_DOUBLE
&& GET_MODE (x
) == DFmode
)
551 real_to_decimal (dstr
, CONST_DOUBLE_REAL_VALUE (x
),
552 sizeof (dstr
), 0, 1);
553 fprintf (file
, "$0%c%s", ASM_DOUBLE_CHAR
, dstr
);
557 if (flag_pic
> 1 && symbolic_operand (x
, SImode
))
560 output_operand_lossage ("symbol used as immediate operand");
563 output_addr_const (file
, x
);
570 switch (GET_CODE (op
))
599 rev_cond_name (rtx op
)
601 switch (GET_CODE (op
))
630 vax_float_literal (rtx c
)
633 const REAL_VALUE_TYPE
*r
;
637 if (GET_CODE (c
) != CONST_DOUBLE
)
642 if (c
== const_tiny_rtx
[(int) mode
][0]
643 || c
== const_tiny_rtx
[(int) mode
][1]
644 || c
== const_tiny_rtx
[(int) mode
][2])
647 r
= CONST_DOUBLE_REAL_VALUE (c
);
649 for (i
= 0; i
< 7; i
++)
653 real_from_integer (&s
, mode
, x
, SIGNED
);
655 if (real_equal (r
, &s
))
657 ok
= exact_real_inverse (mode
, &s
);
659 if (real_equal (r
, &s
))
666 /* Return the cost in cycles of a memory address, relative to register
669 Each of the following adds the indicated number of cycles:
673 1 - indexing and/or offset(register)
678 vax_address_cost_1 (rtx addr
)
680 int reg
= 0, indexed
= 0, indir
= 0, offset
= 0, predec
= 0;
681 rtx plus_op0
= 0, plus_op1
= 0;
683 switch (GET_CODE (addr
))
694 indexed
= 1; /* 2 on VAX 2 */
697 /* byte offsets cost nothing (on a VAX 2, they cost 1 cycle) */
699 offset
= (unsigned HOST_WIDE_INT
)(INTVAL(addr
)+128) > 256;
703 offset
= 1; /* 2 on VAX 2 */
705 case LABEL_REF
: /* this is probably a byte offset from the pc */
711 plus_op1
= XEXP (addr
, 0);
713 plus_op0
= XEXP (addr
, 0);
714 addr
= XEXP (addr
, 1);
717 indir
= 2; /* 3 on VAX 2 */
718 addr
= XEXP (addr
, 0);
724 /* Up to 3 things can be added in an address. They are stored in
725 plus_op0, plus_op1, and addr. */
739 /* Indexing and register+offset can both be used (except on a VAX 2)
740 without increasing execution time over either one alone. */
741 if (reg
&& indexed
&& offset
)
742 return reg
+ indir
+ offset
+ predec
;
743 return reg
+ indexed
+ indir
+ offset
+ predec
;
747 vax_address_cost (rtx x
, machine_mode mode ATTRIBUTE_UNUSED
,
748 addr_space_t as ATTRIBUTE_UNUSED
,
749 bool speed ATTRIBUTE_UNUSED
)
751 return (1 + (REG_P (x
) ? 0 : vax_address_cost_1 (x
)));
754 /* Cost of an expression on a VAX. This version has costs tuned for the
755 CVAX chip (found in the VAX 3 series) with comments for variations on
758 FIXME: The costs need review, particularly for TRUNCATE, FLOAT_EXTEND
759 and FLOAT_TRUNCATE. We need a -mcpu option to allow provision of
760 costs on a per cpu basis. */
763 vax_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
764 int opno ATTRIBUTE_UNUSED
,
765 int *total
, bool speed ATTRIBUTE_UNUSED
)
767 int code
= GET_CODE (x
);
768 int i
= 0; /* may be modified in switch */
769 const char *fmt
= GET_RTX_FORMAT (code
); /* may be modified in switch */
773 /* On a VAX, constants from 0..63 are cheap because they can use the
774 1 byte literal constant format. Compare to -1 should be made cheap
775 so that decrement-and-branch insns can be formed more easily (if
776 the value -1 is copied to a register some decrement-and-branch
777 patterns will not match). */
784 if (outer_code
== AND
)
786 *total
= ((unsigned HOST_WIDE_INT
) ~INTVAL (x
) <= 077) ? 1 : 2;
789 if ((unsigned HOST_WIDE_INT
) INTVAL (x
) <= 077
790 || (outer_code
== COMPARE
792 || ((outer_code
== PLUS
|| outer_code
== MINUS
)
793 && (unsigned HOST_WIDE_INT
) -INTVAL (x
) <= 077))
807 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
808 *total
= vax_float_literal (x
) ? 5 : 8;
810 *total
= ((CONST_DOUBLE_HIGH (x
) == 0
811 && (unsigned HOST_WIDE_INT
) CONST_DOUBLE_LOW (x
) < 64)
812 || (outer_code
== PLUS
813 && CONST_DOUBLE_HIGH (x
) == -1
814 && (unsigned HOST_WIDE_INT
)-CONST_DOUBLE_LOW (x
) < 64))
820 return true; /* Implies register operand. */
824 return true; /* Implies register operand. */
830 *total
= 16; /* 4 on VAX 9000 */
833 *total
= 9; /* 4 on VAX 9000, 12 on VAX 2 */
836 *total
= 16; /* 6 on VAX 9000, 28 on VAX 2 */
841 *total
= 10; /* 3-4 on VAX 9000, 20-28 on VAX 2 */
844 *total
= MAX_COST
; /* Mode is not supported. */
852 *total
= MAX_COST
; /* Mode is not supported. */
860 *total
= 30; /* Highly variable. */
861 else if (mode
== DFmode
)
862 /* divide takes 28 cycles if the result is not zero, 13 otherwise */
865 *total
= 11; /* 25 on VAX 2 */
875 *total
= MAX_COST
; /* Mode is not supported. */
882 *total
= (6 /* 4 on VAX 9000 */
883 + (mode
== DFmode
) + (GET_MODE (XEXP (x
, 0)) != SImode
));
887 *total
= 7; /* 17 on VAX 2 */
896 *total
= 10; /* 6 on VAX 9000 */
901 *total
= 6; /* 5 on VAX 2, 4 on VAX 9000 */
902 if (CONST_INT_P (XEXP (x
, 1)))
903 fmt
= "e"; /* all constant rotate counts are short */
908 *total
= (mode
== DFmode
) ? 13 : 8; /* 6/8 on VAX 9000, 16/15 on VAX 2 */
909 /* Small integer operands can use subl2 and addl2. */
910 if ((CONST_INT_P (XEXP (x
, 1)))
911 && (unsigned HOST_WIDE_INT
)(INTVAL (XEXP (x
, 1)) + 63) < 127)
921 /* AND is special because the first operand is complemented. */
923 if (CONST_INT_P (XEXP (x
, 0)))
925 if ((unsigned HOST_WIDE_INT
)~INTVAL (XEXP (x
, 0)) > 63)
935 else if (mode
== SFmode
)
937 else if (mode
== DImode
)
953 if (mode
== DImode
|| mode
== DFmode
)
954 *total
= 5; /* 7 on VAX 2 */
956 *total
= 3; /* 4 on VAX 2 */
958 if (!REG_P (x
) && GET_CODE (x
) != POST_INC
)
959 *total
+= vax_address_cost_1 (x
);
965 *total
= 3; /* FIXME: Costs need to be checked */
972 /* Now look inside the expression. Operands which are not registers or
973 short constants add to the cost.
975 FMT and I may have been adjusted in the switch above for instructions
976 which require special handling. */
978 while (*fmt
++ == 'e')
980 rtx op
= XEXP (x
, i
);
983 code
= GET_CODE (op
);
985 /* A NOT is likely to be found as the first operand of an AND
986 (in which case the relevant cost is of the operand inside
987 the not) and not likely to be found anywhere else. */
989 op
= XEXP (op
, 0), code
= GET_CODE (op
);
994 if ((unsigned HOST_WIDE_INT
)INTVAL (op
) > 63
996 *total
+= 1; /* 2 on VAX 2 */
1001 *total
+= 1; /* 2 on VAX 2 */
1004 if (GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
)
1006 /* Registers are faster than floating point constants -- even
1007 those constants which can be encoded in a single byte. */
1008 if (vax_float_literal (op
))
1011 *total
+= (GET_MODE (x
) == DFmode
) ? 3 : 2;
1015 if (CONST_DOUBLE_HIGH (op
) != 0
1016 || (unsigned HOST_WIDE_INT
)CONST_DOUBLE_LOW (op
) > 63)
1021 *total
+= 1; /* 2 on VAX 2 */
1022 if (!REG_P (XEXP (op
, 0)))
1023 *total
+= vax_address_cost_1 (XEXP (op
, 0));
1036 /* Output code to add DELTA to the first argument, and then jump to FUNCTION.
1037 Used for C++ multiple inheritance.
1038 .mask ^m<r2,r3,r4,r5,r6,r7,r8,r9,r10,r11> #conservative entry mask
1039 addl2 $DELTA, 4(ap) #adjust first argument
1040 jmp FUNCTION+2 #jump beyond FUNCTION's entry mask
1044 vax_output_mi_thunk (FILE * file
,
1045 tree thunk ATTRIBUTE_UNUSED
,
1046 HOST_WIDE_INT delta
,
1047 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
1050 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk
));
1052 assemble_start_function (thunk
, fnname
);
1053 fprintf (file
, "\t.word 0x0ffc\n\taddl2 $" HOST_WIDE_INT_PRINT_DEC
, delta
);
1054 asm_fprintf (file
, ",4(%Rap)\n");
1055 fprintf (file
, "\tjmp ");
1056 assemble_name (file
, XSTR (XEXP (DECL_RTL (function
), 0), 0));
1057 fprintf (file
, "+2\n");
1058 assemble_end_function (thunk
, fnname
);
1062 vax_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED
,
1063 int incoming ATTRIBUTE_UNUSED
)
1065 return gen_rtx_REG (Pmode
, VAX_STRUCT_VALUE_REGNUM
);
1068 /* Worker function for NOTICE_UPDATE_CC. */
1071 vax_notice_update_cc (rtx exp
, rtx insn ATTRIBUTE_UNUSED
)
1073 if (GET_CODE (exp
) == SET
)
1075 if (GET_CODE (SET_SRC (exp
)) == CALL
)
1077 else if (GET_CODE (SET_DEST (exp
)) != ZERO_EXTRACT
1078 && GET_CODE (SET_DEST (exp
)) != PC
)
1080 cc_status
.flags
= 0;
1081 /* The integer operations below don't set carry or
1082 set it in an incompatible way. That's ok though
1083 as the Z bit is all we need when doing unsigned
1084 comparisons on the result of these insns (since
1085 they're always with 0). Set CC_NO_OVERFLOW to
1086 generate the correct unsigned branches. */
1087 switch (GET_CODE (SET_SRC (exp
)))
1090 if (GET_MODE_CLASS (GET_MODE (exp
)) == MODE_FLOAT
)
1099 cc_status
.flags
= CC_NO_OVERFLOW
;
1104 cc_status
.value1
= SET_DEST (exp
);
1105 cc_status
.value2
= SET_SRC (exp
);
1108 else if (GET_CODE (exp
) == PARALLEL
1109 && GET_CODE (XVECEXP (exp
, 0, 0)) == SET
)
1111 if (GET_CODE (SET_SRC (XVECEXP (exp
, 0, 0))) == CALL
)
1113 else if (GET_CODE (SET_DEST (XVECEXP (exp
, 0, 0))) != PC
)
1115 cc_status
.flags
= 0;
1116 cc_status
.value1
= SET_DEST (XVECEXP (exp
, 0, 0));
1117 cc_status
.value2
= SET_SRC (XVECEXP (exp
, 0, 0));
1120 /* PARALLELs whose first element sets the PC are aob,
1121 sob insns. They do change the cc's. */
1126 if (cc_status
.value1
&& REG_P (cc_status
.value1
)
1128 && reg_overlap_mentioned_p (cc_status
.value1
, cc_status
.value2
))
1129 cc_status
.value2
= 0;
1130 if (cc_status
.value1
&& MEM_P (cc_status
.value1
)
1132 && MEM_P (cc_status
.value2
))
1133 cc_status
.value2
= 0;
1134 /* Actual condition, one line up, should be that value2's address
1135 depends on value1, but that is too much of a pain. */
1138 /* Output integer move instructions. */
1141 vax_output_int_move (rtx insn ATTRIBUTE_UNUSED
, rtx
*operands
,
1145 const char *pattern_hi
, *pattern_lo
;
1150 if (operands
[1] == const0_rtx
)
1152 if (TARGET_QMATH
&& optimize_size
1153 && (CONST_INT_P (operands
[1])
1154 || GET_CODE (operands
[1]) == CONST_DOUBLE
))
1156 unsigned HOST_WIDE_INT hval
, lval
;
1159 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
1161 gcc_assert (HOST_BITS_PER_WIDE_INT
!= 64);
1163 /* Make sure only the low 32 bits are valid. */
1164 lval
= CONST_DOUBLE_LOW (operands
[1]) & 0xffffffff;
1165 hval
= CONST_DOUBLE_HIGH (operands
[1]) & 0xffffffff;
1169 lval
= INTVAL (operands
[1]);
1173 /* Here we see if we are trying to see if the 64bit value is really
1174 a 6bit shifted some arbitrary amount. If so, we can use ashq to
1175 shift it to the correct value saving 7 bytes (1 addr-mode-byte +
1176 8 bytes - 1 shift byte - 1 short literal byte. */
1178 && (n
= exact_log2 (lval
& (- lval
))) != -1
1179 && (lval
>> n
) < 64)
1183 /* On 32bit platforms, if the 6bits didn't overflow into the
1184 upper 32bit value that value better be 0. If we have
1185 overflowed, make sure it wasn't too much. */
1186 if (HOST_BITS_PER_WIDE_INT
== 32 && hval
!= 0)
1188 if (n
<= 26 || hval
>= ((unsigned)1 << (n
- 26)))
1189 n
= 0; /* failure */
1191 lval
|= hval
<< (32 - n
);
1193 /* If n is 0, then ashq is not the best way to emit this. */
1196 operands
[1] = GEN_INT (lval
);
1197 operands
[2] = GEN_INT (n
);
1198 return "ashq %2,%D1,%0";
1200 #if HOST_BITS_PER_WIDE_INT == 32
1202 /* On 32bit platforms, if the low 32bit value is 0, checkout the
1203 upper 32bit value. */
1205 && (n
= exact_log2 (hval
& (- hval
)) - 1) != -1
1206 && (hval
>> n
) < 64)
1208 operands
[1] = GEN_INT (hval
>> n
);
1209 operands
[2] = GEN_INT (n
+ 32);
1210 return "ashq %2,%D1,%0";
1216 && (!MEM_P (operands
[0])
1217 || GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
1218 || GET_CODE (XEXP (operands
[0], 0)) == POST_INC
1219 || !illegal_addsub_di_memory_operand (operands
[0], DImode
))
1220 && ((CONST_INT_P (operands
[1])
1221 && (unsigned HOST_WIDE_INT
) INTVAL (operands
[1]) >= 64)
1222 || GET_CODE (operands
[1]) == CONST_DOUBLE
))
1224 hi
[0] = operands
[0];
1225 hi
[1] = operands
[1];
1227 split_quadword_operands (insn
, SET
, hi
, lo
, 2);
1229 pattern_lo
= vax_output_int_move (NULL
, lo
, SImode
);
1230 pattern_hi
= vax_output_int_move (NULL
, hi
, SImode
);
1232 /* The patterns are just movl/movl or pushl/pushl then a movq will
1233 be shorter (1 opcode byte + 1 addrmode byte + 8 immediate value
1234 bytes .vs. 2 opcode bytes + 2 addrmode bytes + 8 immediate value
1236 if ((!strncmp (pattern_lo
, "movl", 4)
1237 && !strncmp (pattern_hi
, "movl", 4))
1238 || (!strncmp (pattern_lo
, "pushl", 5)
1239 && !strncmp (pattern_hi
, "pushl", 5)))
1240 return "movq %1,%0";
1242 if (MEM_P (operands
[0])
1243 && GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
)
1245 output_asm_insn (pattern_hi
, hi
);
1246 operands
[0] = lo
[0];
1247 operands
[1] = lo
[1];
1248 operands
[2] = lo
[2];
1253 output_asm_insn (pattern_lo
, lo
);
1254 operands
[0] = hi
[0];
1255 operands
[1] = hi
[1];
1256 operands
[2] = hi
[2];
1260 return "movq %1,%0";
1263 if (symbolic_operand (operands
[1], SImode
))
1265 if (push_operand (operands
[0], SImode
))
1266 return "pushab %a1";
1267 return "movab %a1,%0";
1270 if (operands
[1] == const0_rtx
)
1272 if (push_operand (operands
[1], SImode
))
1277 if (CONST_INT_P (operands
[1])
1278 && (unsigned HOST_WIDE_INT
) INTVAL (operands
[1]) >= 64)
1280 HOST_WIDE_INT i
= INTVAL (operands
[1]);
1282 if ((unsigned HOST_WIDE_INT
)(~i
) < 64)
1283 return "mcoml %N1,%0";
1284 if ((unsigned HOST_WIDE_INT
)i
< 0x100)
1285 return "movzbl %1,%0";
1286 if (i
>= -0x80 && i
< 0)
1287 return "cvtbl %1,%0";
1289 && (n
= exact_log2 (i
& (-i
))) != -1
1290 && ((unsigned HOST_WIDE_INT
)i
>> n
) < 64)
1292 operands
[1] = GEN_INT ((unsigned HOST_WIDE_INT
)i
>> n
);
1293 operands
[2] = GEN_INT (n
);
1294 return "ashl %2,%1,%0";
1296 if ((unsigned HOST_WIDE_INT
)i
< 0x10000)
1297 return "movzwl %1,%0";
1298 if (i
>= -0x8000 && i
< 0)
1299 return "cvtwl %1,%0";
1301 if (push_operand (operands
[0], SImode
))
1303 return "movl %1,%0";
1306 if (CONST_INT_P (operands
[1]))
1308 HOST_WIDE_INT i
= INTVAL (operands
[1]);
1311 else if ((unsigned HOST_WIDE_INT
)i
< 64)
1312 return "movw %1,%0";
1313 else if ((unsigned HOST_WIDE_INT
)~i
< 64)
1314 return "mcomw %H1,%0";
1315 else if ((unsigned HOST_WIDE_INT
)i
< 256)
1316 return "movzbw %1,%0";
1317 else if (i
>= -0x80 && i
< 0)
1318 return "cvtbw %1,%0";
1320 return "movw %1,%0";
1323 if (CONST_INT_P (operands
[1]))
1325 HOST_WIDE_INT i
= INTVAL (operands
[1]);
1328 else if ((unsigned HOST_WIDE_INT
)~i
< 64)
1329 return "mcomb %B1,%0";
1331 return "movb %1,%0";
1338 /* Output integer add instructions.
1340 The space-time-opcode tradeoffs for addition vary by model of VAX.
1342 On a VAX 3 "movab (r1)[r2],r3" is faster than "addl3 r1,r2,r3",
1343 but it not faster on other models.
1345 "movab #(r1),r2" is usually shorter than "addl3 #,r1,r2", and is
1346 faster on a VAX 3, but some VAXen (e.g. VAX 9000) will stall if
1347 a register is used in an address too soon after it is set.
1348 Compromise by using movab only when it is shorter than the add
1349 or the base register in the address is one of sp, ap, and fp,
1350 which are not modified very often. */
1353 vax_output_int_add (rtx_insn
*insn
, rtx
*operands
, machine_mode mode
)
1360 const char *pattern
;
1364 if (TARGET_QMATH
&& 0)
1367 split_quadword_operands (insn
, PLUS
, operands
, low
, 3);
1371 gcc_assert (rtx_equal_p (operands
[0], operands
[1]));
1372 #ifdef NO_EXTERNAL_INDIRECT_ADDRESSS
1373 gcc_assert (!flag_pic
|| !external_memory_operand (low
[2], SImode
));
1374 gcc_assert (!flag_pic
|| !external_memory_operand (low
[0], SImode
));
1377 /* No reason to add a 0 to the low part and thus no carry, so just
1378 emit the appropriate add/sub instruction. */
1379 if (low
[2] == const0_rtx
)
1380 return vax_output_int_add (NULL
, operands
, SImode
);
1382 /* Are we doing addition or subtraction? */
1383 sub
= CONST_INT_P (operands
[2]) && INTVAL (operands
[2]) < 0;
1385 /* We can't use vax_output_int_add since some the patterns don't
1386 modify the carry bit. */
1389 if (low
[2] == constm1_rtx
)
1390 pattern
= "decl %0";
1392 pattern
= "subl2 $%n2,%0";
1396 if (low
[2] == const1_rtx
)
1397 pattern
= "incl %0";
1399 pattern
= "addl2 %2,%0";
1401 output_asm_insn (pattern
, low
);
1403 /* In 2's complement, -n = ~n + 1. Since we are dealing with
1404 two 32bit parts, we complement each and then add one to
1405 low part. We know that the low part can't overflow since
1406 it's value can never be 0. */
1408 return "sbwc %N2,%0";
1409 return "adwc %2,%0";
1412 /* Add low parts. */
1413 if (rtx_equal_p (operands
[0], operands
[1]))
1415 if (low
[2] == const0_rtx
)
1416 /* Should examine operand, punt if not POST_INC. */
1417 pattern
= "tstl %0", carry
= 0;
1418 else if (low
[2] == const1_rtx
)
1419 pattern
= "incl %0";
1421 pattern
= "addl2 %2,%0";
1425 if (low
[2] == const0_rtx
)
1426 pattern
= "movl %1,%0", carry
= 0;
1428 pattern
= "addl3 %2,%1,%0";
1431 output_asm_insn (pattern
, low
);
1433 /* If CARRY is 0, we don't have any carry value to worry about. */
1434 return get_insn_template (CODE_FOR_addsi3
, insn
);
1435 /* %0 = C + %1 + %2 */
1436 if (!rtx_equal_p (operands
[0], operands
[1]))
1437 output_asm_insn ((operands
[1] == const0_rtx
1439 : "movl %1,%0"), operands
);
1440 return "adwc %2,%0";
1444 if (rtx_equal_p (operands
[0], operands
[1]))
1446 if (operands
[2] == const1_rtx
)
1448 if (operands
[2] == constm1_rtx
)
1450 if (CONST_INT_P (operands
[2])
1451 && (unsigned HOST_WIDE_INT
) (- INTVAL (operands
[2])) < 64)
1452 return "subl2 $%n2,%0";
1453 if (CONST_INT_P (operands
[2])
1454 && (unsigned HOST_WIDE_INT
) INTVAL (operands
[2]) >= 64
1455 && REG_P (operands
[1])
1456 && ((INTVAL (operands
[2]) < 32767 && INTVAL (operands
[2]) > -32768)
1457 || REGNO (operands
[1]) > 11))
1458 return "movab %c2(%1),%0";
1459 if (REG_P (operands
[0]) && symbolic_operand (operands
[2], SImode
))
1460 return "movab %a2[%0],%0";
1461 return "addl2 %2,%0";
1464 if (rtx_equal_p (operands
[0], operands
[2]))
1466 if (REG_P (operands
[0]) && symbolic_operand (operands
[1], SImode
))
1467 return "movab %a1[%0],%0";
1468 return "addl2 %1,%0";
1471 if (CONST_INT_P (operands
[2])
1472 && INTVAL (operands
[2]) < 32767
1473 && INTVAL (operands
[2]) > -32768
1474 && REG_P (operands
[1])
1475 && push_operand (operands
[0], SImode
))
1476 return "pushab %c2(%1)";
1478 if (CONST_INT_P (operands
[2])
1479 && (unsigned HOST_WIDE_INT
) (- INTVAL (operands
[2])) < 64)
1480 return "subl3 $%n2,%1,%0";
1482 if (CONST_INT_P (operands
[2])
1483 && (unsigned HOST_WIDE_INT
) INTVAL (operands
[2]) >= 64
1484 && REG_P (operands
[1])
1485 && ((INTVAL (operands
[2]) < 32767 && INTVAL (operands
[2]) > -32768)
1486 || REGNO (operands
[1]) > 11))
1487 return "movab %c2(%1),%0";
1489 /* Add this if using gcc on a VAX 3xxx:
1490 if (REG_P (operands[1]) && REG_P (operands[2]))
1491 return "movab (%1)[%2],%0";
1494 if (REG_P (operands
[1]) && symbolic_operand (operands
[2], SImode
))
1496 if (push_operand (operands
[0], SImode
))
1497 return "pushab %a2[%1]";
1498 return "movab %a2[%1],%0";
1501 if (REG_P (operands
[2]) && symbolic_operand (operands
[1], SImode
))
1503 if (push_operand (operands
[0], SImode
))
1504 return "pushab %a1[%2]";
1505 return "movab %a1[%2],%0";
1508 if (flag_pic
&& REG_P (operands
[0])
1509 && symbolic_operand (operands
[2], SImode
))
1510 return "movab %a2,%0;addl2 %1,%0";
1513 && (symbolic_operand (operands
[1], SImode
)
1514 || symbolic_operand (operands
[1], SImode
)))
1517 return "addl3 %1,%2,%0";
1520 if (rtx_equal_p (operands
[0], operands
[1]))
1522 if (operands
[2] == const1_rtx
)
1524 if (operands
[2] == constm1_rtx
)
1526 if (CONST_INT_P (operands
[2])
1527 && (unsigned HOST_WIDE_INT
) (- INTVAL (operands
[2])) < 64)
1528 return "subw2 $%n2,%0";
1529 return "addw2 %2,%0";
1531 if (rtx_equal_p (operands
[0], operands
[2]))
1532 return "addw2 %1,%0";
1533 if (CONST_INT_P (operands
[2])
1534 && (unsigned HOST_WIDE_INT
) (- INTVAL (operands
[2])) < 64)
1535 return "subw3 $%n2,%1,%0";
1536 return "addw3 %1,%2,%0";
1539 if (rtx_equal_p (operands
[0], operands
[1]))
1541 if (operands
[2] == const1_rtx
)
1543 if (operands
[2] == constm1_rtx
)
1545 if (CONST_INT_P (operands
[2])
1546 && (unsigned HOST_WIDE_INT
) (- INTVAL (operands
[2])) < 64)
1547 return "subb2 $%n2,%0";
1548 return "addb2 %2,%0";
1550 if (rtx_equal_p (operands
[0], operands
[2]))
1551 return "addb2 %1,%0";
1552 if (CONST_INT_P (operands
[2])
1553 && (unsigned HOST_WIDE_INT
) (- INTVAL (operands
[2])) < 64)
1554 return "subb3 $%n2,%1,%0";
1555 return "addb3 %1,%2,%0";
1563 vax_output_int_subtract (rtx_insn
*insn
, rtx
*operands
, machine_mode mode
)
1570 const char *pattern
;
1573 if (TARGET_QMATH
&& 0)
1576 split_quadword_operands (insn
, MINUS
, operands
, low
, 3);
1580 if (operands
[1] == const0_rtx
&& low
[1] == const0_rtx
)
1582 /* Negation is tricky. It's basically complement and increment.
1583 Negate hi, then lo, and subtract the carry back. */
1584 if ((MEM_P (low
[0]) && GET_CODE (XEXP (low
[0], 0)) == POST_INC
)
1585 || (MEM_P (operands
[0])
1586 && GET_CODE (XEXP (operands
[0], 0)) == POST_INC
))
1587 fatal_insn ("illegal operand detected", insn
);
1588 output_asm_insn ("mnegl %2,%0", operands
);
1589 output_asm_insn ("mnegl %2,%0", low
);
1590 return "sbwc $0,%0";
1592 gcc_assert (rtx_equal_p (operands
[0], operands
[1]));
1593 gcc_assert (rtx_equal_p (low
[0], low
[1]));
1594 if (low
[2] == const1_rtx
)
1595 output_asm_insn ("decl %0", low
);
1597 output_asm_insn ("subl2 %2,%0", low
);
1598 return "sbwc %2,%0";
1601 /* Subtract low parts. */
1602 if (rtx_equal_p (operands
[0], operands
[1]))
1604 if (low
[2] == const0_rtx
)
1605 pattern
= 0, carry
= 0;
1606 else if (low
[2] == constm1_rtx
)
1607 pattern
= "decl %0";
1609 pattern
= "subl2 %2,%0";
1613 if (low
[2] == constm1_rtx
)
1614 pattern
= "decl %0";
1615 else if (low
[2] == const0_rtx
)
1616 pattern
= get_insn_template (CODE_FOR_movsi
, insn
), carry
= 0;
1618 pattern
= "subl3 %2,%1,%0";
1621 output_asm_insn (pattern
, low
);
1624 if (!rtx_equal_p (operands
[0], operands
[1]))
1625 return "movl %1,%0;sbwc %2,%0";
1626 return "sbwc %2,%0";
1627 /* %0 = %2 - %1 - C */
1629 return get_insn_template (CODE_FOR_subsi3
, insn
);
1637 /* True if X is an rtx for a constant that is a valid address. */
1640 legitimate_constant_address_p (rtx x
)
1642 if (GET_CODE (x
) == LABEL_REF
|| GET_CODE (x
) == SYMBOL_REF
1643 || CONST_INT_P (x
) || GET_CODE (x
) == HIGH
)
1645 if (GET_CODE (x
) != CONST
)
1647 #ifdef NO_EXTERNAL_INDIRECT_ADDRESS
1649 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
1650 && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (x
, 0), 0)))
/* The other macros defined here are used only in legitimate_address_p ().  */

/* Nonzero if X is a hard reg that can be used as an index
   or, if not strict, if it is a pseudo reg.  */
#define INDEX_REGISTER_P(X, STRICT) \
  (REG_P (X) && (!(STRICT) || REGNO_OK_FOR_INDEX_P (REGNO (X))))

/* Nonzero if X is a hard reg that can be used as a base reg
   or, if not strict, if it is a pseudo reg.  */
#define BASE_REGISTER_P(X, STRICT) \
  (REG_P (X) && (!(STRICT) || REGNO_OK_FOR_BASE_P (REGNO (X))))
1668 #ifdef NO_EXTERNAL_INDIRECT_ADDRESS
1670 /* Re-definition of CONSTANT_ADDRESS_P, which is true only when there
1671 are no SYMBOL_REFs for external symbols present. */
1674 indirectable_constant_address_p (rtx x
, bool indirect
)
1676 if (GET_CODE (x
) == SYMBOL_REF
)
1677 return !flag_pic
|| SYMBOL_REF_LOCAL_P (x
) || !indirect
;
1679 if (GET_CODE (x
) == CONST
)
1681 || GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
1682 || SYMBOL_REF_LOCAL_P (XEXP (XEXP (x
, 0), 0));
1684 return CONSTANT_ADDRESS_P (x
);
1687 #else /* not NO_EXTERNAL_INDIRECT_ADDRESS */
1690 indirectable_constant_address_p (rtx x
, bool indirect ATTRIBUTE_UNUSED
)
1692 return CONSTANT_ADDRESS_P (x
);
1695 #endif /* not NO_EXTERNAL_INDIRECT_ADDRESS */
1697 /* True if X is an address which can be indirected. External symbols
1698 could be in a sharable image library, so we disallow those. */
1701 indirectable_address_p (rtx x
, bool strict
, bool indirect
)
1703 if (indirectable_constant_address_p (x
, indirect
)
1704 || BASE_REGISTER_P (x
, strict
))
1706 if (GET_CODE (x
) != PLUS
1707 || !BASE_REGISTER_P (XEXP (x
, 0), strict
)
1708 || (flag_pic
&& !CONST_INT_P (XEXP (x
, 1))))
1710 return indirectable_constant_address_p (XEXP (x
, 1), indirect
);
1713 /* Return true if x is a valid address not using indexing.
1714 (This much is the easy part.) */
1716 nonindexed_address_p (rtx x
, bool strict
)
1721 if (! reload_in_progress
1722 || reg_equiv_mem (REGNO (x
)) == 0
1723 || indirectable_address_p (reg_equiv_mem (REGNO (x
)), strict
, false))
1726 if (indirectable_constant_address_p (x
, false))
1728 if (indirectable_address_p (x
, strict
, false))
1730 xfoo0
= XEXP (x
, 0);
1731 if (MEM_P (x
) && indirectable_address_p (xfoo0
, strict
, true))
1733 if ((GET_CODE (x
) == PRE_DEC
|| GET_CODE (x
) == POST_INC
)
1734 && BASE_REGISTER_P (xfoo0
, strict
))
1739 /* True if PROD is either a reg times size of mode MODE and MODE is less
1740 than or equal 8 bytes, or just a reg if MODE is one byte. */
1743 index_term_p (rtx prod
, machine_mode mode
, bool strict
)
1747 if (GET_MODE_SIZE (mode
) == 1)
1748 return BASE_REGISTER_P (prod
, strict
);
1750 if (GET_CODE (prod
) != MULT
|| GET_MODE_SIZE (mode
) > 8)
1753 xfoo0
= XEXP (prod
, 0);
1754 xfoo1
= XEXP (prod
, 1);
1756 if (CONST_INT_P (xfoo0
)
1757 && INTVAL (xfoo0
) == (int)GET_MODE_SIZE (mode
)
1758 && INDEX_REGISTER_P (xfoo1
, strict
))
1761 if (CONST_INT_P (xfoo1
)
1762 && INTVAL (xfoo1
) == (int)GET_MODE_SIZE (mode
)
1763 && INDEX_REGISTER_P (xfoo0
, strict
))
1769 /* Return true if X is the sum of a register
1770 and a valid index term for mode MODE. */
1772 reg_plus_index_p (rtx x
, machine_mode mode
, bool strict
)
1776 if (GET_CODE (x
) != PLUS
)
1779 xfoo0
= XEXP (x
, 0);
1780 xfoo1
= XEXP (x
, 1);
1782 if (BASE_REGISTER_P (xfoo0
, strict
) && index_term_p (xfoo1
, mode
, strict
))
1785 if (BASE_REGISTER_P (xfoo1
, strict
) && index_term_p (xfoo0
, mode
, strict
))
1791 /* Return true if xfoo0 and xfoo1 constitute a valid indexed address. */
1793 indexable_address_p (rtx xfoo0
, rtx xfoo1
, machine_mode mode
, bool strict
)
1795 if (!CONSTANT_ADDRESS_P (xfoo0
))
1797 if (BASE_REGISTER_P (xfoo1
, strict
))
1798 return !flag_pic
|| mode
== QImode
;
1799 if (flag_pic
&& symbolic_operand (xfoo0
, SImode
))
1801 return reg_plus_index_p (xfoo1
, mode
, strict
);
1804 /* legitimate_address_p returns true if it recognizes an RTL expression "x"
1805 that is a valid memory address for an instruction.
1806 The MODE argument is the machine mode for the MEM expression
1807 that wants to use this address. */
1809 vax_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1813 if (nonindexed_address_p (x
, strict
))
1816 if (GET_CODE (x
) != PLUS
)
1819 /* Handle <address>[index] represented with index-sum outermost */
1821 xfoo0
= XEXP (x
, 0);
1822 xfoo1
= XEXP (x
, 1);
1824 if (index_term_p (xfoo0
, mode
, strict
)
1825 && nonindexed_address_p (xfoo1
, strict
))
1828 if (index_term_p (xfoo1
, mode
, strict
)
1829 && nonindexed_address_p (xfoo0
, strict
))
1832 /* Handle offset(reg)[index] with offset added outermost */
1834 if (indexable_address_p (xfoo0
, xfoo1
, mode
, strict
)
1835 || indexable_address_p (xfoo1
, xfoo0
, mode
, strict
))
1841 /* Return true if x (a legitimate address expression) has an effect that
1842 depends on the machine mode it is used for. On the VAX, the predecrement
1843 and postincrement address depend thus (the amount of decrement or
1844 increment being the length of the operand) and all indexed address depend
1845 thus (because the index scale factor is the length of the operand). */
1848 vax_mode_dependent_address_p (const_rtx x
, addr_space_t as ATTRIBUTE_UNUSED
)
1852 /* Auto-increment cases are now dealt with generically in recog.c. */
1853 if (GET_CODE (x
) != PLUS
)
1856 xfoo0
= XEXP (x
, 0);
1857 xfoo1
= XEXP (x
, 1);
1859 if (CONST_INT_P (xfoo0
) && REG_P (xfoo1
))
1861 if (CONST_INT_P (xfoo1
) && REG_P (xfoo0
))
1863 if (!flag_pic
&& CONSTANT_ADDRESS_P (xfoo0
) && REG_P (xfoo1
))
1865 if (!flag_pic
&& CONSTANT_ADDRESS_P (xfoo1
) && REG_P (xfoo0
))
1872 fixup_mathdi_operand (rtx x
, machine_mode mode
)
1874 if (illegal_addsub_di_memory_operand (x
, mode
))
1876 rtx addr
= XEXP (x
, 0);
1877 rtx temp
= gen_reg_rtx (Pmode
);
1879 #ifdef NO_EXTERNAL_INDIRECT_ADDRESS
1880 if (GET_CODE (addr
) == CONST
&& flag_pic
)
1882 offset
= XEXP (XEXP (addr
, 0), 1);
1883 addr
= XEXP (XEXP (addr
, 0), 0);
1886 emit_move_insn (temp
, addr
);
1888 temp
= gen_rtx_PLUS (Pmode
, temp
, offset
);
1889 x
= gen_rtx_MEM (DImode
, temp
);
1895 vax_expand_addsub_di_operands (rtx
* operands
, enum rtx_code code
)
1897 int hi_only
= operand_subword (operands
[2], 0, 0, DImode
) == const0_rtx
;
1900 rtx (*gen_old_insn
)(rtx
, rtx
, rtx
);
1901 rtx (*gen_si_insn
)(rtx
, rtx
, rtx
);
1902 rtx (*gen_insn
)(rtx
, rtx
, rtx
);
1906 gen_old_insn
= gen_adddi3_old
;
1907 gen_si_insn
= gen_addsi3
;
1908 gen_insn
= gen_adcdi3
;
1910 else if (code
== MINUS
)
1912 gen_old_insn
= gen_subdi3_old
;
1913 gen_si_insn
= gen_subsi3
;
1914 gen_insn
= gen_sbcdi3
;
1919 /* If this is addition (thus operands are commutative) and if there is one
1920 addend that duplicates the desination, we want that addend to be the
1923 && rtx_equal_p (operands
[0], operands
[2])
1924 && !rtx_equal_p (operands
[1], operands
[2]))
1927 operands
[2] = operands
[1];
1933 emit_insn ((*gen_old_insn
) (operands
[0], operands
[1], operands
[2]));
1937 if (!rtx_equal_p (operands
[0], operands
[1])
1938 && (REG_P (operands
[0]) && MEM_P (operands
[1])))
1940 emit_move_insn (operands
[0], operands
[1]);
1941 operands
[1] = operands
[0];
1944 operands
[0] = fixup_mathdi_operand (operands
[0], DImode
);
1945 operands
[1] = fixup_mathdi_operand (operands
[1], DImode
);
1946 operands
[2] = fixup_mathdi_operand (operands
[2], DImode
);
1948 if (!rtx_equal_p (operands
[0], operands
[1]))
1949 emit_move_insn (operand_subword (operands
[0], 0, 0, DImode
),
1950 operand_subword (operands
[1], 0, 0, DImode
));
1952 emit_insn ((*gen_si_insn
) (operand_subword (operands
[0], 1, 0, DImode
),
1953 operand_subword (operands
[1], 1, 0, DImode
),
1954 operand_subword (operands
[2], 1, 0, DImode
)));
1958 /* If are adding the same value together, that's really a multiply by 2,
1959 and that's just a left shift of 1. */
1960 if (rtx_equal_p (operands
[1], operands
[2]))
1962 gcc_assert (code
!= MINUS
);
1963 emit_insn (gen_ashldi3 (operands
[0], operands
[1], const1_rtx
));
1967 operands
[0] = fixup_mathdi_operand (operands
[0], DImode
);
1969 /* If an operand is the same as operand[0], use the operand[0] rtx
1970 because fixup will an equivalent rtx but not an equal one. */
1972 if (rtx_equal_p (operands
[0], operands
[1]))
1973 operands
[1] = operands
[0];
1975 operands
[1] = fixup_mathdi_operand (operands
[1], DImode
);
1977 if (rtx_equal_p (operands
[0], operands
[2]))
1978 operands
[2] = operands
[0];
1980 operands
[2] = fixup_mathdi_operand (operands
[2], DImode
);
1982 /* If we are subtracting not from ourselves [d = a - b], and because the
1983 carry ops are two operand only, we would need to do a move prior to
1984 the subtract. And if d == b, we would need a temp otherwise
1985 [d = a, d -= d] and we end up with 0. Instead we rewrite d = a - b
1986 into d = -b, d += a. Since -b can never overflow, even if b == d,
1989 If we are doing addition, since the carry ops are two operand, if
1990 we aren't adding to ourselves, move the first addend to the
1991 destination first. */
1993 gcc_assert (operands
[1] != const0_rtx
|| code
== MINUS
);
1994 if (!rtx_equal_p (operands
[0], operands
[1]) && operands
[1] != const0_rtx
)
1996 if (code
== MINUS
&& CONSTANT_P (operands
[1]))
1998 temp
= gen_reg_rtx (DImode
);
1999 emit_insn (gen_sbcdi3 (operands
[0], const0_rtx
, operands
[2]));
2001 gen_insn
= gen_adcdi3
;
2002 operands
[2] = operands
[1];
2003 operands
[1] = operands
[0];
2006 emit_move_insn (operands
[0], operands
[1]);
2009 /* Subtracting a constant will have been rewritten to an addition of the
2010 negative of that constant before we get here. */
2011 gcc_assert (!CONSTANT_P (operands
[2]) || code
== PLUS
);
2012 emit_insn ((*gen_insn
) (operands
[0], operands
[1], operands
[2]));
2017 adjacent_operands_p (rtx lo
, rtx hi
, machine_mode mode
)
2019 HOST_WIDE_INT lo_offset
;
2020 HOST_WIDE_INT hi_offset
;
2022 if (GET_CODE (lo
) != GET_CODE (hi
))
2026 return mode
== SImode
&& REGNO (lo
) + 1 == REGNO (hi
);
2027 if (CONST_INT_P (lo
))
2028 return INTVAL (hi
) == 0 && UINTVAL (lo
) < 64;
2029 if (CONST_INT_P (lo
))
2030 return mode
!= SImode
;
2035 if (MEM_VOLATILE_P (lo
) || MEM_VOLATILE_P (hi
))
2041 if (GET_CODE (lo
) == POST_INC
/* || GET_CODE (lo) == PRE_DEC */)
2042 return rtx_equal_p (lo
, hi
);
2044 switch (GET_CODE (lo
))
2054 if (!CONST_INT_P (XEXP (lo
, 1)))
2056 lo_offset
= INTVAL (XEXP (lo
, 1));
2063 switch (GET_CODE (hi
))
2073 if (!CONST_INT_P (XEXP (hi
, 1)))
2075 hi_offset
= INTVAL (XEXP (hi
, 1));
2082 if (GET_CODE (lo
) == MULT
|| GET_CODE (lo
) == PLUS
)
2085 return rtx_equal_p (lo
, hi
)
2086 && hi_offset
- lo_offset
== GET_MODE_SIZE (mode
);
2089 /* Output assembler code for a block containing the constant parts
2090 of a trampoline, leaving space for the variable parts. */
2092 /* On the VAX, the trampoline contains an entry mask and two instructions:
2094 movl $STATIC,r0 (store the functions static chain)
2095 jmp *$FUNCTION (jump to function code at address FUNCTION) */
2098 vax_asm_trampoline_template (FILE *f ATTRIBUTE_UNUSED
)
2100 assemble_aligned_integer (2, const0_rtx
);
2101 assemble_aligned_integer (2, GEN_INT (0x8fd0));
2102 assemble_aligned_integer (4, const0_rtx
);
2103 assemble_aligned_integer (1, GEN_INT (0x50 + STATIC_CHAIN_REGNUM
));
2104 assemble_aligned_integer (2, GEN_INT (0x9f17));
2105 assemble_aligned_integer (4, const0_rtx
);
2108 /* We copy the register-mask from the function's pure code
2109 to the start of the trampoline. */
2112 vax_trampoline_init (rtx m_tramp
, tree fndecl
, rtx cxt
)
2114 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
2117 emit_block_move (m_tramp
, assemble_trampoline_template (),
2118 GEN_INT (TRAMPOLINE_SIZE
), BLOCK_OP_NORMAL
);
2120 mem
= adjust_address (m_tramp
, HImode
, 0);
2121 emit_move_insn (mem
, gen_const_mem (HImode
, fnaddr
));
2123 mem
= adjust_address (m_tramp
, SImode
, 4);
2124 emit_move_insn (mem
, cxt
);
2125 mem
= adjust_address (m_tramp
, SImode
, 11);
2126 emit_move_insn (mem
, plus_constant (Pmode
, fnaddr
, 2));
2127 emit_insn (gen_sync_istream ());
2130 /* Value is the number of bytes of arguments automatically
2131 popped when returning from a subroutine call.
2132 FUNDECL is the declaration node of the function (as a tree),
2133 FUNTYPE is the data type of the function (as a tree),
2134 or for a library call it is an identifier node for the subroutine name.
2135 SIZE is the number of bytes of arguments passed on the stack.
2137 On the VAX, the RET insn pops a maximum of 255 args for any function. */
2140 vax_return_pops_args (tree fundecl ATTRIBUTE_UNUSED
,
2141 tree funtype ATTRIBUTE_UNUSED
, poly_int64 size
)
2143 return size
> 255 * 4 ? 0 : (HOST_WIDE_INT
) size
;
2146 /* Implement TARGET_FUNCTION_ARG. On the VAX all args are pushed. */
2149 vax_function_arg (cumulative_args_t
, const function_arg_info
&)
2154 /* Update the data in CUM to advance over argument ARG. */
2157 vax_function_arg_advance (cumulative_args_t cum_v
,
2158 const function_arg_info
&arg
)
2160 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
2162 *cum
+= (arg
.promoted_size_in_bytes () + 3) & ~3;
2165 static HOST_WIDE_INT
2166 vax_starting_frame_offset (void)
2168 /* On ELF targets, reserve the top of the stack for exception handler
2170 return TARGET_ELF
? -4 : 0;