/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tree-pass.h"
#include "gimple-iterator.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
static int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
126 /* Costs of various operations on the different architectures. */
128 struct alpha_rtx_cost_data
130 unsigned char fp_add
;
131 unsigned char fp_mult
;
132 unsigned char fp_div_sf
;
133 unsigned char fp_div_df
;
134 unsigned char int_mult_si
;
135 unsigned char int_mult_di
;
136 unsigned char int_shift
;
137 unsigned char int_cmov
;
138 unsigned short int_div
;
141 static struct alpha_rtx_cost_data
const alpha_rtx_cost_data
[PROCESSOR_MAX
] =
144 COSTS_N_INSNS (6), /* fp_add */
145 COSTS_N_INSNS (6), /* fp_mult */
146 COSTS_N_INSNS (34), /* fp_div_sf */
147 COSTS_N_INSNS (63), /* fp_div_df */
148 COSTS_N_INSNS (23), /* int_mult_si */
149 COSTS_N_INSNS (23), /* int_mult_di */
150 COSTS_N_INSNS (2), /* int_shift */
151 COSTS_N_INSNS (2), /* int_cmov */
152 COSTS_N_INSNS (97), /* int_div */
155 COSTS_N_INSNS (4), /* fp_add */
156 COSTS_N_INSNS (4), /* fp_mult */
157 COSTS_N_INSNS (15), /* fp_div_sf */
158 COSTS_N_INSNS (22), /* fp_div_df */
159 COSTS_N_INSNS (8), /* int_mult_si */
160 COSTS_N_INSNS (12), /* int_mult_di */
161 COSTS_N_INSNS (1) + 1, /* int_shift */
162 COSTS_N_INSNS (1), /* int_cmov */
163 COSTS_N_INSNS (83), /* int_div */
166 COSTS_N_INSNS (4), /* fp_add */
167 COSTS_N_INSNS (4), /* fp_mult */
168 COSTS_N_INSNS (12), /* fp_div_sf */
169 COSTS_N_INSNS (15), /* fp_div_df */
170 COSTS_N_INSNS (7), /* int_mult_si */
171 COSTS_N_INSNS (7), /* int_mult_di */
172 COSTS_N_INSNS (1), /* int_shift */
173 COSTS_N_INSNS (2), /* int_cmov */
174 COSTS_N_INSNS (86), /* int_div */
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
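
/* A worked example of the fractional tuning above (illustrative only;
   the 4-units-per-insn scale comes from the generic definition
   COSTS_N_INSNS (N) == (N) * 4 in rtl.h):

     COSTS_N_INSNS (1)      == 4   exactly one insn
     COSTS_N_INSNS (1) + 1  == 5   "one insn and a quarter"
     COSTS_N_INSNS (1) + 2  == 6   "one insn and a half"

   so, when optimizing for size, a DImode multiply is only beaten by a
   shift/add replacement if that replacement needs at most one extra insn.  */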
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
static void alpha_handle_trap_shadows (void);
static void alpha_align_insns (void);
static void alpha_override_options_after_change (void);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (scalar_int_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif
static unsigned int
rest_of_handle_trap_shadows (void)
{
  alpha_handle_trap_shadows ();
  return 0;
}

namespace {

const pass_data pass_data_handle_trap_shadows =
{
  RTL_PASS,		/* type */
  "trap_shadows",	/* name */
  OPTGROUP_NONE,	/* optinfo_flags */
  TV_NONE,		/* tv_id */
  0,			/* properties_required */
  0,			/* properties_provided */
  0,			/* properties_destroyed */
  0,			/* todo_flags_start */
  TODO_df_finish,	/* todo_flags_finish */
};

class pass_handle_trap_shadows : public rtl_opt_pass
{
public:
  pass_handle_trap_shadows(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_handle_trap_shadows ();
    }

}; // class pass_handle_trap_shadows

} // anon namespace

rtl_opt_pass *
make_pass_handle_trap_shadows (gcc::context *ctxt)
{
  return new pass_handle_trap_shadows (ctxt);
}
static unsigned int
rest_of_align_insns (void)
{
  alpha_align_insns ();
  return 0;
}

namespace {

const pass_data pass_data_align_insns =
{
  RTL_PASS,		/* type */
  "align_insns",	/* name */
  OPTGROUP_NONE,	/* optinfo_flags */
  TV_NONE,		/* tv_id */
  0,			/* properties_required */
  0,			/* properties_provided */
  0,			/* properties_destroyed */
  0,			/* todo_flags_start */
  TODO_df_finish,	/* todo_flags_finish */
};

class pass_align_insns : public rtl_opt_pass
{
public:
  pass_align_insns(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_align_insns, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* Due to the number of extra trapb insns, don't bother fixing up
         alignment when trap precision is instruction.  Moreover, we can
         only do our job when sched2 is run.  */
      return ((alpha_tune == PROCESSOR_EV4
               || alpha_tune == PROCESSOR_EV5)
              && optimize && !optimize_size
              && alpha_tp != ALPHA_TP_INSN
              && flag_schedule_insns_after_reload);
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_align_insns ();
    }

}; // class pass_align_insns

} // anon namespace

rtl_opt_pass *
make_pass_align_insns (gcc::context *ctxt)
{
  return new pass_align_insns (ctxt);
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
    const unsigned short line_size; /* in bytes */
    const unsigned short l1_size;   /* in kb.  */
    const unsigned short l2_size;   /* in kb.  */
  } cpu_table[] = {
    /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
       EV4/EV45 had 128k to 16M 32-byte direct Bcache.  LCA45
       had 64k to 8M 8-byte direct Bcache.  */
    { "ev4",	PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "21064",	PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "ev45",	PROCESSOR_EV4, 0, 32, 16, 16*1024 },

    /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
       and 1M to 16M 64 byte L3 (not modeled).
       PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
       PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache.  */
    { "ev5",	PROCESSOR_EV5, 0, 32, 8, 96 },
    { "21164",	PROCESSOR_EV5, 0, 32, 8, 96 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "21164a",	PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },

    /* EV6 had 64k 64 byte L1, 1M to 16M Bcache.  */
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
  };
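
  /* Example of how a table entry is applied (illustrative): with
     -mcpu=pca56, the lookup loop below sets alpha_cpu and alpha_tune to
     PROCESSOR_EV5, turns on MASK_BWX|MASK_MAX, and feeds a 64-byte line
     size, 16k L1 and 4M L2 into the --param defaults.  */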
  int const ct_size = ARRAY_SIZE (cpu_table);
  int line_size = 0, l1_size = 0, l2_size = 0;
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for %<-mtrap-precision%> switch",
               alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for %<-mfp-rounding-mode%> switch",
               alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for %<-mfp-trap-mode%> switch",
               alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table[i].name))
          {
            alpha_tune = alpha_cpu = cpu_table[i].processor;
            line_size = cpu_table[i].line_size;
            l1_size = cpu_table[i].l1_size;
            l2_size = cpu_table[i].l2_size;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table[i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for %<-mcpu%> switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table[i].name))
          {
            alpha_tune = cpu_table[i].processor;
            line_size = cpu_table[i].line_size;
            l1_size = cpu_table[i].l1_size;
            l2_size = cpu_table[i].l2_size;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for %<-mtune%> switch", alpha_tune_string);
    }
  if (line_size)
    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  if (l1_size)
    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  if (l2_size)
    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires %<-mtrap-precision=i%>");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit %<long double%> not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
          { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for %<-mmemory-latency%>",
                 alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;
  alpha_override_options_after_change ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Implement targetm.override_options_after_change.  */

static void
alpha_override_options_after_change (void)
{
  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize.  */
  if (optimize > 0)
    {
      if (flag_align_loops && !str_align_loops)
        str_align_loops = "16";
      if (flag_align_jumps && !str_align_jumps)
        str_align_jumps = "16";
    }
  if (flag_align_functions && !str_align_functions)
    str_align_functions = "16";
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

static int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
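
/* Illustrative examples of zap_mask (not part of the original flow):

     zap_mask (0xffffffff00000000)  -> 1   every byte is 0x00 or 0xff
     zap_mask (0x00ff00ff00ff00ff)  -> 1
     zap_mask (0x0000000000000080)  -> 0   low byte is 0x80

   Such masks map directly onto the 8-bit byte-select immediate of the
   Alpha ZAP/ZAPNOT instructions, one mask bit per byte.  */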
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (SUBREG_P (tmp))
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc (REGNO (tmp));
          if (op == 0)
            return 0;
        }
    }
  return op;
}
/* The scalar modes supported differs from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (scalar_mode mode)
{
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
    case E_SImode:
    case E_DImode:
    case E_TImode: /* via optabs.c */
      return true;

    case E_SFmode:
    case E_DFmode:
      return true;

    case E_TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* COMMON symbols are never small data.  */
  if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
        return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname[symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (SUBREG_P (x)
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (SUBREG_P (x)
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (SUBREG_P (x)
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, dest, seq;
      rtx_insn *insn;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          {
            start_sequence ();

            r0 = gen_rtx_REG (Pmode, 0);
            r16 = gen_rtx_REG (Pmode, 16);
            tga = get_tls_get_addr ();
            dest = gen_reg_rtx (Pmode);
            seq = GEN_INT (alpha_next_sequence_number++);

            emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
            rtx val = gen_call_value_osf_tlsgd (r0, tga, seq);
            insn = emit_call_insn (val);
            RTL_CONST_CALL_P (insn) = 1;
            use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

            insn = get_insns ();
            end_sequence ();

            emit_libcall_block (insn, dest, r0, x);
            return dest;
          }

        case TLS_MODEL_LOCAL_DYNAMIC:
          {
            start_sequence ();

            r0 = gen_rtx_REG (Pmode, 0);
            r16 = gen_rtx_REG (Pmode, 16);
            tga = get_tls_get_addr ();
            scratch = gen_reg_rtx (Pmode);
            seq = GEN_INT (alpha_next_sequence_number++);

            emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
            rtx val = gen_call_value_osf_tlsldm (r0, tga, seq);
            insn = emit_call_insn (val);
            RTL_CONST_CALL_P (insn) = 1;
            use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

            insn = get_insns ();
            end_sequence ();

            eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                  UNSPEC_TLSLDM_CALL);
            emit_libcall_block (insn, scratch, r0, eqv);

            eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
            eqv = gen_rtx_CONST (Pmode, eqv);

            if (alpha_tls_size == 64)
              {
                dest = gen_reg_rtx (Pmode);
                emit_insn (gen_rtx_SET (dest, eqv));
                emit_insn (gen_adddi3 (dest, dest, scratch));
                return dest;
              }
            if (alpha_tls_size == 32)
              {
                rtx temp = gen_rtx_HIGH (Pmode, eqv);
                temp = gen_rtx_PLUS (Pmode, scratch, temp);
                scratch = gen_reg_rtx (Pmode);
                emit_insn (gen_rtx_SET (scratch, temp));
              }
            return gen_rtx_LO_SUM (Pmode, scratch, eqv);
          }

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_get_thread_pointerdi (tp));
          emit_insn (gen_rtx_SET (scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_get_thread_pointerdi (tp));
          if (alpha_tls_size == 32)
            {
              rtx temp = gen_rtx_HIGH (Pmode, eqv);
              temp = gen_rtx_PLUS (Pmode, tp, temp);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (tp, temp));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }

      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (Pmode, x, low);
  }
}
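
/* A worked example of the split_addend arithmetic above (illustrative):
   for addend == 0x9000,

     low  = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000
     high = the remaining 0x10000, emitted via ldah with immediate 0x1

   so reg+0x9000 becomes (plus (plus reg 0x10000) -0x7000), with both
   parts fitting the signed 16-bit displacements of lda/ldah.  */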
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}
/* Return true if ADDR has an effect that depends on the machine mode it
   is used for.  On the Alpha this is true only for the unaligned modes.
   We can simplify the test since we know that the address must be valid.  */

static bool
alpha_mode_dependent_address_p (const_rtx addr,
                                addr_space_t as ATTRIBUTE_UNUSED)
{
  return GET_CODE (addr) == AND;
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static bool
some_small_symbolic_operand_int (rtx x)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
    {
      rtx x = *iter;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
        iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
        return true;
    }
  return false;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
    {
      rtx *ptr = *iter;
      rtx x = *ptr;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
        iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
        {
          *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
          iter.skip_subrtxes ();
        }
    }
  return x;
}
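
/* Illustrative rewrite performed by split_small_symbolic_operand:

     (mem:DI (symbol_ref "small_var"))
       -->  (mem:DI (lo_sum:DI (reg:DI 29) (symbol_ref "small_var")))

   i.e. a small-data reference becomes a 16-bit displacement off the GP
   register ($29); "small_var" here is a made-up name for the example.  */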
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx_insn *insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && CONST_INT_P (XEXP (x, 1)))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
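
/* Worked example of the ldah/lda displacement split above (illustrative):
   for val == 0x123456,

     low  = ((0x3456 ^ 0x8000) - 0x8000)          = 0x3456
     high = ((0x120000 ^ 0x80000000) - 0x80000000) = 0x120000

   high + low == val, so the reload becomes a single ldah of 0x12 into a
   base register while the mem keeps the 0x3456 offset.  For val just
   below 0x80000000 the high part wraps negative and high + low != val,
   which is exactly what the 32-bit overflow check rejects.  */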
/* Return the cost of moving between registers of various classes.  Moving
   between FLOAT_REGS and anything else except float regs is expensive.
   In fact, we make it quite expensive because we really don't want to
   do these moves unless it is clearly worth it.  Optimizations may
   reduce the impact of not being able to allocate a pseudo to a
   hard register.  */

static int
alpha_register_move_cost (machine_mode /*mode*/,
                          reg_class_t from, reg_class_t to)
{
  if ((from == FLOAT_REGS) == (to == FLOAT_REGS))
    return 2;

  if (TARGET_FIX)
    return (from == FLOAT_REGS) ? 6 : 8;

  return 4 + 2 * alpha_memory_latency;
}
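
/* For illustration: with the default alpha_memory_latency of 3, a
   GP<->FP move without the FIX extension costs 4 + 2*3 = 10, i.e. it
   is priced like a store/load round trip through memory, while any
   move within a single register file costs 2.  */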
/* Return the cost of moving data of MODE from a register to
   or from memory.  On the Alpha, bump this up a bit.  */

static int
alpha_memory_move_cost (machine_mode /*mode*/, reg_class_t /*regclass*/,
                        bool /*in*/)
{
  return 2 * alpha_memory_latency;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
                 bool speed)
{
  int code = GET_CODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == ASHIFT
               && const23_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode,
                              (enum rtx_code) outer_code, opno, speed)
                    + rtx_cost (XEXP (x, 1), mode,
                                (enum rtx_code) outer_code, opno, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
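
/* Worked example (illustrative): a QImode ref at (plus $9 6) with no
   alignment info gives disp = 6, offset = disp & 3 = 2, so
   *paligned_mem is the SImode word at (plus $9 4) and *pbitnum is 16,
   i.e. the byte sits 16 bits up within its aligned longword.  */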
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (Pmode, base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
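
/* For illustration: with addr == (plus $3 13) and ofs == 2, the
   constant folds to 15 and the result is a register holding
   $3 + (15 & 7) == $3 + 7, which has the same low three bits as the
   original address plus offset.  */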
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_SCALAR_INT_P (x)
      || CONST_DOUBLE_P (x)
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
        return NO_REGS;
      if (rclass == ALL_REGS)
        return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
        {
          if (in_p)
            {
              if (!aligned_memory_operand (x, mode))
                sri->icode = direct_optab_handler (reload_in_optab, mode);
            }
          else
            sri->icode = direct_optab_handler (reload_out_optab, mode);
          return NO_REGS;
        }
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
          && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
        return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Implement TARGET_SECONDARY_MEMORY_NEEDED.

   If we are copying between general and FP registers, we need a memory
   location unless the FIX extension is available.  */

static bool
alpha_secondary_memory_needed (machine_mode, reg_class_t class1,
                               reg_class_t class2)
{
  return (!TARGET_FIX
          && ((class1 == FLOAT_REGS && class2 != FLOAT_REGS)
              || (class2 == FLOAT_REGS && class1 != FLOAT_REGS)));
}
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  If MODE is
   floating-point, use it.  Otherwise, widen to a word like the default.
   This is needed because we always store integers in FP registers in
   quadword format.  This whole area is very tricky!  */

static machine_mode
alpha_secondary_memory_needed_mode (machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    return mode;
  if (GET_MODE_SIZE (mode) >= 4)
    return DImode;
  return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx_insn *insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  subrtx_var_iterator::array_type array;
  for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx x = *iter;
          if (MEM_P (x))
            {
              MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
              MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
              MEM_READONLY_P (x) = MEM_READONLY_P (ref);
              /* Sadly, we cannot use alias sets because the extra
                 aliasing produced by the AND interferes.  Given that
                 two-byte quantities are the only thing we would be
                 able to differentiate anyway, there does not seem to
                 be any point in convoluting the early out of the
                 alias check.  */
              iter.skip_subrtxes ();
            }
        }
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
                                 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  */

  if (c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }
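
      /* Worked example of the adjustment above (illustrative): for
         c == 0x7fff8000, low == -0x8000 and the naive high would be
         -0x8000 (negative) even though c is positive.  With
         extra == 0x4000 the constant is instead built as two ldah
         additions of 0x40000000 each followed by lda of -0x8000:
         0x40000000 + 0x40000000 - 0x8000 == 0x7fff8000.  */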
      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (!can_create_pseudo_p ())
            {
              emit_insn (gen_rtx_SET (target, GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (target, insn);
          emit_insn (insn);
          return target;
        }
    }
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;
  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
                                   target, 0, OPTAB_WIDEN);
            }
        }
      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }
      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp && c < 0)
              {
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                     target, 1, OPTAB_WIDEN);
              }
          }
      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
    }
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
                               target, 0, OPTAB_WIDEN);
        }
    }

  return 0;
}
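
/* Worked example of the ZAPNOT trick above (illustrative): for
   c == 0x00ffffffffffffaa, only the high byte is zero, so new_const
   becomes 0xffffffffffffffaa, loadable with a single lda of -86.
   Then c | ~new_const == 0x00ffffffffffffff, a full-byte mask, so
   "lda t,-86 ; zapnot t,0x7f,r" rebuilds c in two insns.  */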
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
{
  machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
        return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
        {
          rtx_insn *insn;
          rtx set;

          if (no_output)
            return result;

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
          break;
        }
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
        result = orig_target;
      else if (mode != orig_mode)
        result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */

  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
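
/* Worked example of the decomposition (illustrative): for
   c1 == 0x1234567887654321,

     d1 = 0x4321   d2 = -0x789b0000   d3 = 0x5679   d4 = 0x12340000

   and the emitted sequence computes
   (((0x12340000 + 0x5679) << 32) - 0x789b0000) + 0x4321, reassembling
   the original constant from lda/ldah-sized pieces only.  */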
/* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits.  */

static HOST_WIDE_INT
alpha_extract_integer (rtx x)
{
  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  gcc_assert (CONST_INT_P (x));

  return INTVAL (x);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
        x = XEXP (XEXP (x, 0), 0);
      else
        return true;

      if (GET_CODE (x) != SYMBOL_REF)
        return true;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_WIDE_INT:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      if (x == CONST0_RTX (mode))
        return true;
      gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2);
      i0 = CONST_WIDE_INT_ELT (x, 1);
      if (alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL)
        return false;
      i0 = CONST_WIDE_INT_ELT (x, 0);
      goto do_integer;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        return true;
      return false;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
        return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
        return false;
      if (GET_MODE_SIZE (mode) != 8)
        return false;
      /* FALLTHRU */

    case CONST_INT:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      i0 = alpha_extract_integer (x);
    do_integer:
      return alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0;
  rtx temp = NULL_RTX;

  i0 = alpha_extract_integer (operands[1]);

  temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
        emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
        {
          if (tmp == operands[0])
            return true;
          operands[1] = tmp;
          return false;
        }
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
        return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
        {
          if (reload_in_progress)
            {
              seq = gen_reload_in_aligned (mode, operands[0], operands[1]);
              emit_insn (seq);
            }
          else
            {
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);
              rtx subtarget;
              bool copyout;

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (REG_P (subtarget))
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
              else
                subtarget = gen_reg_rtx (DImode), copyout = true;

              if (mode == QImode)
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
                                          bitnum, scratch);
              else
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
                                          bitnum, scratch);
              emit_insn (seq);

              if (copyout)
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
            }
        }
      else
        {
          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, subtarget, ua;
          bool copyout;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (REG_P (subtarget))
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
          else
            subtarget = gen_reg_rtx (DImode), copyout = true;

          ua = get_unaligned_address (operands[1]);
          if (mode == QImode)
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
          else
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

          alpha_set_memflags (seq, operands[1]);
          emit_insn (seq);

          if (copyout)
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
        }
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
        {
          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
                                        temp1, temp2));
        }
      else
        {
          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx ua = get_unaligned_address (operands[0]);

          seq = gen_unaligned_store
            (mode, ua, operands[1], temp1, temp2, temp3);

          alpha_set_memflags (seq, operands[0]);
          emit_insn (seq);
        }
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */
void
alpha_expand_movmisalign (machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
        tmp = operands[0];
      else
        tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
        emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
        operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code,
   ending in

        fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back,
   keying off a table of constants:

   LC0: .long 0,0x5f800000

        cpyse   $f11,$f31,$f10
        cpyse   $f31,$f11,$f11

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
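/* A C-level sketch of the branch-full expansion above (added for
   illustration; not part of the original source):

     double floatuns (unsigned long x)
     {
       if ((long) x >= 0)
         return (double) x;             // non-negative: plain convert
       // Halve with the low bit OR'd back in (round-to-odd), convert,
       // then double; the folded-in bit keeps the final rounding right.
       unsigned long h = (x >> 1) | (x & 1);
       double f = (double) (long) h;
       return f + f;
     }
*/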
/* Generate the comparison for a conditional branch.  */

void
alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
{
  enum rtx_code cmp_code, branch_code;
  machine_mode branch_mode = VOIDmode;
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1], op1 = operands[2];
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (cmp_mode == DFmode)
        {
          cmp_code = swap_condition (code);
          branch_code = NE;
          std::swap (op0, op1);
        }
      else
        {
          cmp_code = reverse_condition (code);
          branch_code = EQ;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DFmode)
    {
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
        {
          /* When we are not as concerned about non-finite values, and we
             are comparing against zero, we can branch directly.  */
          if (op1 == CONST0_RTX (DFmode))
            cmp_code = UNKNOWN, branch_code = code;
          else if (op0 == CONST0_RTX (DFmode))
            {
              /* Undo the swap we probably did just above.  */
              std::swap (op0, op1);
              branch_code = swap_condition (cmp_code);
              cmp_code = UNKNOWN;
            }
        }
      else
        {
          /* ??? We mark the branch mode to be CCmode to prevent the
             compare and branch from being combined, since the compare
             insn follows IEEE rules that the branch does not.  */
          branch_mode = CCmode;
        }
    }
  else
    {
      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
        {
          /* Whee.  Compare and branch against 0 directly.  */
          if (op1 == const0_rtx)
            cmp_code = UNKNOWN, branch_code = code;

          /* If the constant doesn't fit into an immediate, but can
             be generated by lda/ldah, we adjust the argument and
             compare against zero, so we can use beq/bne directly.  */
          /* ??? Don't do this when comparing against symbols, otherwise
             we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
             be declared false out of hand (at least for non-weak).  */
          else if (CONST_INT_P (op1)
                   && (code == EQ || code == NE)
                   && !(symbolic_operand (op0, VOIDmode)
                        || (REG_P (op0) && REG_POINTER (op0))))
            {
              rtx n_op1 = GEN_INT (-INTVAL (op1));

              if (! satisfies_constraint_I (op1)
                  && (satisfies_constraint_K (n_op1)
                      || satisfies_constraint_L (n_op1)))
                cmp_code = PLUS, branch_code = code, op1 = n_op1;
            }
        }

      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Emit the branch instruction.  */
  tem = gen_rtx_SET (pc_rtx,
                     gen_rtx_IF_THEN_ELSE (VOIDmode,
                                           gen_rtx_fmt_ee (branch_code,
                                                           branch_mode, tem,
                                                           CONST0_RTX (cmp_mode)),
                                           gen_rtx_LABEL_REF (VOIDmode,
                                                              operands[3]),
                                           pc_rtx));
  emit_jump_insn (tem);
}
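/* Worked example (added for illustration, not from the original source):
   a signed "branch if a < b" becomes a compare we do have plus a branch
   on the boolean result,

        cmplt   $16,$17,$1              ; $1 = (a < b)
        bne     $1,target

   while "branch if a > b" is reversed per the GE/GT case above into
   cmple + beq, since there is no cmpgt instruction.  */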
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */
bool
alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
{
  enum rtx_code cmp_code;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2], op1 = operands[3];
  rtx tmp;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  if (cmp_mode == DFmode && !TARGET_FIX)
    return false;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (cmp_mode == DFmode)
        cmp_code = code, code = NE;
      break;

    case NE:
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      code = swap_condition (code);
      if (cmp_mode == DFmode)
        cmp_code = code, code = NE;
      std::swap (op0, op1);
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!register_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      tmp = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode,
                                                   op0, op1)));

      op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Emit the setcc instruction.  */
  emit_insn (gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode,
                                                       op0, op1)));
  return true;
}
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */
rtx
alpha_emit_conditional_move (rtx cmp, machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);

  if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
        return 0;

      /* If we have fp<->int register move instructions, do a cmov by
         performing the comparison in fp registers, and move the
         zero/nonzero value to integer registers, where we can then
         use a normal cmov, or vice-versa.  */

      switch (code)
        {
        case EQ: case LE: case LT: case LEU: case LTU:
        case UNORDERED:
          /* We have these compares.  */
          cmp_code = code, code = NE;
          break;

        case NE:
        case ORDERED:
          /* These must be reversed.  */
          cmp_code = reverse_condition (code), code = EQ;
          break;

        case GE: case GT: case GEU: case GTU:
          /* These normally need swapping, but for integer zero we have
             special patterns that recognize swapped operands.  */
          if (cmp_mode == DImode && op1 == const0_rtx)
            cmp_code = code, code = NE;
          else
            {
              cmp_code = swap_condition (code);
              code = NE;
              std::swap (op0, op1);
            }
          break;

        default:
          gcc_unreachable ();
        }

      if (cmp_mode == DImode)
        {
          if (!reg_or_0_operand (op0, DImode))
            op0 = force_reg (DImode, op0);
          if (!reg_or_8bit_operand (op1, DImode))
            op1 = force_reg (DImode, op1);
        }

      tem = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode,
                                                   op0, op1)));

      cmp_mode = cmp_mode == DImode ? E_DFmode : E_DImode;
      op0 = gen_lowpart (cmp_mode, tem);
      op1 = CONST0_RTX (cmp_mode);
      cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
      local_fast_math = 1;
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (cmp_mode == DImode || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (!can_create_pseudo_p ())
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (cmp_mode == DFmode && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */
bool
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
                              rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      std::swap (t, f);
      diff = -diff;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (can_create_pseudo_p ())
        subtarget = gen_reg_rtx (DImode);
      else
        subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
         viable over a longer latency cmove.  On EV5, the E0 slot is a
         scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
                            GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      if (diff == 1)
        emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
        {
          add_op = GEN_INT (f);
          if (sext_add_operand (add_op, mode))
            {
              tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
                                    GEN_INT (exact_log2 (diff)));
              tmp = gen_rtx_PLUS (DImode, tmp, add_op);
              emit_insn (gen_rtx_SET (target, tmp));
            }
          else
            return false;
        }
    }
  else
    return false;

  return true;
}
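/* Worked example (added for illustration, not from the original source):
   (cond ? 8 : 0) hits the shift branch above with f == 0, diff == 8 and
   exact_log2 (diff) == 3, yielding

        cmpXX   cond,0,$t               ; setcc per CODE
        sll     $t,3,$dest

   rather than a longer-latency cmove of the two constants.  */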
/* Look up the function X_floating library function name for the
   given operation.  */

struct GTY(()) xfloating_op
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,               "_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,              "_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,               "_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,                "_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,                 "_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,                 "_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,                 "_OtsLssX", "OTS$LSS_X", 0 },
  { LE,                 "_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,                 "_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,                 "_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,                "_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,              "_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,     "_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,       "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,       "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};
static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
        rtx func = ops->libcall;
        if (!func)
          {
            func = init_one_libfunc (TARGET_ABI_OPEN_VMS
                                     ? ops->vms_func : ops->osf_func);
            ops->libcall = func;
          }
        return func;
      }

  gcc_unreachable ();
}
/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
                                  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.  */
static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
                              int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
        {
        case E_TFmode:
          reg = gen_rtx_REG (TFmode, regno);
          regno += 2;
          break;

        case E_DFmode:
          reg = gen_rtx_REG (DFmode, regno + 32);
          regno += 1;
          break;

        case E_VOIDmode:
          gcc_assert (CONST_INT_P (operands[i]));
          /* FALLTHRU */
        case E_DImode:
          reg = gen_rtx_REG (DImode, regno);
          regno += 1;
          break;

        default:
          gcc_unreachable ();
        }

      emit_move_insn (reg, operands[i]);
      use_reg (&usage, reg);
    }

  switch (GET_MODE (target))
    {
    case E_TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case E_DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case E_DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  rtx mem = gen_rtx_MEM (QImode, func);
  rtx_insn *tmp = emit_call_insn (gen_call_value (reg, mem, const0_rtx,
                                                  const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  RTL_CONST_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
                                gen_rtx_fmt_ee (code, TFmode, operands[1],
                                                operands[2]));
}
/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2], note;

  /* X_floating library comparison functions return
           -1  unordered
            0  false
            1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* What's actually returned is -1,0,1, not a proper boolean value.  */
  note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
  note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);

  return out;
}
/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
                                gen_rtx_fmt_e (orig_code,
                                               GET_MODE (operands[0]),
                                               operands[1]));
}
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */
void
alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
                        bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    CASE_CONST_SCALAR_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      std::swap (operands[0], operands[1]);
      std::swap (operands[2], operands[3]);
    }
}
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */
void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
        move = 2;
      else
        move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
        emit_move_insn (operands[1], scratch);
    }
}
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

           unsigned:                    signed:
   word:   ldq_u  r1,X(r11)             ldq_u  r1,X(r11)
           ldq_u  r2,X+1(r11)           ldq_u  r2,X+1(r11)
           lda    r3,X(r11)             lda    r3,X+2(r11)
           extwl  r1,r3,r1              extql  r1,r3,r1
           extwh  r2,r3,r2              extqh  r2,r3,r2
           or     r1,r2,r1              or     r1,r2,r1

   long:   ldq_u  r1,X(r11)             ldq_u  r1,X(r11)
           ldq_u  r2,X+3(r11)           ldq_u  r2,X+3(r11)
           lda    r3,X(r11)             lda    r3,X(r11)
           extll  r1,r3,r1              extll  r1,r3,r1
           extlh  r2,r3,r2              extlh  r2,r3,r2
           or     r1,r2,r1              addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
           (and similarly with a second ldq_u, extql/extqh and an or)  */
void
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
                             HOST_WIDE_INT ofs, int sign)
{
  rtx meml, memh, addr, extl, exth, tmp, mema;
  machine_mode mode;

  if (TARGET_BWX && size == 2)
    {
      meml = adjust_address (mem, QImode, ofs);
      memh = adjust_address (mem, QImode, ofs+1);
      extl = gen_reg_rtx (DImode);
      exth = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendqidi2 (extl, meml));
      emit_insn (gen_zero_extendqidi2 (exth, memh));
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
                                  NULL, 1, OPTAB_LIB_WIDEN);
      addr = expand_simple_binop (DImode, IOR, extl, exth,
                                  NULL, 1, OPTAB_LIB_WIDEN);

      if (sign && GET_MODE (tgt) != HImode)
        {
          addr = gen_lowpart (HImode, addr);
          emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
        }
      else
        {
          if (GET_MODE (tgt) != DImode)
            addr = gen_lowpart (GET_MODE (tgt), addr);
          emit_move_insn (tgt, addr);
        }
      return;
    }

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (DImode, mema, ofs),
                                     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (DImode, mema,
                                                    ofs + size - 1),
                                     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));

      emit_insn (gen_extql (extl, meml, addr));
      emit_insn (gen_extqh (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
         addr for the target, because addr is marked as a pointer and combine
         knows that pointers are always sign-extended 32-bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
                           addr, 1, OPTAB_WIDEN);
    }
  else
    {
      emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
      emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
      switch ((int) size)
        {
        case 2:
          emit_insn (gen_extwh (exth, memh, addr));
          mode = HImode;
          break;
        case 4:
          emit_insn (gen_extlh (exth, memh, addr));
          mode = SImode;
          break;
        case 8:
          emit_insn (gen_extqh (exth, memh, addr));
          mode = DImode;
          break;
        default:
          gcc_unreachable ();
        }

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
                           gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
                           sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
}
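/* Illustrative aside (added commentary, not from the original source):
   with BWX the 2-byte case above instead reduces to

        ldbu    $1,X($16)
        ldbu    $2,X+1($16)
        sll     $2,8,$2
        bis     $1,$2,$1

   with an optional sign extension of the result.  */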
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
                              HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
        {
          dstl = gen_lowpart (QImode, src);
          dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
                                      NULL, 1, OPTAB_LIB_WIDEN);
          dsth = gen_lowpart (QImode, dsth);
        }
      else
        dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (DImode, dsta, ofs),
                                      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (DImode, dsta,
                                                     ofs + size - 1),
                                      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);

  addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
                            GEN_INT (size*8), addr));

      switch ((int) size)
        {
        case 2:
          emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
          break;
        case 4:
          emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
          break;
        case 8:
          emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
          break;
        default:
          gcc_unreachable ();
        }
    }

  emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

  switch ((int) size)
    {
    case 2:
      emit_insn (gen_mskwl (dstl, dstl, addr));
      break;
    case 4:
      emit_insn (gen_mskll (dstl, dstl, addr));
      break;
    case 8:
      emit_insn (gen_mskql (dstl, dstl, addr));
      break;
    default:
      gcc_unreachable ();
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  /* Must store high before low for degenerate case of aligned.  */
  emit_move_insn (memh, dsth);
  emit_move_insn (meml, dstl);
}
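/* For reference (added commentary, not from the original source), the
   non-BWX quadword path above corresponds to the Handbook sequence

        ldq_u   $2,X+7($16)     ; load both halves of the destination
        ldq_u   $1,X($16)
        lda     $3,X($16)
        insqh   $17,$3,$5       ; shift the source into place
        insql   $17,$3,$4
        mskqh   $2,$3,$2        ; clear the bytes about to be stored
        mskql   $1,$3,$1
        bis     $2,$5,$2        ; merge, then store high half first
        bis     $1,$4,$1
        stq_u   $2,X+7($16)
        stq_u   $1,X($16)
*/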
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS 8
/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
                                   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;
  HOST_WIDE_INT i;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      tmp = change_address (smem, DImode,
                            gen_rtx_AND (DImode,
                                         plus_constant (DImode, smema, 8*i),
                                         im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);
    }

  tmp = change_address (smem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (DImode, smema,
                                                    8*words - 1),
                                     im8));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
                       1, OPTAB_WIDEN);
  for (i = 0; i < words; ++i)
    {
      emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
      emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
      emit_insn (gen_rtx_SET (ext_tmps[i],
                              gen_rtx_IF_THEN_ELSE (DImode,
                                                    gen_rtx_EQ (DImode, areg,
                                                                const0_rtx),
                                                    const0_rtx,
                                                    ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
                                  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
                                    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx (DImode);
  st_tmp_1 = gen_reg_rtx (DImode);
  st_tmp_2 = gen_reg_rtx (DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (DImode, dmema,
                                                          words*8 - 1),
                                           im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
        {
          emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
          emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
        }
      for (i = words-1; i > 0; --i)
        {
          ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
                                        ins_tmps[i-1], ins_tmps[i-1], 1,
                                        OPTAB_WIDEN);
        }
    }

  /* Split and merge the ends with the destination data.  */
  emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
  emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
                               st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
                               st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
                                gen_rtx_AND (DImode,
                                             plus_constant (DImode,
                                                            dmema, i*8),
                                             im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  emit_move_insn (st_addr_1, st_tmp_1);
}
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */
int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (REG_P (tmp))
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && REG_P (XEXP (tmp, 0))
           && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
        {
          if (a >= 64 && c % 8 == 0)
            src_align = 64;
          else if (a >= 32 && c % 4 == 0)
            src_align = 32;
          else if (a >= 16 && c % 2 == 0)
            src_align = 16;
        }
    }

  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && REG_P (XEXP (tmp, 0))
           && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
        {
          if (a >= 64 && c % 8 == 0)
            dst_align = 64;
          else if (a >= 32 && c % 4 == 0)
            dst_align = 32;
          else if (a >= 16 && c % 2 == 0)
            dst_align = 16;
        }
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
        data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
        emit_move_insn (data_regs[nregs + i],
                        adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
        data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
        emit_move_insn (data_regs[nregs + i],
                        adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
        data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
                                         words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
        {
          do {
            data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
            emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
            bytes -= 2;
            ofs += 2;
          } while (bytes >= 2);
        }
      else if (! TARGET_BWX)
        {
          data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
          alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
          bytes -= 2;
          ofs += 2;
        }
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
        {
          emit_move_insn (adjust_address (orig_dst, DImode, ofs),
                          data_regs[i]);
          ofs += 8;
          i++;
        }
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
         two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
        {
          tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
                          gen_lowpart (SImode, data_regs[i]));
          emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
                          gen_lowpart (SImode, tmp));
          ofs += 8;
          i++;
        }

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
        {
          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
                          data_regs[i]);
          ofs += 4;
          i++;
        }
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
        if (GET_MODE (data_regs[i + words]) != DImode)
          break;

      if (words == 1)
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
        alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
                                            words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
        emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
        i++;
        ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
        i++;
        ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
/* Expand string/block clear operations, with the same operand layout
   as the clear pattern: operand 0 is the destination, operand 1 the
   byte count, and operand 3 the alignment.  */

int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && REG_P (XEXP (tmp, 0))
           && CONST_INT_P (XEXP (tmp, 1)))
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
        {
          if (a >= 64)
            align = a, alignofs = 8 - c % 8;
          else if (a >= 32)
            align = a, alignofs = 4 - c % 4;
          else if (a >= 16)
            align = a, alignofs = 2 - c % 2;
        }
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
      /* Given that alignofs is bounded by align, the only time BWX could
         generate three stores is for a 7 byte fill.  Prefer two individual
         stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
          && align >= 32
          && !(alignofs == 4 && bytes >= 4))
        {
          machine_mode mode = (align >= 64 ? DImode : SImode);
          int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
          rtx mem, tmp;
          HOST_WIDE_INT mask;

          mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
          set_mem_alias_set (mem, 0);

          mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8));
          if (bytes < alignofs)
            {
              mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8);
              ofs += bytes;
              bytes = 0;
            }
          else
            {
              bytes -= alignofs;
              ofs += alignofs;
            }
          alignofs = 0;

          tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);
        }

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
        {
          emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
          bytes -= 1;
          ofs += 1;
          alignofs -= 1;
        }
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
        {
          emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
          bytes -= 2;
          ofs += 2;
          alignofs -= 2;
        }
      if (alignofs == 4 && bytes >= 4)
        {
          emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
          bytes -= 4;
          ofs += 4;
          alignofs = 0;
        }

      /* If we've not used the extra lead alignment information by now,
         we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
        {
          alignofs = alignofs & -alignofs;
          align = MIN (align, alignofs * BITS_PER_UNIT);
        }
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
        emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
                        const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
        orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
        {
          rtx mem
            = change_address (orig_dst, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (DImode, orig_dsta,
                                                          ofs + i*8),
                                           GEN_INT (-8)));
          set_mem_alias_set (mem, 0);
          emit_move_insn (mem, const0_rtx);
        }

      /* Depending on the alignment, the first stq_u may have overlapped
         with the initial stl, which means that the last stq_u didn't
         write as much as it would appear.  Leave those questionable bytes
         unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
        emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
                        const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
        {
          rtx mem, tmp;
          HOST_WIDE_INT mask;

          mem = adjust_address (orig_dst, DImode, ofs);
          set_mem_alias_set (mem, 0);

          mask = HOST_WIDE_INT_M1U << (bytes * 8);

          tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);
          return 1;
        }
      else if (align >= 32 && bytes < 4)
        {
          rtx mem, tmp;
          HOST_WIDE_INT mask;

          mem = adjust_address (orig_dst, SImode, ofs);
          set_mem_alias_set (mem, 0);

          mask = HOST_WIDE_INT_M1U << (bytes * 8);

          tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);
          return 1;
        }
    }

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
        {
          do {
            emit_move_insn (adjust_address (orig_dst, HImode, ofs),
                            const0_rtx);
            bytes -= 2;
            ofs += 2;
          } while (bytes >= 2);
        }
      else if (! TARGET_BWX)
        {
          alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
          bytes -= 2;
          ofs += 2;
        }
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;
  HOST_WIDE_INT mask = 0;

  for (i = 7; i >= 0; --i)
    {
      mask <<= 8;
      if (!((value >> i) & 1))
        mask |= 0xff;
    }

  result = gen_int_mode (mask, DImode);
  return result;
}
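/* Worked example (added for illustration): value == 0x0f sets bits 0-3,
   so bytes 0-3 are zapped and the mask computed above is
   0xffffffff00000000, i.e. zap (x, 0x0f) == x & 0xffffffff00000000.  */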
void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
                                   machine_mode mode,
                                   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* Subroutines of the atomic operation splitters.  Emit barriers
   as needed for the memory MODEL.  */

static void
alpha_pre_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, true))
    emit_insn (gen_memory_barrier ());
}

static void
alpha_post_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, false))
    emit_insn (gen_memory_barrier ());
}
/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  switch (mode)
    {
    case E_QImode:
      fn = gen_insbl;
      break;
    case E_HImode:
      fn = gen_inswl;
      break;
    case E_SImode:
      fn = gen_insll;
      break;
    case E_DImode:
      fn = gen_insql;
      break;
    default:
      gcc_unreachable ();
    }

  op1 = force_reg (mode, op1);
  emit_insn (fn (ret, op1, op2));

  return ret;
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
                       rtx after, rtx scratch, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  alpha_pre_atomic_barrier (model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_insn (gen_load_locked (mode, before, mem));

  if (code == NOT)
    {
      x = gen_rtx_AND (mode, before, val);
      emit_insn (gen_rtx_SET (val, x));

      x = gen_rtx_NOT (mode, val);
    }
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (scratch, x));

  emit_insn (gen_store_conditional (mode, cond, mem, scratch));

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
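/* The emitted loop has the classic ll/sc shape (added illustration;
   quadword fetch-and-add shown, register choices arbitrary):

   1:   ldq_l   $1,0($16)       ; load-locked BEFORE value
        addq    $1,$17,$2       ; apply CODE
        stq_c   $2,0($16)       ; store-conditional, $2 = success flag
        beq     $2,1b           ; unlikely branch back on failure
*/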
/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx operands[])
{
  rtx cond, retval, mem, oldval, newval;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  machine_mode mode;
  rtx label1, label2, x;

  cond = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (operands[5] != const0_rtx);
  mod_s = memmodel_from_int (INTVAL (operands[6]));
  mod_f = memmodel_from_int (INTVAL (operands[7]));
  mode = GET_MODE (mem);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_insn (gen_load_locked (mode, retval, mem));

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, x, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (cond, newval);
  emit_insn (gen_store_conditional
             (mode, cond, mem, gen_lowpart (mode, cond)));

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));
}
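/* Schematically (added illustration, register choices arbitrary), the
   strong compare-and-swap loop emitted above is:

   1:   ldq_l   $1,0($16)       ; load-locked current value
        cmpeq   $1,$17,$2       ; matches OLDVAL?
        beq     $2,2f           ; no -- fail
        mov     $18,$2          ; yes -- try to store NEWVAL
        stq_c   $2,0($16)
        beq     $2,1b           ; lost the reservation, retry
   2:
*/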
void
alpha_expand_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
  machine_mode mode;
  rtx addr, align, wdst;

  cond = operands[0];
  dst = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = operands[5];
  mod_s = operands[6];
  mod_f = operands[7];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
                               NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);

  if (newval != const0_rtx)
    newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  emit_insn (gen_atomic_compare_and_swap_1
             (mode, cond, wdst, mem, oldval, newval, align,
              is_weak, mod_s, mod_f));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dest, orig_mem, oldval, newval, align, scratch;
  machine_mode mode;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  rtx label1, label2, mem, addr, width, mask, x;

  cond = operands[0];
  dest = operands[1];
  orig_mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  align = operands[5];
  is_weak = (operands[6] != const0_rtx);
  mod_s = memmodel_from_int (INTVAL (operands[7]));
  mod_f = memmodel_from_int (INTVAL (operands[8]));
  scratch = operands[9];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_insn (gen_load_locked (DImode, scratch, mem));

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, dest, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_insn (gen_mskxl (cond, scratch, mask, addr));

  if (newval != const0_rtx)
    emit_insn (gen_iordi3 (cond, cond, newval));

  emit_insn (gen_store_conditional (DImode, cond, mem, cond));

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));
}
/* Expand an atomic exchange operation.  */

void
alpha_split_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, scratch;
  enum memmodel model;
  machine_mode mode;
  rtx label, x, cond;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  scratch = operands[4];
  mode = GET_MODE (mem);
  cond = gen_lowpart (DImode, scratch);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_insn (gen_load_locked (mode, retval, mem));
  emit_move_insn (scratch, val);
  emit_insn (gen_store_conditional (mode, cond, mem, scratch));

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
void
alpha_expand_atomic_exchange_12 (rtx operands[])
{
  rtx dst, mem, val, model;
  machine_mode mode;
  rtx addr, align, wdst;

  dst = operands[0];
  mem = operands[1];
  val = operands[2];
  model = operands[3];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
                               NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  if (val != const0_rtx)
    val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  emit_insn (gen_atomic_exchange_1 (mode, wdst, mem, val, align, model));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_atomic_exchange_12 (rtx operands[])
{
  rtx dest, orig_mem, addr, val, align, scratch;
  rtx label, mem, width, mask, x;
  machine_mode mode;
  enum memmodel model;

  dest = operands[0];
  orig_mem = operands[1];
  val = operands[2];
  align = operands[3];
  model = (enum memmodel) INTVAL (operands[4]);
  scratch = operands[5];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_insn (gen_load_locked (DImode, scratch, mem));

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));
  emit_insn (gen_mskxl (scratch, scratch, mask, addr));
  if (val != const0_rtx)
    emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_insn (gen_store_conditional (DImode, scratch, mem, scratch));

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
                   unsigned int)
{
  enum attr_type dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (dep_type != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency-1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}
/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}
/* Machine-specific function data.  */

struct GTY(()) alpha_links;

struct GTY(()) machine_function
{
  unsigned HOST_WIDE_INT sa_mask;
  HOST_WIDE_INT sa_size;
  HOST_WIDE_INT frame_size;

  /* For flag_reorder_blocks_and_partition.  */
  rtx gp_save_rtx;

  /* For VMS condition handlers.  */
  bool uses_condition_handler;

  /* Linkage entries.  */
  hash_map<nofree_string_hash, alpha_links *> *links;
};

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* Support for frame based VMS condition handlers.  */

/* A VMS condition handler may be established for a function with a call to
   __builtin_establish_vms_condition_handler, and cancelled with a call to
   __builtin_revert_vms_condition_handler.

   The VMS Condition Handling Facility knows about the existence of a handler
   from the procedure descriptor .handler field.  As the VMS native compilers
   do, we store the user specified handler's address at a fixed location in
   the stack frame and point the procedure descriptor at a common wrapper
   which fetches the real handler's address and issues an indirect call.

   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.

   We force the procedure kind to PT_STACK, and the fixed frame location is
   fp+8, just before the register save area.  We use the handler_data field
   in the procedure descriptor to state the fp offset at which the installed
   handler address can be found.  */

#define VMS_COND_HANDLER_FP_OFFSET 8
/* Expand code to store the currently installed user VMS condition handler
   into TARGET and install HANDLER as the new condition handler.  */

void
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
{
  rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
					    VMS_COND_HANDLER_FP_OFFSET);

  rtx handler_slot
    = gen_rtx_MEM (DImode, handler_slot_address);

  emit_move_insn (target, handler_slot);
  emit_move_insn (handler_slot, handler);

  /* Notify the start/prologue/epilogue emitters that the condition handler
     slot is needed.  In addition to reserving the slot space, this will
     force the procedure kind to PT_STACK, to ensure that the
     hard_frame_pointer_rtx use above is correct.  */
  cfun->machine->uses_condition_handler = true;
}
/* Expand code to store the current VMS condition handler into TARGET and
   install a null handler in its place.  */

void
alpha_expand_builtin_revert_vms_condition_handler (rtx target)
{
  /* We implement this by establishing a null condition handler, with the
     tiny side effect of setting uses_condition_handler.  This is a little
     bit pessimistic if no actual builtin_establish call is ever issued,
     which is not a real problem and expected never to happen anyway.  */

  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
}
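
/* Illustrative usage only (hypothetical user code, not from this file):

     void *prev = __builtin_establish_vms_condition_handler (my_handler);
     ...
     void *cur = __builtin_revert_vms_condition_handler ();

   Both builtins return the previously installed handler; my_handler is
   a placeholder name for a user-written condition handler.  */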
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}
/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx seq, m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();

      /* We used to simply emit the sequence after entry_of_function.
	 However this breaks the CFG if the first instruction in the
	 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
	 label.  Emit the sequence properly on the edge.  We are only
	 invoked from dw2_build_landing_pads and finish_eh_generation
	 will call commit_edge_insertions thanks to a kludge.  */
      insert_insn_on_edge (seq,
			   single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}
static void
alpha_instantiate_decls (void)
{
  if (cfun->machine->gp_save_rtx != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->gp_save_rtx);
}

static int
alpha_ra_ever_killed (void)
{
  rtx_insn *top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return (int) df_regs_ever_live_p (REG_RA);

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
}
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
	return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
	return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "v";
	case ALPHA_FPTM_SU:
	case ALPHA_FPTM_SUI:
	  return "sv";
	default:
	  gcc_unreachable ();
	}

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "v";
	case ALPHA_FPTM_SU:
	  return "sv";
	case ALPHA_FPTM_SUI:
	  return "svi";
	default:
	  gcc_unreachable ();
	}
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "u";
	case ALPHA_FPTM_SU:
	  return "su";
	case ALPHA_FPTM_SUI:
	  return "sui";
	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}

/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;

    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
	{
	case ALPHA_FPRM_NORM:
	  return NULL;
	case ALPHA_FPRM_MINF:
	  return "m";
	case ALPHA_FPRM_CHOP:
	  return "c";
	case ALPHA_FPRM_DYN:
	  return "d";
	default:
	  gcc_unreachable ();
	}

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}

/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */

static bool
alpha_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '/' || code == ',' || code == '-' || code == '~'
	  || code == '#' || code == '*' || code == '&');
}
/* Implement TARGET_PRINT_OPERAND.  The alpha-specific
   operand codes are documented below.  */

static void
alpha_print_operand (FILE *file, rtx x, int code)
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '&':
      if (const char *name = get_some_local_dynamic_name ())
	assemble_name (file, name);
      else
	output_operand_lossage ("'%%&' used without any "
				"local dynamic TLS references");
      break;

    case '/':
      /* Generates the instruction suffix.  The TRAP_SUFFIX and ROUND_SUFFIX
	 attributes are examined to determine what is appropriate.  */
      {
	const char *trap = get_trap_mode_suffix ();
	const char *round = get_round_mode_suffix ();

	if (trap || round)
	  fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
	break;
      }

    case ',':
      /* Generates single precision suffix for floating point
	 instructions (s for IEEE, f for VAX).  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Generates double precision suffix for floating point
	 instructions (t for IEEE, g for VAX).  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
	alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
	alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;

    case 'J':
      {
	const char *lituse;

	if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
	  {
	    x = XVECEXP (x, 0, 0);
	    lituse = "lituse_tlsgd";
	  }
	else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
	  {
	    x = XVECEXP (x, 0, 0);
	    lituse = "lituse_tlsldm";
	  }
	else if (CONST_INT_P (x))
	  lituse = "lituse_jsr";
	else
	  {
	    output_operand_lossage ("invalid %%J value");
	    break;
	  }

	if (x != const0_rtx)
	  fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'j':
      {
	const char *lituse;

#ifdef HAVE_AS_JSRDIRECT_RELOCS
	lituse = "lituse_jsrdirect";
#else
	lituse = "lituse_jsr";
#endif

	gcc_assert (INTVAL (x) != 0);
	fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
	fprintf (file, "$31");
      else
	output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
	fprintf (file, "$f31");
      else
	output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (!CONST_INT_P (x))
	output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (!CONST_INT_P (x))
	output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
	output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
	output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
	       (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;

    case 'm':
      /* Write mask for ZAP insn.  */
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT mask = 0, value = INTVAL (x);

	  for (i = 0; i < 8; i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << i);

	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
	}
      else
	output_operand_lossage ("invalid %%m value");
      break;

    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (!mode_width_operand (x, VOIDmode))
	output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
	       (INTVAL (x) == 8 ? "b"
		: INTVAL (x) == 16 ? "w"
		: INTVAL (x) == 32 ? "l"
		: "q"));
      break;

    case 'U':
      /* Similar, except do it from the mask.  */
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT value = INTVAL (x);

	  if (value == 0xff)
	    {
	      fputc ('b', file);
	      break;
	    }
	  if (value == 0xffff)
	    {
	      fputc ('w', file);
	      break;
	    }
	  if (value == 0xffffffff)
	    {
	      fputc ('l', file);
	      break;
	    }
	  if (value == -1)
	    {
	      fputc ('q', file);
	      break;
	    }
	}

      output_operand_lossage ("invalid %%U value");
      break;

    case 's':
      /* Write the constant value divided by 8.  */
      if (!CONST_INT_P (x)
	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
	  || (INTVAL (x) & 7) != 0)
	output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
      break;

    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
	enum rtx_code c = GET_CODE (x);

	if (!COMPARISON_P (x))
	  output_operand_lossage ("invalid %%C value");

	else if (code == 'D')
	  c = reverse_condition (c);
	else if (code == 'c')
	  c = swap_condition (c);
	else if (code == 'd')
	  c = swap_condition (reverse_condition (c));

	if (c == LEU)
	  fprintf (file, "ule");
	else if (c == LTU)
	  fprintf (file, "ult");
	else if (c == UNORDERED)
	  fprintf (file, "un");
	else
	  fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;

    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
	{
	case DIV:
	  fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case UDIV:
	  fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case MOD:
	  fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case UMOD:
	  fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	default:
	  output_operand_lossage ("invalid %%E value");
	  break;
	}
      break;

    case 'A':
      /* Write "_u" for unaligned access.  */
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	fprintf (file, "_u");
      break;

    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	output_address (GET_MODE (x), XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
	{
	  switch (XINT (XEXP (x, 0), 1))
	    {
	    case UNSPEC_DTPREL:
	    case UNSPEC_TPREL:
	      output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
	      break;
	    default:
	      output_operand_lossage ("unknown relocation unspec");
	      break;
	    }
	}
      else
	output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
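
/* Illustrative note: these codes are consumed by operand substitution in
   the output templates of alpha.md, along the lines of "cmp%C1 %r2,%r3,%0"
   or "stq%A0 %1,%0"; the templates shown here are schematic examples for
   exposition, not verbatim copies from alpha.md.  */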
/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */

static void
alpha_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && CONST_INT_P (XEXP (addr, 1)))
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
	{
	  op1 = XEXP (op1, 0);
	  switch (XINT (op1, 1))
	    {
	    case UNSPEC_DTPREL:
	      reloc16 = NULL;
	      reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
	      break;
	    case UNSPEC_TPREL:
	      reloc16 = NULL;
	      reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
	      break;
	    default:
	      output_operand_lossage ("unknown relocation unspec");
	      return;
	    }

	  output_addr_const (file, XVECEXP (op1, 0, 0));
	}
      else
	{
	  reloc16 = "gprel";
	  reloclo = "gprellow";
	  output_addr_const (file, op1);
	}

      if (offset)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
	{
	case REG:
	  basereg = REGNO (addr);
	  break;

	case SUBREG:
	  basereg = subreg_regno (addr);
	  break;

	default:
	  gcc_unreachable ();
	}

      fprintf (file, "($%d)\t\t!%s", basereg,
	       (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

    case SYMBOL_REF:
      gcc_assert (TARGET_ABI_OPEN_VMS || this_is_asm_operands);
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert (TARGET_ABI_OPEN_VMS || this_is_asm_operands);
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
	       XSTR (XEXP (XEXP (addr, 0), 0), 0),
	       INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;

    default:
      output_operand_lossage ("invalid operand address");
      return;
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}
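
/* Illustrative note: typical addresses printed by the routine above look
   like "16($30)" for register-plus-offset, "sym($29)  !gprel" for a
   gp-relative LO_SUM, and a bare "sym" or "sym+4" on VMS or inside asm
   operands; the exact relocation suffix is chosen from reloc16/reloclo
   above.  */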
/* Emit RTL insns to initialize the variable parts of a trampoline at
   M_TRAMP.  FNDECL is target function's decl.  CHAIN_VALUE is an rtx
   for the static chain value for the function.  */

static void
alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr, mem, word1, word2;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (Pmode, fnaddr);
  chain_value = convert_memory_address (Pmode, chain_value);
#endif

  if (TARGET_ABI_OPEN_VMS)
    {
      const char *fnname;
      char *trname;

      /* Construct the name of the trampoline entry point.  */
      fnname = XSTR (fnaddr, 0);
      trname = (char *) alloca (strlen (fnname) + 5);
      strcpy (trname, fnname);
      strcat (trname, "..tr");
      fnname = ggc_alloc_string (trname, strlen (trname) + 1);
      word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);

      /* Trampoline (or "bounded") procedure descriptor is constructed from
	 the function's procedure descriptor with certain fields zeroed IAW
	 the VMS calling standard.  This is stored in the first quadword.  */
      word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
      word1 = expand_and (DImode, word1,
			  GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
			  NULL);
    }
  else
    {
      /* These 4 instructions are:
	    ldq $1,24($27)
	    ldq $27,16($27)
	    jmp $31,($27),0
	    nop
	 We don't bother setting the HINT field of the jump; the nop
	 is merely there for padding.  */
      word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
      word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
    }

  /* Store the first two words, as computed above.  */
  mem = adjust_address (m_tramp, DImode, 0);
  emit_move_insn (mem, word1);
  mem = adjust_address (m_tramp, DImode, 8);
  emit_move_insn (mem, word2);

  /* Store function address and static chain value.  */
  mem = adjust_address (m_tramp, Pmode, 16);
  emit_move_insn (mem, fnaddr);
  mem = adjust_address (m_tramp, Pmode, 24);
  emit_move_insn (mem, chain_value);

  if (TARGET_ABI_OSF)
    {
      emit_insn (gen_imb ());
#ifdef HAVE_ENABLE_EXECUTE_STACK
      emit_library_call (init_one_libfunc ("__enable_execute_stack"),
			 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
#endif
    }
}
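
/* Illustrative note: the OSF/1 trampoline written above thus has the
   layout

	 0: ldq $1,24($27); ldq $27,16($27)	(WORD1)
	 8: jmp $31,($27),0; nop		(WORD2)
	16: target function address
	24: static chain value

   so on entry through $27 the code loads the chain into $1 and jumps to
   the real target.  */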
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   ARG is a description of the argument.

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

static rtx
alpha_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (arg.aggregate_type_p ())
    basereg = 16;
  else
    {
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
	 values here.  */
      gcc_checking_assert (!COMPLEX_MODE_P (arg.mode));

      /* Set up defaults for FP operands passed in FP registers, and
	 integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (arg.mode) == MODE_FLOAT)
	basereg = 32 + 16;
      else
	basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the two platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    {
      if (arg.end_marker_p ())
	return alpha_arg_info_reg_val (*cum);

      num_args = cum->num_args;
      if (num_args >= 6
	  || targetm.calls.must_pass_in_stack (arg))
	return NULL_RTX;
    }
#elif TARGET_ABI_OSF
    {
      if (*cum >= 6)
	return NULL_RTX;
      num_args = *cum;

      if (arg.end_marker_p ())
	basereg = 16;
      else if (targetm.calls.must_pass_in_stack (arg))
	return NULL_RTX;
    }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (arg.mode, num_args + basereg);
}
/* Update the data in CUM to advance over argument ARG.  */

static void
alpha_function_arg_advance (cumulative_args_t cum_v,
			    const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool onstack = targetm.calls.must_pass_in_stack (arg);
  int increment = onstack ? 6 : ALPHA_ARG_SIZE (arg.mode, arg.type);

#if TARGET_ABI_OSF
  *cum += increment;
#else
  if (!onstack && cum->num_args < 6)
    cum->atypes[cum->num_args] = alpha_arg_type (arg.mode);
  cum->num_args += increment;
#endif
}
static int
alpha_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
{
  int words = 0;
  CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (arg.mode, arg.type))
    words = 6 - cum->num_args;
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (arg.mode, arg.type))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}
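
/* Worked example (OSF ABI): with *cum == 5 and a two-word argument,
   5 < 6 < 5 + 2, so words = 1 and the hook reports 8 bytes passed in
   the last argument register while the remainder goes on the stack.  */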
/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory, except on OpenVMS where
	 records that fit 64 bits should be returned by immediate value
	 as required by section 3.8.7.1 of the OpenVMS Calling Standard.  */
      if (TARGET_ABI_OPEN_VMS
	  && TREE_CODE (type) != ARRAY_TYPE
	  && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
	return false;

      if (AGGREGATE_TYPE_P (type))
	return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
	 not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
	 aggregate_value_p.  We must return something, but it's not
	 clear what's safe to return.  Pretend it's a struct I
	 wouldn't actually return.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}
/* Return true if ARG should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
{
  /* Pass float and _Complex float variable arguments by reference.
     This avoids 64-bit store from a FP register to a pretend args save area
     and subsequent 32-bit load from the saved location to a FP register.

     Note that 32-bit loads and stores to/from a FP register on alpha reorder
     bits to form a canonical 64-bit value in the FP register.  This fact
     invalidates compiler assumption that 32-bit FP value lives in the lower
     32-bits of the passed 64-bit FP value, so loading the 32-bit value from
     the stored 64-bit location using 32-bit FP load is invalid on alpha.

     This introduces a sort of ABI incompatibility, but until _Float32 was
     introduced, C-family languages promoted 32-bit float variable args to
     64-bit double, and it was not allowed to pass float as a variable
     argument.  Passing _Complex float as a variable argument never
     worked on alpha.  Thus, we have no backward compatibility issues
     to worry about, and passing unpromoted _Float32 and _Complex float
     as a variable argument will actually work in the future.  */

  if (arg.mode == SFmode || arg.mode == SCmode)
    return !arg.named;

  return arg.mode == TFmode || arg.mode == TCmode;
}
/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

static rtx
alpha_function_value_1 (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
			machine_mode mode)
{
  unsigned int regnum, dummy ATTRIBUTE_UNUSED;
  enum mode_class mclass;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  mclass = GET_MODE_CLASS (mode);
  switch (mclass)
    {
    case MODE_INT:
      /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
	 where we have them returning both SImode and DImode.  */
      if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
	PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
	machine_mode cmode = GET_MODE_INNER (mode);

	return gen_rtx_PARALLEL
	  (VOIDmode,
	   gen_rtvec (2,
		      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
					 const0_rtx),
		      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
					 GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    case MODE_RANDOM:
      /* We should only reach here for BLKmode on VMS.  */
      gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
      regnum = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}
/* Implement TARGET_FUNCTION_VALUE.  */

static rtx
alpha_function_value (const_tree valtype, const_tree fn_decl_or_type,
		      bool /*outgoing*/)
{
  return alpha_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
}

/* Implement TARGET_LIBCALL_VALUE.  */

static rtx
alpha_libcall_value (machine_mode mode, const_rtx /*fun*/)
{
  return alpha_function_value_1 (NULL_TREE, NULL_TREE, mode);
}

/* Implement TARGET_FUNCTION_VALUE_REGNO_P.

   On the Alpha, $0 $1 and $f0 $f1 are the only registers thus used.  */

static bool
alpha_function_value_regno_p (const unsigned int regno)
{
  return (regno == 0 || regno == 1 || regno == 32 || regno == 33);
}

/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (const_tree type)
{
  return TYPE_MODE (type) != TCmode;
}
static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
			  TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++?  SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (BUILTINS_LOCATION,
		    FIELD_DECL, get_identifier ("__offset"),
		    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  DECL_CHAIN (ofs) = space;

  base = build_decl (BUILTINS_LOCATION,
		     FIELD_DECL, get_identifier ("__base"),
		     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  DECL_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
  return record;
}
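
/* Illustrative note: the record laid out above is roughly equivalent to

     struct __va_list_tag {
       void *__base;     incoming arg pointer, biased as set by va_start
       int __offset;     bytes of arguments consumed so far
       int <anon>;       unnamed dummy field for alignment
     };

   with the dummy field anonymous in the real record.  */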
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static gimple *
va_list_skip_additions (tree lhs)
{
  gimple *stmt;

  for (;;)
    {
      enum tree_code code;

      stmt = SSA_NAME_DEF_STMT (lhs);

      if (gimple_code (stmt) == GIMPLE_PHI)
	return stmt;

      if (!is_gimple_assign (stmt)
	  || gimple_assign_lhs (stmt) != lhs)
	return NULL;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return stmt;
      code = gimple_assign_rhs_code (stmt);
      if (!CONVERT_EXPR_CODE_P (code)
	  && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
	      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
	      || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
	return stmt;

      lhs = gimple_assign_rhs1 (stmt);
    }
}
/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
	   + ((ap.__offset + cst <= 47)
	      ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
   registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, const gimple *stmt)
{
  tree base, offset, rhs;
  int offset_arg = 1;
  gimple *base_stmt;

  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
      != GIMPLE_SINGLE_RHS)
    return false;

  rhs = gimple_assign_rhs1 (stmt);
  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (stmt == NULL
      || !is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return false;

  base = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (base) == SSA_NAME)
    {
      base_stmt = va_list_skip_additions (base);
      if (base_stmt
	  && is_gimple_assign (base_stmt)
	  && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
	base = gimple_assign_rhs1 (base_stmt);
    }

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (base) == SSA_NAME)
	{
	  base_stmt = va_list_skip_additions (base);
	  if (base_stmt
	      && is_gimple_assign (base_stmt)
	      && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
	    base = gimple_assign_rhs1 (base_stmt);
	}

      if (TREE_CODE (base) != COMPONENT_REF
	  || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
	return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
    return false;

  offset = gimple_op (stmt, 1 + offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    {
      gimple *offset_stmt = va_list_skip_additions (offset);

      if (offset_stmt
	  && gimple_code (offset_stmt) == GIMPLE_PHI)
	{
	  HOST_WIDE_INT sub;
	  gimple *arg1_stmt, *arg2_stmt;
	  tree arg1, arg2;
	  enum tree_code code1, code2;

	  if (gimple_phi_num_args (offset_stmt) != 2)
	    goto escapes;

	  arg1_stmt
	    = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
	  arg2_stmt
	    = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
	  if (arg1_stmt == NULL
	      || !is_gimple_assign (arg1_stmt)
	      || arg2_stmt == NULL
	      || !is_gimple_assign (arg2_stmt))
	    goto escapes;

	  code1 = gimple_assign_rhs_code (arg1_stmt);
	  code2 = gimple_assign_rhs_code (arg2_stmt);
	  if (code1 == COMPONENT_REF
	      && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
	    /* Do nothing.  */;
	  else if (code2 == COMPONENT_REF
		   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
	    {
	      std::swap (arg1_stmt, arg2_stmt);
	      code2 = code1;
	    }
	  else
	    goto escapes;

	  if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
	    goto escapes;

	  sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
	  if (code2 == MINUS_EXPR)
	    sub = -sub;
	  if (sub < -48 || sub > -32)
	    goto escapes;

	  arg1 = gimple_assign_rhs1 (arg1_stmt);
	  arg2 = gimple_assign_rhs1 (arg2_stmt);
	  if (TREE_CODE (arg2) == SSA_NAME)
	    {
	      arg2_stmt = va_list_skip_additions (arg2);
	      if (arg2_stmt == NULL
		  || !is_gimple_assign (arg2_stmt)
		  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
		goto escapes;
	      arg2 = gimple_assign_rhs1 (arg2_stmt);
	    }
	  if (arg1 != arg2)
	    goto escapes;

	  if (TREE_CODE (arg1) != COMPONENT_REF
	      || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
	      || get_base_address (arg1) != base)
	    goto escapes;

	  /* Need floating point regs.  */
	  cfun->va_list_fpr_size |= 2;
	  return false;
	}
      if (offset_stmt
	  && is_gimple_assign (offset_stmt)
	  && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
	offset = gimple_assign_rhs1 (offset_stmt);
    }
  if (TREE_CODE (offset) != COMPONENT_REF
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
      || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

 escapes:
  si->va_list_escapes = true;
  return false;
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (cumulative_args_t pcum,
			      const function_arg_info &arg,
			      int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);

  /* Skip the current argument.  */
  targetm.calls.function_arg_advance (pack_cumulative_args (&cum), arg);

#if TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
	{
	  emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
	  emit_insn (gen_arg_home ());
	}
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count;
      alias_set_type set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
	count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
	 are needed by the detected va_arg statements.  See above for
	 how these values are computed.  Note that the "escape" value
	 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
	 these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
	{
	  tmp = gen_rtx_MEM (BLKmode,
			     plus_constant (Pmode, virtual_incoming_args_rtx,
					    (cum + 6) * UNITS_PER_WORD));
	  MEM_NOTRAP_P (tmp) = 1;
	  set_mem_alias_set (tmp, set);
	  move_block_from_reg (16 + cum, tmp, count);
	}

      if (cfun->va_list_fpr_size & 2)
	{
	  tmp = gen_rtx_MEM (BLKmode,
			     plus_constant (Pmode, virtual_incoming_args_rtx,
					    cum * UNITS_PER_WORD));
	  MEM_NOTRAP_P (tmp) = 1;
	  set_mem_alias_set (tmp, set);
	  move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
	}
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}
static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = DECL_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
			   valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
			     valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
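
/* Worked example (OSF ABI): for a function declared f(int a, ...),
   NUM_ARGS == 1 < 6, so the code above sets __base to AP + 48 (skipping
   the 48-byte FP save block laid out by TARGET_SETUP_INCOMING_VARARGS)
   and __offset to 1 * 8 == 8, the bytes consumed by the named arg.  */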
static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
			 gimple_seq *pre_p)
{
  tree type_size, ptr_type, addend, t, addr;
  gimple_seq internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (must_pass_va_arg_in_stack (type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      gimplify_assign (offset,
		       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
		       pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
					   offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
	 be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
					   offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
			      addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
			    fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  gimple_seq_add_seq (pre_p, internal_post);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }
  t = fold_convert (TREE_TYPE (offset), t);
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
		   pre_p);

  return build_va_arg_indirect_ref (addr);
}
static tree
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		       gimple_seq *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = DECL_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
		       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
			 valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_va_arg_by_reference (type);

  if (indirect)
    {
      if (TREE_CODE (type) == COMPLEX_TYPE
	  && targetm.calls.split_complex_arg (type))
	{
	  tree real_part, imag_part, real_temp;

	  tree ptr_type = build_pointer_type_for_mode (TREE_TYPE (type),
						       ptr_mode, true);

	  real_part = alpha_gimplify_va_arg_1 (ptr_type, base,
					       offset, pre_p);
	  real_part = build_va_arg_indirect_ref (real_part);

	  /* Copy the value into a new temporary, lest the formal temporary
	     be reused out from under us.  */
	  real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

	  imag_part = alpha_gimplify_va_arg_1 (ptr_type, base,
					       offset, pre_p);
	  imag_part = build_va_arg_indirect_ref (imag_part);

	  r = build2 (COMPLEX_EXPR, type, real_temp, imag_part);

	  /* Stuff the offset temporary back into its field.  */
	  gimplify_assign (unshare_expr (offset_field),
			   fold_convert (TREE_TYPE (offset_field), offset),
			   pre_p);
	  return r;
	}

      type = build_pointer_type_for_mode (type, ptr_mode, true);
    }

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  gimplify_assign (unshare_expr (offset_field),
		   fold_convert (TREE_TYPE (offset_field), offset), pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
/* Builtins.  */

enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};
static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_extbl,
  CODE_FOR_extwl,
  CODE_FOR_extll,
  CODE_FOR_extql,
  CODE_FOR_extwh,
  CODE_FOR_extlh,
  CODE_FOR_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_insql,
  CODE_FOR_inswh,
  CODE_FOR_inslh,
  CODE_FOR_insqh,
  CODE_FOR_mskbl,
  CODE_FOR_mskwl,
  CODE_FOR_mskll,
  CODE_FOR_mskql,
  CODE_FOR_mskwh,
  CODE_FOR_msklh,
  CODE_FOR_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_builtin_establish_vms_condition_handler,
  CODE_FOR_builtin_revert_vms_condition_handler,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};

static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver",	ALPHA_BUILTIN_IMPLVER,	0, true },
  { "__builtin_alpha_rpcc",	ALPHA_BUILTIN_RPCC,	0, false }
};

static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",	ALPHA_BUILTIN_AMASK,	0, true },
  { "__builtin_alpha_pklb",	ALPHA_BUILTIN_PKLB,	MASK_MAX, true },
  { "__builtin_alpha_pkwb",	ALPHA_BUILTIN_PKWB,	MASK_MAX, true },
  { "__builtin_alpha_unpkbl",	ALPHA_BUILTIN_UNPKBL,	MASK_MAX, true },
  { "__builtin_alpha_unpkbw",	ALPHA_BUILTIN_UNPKBW,	MASK_MAX, true },
  { "__builtin_alpha_cttz",	ALPHA_BUILTIN_CTTZ,	MASK_CIX, true },
  { "__builtin_alpha_ctlz",	ALPHA_BUILTIN_CTLZ,	MASK_CIX, true },
  { "__builtin_alpha_ctpop",	ALPHA_BUILTIN_CTPOP,	MASK_CIX, true }
};

static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge",	ALPHA_BUILTIN_CMPBGE,	0, true },
  { "__builtin_alpha_extbl",	ALPHA_BUILTIN_EXTBL,	0, true },
  { "__builtin_alpha_extwl",	ALPHA_BUILTIN_EXTWL,	0, true },
  { "__builtin_alpha_extll",	ALPHA_BUILTIN_EXTLL,	0, true },
  { "__builtin_alpha_extql",	ALPHA_BUILTIN_EXTQL,	0, true },
  { "__builtin_alpha_extwh",	ALPHA_BUILTIN_EXTWH,	0, true },
  { "__builtin_alpha_extlh",	ALPHA_BUILTIN_EXTLH,	0, true },
  { "__builtin_alpha_extqh",	ALPHA_BUILTIN_EXTQH,	0, true },
  { "__builtin_alpha_insbl",	ALPHA_BUILTIN_INSBL,	0, true },
  { "__builtin_alpha_inswl",	ALPHA_BUILTIN_INSWL,	0, true },
  { "__builtin_alpha_insll",	ALPHA_BUILTIN_INSLL,	0, true },
  { "__builtin_alpha_insql",	ALPHA_BUILTIN_INSQL,	0, true },
  { "__builtin_alpha_inswh",	ALPHA_BUILTIN_INSWH,	0, true },
  { "__builtin_alpha_inslh",	ALPHA_BUILTIN_INSLH,	0, true },
  { "__builtin_alpha_insqh",	ALPHA_BUILTIN_INSQH,	0, true },
  { "__builtin_alpha_mskbl",	ALPHA_BUILTIN_MSKBL,	0, true },
  { "__builtin_alpha_mskwl",	ALPHA_BUILTIN_MSKWL,	0, true },
  { "__builtin_alpha_mskll",	ALPHA_BUILTIN_MSKLL,	0, true },
  { "__builtin_alpha_mskql",	ALPHA_BUILTIN_MSKQL,	0, true },
  { "__builtin_alpha_mskwh",	ALPHA_BUILTIN_MSKWH,	0, true },
  { "__builtin_alpha_msklh",	ALPHA_BUILTIN_MSKLH,	0, true },
  { "__builtin_alpha_mskqh",	ALPHA_BUILTIN_MSKQH,	0, true },
  { "__builtin_alpha_umulh",	ALPHA_BUILTIN_UMULH,	0, true },
  { "__builtin_alpha_zap",	ALPHA_BUILTIN_ZAP,	0, true },
  { "__builtin_alpha_zapnot",	ALPHA_BUILTIN_ZAPNOT,	0, true },
  { "__builtin_alpha_minub8",	ALPHA_BUILTIN_MINUB8,	MASK_MAX, true },
  { "__builtin_alpha_minsb8",	ALPHA_BUILTIN_MINSB8,	MASK_MAX, true },
  { "__builtin_alpha_minuw4",	ALPHA_BUILTIN_MINUW4,	MASK_MAX, true },
  { "__builtin_alpha_minsw4",	ALPHA_BUILTIN_MINSW4,	MASK_MAX, true },
  { "__builtin_alpha_maxub8",	ALPHA_BUILTIN_MAXUB8,	MASK_MAX, true },
  { "__builtin_alpha_maxsb8",	ALPHA_BUILTIN_MAXSB8,	MASK_MAX, true },
  { "__builtin_alpha_maxuw4",	ALPHA_BUILTIN_MAXUW4,	MASK_MAX, true },
  { "__builtin_alpha_maxsw4",	ALPHA_BUILTIN_MAXSW4,	MASK_MAX, true },
  { "__builtin_alpha_perr",	ALPHA_BUILTIN_PERR,	MASK_MAX, true }
};
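
/* Illustrative usage only (hypothetical user code):

     unsigned long m = __builtin_alpha_zapnot (x, 0x0f);

   keeps the low four bytes of x and zeroes the rest, matching the
   ZAPNOT semantics folded by alpha_fold_builtin_zapnot below.  */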
static GTY(()) tree alpha_dimode_u;
static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;

static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];

/* Return the alpha builtin for CODE.  */

static tree
alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ALPHA_BUILTIN_max)
    return error_mark_node;
  return alpha_builtins[code];
}

/* Helper function of alpha_init_builtins.  Add the built-in specified
   by NAME, TYPE, CODE, and ECF.  */

static void
alpha_builtin_function (const char *name, tree ftype,
			enum alpha_builtin code, unsigned ecf)
{
  tree decl = add_builtin_function (name, ftype, (int) code,
				    BUILT_IN_MD, NULL, NULL_TREE);

  if (ecf & ECF_CONST)
    TREE_READONLY (decl) = 1;
  if (ecf & ECF_NOTHROW)
    TREE_NOTHROW (decl) = 1;

  alpha_builtins [(int) code] = decl;
}

/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

static void
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
		    tree ftype)
{
  size_t i;

  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      alpha_builtin_function (p->name, ftype, p->code,
			      (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
}
static void
alpha_init_builtins (void)
{
  tree ftype;

  alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);

  ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
				    alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);

  if (TARGET_ABI_OPEN_VMS)
    {
      ftype = build_function_type_list (ptr_type_node, ptr_type_node,
					NULL_TREE);
      alpha_builtin_function ("__builtin_establish_vms_condition_handler",
			      ftype,
			      ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
			      0);

      ftype = build_function_type_list (ptr_type_node, void_type_node,
					NULL_TREE);
      alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
			      ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);

      vms_patch_builtins ();
    }
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
		      rtx subtarget ATTRIBUTE_UNUSED,
		      machine_mode mode ATTRIBUTE_UNUSED,
		      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
  tree arg;
  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
	return NULL_RTX;
      if (arity > MAX_ARGS)
	return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
	op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      arity++;
    }

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0]);
      else
	pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
	{
	  unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
	  unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
	  if (c0 >= c1)
	    val |= 1 << i;
	}
      return build_int_cst (alpha_dimode_u, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (alpha_dimode_u, 0xff);
  return NULL;
}

/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
			   long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
	if ((opint[1] >> i) & 1)
	  mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
	return build_int_cst (alpha_dimode_u, opint[0] & mask);

      if (op)
	return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
			    build_int_cst (alpha_dimode_u, mask));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);
  return NULL;
}
/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
			  long op_const, unsigned HOST_WIDE_INT bytemask,
			  bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      loc *= BITS_PER_UNIT;

      if (loc != 0)
	{
	  if (op_const & 1)
	    {
	      unsigned HOST_WIDE_INT temp = opint[0];
	      if (is_high)
		temp <<= loc;
	      else
		temp >>= loc;
	      opint[0] = temp;
	      zap_const = 3;
	    }
	}
      else
	zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}

/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
			  long op_const, unsigned HOST_WIDE_INT bytemask,
			  bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
	{
	  byteloc = (64 - (loc * 8)) & 0x3f;
	  if (byteloc == 0)
	    zap_op = op;
	  else
	    temp >>= byteloc;
	  bytemask >>= 8;
	}
      else
	{
	  byteloc = loc * 8;
	  if (byteloc == 0)
	    zap_op = op;
	  else
	    temp <<= byteloc;
	}

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}

/* Fold the builtins for the MSK family of instructions.  */

static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
			  long op_const, unsigned HOST_WIDE_INT bytemask,
			  bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      bytemask <<= loc;

      if (is_high)
	bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}
static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
}

static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
	temp += a - b;
      else
	temp += b - a;
    }

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp, op;

  if (op_const == 0)
    return NULL;

  op = opint[0];
  temp = 0;
  while (op)
    temp++, op &= op - 1;

  return build_int_cst (alpha_dimode_u, temp);
}
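
/* Worked example: __builtin_alpha_ctpop (0xf0) folds via the loop above
   (each "temp++, op &= op - 1" iteration clears one set bit) to the
   constant 4.  */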
/* Fold one of our builtin functions.  */

static tree
alpha_fold_builtin (tree fndecl, int n_args, tree *op,
                    bool ignore ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT opint[MAX_ARGS];
  long op_const = 0;
  int i;

  if (n_args > MAX_ARGS)
    return NULL;

  for (i = 0; i < n_args; i++)
    {
      tree arg = op[i];
      if (arg == error_mark_node)
        return NULL;

      opint[i] = 0;
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          op_const |= 1L << i;
          opint[i] = int_cst_value (arg);
        }
    }

  switch (DECL_MD_FUNCTION_CODE (fndecl))
    {
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_ZAP:
      opint[1] ^= 0xff;
      /* FALLTHRU */
    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
      /* None of these are foldable at compile-time.  */
    default:
      return NULL;
    }
}
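
/* Illustrative example (added commentary, not from the original source):
   with both arguments constant, a call such as
   __builtin_alpha_extbl (0x8877665544332211, 3) is folded here, via
   alpha_fold_builtin_extxx, to the byte at offset 3, i.e. 0x44.  */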
static bool
alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  bool changed = false;
  gimple *stmt = gsi_stmt (*gsi);
  tree call = gimple_call_fn (stmt);
  gimple *new_stmt = NULL;

  if (call)
    {
      tree fndecl = gimple_call_fndecl (stmt);

      if (fndecl)
        {
          tree arg0, arg1;

          switch (DECL_MD_FUNCTION_CODE (fndecl))
            {
            case ALPHA_BUILTIN_UMULH:
              arg0 = gimple_call_arg (stmt, 0);
              arg1 = gimple_call_arg (stmt, 1);

              new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                              MULT_HIGHPART_EXPR, arg0, arg1);
              break;
            default:
              break;
            }
        }
    }

  if (new_stmt)
    {
      gsi_replace (gsi, new_stmt, true);
      changed = true;
    }

  return changed;
}
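
/* Illustrative note (added commentary, not from the original source):
   for a call with an lhs, the replacement turns

     x = __builtin_alpha_umulh (a, b);

   into an ordinary GIMPLE assignment using MULT_HIGHPART_EXPR (printed
   roughly as "x = a h* b"), which the middle end can then optimize like
   any other arithmetic.  */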
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;

/* Compute register masks for saved registers, register save area size,
   and total frame size.  */
static void
alpha_compute_frame_layout (void)
{
  unsigned HOST_WIDE_INT sa_mask = 0;
  HOST_WIDE_INT frame_size;
  int sa_size;

  /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
  if (!cfun->is_thunk)
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
        sa_mask |= HOST_WIDE_INT_1U << HARD_FRAME_POINTER_REGNUM;

      /* One for every register we have to save.  */
      for (unsigned i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (! fixed_regs[i] && ! call_used_regs[i]
            && df_regs_ever_live_p (i) && i != REG_RA)
          sa_mask |= HOST_WIDE_INT_1U << i;

      /* We need to restore these for the handler.  */
      if (crtl->calls_eh_return)
        {
          for (unsigned i = 0; ; ++i)
            {
              unsigned regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;
              sa_mask |= HOST_WIDE_INT_1U << regno;
            }
        }

      /* If any register spilled, then spill the return address also.  */
      /* ??? This is required by the Digital stack unwind specification
         and isn't needed if we're doing Dwarf2 unwinding.  */
      if (sa_mask || alpha_ra_ever_killed ())
        sa_mask |= HOST_WIDE_INT_1U << REG_RA;
    }

  sa_size = popcount_hwi(sa_mask);
  frame_size = get_frame_size ();

  if (TARGET_ABI_OPEN_VMS)
    {
      /* Start with a stack procedure if we make any calls (REG_RA used), or
         need a frame pointer, with a register procedure if we otherwise need
         at least a slot, and with a null procedure in other cases.  */
      if ((sa_mask >> REG_RA) & 1 || frame_pointer_needed)
        alpha_procedure_type = PT_STACK;
      else if (frame_size != 0)
        alpha_procedure_type = PT_REGISTER;
      else
        alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
         made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
         If we need FP for something else or if we receive a nonlocal
         goto (which expects PV to contain the value), we must use PV.
         Otherwise, start by assuming we can use FP.  */

      vms_base_regno
        = (frame_pointer_needed
           || cfun->has_nonlocal_label
           || alpha_procedure_type == PT_STACK
           || crtl->outgoing_args_size)
          ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
         in which to save FP.  */
      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
        for (unsigned i = 0; i < 32; i++)
          if (! fixed_regs[i] && call_used_regs[i]
              && ! df_regs_ever_live_p (i))
            {
              vms_save_fp_regno = i;
              break;
            }

      /* A VMS condition handler requires a stack procedure in our
         implementation (not required by the calling standard).  */
      if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
          || cfun->machine->uses_condition_handler)
        vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
        vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
                          ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP, RA and
         a condition handler slot if needed.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size += 2 + cfun->machine->uses_condition_handler;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
        sa_size++;
    }
  sa_size *= 8;

  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
                              + (alpha_procedure_type == PT_STACK ? 8 : 0)
                              + frame_size
                              + crtl->args.pretend_args_size);
  else
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
                  + sa_size
                  + ALPHA_ROUND (frame_size + crtl->args.pretend_args_size));

  cfun->machine->sa_mask = sa_mask;
  cfun->machine->sa_size = sa_size;
  cfun->machine->frame_size = frame_size;
}

#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT alpha_compute_frame_layout
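
/* Worked example (added commentary, not from the original source):
   an OSF function that saves $9, $10 and $26 gets sa_size = 3, padded
   to 4 slots for 16-byte alignment, i.e. 32 bytes.  With 40 bytes of
   locals and no pretend or outgoing args, frame_size becomes
   0 + 32 + ALPHA_ROUND (40) = 80 bytes.  */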
/* Return 1 if this function can directly return via $26.  */

bool
direct_return (void)
{
  return (TARGET_ABI_OSF
          && reload_completed
          && cfun->machine->frame_size == 0);
}
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
                                  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = cfun->machine->sa_size;
  ret += ALPHA_ROUND (crtl->outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
                           + crtl->args.pretend_args_size)
              - crtl->args.pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
#if TARGET_ABI_OPEN_VMS

/* Worker function for TARGET_CAN_ELIMINATE.  */

static bool
alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  switch (alpha_procedure_type)
    {
    case PT_NULL:
      /* NULL procedures have no frame of their own and we only
         know how to resolve from the current stack pointer.  */
      return to == STACK_POINTER_REGNUM;

    case PT_REGISTER:
    case PT_STACK:
      /* We always eliminate except to the stack pointer if there is no
         usable frame pointer at hand.  */
      return (to != STACK_POINTER_REGNUM
              || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
    }

  gcc_unreachable ();
}
/* FROM is to be eliminated for TO.  Return the offset so that TO+offset
   designates the same location as FROM.  */

HOST_WIDE_INT
alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
{
  /* The only possible attempts we ever expect are ARG or FRAME_PTR to
     HARD_FRAME or STACK_PTR.  We need the alpha_procedure_type to decide
     on the proper computations and will need the register save area size
     in most cases.  */

  HOST_WIDE_INT sa_size = cfun->machine->sa_size;

  /* PT_NULL procedures have no frame of their own and we only allow
     elimination to the stack pointer.  This is the argument pointer and we
     resolve the soft frame pointer to that as well.  */

  if (alpha_procedure_type == PT_NULL)
    return 0;

  /* For a PT_STACK procedure the frame layout looks as follows

                      -----> decreasing addresses

               <   size rounded up to 16           |   likewise   >
     --------------#------------------------------+++--------------+++-------#
     incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
     --------------#---------------------------------------------------------#
                   ^               ^              ^                          ^
              ARG_PTR          FRAME_PTR     HARD_FRAME_PTR            STACK_PTR

     PT_REGISTER procedures are similar in that they may have a frame of their
     own.  They have no regs-sa/pv/outgoing-args area.

     We first compute offset to HARD_FRAME_PTR, then add what we need to get
     to STACK_PTR if need be.  */

  {
    HOST_WIDE_INT offset;
    HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;

    switch (from)
      {
      case FRAME_POINTER_REGNUM:
        offset = ALPHA_ROUND (sa_size + pv_save_size);
        break;
      case ARG_POINTER_REGNUM:
        offset = (ALPHA_ROUND (sa_size + pv_save_size
                               + get_frame_size ()
                               + crtl->args.pretend_args_size)
                  - crtl->args.pretend_args_size);
        break;
      default:
        gcc_unreachable ();
      }

    if (to == STACK_POINTER_REGNUM)
      offset += ALPHA_ROUND (crtl->outgoing_args_size);

    return offset;
  }
}
#define COMMON_OBJECT "common_object"

static tree
common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
                       tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
                       bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  tree decl = *node;
  gcc_assert (DECL_P (decl));

  DECL_COMMON (decl) = 1;
  return NULL_TREE;
}

static const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { COMMON_OBJECT, 0, 1, true, false, false, false, common_object_handler,
    NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};

void
vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
                               unsigned HOST_WIDE_INT size,
                               unsigned int align)
{
  tree attr = DECL_ATTRIBUTES (decl);
  fprintf (file, "%s", COMMON_ASM_OP);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
  /* ??? Unlike on OSF/1, the alignment factor is not in log units.  */
  fprintf (file, ",%u", align / BITS_PER_UNIT);

  if (attr)
    {
      attr = lookup_attribute (COMMON_OBJECT, attr);
      if (attr)
        fprintf (file, ",%s",
                 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
    }

  fputc ('\n', file);
}

#undef COMMON_OBJECT
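
/* Illustrative output (added commentary, not from the original source):
   for an 8-byte, 8-byte-aligned common decl "foo" carrying a
   common_object attribute argument "BAR", the routine above emits
   roughly

        .comm foo,8,1,BAR

   i.e. COMMON_ASM_OP, the name, the size, the byte (not log) alignment,
   and the attribute's argument appended last.  */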
static bool
alpha_find_lo_sum_using_gp (rtx insn)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
    {
      const_rtx x = *iter;
      if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
        return true;
    }
  return false;
}

static bool
alpha_does_function_need_gp (void)
{
  rtx_insn *insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return false;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    return true;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (cfun->is_thunk)
    return true;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (crtl->has_nonlocal_goto)
    return true;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && GET_CODE (PATTERN (insn)) != USE
        && GET_CODE (PATTERN (insn)) != CLOBBER
        && get_attr_usegp (insn))
      return true;

  return false;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx_insn *
set_frame_related_p (void)
{
  rtx_insn *seq = get_insns ();
  rtx_insn *insn;

  end_sequence ();

  if (!seq)
    return NULL;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
        {
          RTX_FRAME_RELATED_P (insn) = 1;
          insn = NEXT_INSN (insn);
        }
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, (void) set_frame_related_p ())
/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
                    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem;
  rtx_insn *insn;

  addr = plus_constant (Pmode, base_reg, base_ofs);
  mem = gen_frame_mem (DImode, addr);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
        {
          addr = plus_constant (Pmode, stack_pointer_rtx,
                                frame_bias + base_ofs);
          mem = gen_rtx_MEM (DImode, addr);
        }

      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (mem, frame_reg));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
                  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
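
/* Illustrative note (added commentary, not from the original source):
   when the save area is reached through a biased base register, say
   $24 = SP + 32768, a save of $9 at offset 0 stores to 0($24) while the
   REG_FRAME_RELATED_EXPR note attached above still describes the slot
   as SP+32768, which is the address the unwinder needs to see.  */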
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
        these are 'normal' functions with local vars and which are
        calling other functions
   - register frame (PROC_REGISTER)
        keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor)
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */

void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned HOST_WIDE_INT sa_mask = cfun->machine->sa_mask;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size = cfun->machine->sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size = cfun->machine->frame_size;
  /* Probed stack size; it additionally includes the size of
     the "reserve region" if any.  */
  HOST_WIDE_INT probed_size, sa_bias;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
        emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    emit_insn (gen_prologue_mcount ());

  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */

  probed_size = frame_size;
  if (flag_stack_check || flag_stack_clash_protection)
    probed_size += get_stack_check_protect ();

  if (probed_size <= 32768)
    {
      if (probed_size > 4096)
        {
          int probed;

          for (probed = 4096; probed < probed_size; probed += 8192)
            emit_insn (gen_stack_probe_internal (GEN_INT (-probed)));
7725 if we are probing beyond the frame because of -fstack-check. */
7726 if ((sa_size
== 0 && probed_size
> probed
- 4096)
7727 || flag_stack_check
|| flag_stack_clash_protection
)
7728 emit_insn (gen_stack_probe_internal (GEN_INT (-probed_size
)));
7731 if (frame_size
!= 0)
7732 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
7733 GEN_INT (-frame_size
))));
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
         number of 8192 byte blocks to probe.  We then probe each block
         in the loop and then set SP to the proper location.  If the
         amount remaining is > 4096, we have to do one more probe if we
         are not saving any registers or if we are probing beyond the
         frame because of -fstack-check.  */

      HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
      HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx_insn *seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));

      /* Because of the difficulty in emitting a new basic block this
         late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if ((leftover > 4096 && sa_size == 0)
          || flag_stack_check || flag_stack_clash_protection)
        {
          rtx last = gen_rtx_MEM (DImode,
                                  plus_constant (Pmode, ptr, -leftover));
          MEM_VOLATILE_P (last) = 1;
          emit_move_insn (last, const0_rtx);
        }

      if (flag_stack_check || flag_stack_clash_protection)
        {
          /* If -fstack-check is specified we have to load the entire
             constant into a register and subtract from the sp in one go,
             because the probed stack size is not equal to the frame size.  */
          HOST_WIDE_INT lo, hi;
          lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
          hi = frame_size - lo;
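
          /* Worked example (added commentary, not from the original
             source): the XOR/subtract pair sign-extends the low 16 bits,
             so frame_size == 0x19999 splits into lo == -0x6667 and
             hi == 0x20000, with hi + lo == frame_size and both halves
             fitting the ldah/lda immediate ranges.  */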
          emit_move_insn (ptr, GEN_INT (hi));
          emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
          seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
                                       ptr));
        }
      else
        seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
                                     GEN_INT (-leftover)));

      /* This alternative is special, because the DWARF code cannot
         possibly intuit through the loop above.  So we invent this
         note it looks at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      add_reg_note (seq, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (stack_pointer_rtx,
                                 plus_constant (Pmode, stack_pointer_rtx,
                                                -frame_size)));
    }
  /* Cope with very large offsets to the register save area.  */
  sa_bias = 0;
  sa_reg = stack_pointer_rtx;
  if (reg_offset + sa_size > 0x8000)
    {
      int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
      rtx sa_bias_rtx;

      if (low + sa_size <= 0x8000)
        sa_bias = reg_offset - low, reg_offset = low;
      else
        sa_bias = reg_offset, reg_offset = 0;

      sa_reg = gen_rtx_REG (DImode, 24);
      sa_bias_rtx = GEN_INT (sa_bias);

      if (add_operand (sa_bias_rtx, DImode))
        emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
      else
        {
          emit_move_insn (sa_reg, sa_bias_rtx);
          emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
        }
    }
  /* Save regs in stack order.  Beginning with VMS PV.  */
  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

  /* Save register RA next, followed by any other registers
     that need to be saved.  */
  for (unsigned i = REG_RA; sa_mask != 0; i = ctz_hwi(sa_mask))
    {
      emit_frame_store (i, sa_reg, sa_bias, reg_offset);
      reg_offset += 8;
      sa_mask &= ~(HOST_WIDE_INT_1U << i);
    }
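
  /* Illustrative note (added commentary, not from the original source):
     the loop starts at REG_RA and then walks the remaining mask in
     ctz_hwi order, so with sa_mask covering $9, $10 and $26 the stores
     happen as $26, $9, $10 at offsets 0, 8 and 16 into the save area.  */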
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Register frame procedures save the fp.  */
      if (alpha_procedure_type == PT_REGISTER)
        {
          rtx_insn *insn
            = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
                              hard_frame_pointer_rtx);
          add_reg_note (insn, REG_CFA_REGISTER, NULL);
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
        emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
                                    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
          && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
        FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (crtl->outgoing_args_size != 0)
        {
          rtx_insn *seq
            = emit_move_insn (stack_pointer_rtx,
                              plus_constant
                              (Pmode, hard_frame_pointer_rtx,
                               - (ALPHA_ROUND
                                  (crtl->outgoing_args_size))));

          /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
             if ! frame_pointer_needed.  Setting the bit will change the CFA
             computation rule to use sp again, which would be wrong if we had
             frame_pointer_needed, as this means sp might move unpredictably
             later on.

             Also, note that

               frame_pointer_needed
               => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM

             and

               crtl->outgoing_args_size != 0
               => alpha_procedure_type != PT_NULL,

             so when we are not setting the bit here, we are guaranteed to
             have emitted an FRP frame pointer update just before.  */
          RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
        }
    }
  else
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
        {
          if (TARGET_CAN_FAULT_IN_PROLOGUE)
            FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
          else
            /* This must always be the last instruction in the
               prologue, thus we emit a special move + clobber.  */
            FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
                                         stack_pointer_rtx, sa_reg)));
        }
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Output the textual info surrounding the prologue.  */

static void
alpha_start_function (FILE *file, const char *fnname,
                      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask, fmask;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size = cfun->machine->frame_size;
  /* The maximum debuggable frame size.  */
  const HOST_WIDE_INT max_frame_size = HOST_WIDE_INT_1 << 31;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  char *tramp_label = (char *) alloca (strlen (fnname) + 6);

#if TARGET_ABI_OPEN_VMS
  vms_start_function (fnname);
#endif

  alpha_fnname = fnname;

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  imask = cfun->machine->sa_mask & 0xffffffffu;
  fmask = cfun->machine->sa_mask >> 32;

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
         Otherwise, do it here.  */
      if (TARGET_ABI_OSF
          && ! alpha_function_needs_gp
          && ! cfun->is_thunk)
        {
          putc ('$', file);
          assemble_name (file, fnname);
          fputs ("..ng:\n", file);
        }
    }
  /* Nested functions on VMS that are potentially called via trampoline
     get a special transfer entry point that loads the called functions
     procedure descriptor and static chain.  */
  if (TARGET_ABI_OPEN_VMS
      && !TREE_PUBLIC (decl)
      && DECL_CONTEXT (decl)
      && !TYPE_P (DECL_CONTEXT (decl))
      && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
    {
      strcpy (tramp_label, fnname);
      strcat (tramp_label, "..tr");
      ASM_OUTPUT_LABEL (file, tramp_label);
      fprintf (file, "\tldq $1,24($27)\n");
      fprintf (file, "\tldq $27,16($27)\n");
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (TARGET_ABI_OSF
      && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
         math-library routines.  The value we set it to is PDSC_EXC_IEEE
         (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
             HOST_WIDE_INT_PRINT_DEC "\n",
             vms_unwind_regno,
             frame_size >= max_frame_size ? 0 : frame_size,
             reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
             (frame_pointer_needed
              ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
             frame_size >= max_frame_size ? 0 : frame_size,
             crtl->args.pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
        /* ??? Does VMS care if mask contains ra?  The old code didn't
           set it, so I don't here.  */
        fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
        fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
        fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
        {
          fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
                   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

          for (unsigned i = 0; i < 32; ++i)
            if (imask & (1UL << i))
              reg_offset += 8;
        }

      if (fmask)
        fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
                 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* If a user condition handler has been installed at some point, emit
     the procedure descriptor bits to point the Condition Handling Facility
     at the indirection wrapper, and state the fp offset at which the user
     handler may be found.  */
  if (cfun->machine->uses_condition_handler)
    {
      fprintf (file, "\t.handler __gcc_shell_handler\n");
      fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
    }

#ifdef TARGET_VMS_CRASH_DEBUG
  /* Support of minimal traceback info.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  switch_to_section (text_section);
#endif
#endif /* TARGET_ABI_OPEN_VMS */
}
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
             alpha_function_needs_gp || cfun->is_thunk);
}
/* Write function epilogue.  */

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned HOST_WIDE_INT sa_mask = cfun->machine->sa_mask;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size = cfun->machine->sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size = cfun->machine->frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem, reg, insn;
  rtx eh_ofs;
  rtx cfa_restores = NULL_RTX;

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
        reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
      else
        reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  fp_is_frame_pointer
    = (TARGET_ABI_OPEN_VMS
       ? alpha_procedure_type == PT_STACK
       : frame_pointer_needed);
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (crtl->calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if (TARGET_ABI_OPEN_VMS
          ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
          : frame_pointer_needed)
        emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
        {
          int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
          HOST_WIDE_INT bias;

          if (low + sa_size <= 0x8000)
            bias = reg_offset - low, reg_offset = low;
          else
            bias = reg_offset, reg_offset = 0;

          sa_reg = gen_rtx_REG (DImode, 22);
          sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);

          emit_move_insn (sa_reg, sa_reg_exp);
        }

      /* Restore registers in order, excepting a true frame pointer.  */
      for (unsigned i = REG_RA; sa_mask != 0; i = ctz_hwi(sa_mask))
        {
          if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
            fp_offset = reg_offset;
          else
            {
              mem = gen_frame_mem (DImode,
                                   plus_constant (Pmode, sa_reg,
                                                  reg_offset));
              reg = gen_rtx_REG (DImode, i);
              emit_move_insn (reg, mem);
              cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
                                             cfa_restores);
            }
          reg_offset += 8;
          sa_mask &= ~(HOST_WIDE_INT_1U << i);
        }
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
        {
          sp_adj1 = gen_rtx_REG (DImode, 23);
          emit_move_insn (sp_adj1,
                          gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
        }

      /* If the stack size is large, begin computation into a temporary
         register so as not to interfere with a potential fp restore,
         which must be consecutive with an SP restore.  */
      if (frame_size < 32768 && !cfun->calls_alloca)
        sp_adj2 = GEN_INT (frame_size);
      else if (frame_size < 0x40007fffL)
        {
          int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

          sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
          if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
            sp_adj1 = sa_reg;
          else
            {
              sp_adj1 = gen_rtx_REG (DImode, 23);
              emit_move_insn (sp_adj1, sp_adj2);
            }
          sp_adj2 = GEN_INT (low);
        }
      else
        {
          rtx tmp = gen_rtx_REG (DImode, 23);
          sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
          if (!sp_adj2)
            {
              /* We can't drop new things to memory this late, afaik,
                 so build it up by pieces.  */
              sp_adj2 = alpha_emit_set_long_const (tmp, frame_size);
              gcc_assert (sp_adj2);
            }
        }

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (fp_is_frame_pointer)
        {
          emit_insn (gen_blockage ());
          mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
                                                      fp_offset));
          emit_move_insn (hard_frame_pointer_rtx, mem);
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }
      else if (TARGET_ABI_OPEN_VMS)
        {
          emit_insn (gen_blockage ());
          emit_move_insn (hard_frame_pointer_rtx,
                          gen_rtx_REG (DImode, vms_save_fp_regno));
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
        insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
      else
        insn = emit_move_insn (stack_pointer_rtx,
                               gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      gcc_assert (cfa_restores == NULL);

      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
        {
          emit_insn (gen_blockage ());
          insn = emit_move_insn (hard_frame_pointer_rtx,
                                 gen_rtx_REG (DImode, vms_save_fp_regno));
          add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
  rtx_insn *insn;

  /* We output a nop after noreturn calls at the very end of the function to
     ensure that the return address always remains in the caller's code range,
     as not doing so might confuse unwinding engines.  */
  insn = get_last_insn ();
  if (!INSN_P (insn))
    insn = prev_active_insn (insn);
  if (insn && CALL_P (insn))
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);

#if TARGET_ABI_OPEN_VMS
  /* Write the linkage entries.  */
  alpha_write_linkage (file, fnname);
#endif

  /* End the function.  */
  if (TARGET_ABI_OPEN_VMS
      || !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;
}
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                           tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
  HOST_WIDE_INT hi, lo;
  rtx this_rtx, funexp;
  rtx_insn *insn;

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 17);
  else
    this_rtx = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
      if (lo)
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta);
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
        {
          if (hi)
            emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
        }
      else
        {
          tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
                                            vcall_offset);
          emit_insn (gen_adddi3 (tmp, tmp, tmp2));
          lo = 0;
        }
      if (lo)
        tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
        tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  */
  insn = get_insns ();
  shorten_branches (insn);
  assemble_start_function (thunk_fndecl, fnname);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
  assemble_end_function (thunk_fndecl, fnname);
}
#endif /* TARGET_ABI_OSF */
/* Debugging support.  */

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }

  else if (name != current_function_file
           && strcmp (name, current_function_file) != 0)
    {
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
         ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
        summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
        int regno = REGNO (x);
        unsigned long mask = ((unsigned long) 1) << (regno % 32);

        if (regno == 31 || regno == 63)
          break;

        if (set)
          {
            if (regno < 32)
              sum->defd.i |= mask;
            else
              sum->defd.fp |= mask;
          }
        else
          {
            if (regno < 32)
              sum->used.i  |= mask;
            else
              sum->used.fp |= mask;
          }
      }
      break;

    case MEM:
      if (set)
        sum->defd.mem = 1;
      else
        sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_WIDE_INT:  case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:       case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        switch (format_ptr[i])
          {
          case 'e':
            summarize_insn (XEXP (x, i), sum, 0);
            break;

          case 'E':
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              summarize_insn (XVECEXP (x, i, j), sum, 0);
            break;

          case 'i':
            break;

          default:
            gcc_unreachable ();
          }
    }
}
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
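
/* Illustrative example (added commentary, not from the original source):
   in a sequence such as

        addt $f1,$f2,$f3        # may trap
        mult $f4,$f5,$f6        # still inside the shadow
        trapb                   # closes the shadow

   the two arithmetic insns form one trap shadow; rule (c) would be
   violated if both wrote the same destination, and rule (d) if a branch
   appeared before the trapb.  */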
static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx_insn *i, *n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (NOTE_P (i))
        {
          switch (NOTE_KIND (i))
            {
            case NOTE_INSN_EH_REGION_BEG:
              exception_nesting++;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EH_REGION_END:
              exception_nesting--;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EPILOGUE_BEG:
              if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
                goto close_shadow;
              break;
            }
        }
      else if (trap_pending)
        {
          if (alpha_tp == ALPHA_TP_FUNC)
            {
              if (JUMP_P (i)
                  && GET_CODE (PATTERN (i)) == RETURN)
                goto close_shadow;
            }
          else if (alpha_tp == ALPHA_TP_INSN)
            {
              if (optimize > 0)
                {
                  struct shadow_summary sum;

                  sum.used.i = 0;
                  sum.used.fp = 0;
                  sum.used.mem = 0;
                  sum.defd = sum.used;

                  switch (GET_CODE (i))
                    {
                    case INSN:
                      /* Annoyingly, get_attr_trap will die on these.  */
                      if (GET_CODE (PATTERN (i)) == USE
                          || GET_CODE (PATTERN (i)) == CLOBBER)
                        break;

                      summarize_insn (PATTERN (i), &sum, 0);

                      if ((sum.defd.i & shadow.defd.i)
                          || (sum.defd.fp & shadow.defd.fp))
                        {
                          /* (c) would be violated */
                          goto close_shadow;
                        }

                      /* Combine shadow with summary of current insn: */
                      shadow.used.i   |= sum.used.i;
                      shadow.used.fp  |= sum.used.fp;
                      shadow.used.mem |= sum.used.mem;
                      shadow.defd.i   |= sum.defd.i;
                      shadow.defd.fp  |= sum.defd.fp;
                      shadow.defd.mem |= sum.defd.mem;

                      if ((sum.defd.i & shadow.used.i)
                          || (sum.defd.fp & shadow.used.fp)
                          || (sum.defd.mem & shadow.used.mem))
                        {
                          /* (a) would be violated (also takes care of (b))  */
                          gcc_assert (get_attr_trap (i) != TRAP_YES
                                      || (!(sum.defd.i & sum.used.i)
                                          && !(sum.defd.fp & sum.used.fp)));

                          goto close_shadow;
                        }
                      break;

                    case BARRIER:
                      /* __builtin_unreachable can expand to no code at all,
                         leaving (barrier) RTXes in the instruction stream.  */
                      goto close_shadow_notrapb;

                    case JUMP_INSN:
                    case CALL_INSN:
                    case CODE_LABEL:
                      goto close_shadow;

                    default:
                      gcc_unreachable ();
                    }
                }
              else
                {
                close_shadow:
                  n = emit_insn_before (gen_trapb (), i);
                  PUT_MODE (n, TImode);
                  PUT_MODE (i, TImode);
                close_shadow_notrapb:
                  trap_pending = 0;
                  shadow.used.i = 0;
                  shadow.used.fp = 0;
                  shadow.used.mem = 0;
                  shadow.defd = shadow.used;
                }
            }
        }

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
          && NONJUMP_INSN_P (i)
          && GET_CODE (PATTERN (i)) != USE
          && GET_CODE (PATTERN (i)) != CLOBBER
          && get_attr_trap (i) == TRAP_YES)
        {
          if (optimize && !trap_pending)
            summarize_insn (PATTERN (i), &shadow, 0);
          trap_pending = 1;
        }
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP,
  EV4_IB0,
  EV4_IB1,
  EV4_IBX
};

enum alphaev5_pipe {
  EV5_STOP,
  EV5_NONE,
  EV5_E01,
  EV5_E0,
  EV5_E1,
  EV5_FAM,
  EV5_FA,
  EV5_FM
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx_insn *insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx_insn *insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx_insn *
alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
        {
        case EV4_STOP:
          /* Force complex instructions to start new groups.  */
          if (in_use)
            goto done;

          /* If this is a completely unrecognized insn, it's an asm.
             We don't know how long it is, so record length as -1 to
             signal a needed realignment.  */
          if (recog_memoized (insn) < 0)
            len = -1;
          else
            len = get_attr_length (insn);
          goto next_and_done;

        case EV4_IBX:
          if (in_use & EV4_IB0)
            {
              if (in_use & EV4_IB1)
                goto done;
              in_use |= EV4_IB1;
            }
          else
            in_use |= EV4_IB0 | EV4_IBX;
          break;

        case EV4_IB0:
          if (in_use & EV4_IB0)
            {
              if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
                goto done;
              in_use |= EV4_IB1;
            }
          in_use |= EV4_IB0;
          break;

        case EV4_IB1:
          if (in_use & EV4_IB1)
            goto done;
          in_use |= EV4_IB1;
          break;

        default:
          gcc_unreachable ();
        }
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (JUMP_P (insn))
        goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
        goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
        goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
        goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx_insn *
alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
        {
        case EV5_STOP:
          /* Force complex instructions to start new groups.  */
          if (in_use)
            goto done;

          /* If this is a completely unrecognized insn, it's an asm.
             We don't know how long it is, so record length as -1 to
             signal a needed realignment.  */
          if (recog_memoized (insn) < 0)
            len = -1;
          else
            len = get_attr_length (insn);
          goto next_and_done;

        /* ??? Most of the places below, we would like to assert never
           happen, as it would indicate an error either in Haifa, or
           in the scheduling description.  Unfortunately, Haifa never
           schedules the last instruction of the BB, so we don't have
           an accurate TI bit to go off.  */
        case EV5_E01:
          if (in_use & EV5_E0)
            {
              if (in_use & EV5_E1)
                goto done;
              in_use |= EV5_E1;
            }
          else
            in_use |= EV5_E0 | EV5_E01;
          break;

        case EV5_E0:
          if (in_use & EV5_E0)
            {
              if (!(in_use & EV5_E01) || (in_use & EV5_E1))
                goto done;
              in_use |= EV5_E1;
            }
          in_use |= EV5_E0;
          break;

        case EV5_E1:
          if (in_use & EV5_E1)
            goto done;
          in_use |= EV5_E1;
          break;

        case EV5_FAM:
          if (in_use & EV5_FA)
            {
              if (in_use & EV5_FM)
                goto done;
              in_use |= EV5_FM;
            }
          else
            in_use |= EV5_FA | EV5_FAM;
          break;

        case EV5_FA:
          if (in_use & EV5_FA)
            goto done;
          in_use |= EV5_FA;
          break;

        case EV5_FM:
          if (in_use & EV5_FM)
            goto done;
          in_use |= EV5_FM;
          break;

        case EV5_NONE:
          break;

        default:
          gcc_unreachable ();
        }
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
         that no more IBR, FBR, or JSR insns may be slotted.  */
      if (JUMP_P (insn))
        goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
        goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
        goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
        goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
/* The instruction group alignment main loop.  */

static void
alpha_align_insns_1 (unsigned int max_align,
                     rtx_insn *(*next_group) (rtx_insn *, int *, int *),
                     rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx_insn *i, *next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  unsigned int option_alignment = align_functions.levels[0].get_value ();
  if (option_alignment < 4)
    align = 4;
  else if ((unsigned int) option_alignment < max_align)
    align = option_alignment;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (NOTE_P (i))
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (LABEL_P (i))
        {
          unsigned int new_align
            = label_to_alignment (i).levels[0].get_value ();

          if (new_align >= align)
            {
              align = new_align < max_align ? new_align : max_align;
              ofs = 0;
            }

          else if (ofs & (new_align-1))
            ofs = (ofs | (new_align-1)) + 1;
          gcc_assert (!len);
        }

      /* Handle complex instructions special.  */
      else if (in_use == 0)
        {
          /* Asms will have length < 0.  This is a signal that we have
             lost alignment knowledge.  Assume, however, that the asm
             will not mis-align instructions.  */
          if (len < 0)
            {
              ofs = 0;
              align = 4;
              len = 0;
            }
        }

      /* If the known alignment is smaller than the recognized insn group,
         realign the output.  */
      else if ((int) align < len)
        {
          unsigned int new_log_align = len > 8 ? 4 : 3;
          rtx_insn *prev, *where;

          where = prev = prev_nonnote_insn (i);
          if (!where || !LABEL_P (where))
            where = i;

          /* Can't realign between a call and its gp reload.  */
          if (! (TARGET_EXPLICIT_RELOCS
                 && prev && CALL_P (prev)))
            {
              emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
              align = 1 << new_log_align;
              ofs = 0;
            }
        }

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
        ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
         we need to add padding to keep the group together.  Rather
         than simply leaving the insn filling to the assembler, we
         can make use of the knowledge of what sorts of instructions
         were issued in the previous group to make sure that all of
         the added nops are really free.  */
      else if (ofs + len > (int) align)
        {
          int nop_count = (align - ofs) / 4;
          rtx_insn *where;

          /* Insert nops before labels, branches, and calls to truly merge
             the execution of the nops with the previous instruction group.  */
          where = prev_nonnote_insn (i);
          if (where)
            {
              if (LABEL_P (where))
                {
                  rtx_insn *where2 = prev_nonnote_insn (where);
                  if (where2 && JUMP_P (where2))
                    where = where2;
                }
              else if (NONJUMP_INSN_P (where))
                where = i;
            }
          else
            where = i;

          do
            emit_insn_before ((*next_nop)(&prev_in_use), where);
          while (--nop_count);
          ofs = 0;
        }

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
static void
alpha_align_insns (void)
{
  if (alpha_tune == PROCESSOR_EV4)
    alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
  else if (alpha_tune == PROCESSOR_EV5)
    alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
  else
    gcc_unreachable ();
}
/* Insert an unop between sibcall or noreturn function call and GP load.  */

static void
alpha_pad_function_end (void)
{
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (!CALL_P (insn)
          || !(SIBLING_CALL_P (insn)
               || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
        continue;

      next = next_active_insn (insn);
      if (next)
        {
          rtx pat = PATTERN (next);

          if (GET_CODE (pat) == SET
              && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
              && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
            emit_insn_after (gen_unop (), insn);
        }
    }
}
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  /* Workaround for a linker error that triggers when an exception
     handler immediately follows a sibcall or a noreturn function.

     In the sibcall case:

       The instruction stream from an object file:

	 1d8:   00 00 fb 6b     jmp     (t12)
	 1dc:   00 00 ba 27     ldah    gp,0(ra)
	 1e0:   00 00 bd 23     lda     gp,0(gp)
	 1e4:   00 00 7d a7     ldq     t12,0(gp)
	 1e8:   00 40 5b 6b     jsr     ra,(t12),1ec <__funcZ+0x1ec>

       was converted in the final link pass to:

	 12003aa88:   67 fa ff c3     br      120039428 <...>
	 12003aa8c:   00 00 fe 2f     unop
	 12003aa90:   00 00 fe 2f     unop
	 12003aa94:   48 83 7d a7     ldq     t12,-31928(gp)
	 12003aa98:   00 40 5b 6b     jsr     ra,(t12),12003aa9c <__func+0x1ec>

     And in the noreturn case:

       The instruction stream from an object file:

	  54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
	  58:   00 00 ba 27     ldah    gp,0(ra)
	  5c:   00 00 bd 23     lda     gp,0(gp)
	  60:   00 00 7d a7     ldq     t12,0(gp)
	  64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>

       was converted in the final link pass to:

	  fdb24:   a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
	  fdb28:   00 00 fe 2f     unop
	  fdb2c:   00 00 fe 2f     unop
	  fdb30:   30 82 7d a7     ldq     t12,-32208(gp)
	  fdb34:   00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>

     GP load instructions were wrongly cleared by the linker relaxation
     pass.  This workaround prevents removal of GP loads by inserting
     an unop instruction between a sibcall or noreturn function call and
     exception handler prologue.  */

  if (current_function_has_exception_handlers ())
    alpha_pad_function_end ();

  /* CALL_PAL that implements trap insn, updates program counter to point
     after the insn.  In case trap is the last insn in the function,
     emit NOP to guarantee that PC remains inside function boundaries.
     This workaround is needed to get reliable backtraces.  */

  rtx_insn *insn = prev_active_insn (get_last_insn ());

  if (insn && NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL)
        {
          rtx vec = XVECEXP (pat, 0, 0);
          if (GET_CODE (vec) == TRAP_IF
              && XEXP (vec, 0) == const1_rtx)
            emit_insn_after (gen_unop (), insn);
        }
    }
}
static void
alpha_file_start (void)
{
  default_file_start ();

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (TARGET_ABI_OSF)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
        arch = "ev6";
      else if (TARGET_MAX)
        arch = "pca56";
      else if (TARGET_BWX)
        arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
        arch = "ev5";
      else
        arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}

/* Since we don't have a .dynbss section, we should not allow global
   relocations in the .rodata section.  */

static int
alpha_elf_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}
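
/* Roughly, the mask returned above is consulted when sections are
   chosen for initialized data and constants (see
   categorize_decl_for_section in varasm.c): the value-1 bit moves
   objects needing relocations against local symbols out of read-only
   sections, and the value-2 bit does the same for global symbols.
   So 2 bans global relocations from .rodata even for non-PIC code,
   and 3 additionally bans local ones under -fPIC.  */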

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (machine_mode mode, rtx x,
                              unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

static unsigned int
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}
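
/* SECTION_SMALL tags these as small-data sections: objects placed in
   .sdata/.sbss are assumed reachable from $gp with a single 16-bit
   signed displacement, which is what makes the -G small-data
   threshold profitable on Alpha.  */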

/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum reloc_kind
{
  KIND_LINKAGE,
  KIND_CODEADDR
};

struct GTY(()) alpha_links
{
  rtx func;
  rtx linkage;
  enum reloc_kind rkind;
};

#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (machine_mode mode)
{
  switch (mode)
    {
    case E_SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case E_DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
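
/* Resulting layout, as implied by the shifts above: the low bits hold
   the argument count, and each of the six register arguments gets a
   3-bit VMS type code, argument I at bits 8 + 3*I.  For example, two
   integer arguments give just the count, 2, assuming the integer code
   I64 is the enum's zero value.  */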

/* Return a SYMBOL_REF representing the reference to the .linkage entry
   of function FUNC built for calls made from CFUNDECL.  LFLAG is 1 if
   this is the reference to the linkage pointer value, 0 if this is the
   reference to the function entry value.  RFLAG is 1 if this is a
   reduced reference (code address only), 0 if this is a full reference.  */

rtx
alpha_use_linkage (rtx func, bool lflag, bool rflag)
{
  struct alpha_links *al = NULL;
  const char *name = XSTR (func, 0);

  if (cfun->machine->links)
    {
      /* Is this name already defined?  */
      alpha_links **slot = cfun->machine->links->get (name);
      if (slot)
        al = *slot;
    }
  else
    cfun->machine->links
      = hash_map<nofree_string_hash, alpha_links *>::create_ggc (64);

  if (al == NULL)
    {
      size_t buf_len;
      char *linksym;
      tree id;

      /* Follow transparent alias, as this is used for CRTL translations.  */
      id = maybe_get_identifier (name);
      if (id)
        {
          while (IDENTIFIER_TRANSPARENT_ALIAS (id))
            id = TREE_CHAIN (id);
          name = IDENTIFIER_POINTER (id);
        }

      buf_len = strlen (name) + 8 + 9;
      linksym = (char *) alloca (buf_len);
      snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);

      al = ggc_alloc<alpha_links> ();
      al->func = func;
      al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));

      cfun->machine->links->put (ggc_strdup (name), al);
    }

  al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
  else
    return al->linkage;
}
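
/* As an example of the naming scheme above: a reference to FOO from
   the function numbered 4 creates the linkage symbol "$4..FOO..lk".
   With LFLAG set, the returned MEM addresses the second quadword of
   the linkage pair, <linksym>+8, i.e. the linkage pointer rather than
   the entry address.  */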

static void
alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
{
  ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
  if (link->rkind == KIND_CODEADDR)
    {
      /* External and used, request code address.  */
      fprintf (stream, "\t.code_address ");
    }
  else
    {
      if (!SYMBOL_REF_EXTERNAL_P (link->func)
          && SYMBOL_REF_LOCAL_P (link->func))
        {
          /* Locally defined, build linkage pair.  */
          fprintf (stream, "\t.quad %s..en\n", name);
          fprintf (stream, "\t.quad ");
        }
      else
        {
          /* External, request linkage pair.  */
          fprintf (stream, "\t.linkage ");
        }
    }
  assemble_name (stream, name);
  fputs ("\n", stream);
}

static void
alpha_write_linkage (FILE *stream, const char *funname)
{
  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

#ifdef TARGET_VMS_CRASH_DEBUG
  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
#endif

  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
           alpha_procedure_type == PT_STACK ? "stack"
           : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (cfun->machine->links)
    {
      hash_map<nofree_string_hash, alpha_links *>::iterator iter
        = cfun->machine->links->begin ();
      for (; iter != cfun->machine->links->end (); ++iter)
        alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
    }
}

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
                       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}

/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

#else

rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
                   bool lflag ATTRIBUTE_UNUSED,
                   bool rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
         remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}
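
/* The effect of these mappings: on VMS a 32-bit signed division such
   as "a / b" is emitted as a call to the run-time routine OTS$DIV_I
   instead of being routed to the usual libgcc helper.  */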

/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;
  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
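
/* Registers 32 to 62 are the floating-point registers $f0-$f30; $f31
   (hard reg 63) always reads as zero and remains fixed either way,
   which is why the loop stops short of it.  */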

/* Canonicalize a comparison from one we don't have to one we do have.  */

static void
alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                               bool op0_preserve_value)
{
  if (!op0_preserve_value
      && (*code == GE || *code == GT || *code == GEU || *code == GTU)
      && (REG_P (*op1) || *op1 == const0_rtx))
    {
      std::swap (*op0, *op1);
      *code = (int)swap_condition ((enum rtx_code)*code);
    }

  if ((*code == LT || *code == LTU)
      && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
    {
      *code = *code == LT ? LE : LEU;
      *op1 = GEN_INT (255);
    }
}
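
/* Two examples of the rewrites above: "a > b" with b in a register
   becomes "b < a", matching the cmplt/cmpult instructions the Alpha
   actually has; and "a < 256" becomes "a <= 255", so the constant
   fits in the 8-bit literal field of cmple/cmpule.  */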

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

static void
alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);

  tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
  tree new_fenv_var, reload_fenv, restore_fnenv;
  tree update_call, atomic_feraiseexcept, hold_fnclex;

  /* Assume OSF/1 compatible interfaces.  */
  if (!TARGET_ABI_OSF)
    return;

  /* Generate the equivalent of:
       unsigned long fenv_var;
       fenv_var = __ieee_get_fp_control ();

       unsigned long masked_fenv;
       masked_fenv = fenv_var & mask;

       __ieee_set_fp_control (masked_fenv);  */

  fenv_var = create_tmp_var_raw (long_unsigned_type_node);
  get_fpscr
    = build_fn_decl ("__ieee_get_fp_control",
                     build_function_type_list (long_unsigned_type_node, NULL));
  set_fpscr
    = build_fn_decl ("__ieee_set_fp_control",
                     build_function_type_list (void_type_node, NULL));
  mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
  ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
                    fenv_var, build_call_expr (get_fpscr, 0));
  masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
  hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
  *hold = build2 (COMPOUND_EXPR, void_type_node,
                  build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
                  hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
       __ieee_set_fp_control (masked_fenv);  */

  *clear = build_call_expr (set_fpscr, 1, masked_fenv);

  /* Generate the equivalent of:
       unsigned long new_fenv_var;
       new_fenv_var = __ieee_get_fp_control ();

       __ieee_set_fp_control (fenv_var);

       __atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var_raw (long_unsigned_type_node);
  reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
                        build_call_expr (get_fpscr, 0));
  restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call
    = build_call_expr (atomic_feraiseexcept, 1,
                       fold_convert (integer_type_node, new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
                    build2 (COMPOUND_EXPR, void_type_node,
                            reload_fenv, restore_fnenv),
                    update_call);
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  On Alpha, the integer registers
   can hold any mode.  The floating-point registers can hold 64-bit
   integers as well, but not smaller values.  */

static bool
alpha_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (IN_RANGE (regno, 32, 62))
    return (mode == SFmode
            || mode == DFmode
            || mode == DImode
            || mode == SCmode
            || mode == DCmode);
  return true;
}

/* Implement TARGET_MODES_TIEABLE_P.  This asymmetric test rejects only
   the case where MODE1 could be put in an FP register but MODE2 could
   not.  */

static bool
alpha_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (alpha_hard_regno_mode_ok (32, mode1)
          ? alpha_hard_regno_mode_ok (32, mode2)
          : true);
}
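
/* For instance, DImode fits in a floating-point register while SImode
   does not, so DImode is not tieable with SImode; SImode, which can
   never land in an FP register, is tieable with every mode.  */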

/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
alpha_can_change_mode_class (machine_mode from, machine_mode to,
                             reg_class_t rclass)
{
  return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
          || !reg_classes_intersect_p (FLOAT_REGS, rclass));
}
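
/* E.g. narrowing a DImode value held in an FP register to SImode via
   a subreg is rejected, because the FP registers cannot be accessed
   at a different width; same-size changes such as DImode <-> DFmode
   remain allowed for any class.  */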

/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND alpha_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS alpha_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P alpha_print_operand_punct_valid_p

/* Use 16-bit anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST alpha_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST alpha_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE alpha_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE alpha_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P alpha_function_value_regno_p
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED alpha_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE alpha_secondary_memory_needed_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  alpha_override_options_after_change

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK alpha_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P alpha_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS alpha_can_change_mode_class

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"