]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/alpha/alpha.c
Use function_arg_info for TARGET_FUNCTION_ARG_ADVANCE
[thirdparty/gcc.git] / gcc / config / alpha / alpha.c
CommitLineData
a6f12d7c 1/* Subroutines used for code generation on the DEC Alpha.
a5544970 2 Copyright (C) 1992-2019 Free Software Foundation, Inc.
d60a05a1 3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
a6f12d7c 4
7ec022b2 5This file is part of GCC.
a6f12d7c 6
7ec022b2 7GCC is free software; you can redistribute it and/or modify
a6f12d7c 8it under the terms of the GNU General Public License as published by
2f83c7d6 9the Free Software Foundation; either version 3, or (at your option)
a6f12d7c
RK
10any later version.
11
7ec022b2 12GCC is distributed in the hope that it will be useful,
a6f12d7c
RK
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
2f83c7d6
NC
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
a6f12d7c
RK
20
21
8fcc61f8
RS
22#define IN_TARGET_CODE 1
23
a6f12d7c 24#include "config.h"
3c303f52 25#include "system.h"
4977bab6 26#include "coretypes.h"
c7131fb2 27#include "backend.h"
e11c4407
AM
28#include "target.h"
29#include "rtl.h"
c7131fb2 30#include "tree.h"
314e6352
ML
31#include "stringpool.h"
32#include "attribs.h"
e73cf9a2 33#include "memmodel.h"
c7131fb2 34#include "gimple.h"
c7131fb2 35#include "df.h"
8cdf5397 36#include "predict.h"
e11c4407 37#include "tm_p.h"
c7131fb2 38#include "ssa.h"
e11c4407
AM
39#include "expmed.h"
40#include "optabs.h"
41#include "regs.h"
42#include "emit-rtl.h"
43#include "recog.h"
44#include "diagnostic-core.h"
40e23961 45#include "alias.h"
40e23961 46#include "fold-const.h"
d8a2d370
DN
47#include "stor-layout.h"
48#include "calls.h"
49#include "varasm.h"
a6f12d7c
RK
50#include "output.h"
51#include "insn-attr.h"
36566b39 52#include "explow.h"
a6f12d7c 53#include "expr.h"
e78d8e51 54#include "reload.h"
9ecc37f0 55#include "except.h"
677f3fa8 56#include "common/common-target.h"
14691f8d 57#include "debug.h"
f1e639b1 58#include "langhooks.h"
60393bbc 59#include "cfgrtl.h"
4bdf6418
UB
60#include "tree-pass.h"
61#include "context.h"
b6db8af6 62#include "gimple-iterator.h"
45b0be94 63#include "gimplify.h"
9d30f3c1 64#include "tree-stdarg.h"
dfcbeaa5 65#include "tm-constrs.h"
5e3fef6c 66#include "libfuncs.h"
f49278e6 67#include "params.h"
9b2b7279 68#include "builtins.h"
572e01c7 69#include "rtl-iter.h"
c518c102 70#include "flags.h"
9ecc37f0 71
994c5d85 72/* This file should be included last. */
d58627a0
RS
73#include "target-def.h"
74
285a5742 75/* Specify which cpu to schedule for. */
8bea7f7c 76enum processor_type alpha_tune;
9ecc37f0 77
8bea7f7c 78/* Which cpu we're generating code for. */
9b009d45 79enum processor_type alpha_cpu;
8bea7f7c 80
f676971a 81static const char * const alpha_cpu_name[] =
bcbbac26
RH
82{
83 "ev4", "ev5", "ev6"
84};
da792a68 85
6245e3df
RK
86/* Specify how accurate floating-point traps need to be. */
87
88enum alpha_trap_precision alpha_tp;
89
90/* Specify the floating-point rounding mode. */
91
92enum alpha_fp_rounding_mode alpha_fprm;
93
94/* Specify which things cause traps. */
95
96enum alpha_fp_trap_mode alpha_fptm;
97
825dda42 98/* Nonzero if inside of a function, because the Alpha asm can't
48f6bfac
RK
99 handle .files inside of functions. */
100
101static int inside_function = FALSE;
102
bcbbac26
RH
103/* The number of cycles of latency we should assume on memory reads. */
104
ef995717 105static int alpha_memory_latency = 3;
bcbbac26 106
9c0e94a5
RH
107/* Whether the function needs the GP. */
108
109static int alpha_function_needs_gp;
110
941cc05a
RK
111/* The assembler name of the current function. */
112
113static const char *alpha_fnname;
114
1eb356b9 115/* The next explicit relocation sequence number. */
f030826a 116extern GTY(()) int alpha_next_sequence_number;
1eb356b9
RH
117int alpha_next_sequence_number = 1;
118
119/* The literal and gpdisp sequence numbers for this insn, as printed
120 by %# and %* respectively. */
f030826a
RH
121extern GTY(()) int alpha_this_literal_sequence_number;
122extern GTY(()) int alpha_this_gpdisp_sequence_number;
1eb356b9
RH
123int alpha_this_literal_sequence_number;
124int alpha_this_gpdisp_sequence_number;
125
3c50106f
RH
126/* Costs of various operations on the different architectures. */
127
128struct alpha_rtx_cost_data
129{
130 unsigned char fp_add;
131 unsigned char fp_mult;
132 unsigned char fp_div_sf;
133 unsigned char fp_div_df;
134 unsigned char int_mult_si;
135 unsigned char int_mult_di;
136 unsigned char int_shift;
137 unsigned char int_cmov;
8260c194 138 unsigned short int_div;
3c50106f
RH
139};
140
141static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
142{
143 { /* EV4 */
144 COSTS_N_INSNS (6), /* fp_add */
145 COSTS_N_INSNS (6), /* fp_mult */
146 COSTS_N_INSNS (34), /* fp_div_sf */
147 COSTS_N_INSNS (63), /* fp_div_df */
148 COSTS_N_INSNS (23), /* int_mult_si */
149 COSTS_N_INSNS (23), /* int_mult_di */
150 COSTS_N_INSNS (2), /* int_shift */
151 COSTS_N_INSNS (2), /* int_cmov */
9b4f6a07 152 COSTS_N_INSNS (97), /* int_div */
3c50106f
RH
153 },
154 { /* EV5 */
155 COSTS_N_INSNS (4), /* fp_add */
156 COSTS_N_INSNS (4), /* fp_mult */
157 COSTS_N_INSNS (15), /* fp_div_sf */
158 COSTS_N_INSNS (22), /* fp_div_df */
159 COSTS_N_INSNS (8), /* int_mult_si */
160 COSTS_N_INSNS (12), /* int_mult_di */
161 COSTS_N_INSNS (1) + 1, /* int_shift */
162 COSTS_N_INSNS (1), /* int_cmov */
9b4f6a07 163 COSTS_N_INSNS (83), /* int_div */
3c50106f
RH
164 },
165 { /* EV6 */
166 COSTS_N_INSNS (4), /* fp_add */
167 COSTS_N_INSNS (4), /* fp_mult */
168 COSTS_N_INSNS (12), /* fp_div_sf */
169 COSTS_N_INSNS (15), /* fp_div_df */
170 COSTS_N_INSNS (7), /* int_mult_si */
171 COSTS_N_INSNS (7), /* int_mult_di */
172 COSTS_N_INSNS (1), /* int_shift */
173 COSTS_N_INSNS (2), /* int_cmov */
9b4f6a07 174 COSTS_N_INSNS (86), /* int_div */
3c50106f
RH
175 },
176};
177
8260c194
RH
178/* Similar but tuned for code size instead of execution latency. The
179 extra +N is fractional cost tuning based on latency. It's used to
180 encourage use of cheaper insns like shift, but only if there's just
181 one of them. */
182
183static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
184{
185 COSTS_N_INSNS (1), /* fp_add */
186 COSTS_N_INSNS (1), /* fp_mult */
187 COSTS_N_INSNS (1), /* fp_div_sf */
188 COSTS_N_INSNS (1) + 1, /* fp_div_df */
189 COSTS_N_INSNS (1) + 1, /* int_mult_si */
190 COSTS_N_INSNS (1) + 2, /* int_mult_di */
191 COSTS_N_INSNS (1), /* int_shift */
192 COSTS_N_INSNS (1), /* int_cmov */
193 COSTS_N_INSNS (6), /* int_div */
194};
195
e9a25f70 196/* Get the number of args of a function in one of two ways. */
75db85d8 197#if TARGET_ABI_OPEN_VMS
38173d38 198#define NUM_ARGS crtl->args.info.num_args
e9a25f70 199#else
38173d38 200#define NUM_ARGS crtl->args.info
e9a25f70 201#endif
26250081 202
26250081
RH
203#define REG_PV 27
204#define REG_RA 26
6d8fd7bb 205
a5c24926
RH
206/* Declarations of static functions. */
207static struct machine_function *alpha_init_machine_status (void);
0da4e73a 208static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
4bdf6418
UB
209static void alpha_handle_trap_shadows (void);
210static void alpha_align_insns (void);
ad2c39af 211static void alpha_override_options_after_change (void);
4977bab6 212
a5c24926 213#if TARGET_ABI_OPEN_VMS
735f469b 214static void alpha_write_linkage (FILE *, const char *);
095a2d76 215static bool vms_valid_pointer_mode (scalar_int_mode);
171da07a
RH
216#else
217#define vms_patch_builtins() gcc_unreachable()
c590b625 218#endif
672a6f42 219\f
4bdf6418
UB
220static unsigned int
221rest_of_handle_trap_shadows (void)
222{
223 alpha_handle_trap_shadows ();
224 return 0;
225}
226
227namespace {
228
229const pass_data pass_data_handle_trap_shadows =
230{
231 RTL_PASS,
232 "trap_shadows", /* name */
233 OPTGROUP_NONE, /* optinfo_flags */
234 TV_NONE, /* tv_id */
235 0, /* properties_required */
236 0, /* properties_provided */
237 0, /* properties_destroyed */
238 0, /* todo_flags_start */
239 TODO_df_finish, /* todo_flags_finish */
240};
241
242class pass_handle_trap_shadows : public rtl_opt_pass
243{
244public:
245 pass_handle_trap_shadows(gcc::context *ctxt)
246 : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
247 {}
248
249 /* opt_pass methods: */
250 virtual bool gate (function *)
251 {
252 return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
253 }
254
255 virtual unsigned int execute (function *)
256 {
257 return rest_of_handle_trap_shadows ();
258 }
259
260}; // class pass_handle_trap_shadows
261
262} // anon namespace
263
264rtl_opt_pass *
265make_pass_handle_trap_shadows (gcc::context *ctxt)
266{
267 return new pass_handle_trap_shadows (ctxt);
268}
269
270static unsigned int
271rest_of_align_insns (void)
272{
273 alpha_align_insns ();
274 return 0;
275}
276
277namespace {
278
279const pass_data pass_data_align_insns =
280{
281 RTL_PASS,
282 "align_insns", /* name */
283 OPTGROUP_NONE, /* optinfo_flags */
284 TV_NONE, /* tv_id */
285 0, /* properties_required */
286 0, /* properties_provided */
287 0, /* properties_destroyed */
288 0, /* todo_flags_start */
289 TODO_df_finish, /* todo_flags_finish */
290};
291
292class pass_align_insns : public rtl_opt_pass
293{
294public:
295 pass_align_insns(gcc::context *ctxt)
296 : rtl_opt_pass(pass_data_align_insns, ctxt)
297 {}
298
299 /* opt_pass methods: */
300 virtual bool gate (function *)
301 {
302 /* Due to the number of extra trapb insns, don't bother fixing up
303 alignment when trap precision is instruction. Moreover, we can
304 only do our job when sched2 is run. */
305 return ((alpha_tune == PROCESSOR_EV4
306 || alpha_tune == PROCESSOR_EV5)
307 && optimize && !optimize_size
308 && alpha_tp != ALPHA_TP_INSN
309 && flag_schedule_insns_after_reload);
310 }
311
312 virtual unsigned int execute (function *)
313 {
314 return rest_of_align_insns ();
315 }
316
317}; // class pass_align_insns
318
319} // anon namespace
320
321rtl_opt_pass *
322make_pass_align_insns (gcc::context *ctxt)
323{
324 return new pass_align_insns (ctxt);
325}
326
7269aee7 327#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
608063c3 328/* Implement TARGET_MANGLE_TYPE. */
7269aee7
AH
329
330static const char *
3101faab 331alpha_mangle_type (const_tree type)
7269aee7
AH
332{
333 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
334 && TARGET_LONG_DOUBLE_128)
335 return "g";
336
337 /* For all other types, use normal C++ mangling. */
338 return NULL;
339}
340#endif
341
285a5742 342/* Parse target option strings. */
6245e3df 343
c5387660
JM
344static void
345alpha_option_override (void)
6245e3df 346{
8b60264b
KG
347 static const struct cpu_table {
348 const char *const name;
349 const enum processor_type processor;
350 const int flags;
f49278e6
RH
351 const unsigned short line_size; /* in bytes */
352 const unsigned short l1_size; /* in kb. */
353 const unsigned short l2_size; /* in kb. */
a3b815cb 354 } cpu_table[] = {
f49278e6
RH
355 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
356 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
357 had 64k to 8M 8-byte direct Bcache. */
358 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
359 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
360 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
361
362 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
363 and 1M to 16M 64 byte L3 (not modeled).
364 PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
365 PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
366 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
367 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
368 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
369 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
370 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
371 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
372 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
373
374 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
375 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
376 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
377 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
378 64, 64, 16*1024 },
379 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
380 64, 64, 16*1024 }
a3b815cb 381 };
f676971a 382
8224166e 383 int const ct_size = ARRAY_SIZE (cpu_table);
f49278e6 384 int line_size = 0, l1_size = 0, l2_size = 0;
8bea7f7c
RH
385 int i;
386
c5387660
JM
387#ifdef SUBTARGET_OVERRIDE_OPTIONS
388 SUBTARGET_OVERRIDE_OPTIONS;
389#endif
390
1e86df8d
UB
391 /* Default to full IEEE compliance mode for Go language. */
392 if (strcmp (lang_hooks.name, "GNU Go") == 0
393 && !(target_flags_explicit & MASK_IEEE))
394 target_flags |= MASK_IEEE;
395
75db85d8 396 alpha_fprm = ALPHA_FPRM_NORM;
6245e3df 397 alpha_tp = ALPHA_TP_PROG;
6245e3df
RK
398 alpha_fptm = ALPHA_FPTM_N;
399
400 if (TARGET_IEEE)
401 {
75db85d8
RH
402 alpha_tp = ALPHA_TP_INSN;
403 alpha_fptm = ALPHA_FPTM_SU;
6245e3df 404 }
6245e3df
RK
405 if (TARGET_IEEE_WITH_INEXACT)
406 {
75db85d8
RH
407 alpha_tp = ALPHA_TP_INSN;
408 alpha_fptm = ALPHA_FPTM_SUI;
6245e3df
RK
409 }
410
411 if (alpha_tp_string)
10d5c73f
RK
412 {
413 if (! strcmp (alpha_tp_string, "p"))
6245e3df 414 alpha_tp = ALPHA_TP_PROG;
10d5c73f 415 else if (! strcmp (alpha_tp_string, "f"))
6245e3df 416 alpha_tp = ALPHA_TP_FUNC;
10d5c73f 417 else if (! strcmp (alpha_tp_string, "i"))
6245e3df 418 alpha_tp = ALPHA_TP_INSN;
10d5c73f 419 else
a3f9f006
ML
420 error ("bad value %qs for %<-mtrap-precision%> switch",
421 alpha_tp_string);
10d5c73f 422 }
6245e3df
RK
423
424 if (alpha_fprm_string)
10d5c73f
RK
425 {
426 if (! strcmp (alpha_fprm_string, "n"))
6245e3df 427 alpha_fprm = ALPHA_FPRM_NORM;
10d5c73f 428 else if (! strcmp (alpha_fprm_string, "m"))
6245e3df 429 alpha_fprm = ALPHA_FPRM_MINF;
10d5c73f 430 else if (! strcmp (alpha_fprm_string, "c"))
6245e3df 431 alpha_fprm = ALPHA_FPRM_CHOP;
10d5c73f 432 else if (! strcmp (alpha_fprm_string,"d"))
6245e3df 433 alpha_fprm = ALPHA_FPRM_DYN;
10d5c73f 434 else
a3f9f006 435 error ("bad value %qs for %<-mfp-rounding-mode%> switch",
6245e3df 436 alpha_fprm_string);
10d5c73f 437 }
6245e3df
RK
438
439 if (alpha_fptm_string)
10d5c73f
RK
440 {
441 if (strcmp (alpha_fptm_string, "n") == 0)
442 alpha_fptm = ALPHA_FPTM_N;
443 else if (strcmp (alpha_fptm_string, "u") == 0)
444 alpha_fptm = ALPHA_FPTM_U;
445 else if (strcmp (alpha_fptm_string, "su") == 0)
446 alpha_fptm = ALPHA_FPTM_SU;
447 else if (strcmp (alpha_fptm_string, "sui") == 0)
448 alpha_fptm = ALPHA_FPTM_SUI;
449 else
a3f9f006
ML
450 error ("bad value %qs for %<-mfp-trap-mode%> switch",
451 alpha_fptm_string);
10d5c73f 452 }
6245e3df 453
de4abb91
RH
454 if (alpha_cpu_string)
455 {
8224166e 456 for (i = 0; i < ct_size; i++)
a3b815cb
JJ
457 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
458 {
f49278e6
RH
459 alpha_tune = alpha_cpu = cpu_table[i].processor;
460 line_size = cpu_table[i].line_size;
461 l1_size = cpu_table[i].l1_size;
462 l2_size = cpu_table[i].l2_size;
8bea7f7c 463 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
f49278e6 464 target_flags |= cpu_table[i].flags;
a3b815cb
JJ
465 break;
466 }
8224166e 467 if (i == ct_size)
a3f9f006 468 error ("bad value %qs for %<-mcpu%> switch", alpha_cpu_string);
de4abb91
RH
469 }
470
a3b815cb
JJ
471 if (alpha_tune_string)
472 {
8224166e 473 for (i = 0; i < ct_size; i++)
a3b815cb
JJ
474 if (! strcmp (alpha_tune_string, cpu_table [i].name))
475 {
f49278e6
RH
476 alpha_tune = cpu_table[i].processor;
477 line_size = cpu_table[i].line_size;
478 l1_size = cpu_table[i].l1_size;
479 l2_size = cpu_table[i].l2_size;
a3b815cb
JJ
480 break;
481 }
8224166e 482 if (i == ct_size)
a3f9f006 483 error ("bad value %qs for %<-mtune%> switch", alpha_tune_string);
a3b815cb
JJ
484 }
485
f49278e6
RH
486 if (line_size)
487 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
488 global_options.x_param_values,
489 global_options_set.x_param_values);
490 if (l1_size)
491 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
492 global_options.x_param_values,
493 global_options_set.x_param_values);
494 if (l2_size)
495 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
496 global_options.x_param_values,
497 global_options_set.x_param_values);
498
285a5742 499 /* Do some sanity checks on the above options. */
6245e3df 500
10d5c73f 501 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
8bea7f7c 502 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
6245e3df 503 {
a3f9f006 504 warning (0, "fp software completion requires %<-mtrap-precision=i%>");
6245e3df
RK
505 alpha_tp = ALPHA_TP_INSN;
506 }
89cfc2c6 507
8bea7f7c 508 if (alpha_cpu == PROCESSOR_EV6)
981a828e
RH
509 {
510 /* Except for EV6 pass 1 (not released), we always have precise
511 arithmetic traps. Which means we can do software completion
512 without minding trap shadows. */
513 alpha_tp = ALPHA_TP_PROG;
514 }
515
89cfc2c6
RK
516 if (TARGET_FLOAT_VAX)
517 {
518 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
519 {
d4ee4d25 520 warning (0, "rounding mode not supported for VAX floats");
89cfc2c6
RK
521 alpha_fprm = ALPHA_FPRM_NORM;
522 }
523 if (alpha_fptm == ALPHA_FPTM_SUI)
524 {
d4ee4d25 525 warning (0, "trap mode not supported for VAX floats");
89cfc2c6
RK
526 alpha_fptm = ALPHA_FPTM_SU;
527 }
0f15adbd 528 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
5ec4a442 529 warning (0, "128-bit %<long double%> not supported for VAX floats");
0f15adbd 530 target_flags &= ~MASK_LONG_DOUBLE_128;
89cfc2c6 531 }
bcbbac26
RH
532
533 {
534 char *end;
535 int lat;
536
537 if (!alpha_mlat_string)
538 alpha_mlat_string = "L1";
539
d1e6b55b 540 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
bcbbac26
RH
541 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
542 ;
543 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
d1e6b55b 544 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
bcbbac26
RH
545 && alpha_mlat_string[2] == '\0')
546 {
f676971a 547 static int const cache_latency[][4] =
bcbbac26
RH
548 {
549 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
550 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
285a5742 551 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
bcbbac26
RH
552 };
553
554 lat = alpha_mlat_string[1] - '0';
8bea7f7c 555 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
bcbbac26 556 {
d4ee4d25 557 warning (0, "L%d cache latency unknown for %s",
8bea7f7c 558 lat, alpha_cpu_name[alpha_tune]);
bcbbac26
RH
559 lat = 3;
560 }
561 else
8bea7f7c 562 lat = cache_latency[alpha_tune][lat-1];
bcbbac26
RH
563 }
564 else if (! strcmp (alpha_mlat_string, "main"))
565 {
566 /* Most current memories have about 370ns latency. This is
567 a reasonable guess for a fast cpu. */
568 lat = 150;
569 }
570 else
571 {
a3f9f006
ML
572 warning (0, "bad value %qs for %<-mmemory-latency%>",
573 alpha_mlat_string);
bcbbac26
RH
574 lat = 3;
575 }
576
577 alpha_memory_latency = lat;
578 }
bb8ebb7f
RH
579
580 /* Default the definition of "small data" to 8 bytes. */
fa37ed29 581 if (!global_options_set.x_g_switch_value)
bb8ebb7f 582 g_switch_value = 8;
3873d24b 583
133d3133
RH
584 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
585 if (flag_pic == 1)
586 target_flags |= MASK_SMALL_DATA;
587 else if (flag_pic == 2)
588 target_flags &= ~MASK_SMALL_DATA;
589
ad2c39af 590 alpha_override_options_after_change ();
c176c051 591
30102605
RH
592 /* Register variables and functions with the garbage collector. */
593
30102605
RH
594 /* Set up function hooks. */
595 init_machine_status = alpha_init_machine_status;
3dc85dfb
RH
596
597 /* Tell the compiler when we're using VAX floating point. */
598 if (TARGET_FLOAT_VAX)
599 {
70a01792
ZW
600 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
601 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
602 REAL_MODE_FORMAT (TFmode) = NULL;
3dc85dfb 603 }
ed965309
JJ
604
605#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
606 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
607 target_flags |= MASK_LONG_DOUBLE_128;
608#endif
4bdf6418 609
6245e3df 610}
ad2c39af
UB
611
612/* Implement targetm.override_options_after_change. */
613
614static void
615alpha_override_options_after_change (void)
616{
617 /* Align labels and loops for optimal branching. */
618 /* ??? Kludge these by not doing anything if we don't optimize. */
619 if (optimize > 0)
620 {
c518c102
ML
621 if (flag_align_loops && !str_align_loops)
622 str_align_loops = "16";
623 if (flag_align_jumps && !str_align_jumps)
624 str_align_jumps = "16";
ad2c39af 625 }
c518c102
ML
626 if (flag_align_functions && !str_align_functions)
627 str_align_functions = "16";
ad2c39af 628}
6245e3df 629\f
a6f12d7c
RK
630/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
631
632int
a5c24926 633zap_mask (HOST_WIDE_INT value)
a6f12d7c
RK
634{
635 int i;
636
637 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
638 i++, value >>= 8)
639 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
640 return 0;
641
642 return 1;
643}
644
f676971a 645/* Return true if OP is valid for a particular TLS relocation.
201312c2 646 We are already guaranteed that OP is a CONST. */
a6f12d7c
RK
647
648int
201312c2 649tls_symbolic_operand_1 (rtx op, int size, int unspec)
a6f12d7c 650{
6f9b006d
RH
651 op = XEXP (op, 0);
652
653 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
654 return 0;
655 op = XVECEXP (op, 0, 0);
656
657 if (GET_CODE (op) != SYMBOL_REF)
658 return 0;
6f9b006d 659
d055668e 660 switch (SYMBOL_REF_TLS_MODEL (op))
3094247f 661 {
d055668e 662 case TLS_MODEL_LOCAL_DYNAMIC:
f6326c19 663 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
d055668e 664 case TLS_MODEL_INITIAL_EXEC:
3094247f 665 return unspec == UNSPEC_TPREL && size == 64;
d055668e 666 case TLS_MODEL_LOCAL_EXEC:
f6326c19 667 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
3094247f 668 default:
56daab84 669 gcc_unreachable ();
3094247f 670 }
6f9b006d
RH
671}
672
201312c2
RH
673/* Used by aligned_memory_operand and unaligned_memory_operand to
674 resolve what reload is going to do with OP if it's a register. */
8f4773ea 675
201312c2
RH
676rtx
677resolve_reload_operand (rtx op)
a6f12d7c 678{
4e46365b 679 if (reload_in_progress)
a6f12d7c 680 {
4e46365b 681 rtx tmp = op;
e4e040f1 682 if (SUBREG_P (tmp))
4e46365b 683 tmp = SUBREG_REG (tmp);
7d83f4f5 684 if (REG_P (tmp)
4e46365b
RH
685 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
686 {
f2034d06 687 op = reg_equiv_memory_loc (REGNO (tmp));
4e46365b
RH
688 if (op == 0)
689 return 0;
690 }
a6f12d7c 691 }
201312c2 692 return op;
3611aef0
RH
693}
694
6dd53648
RH
695/* The scalar modes supported differs from the default check-what-c-supports
696 version in that sometimes TFmode is available even when long double
75db85d8 697 indicates only DFmode. */
6dd53648
RH
698
699static bool
18e2a8b8 700alpha_scalar_mode_supported_p (scalar_mode mode)
6dd53648
RH
701{
702 switch (mode)
703 {
4e10a5a7
RS
704 case E_QImode:
705 case E_HImode:
706 case E_SImode:
707 case E_DImode:
708 case E_TImode: /* via optabs.c */
6dd53648
RH
709 return true;
710
4e10a5a7
RS
711 case E_SFmode:
712 case E_DFmode:
6dd53648
RH
713 return true;
714
4e10a5a7 715 case E_TFmode:
6dd53648
RH
716 return TARGET_HAS_XFLOATING_LIBS;
717
718 default:
719 return false;
720 }
721}
722
723/* Alpha implements a couple of integer vector mode operations when
e2ea71ea
RH
724 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
725 which allows the vectorizer to operate on e.g. move instructions,
726 or when expand_vector_operations can do something useful. */
6dd53648 727
f676971a 728static bool
ef4bddc2 729alpha_vector_mode_supported_p (machine_mode mode)
f676971a 730{
e2ea71ea 731 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
f676971a
EC
732}
733
6f9b006d
RH
734/* Return the TLS model to use for SYMBOL. */
735
736static enum tls_model
a5c24926 737tls_symbolic_operand_type (rtx symbol)
6f9b006d 738{
d055668e 739 enum tls_model model;
6f9b006d
RH
740
741 if (GET_CODE (symbol) != SYMBOL_REF)
8224166e 742 return TLS_MODEL_NONE;
d055668e 743 model = SYMBOL_REF_TLS_MODEL (symbol);
6f9b006d 744
d055668e
RH
745 /* Local-exec with a 64-bit size is the same code as initial-exec. */
746 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
747 model = TLS_MODEL_INITIAL_EXEC;
6f9b006d 748
d055668e 749 return model;
6f9b006d 750}
3611aef0 751\f
3094247f
RH
752/* Return true if the function DECL will share the same GP as any
753 function in the current unit of translation. */
754
755static bool
3101faab 756decl_has_samegp (const_tree decl)
3094247f
RH
757{
758 /* Functions that are not local can be overridden, and thus may
759 not share the same gp. */
760 if (!(*targetm.binds_local_p) (decl))
761 return false;
762
763 /* If -msmall-data is in effect, assume that there is only one GP
764 for the module, and so any local symbol has this property. We
765 need explicit relocations to be able to enforce this for symbols
766 not defined in this unit of translation, however. */
767 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
768 return true;
769
770 /* Functions that are not external are defined in this UoT. */
7f24e7c5
RH
771 /* ??? Irritatingly, static functions not yet emitted are still
772 marked "external". Apply this to non-static functions only. */
773 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
3094247f
RH
774}
775
ae46c4e0
RH
776/* Return true if EXP should be placed in the small data section. */
777
778static bool
3101faab 779alpha_in_small_data_p (const_tree exp)
ae46c4e0 780{
34a6c2ec
RH
781 /* We want to merge strings, so we never consider them small data. */
782 if (TREE_CODE (exp) == STRING_CST)
783 return false;
784
7179b6db
RH
785 /* Functions are never in the small data area. Duh. */
786 if (TREE_CODE (exp) == FUNCTION_DECL)
787 return false;
788
dcdeca7a
UB
789 /* COMMON symbols are never small data. */
790 if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
791 return false;
792
ae46c4e0
RH
793 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
794 {
f961457f 795 const char *section = DECL_SECTION_NAME (exp);
ae46c4e0
RH
796 if (strcmp (section, ".sdata") == 0
797 || strcmp (section, ".sbss") == 0)
798 return true;
799 }
800 else
801 {
802 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
803
804 /* If this is an incomplete type with size 0, then we can't put it
805 in sdata because it might be too big when completed. */
fa37ed29 806 if (size > 0 && size <= g_switch_value)
ae46c4e0
RH
807 return true;
808 }
809
810 return false;
811}
812
1330f7d5 813#if TARGET_ABI_OPEN_VMS
dfe6ba6d 814static bool
095a2d76 815vms_valid_pointer_mode (scalar_int_mode mode)
dfe6ba6d
DR
816{
817 return (mode == SImode || mode == DImode);
818}
819
1330f7d5 820static bool
a5c24926 821alpha_linkage_symbol_p (const char *symname)
1330f7d5
DR
822{
823 int symlen = strlen (symname);
824
825 if (symlen > 4)
826 return strcmp (&symname [symlen - 4], "..lk") == 0;
827
828 return false;
829}
830
831#define LINKAGE_SYMBOL_REF_P(X) \
832 ((GET_CODE (X) == SYMBOL_REF \
833 && alpha_linkage_symbol_p (XSTR (X, 0))) \
834 || (GET_CODE (X) == CONST \
835 && GET_CODE (XEXP (X, 0)) == PLUS \
836 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
837 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
838#endif
839
a39bdefc
RH
840/* legitimate_address_p recognizes an RTL expression that is a valid
841 memory address for an instruction. The MODE argument is the
842 machine mode for the MEM expression that wants to use this address.
843
844 For Alpha, we have either a constant address or the sum of a
845 register and a constant address, or just a register. For DImode,
846 any of those forms can be surrounded with an AND that clear the
847 low-order three bits; this is an "unaligned" access. */
848
c6c3dba9 849static bool
ef4bddc2 850alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
a39bdefc
RH
851{
852 /* If this is an ldq_u type address, discard the outer AND. */
853 if (mode == DImode
854 && GET_CODE (x) == AND
7d83f4f5 855 && CONST_INT_P (XEXP (x, 1))
a39bdefc
RH
856 && INTVAL (XEXP (x, 1)) == -8)
857 x = XEXP (x, 0);
858
859 /* Discard non-paradoxical subregs. */
e4e040f1 860 if (SUBREG_P (x)
a39bdefc
RH
861 && (GET_MODE_SIZE (GET_MODE (x))
862 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
863 x = SUBREG_REG (x);
864
865 /* Unadorned general registers are valid. */
866 if (REG_P (x)
867 && (strict
868 ? STRICT_REG_OK_FOR_BASE_P (x)
869 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
870 return true;
871
872 /* Constant addresses (i.e. +/- 32k) are valid. */
873 if (CONSTANT_ADDRESS_P (x))
874 return true;
875
1330f7d5
DR
876#if TARGET_ABI_OPEN_VMS
877 if (LINKAGE_SYMBOL_REF_P (x))
878 return true;
879#endif
880
a39bdefc
RH
881 /* Register plus a small constant offset is valid. */
882 if (GET_CODE (x) == PLUS)
883 {
884 rtx ofs = XEXP (x, 1);
885 x = XEXP (x, 0);
886
887 /* Discard non-paradoxical subregs. */
e4e040f1 888 if (SUBREG_P (x)
a39bdefc
RH
889 && (GET_MODE_SIZE (GET_MODE (x))
890 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
891 x = SUBREG_REG (x);
892
893 if (REG_P (x))
894 {
895 if (! strict
896 && NONSTRICT_REG_OK_FP_BASE_P (x)
7d83f4f5 897 && CONST_INT_P (ofs))
a39bdefc
RH
898 return true;
899 if ((strict
900 ? STRICT_REG_OK_FOR_BASE_P (x)
901 : NONSTRICT_REG_OK_FOR_BASE_P (x))
902 && CONSTANT_ADDRESS_P (ofs))
903 return true;
904 }
a39bdefc
RH
905 }
906
26d5bf5b
UB
907 /* If we're managing explicit relocations, LO_SUM is valid, as are small
908 data symbols. Avoid explicit relocations of modes larger than word
909 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
910 else if (TARGET_EXPLICIT_RELOCS
911 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
1eb356b9 912 {
551cc6fd 913 if (small_symbolic_operand (x, Pmode))
1eb356b9 914 return true;
551cc6fd
RH
915
916 if (GET_CODE (x) == LO_SUM)
917 {
918 rtx ofs = XEXP (x, 1);
919 x = XEXP (x, 0);
920
921 /* Discard non-paradoxical subregs. */
e4e040f1 922 if (SUBREG_P (x)
551cc6fd
RH
923 && (GET_MODE_SIZE (GET_MODE (x))
924 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
925 x = SUBREG_REG (x);
926
927 /* Must have a valid base register. */
928 if (! (REG_P (x)
929 && (strict
930 ? STRICT_REG_OK_FOR_BASE_P (x)
931 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
932 return false;
933
934 /* The symbol must be local. */
6f9b006d
RH
935 if (local_symbolic_operand (ofs, Pmode)
936 || dtp32_symbolic_operand (ofs, Pmode)
937 || tp32_symbolic_operand (ofs, Pmode))
551cc6fd
RH
938 return true;
939 }
1eb356b9
RH
940 }
941
a39bdefc
RH
942 return false;
943}
944
d055668e
RH
945/* Build the SYMBOL_REF for __tls_get_addr. */
946
947static GTY(()) rtx tls_get_addr_libfunc;
948
949static rtx
a5c24926 950get_tls_get_addr (void)
d055668e
RH
951{
952 if (!tls_get_addr_libfunc)
953 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
954 return tls_get_addr_libfunc;
955}
956
aead1ca3
RH
957/* Try machine-dependent ways of modifying an illegitimate address
958 to be legitimate. If we find one, return the new, valid address. */
959
506d7b68 960static rtx
ef4bddc2 961alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
aead1ca3
RH
962{
963 HOST_WIDE_INT addend;
964
965 /* If the address is (plus reg const_int) and the CONST_INT is not a
966 valid offset, compute the high part of the constant and add it to
967 the register. Then our address is (plus temp low-part-const). */
968 if (GET_CODE (x) == PLUS
7d83f4f5
UB
969 && REG_P (XEXP (x, 0))
970 && CONST_INT_P (XEXP (x, 1))
aead1ca3
RH
971 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
972 {
973 addend = INTVAL (XEXP (x, 1));
974 x = XEXP (x, 0);
975 goto split_addend;
976 }
977
978 /* If the address is (const (plus FOO const_int)), find the low-order
979 part of the CONST_INT. Then load FOO plus any high-order part of the
980 CONST_INT into a register. Our address is (plus reg low-part-const).
981 This is done to reduce the number of GOT entries. */
b3a13419 982 if (can_create_pseudo_p ()
551cc6fd 983 && GET_CODE (x) == CONST
aead1ca3 984 && GET_CODE (XEXP (x, 0)) == PLUS
7d83f4f5 985 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
aead1ca3
RH
986 {
987 addend = INTVAL (XEXP (XEXP (x, 0), 1));
988 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
989 goto split_addend;
990 }
991
992 /* If we have a (plus reg const), emit the load as in (2), then add
993 the two registers, and finally generate (plus reg low-part-const) as
994 our address. */
b3a13419 995 if (can_create_pseudo_p ()
551cc6fd 996 && GET_CODE (x) == PLUS
7d83f4f5 997 && REG_P (XEXP (x, 0))
aead1ca3
RH
998 && GET_CODE (XEXP (x, 1)) == CONST
999 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
7d83f4f5 1000 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
aead1ca3
RH
1001 {
1002 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1003 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1004 XEXP (XEXP (XEXP (x, 1), 0), 0),
1005 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1006 goto split_addend;
1007 }
1008
26d5bf5b
UB
1009 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
1010 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
1011 around +/- 32k offset. */
1012 if (TARGET_EXPLICIT_RELOCS
1013 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1014 && symbolic_operand (x, Pmode))
1eb356b9 1015 {
4b6ab433
TS
1016 rtx r0, r16, eqv, tga, tp, dest, seq;
1017 rtx_insn *insn;
6f9b006d
RH
1018
1019 switch (tls_symbolic_operand_type (x))
1020 {
6cb718e4
RH
1021 case TLS_MODEL_NONE:
1022 break;
1023
6f9b006d 1024 case TLS_MODEL_GLOBAL_DYNAMIC:
4b6ab433
TS
1025 {
1026 start_sequence ();
6f9b006d 1027
4b6ab433
TS
1028 r0 = gen_rtx_REG (Pmode, 0);
1029 r16 = gen_rtx_REG (Pmode, 16);
1030 tga = get_tls_get_addr ();
1031 dest = gen_reg_rtx (Pmode);
1032 seq = GEN_INT (alpha_next_sequence_number++);
f676971a 1033
4b6ab433
TS
1034 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1035 rtx val = gen_call_value_osf_tlsgd (r0, tga, seq);
1036 insn = emit_call_insn (val);
1037 RTL_CONST_CALL_P (insn) = 1;
1038 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
6f9b006d 1039
4b6ab433
TS
1040 insn = get_insns ();
1041 end_sequence ();
6f9b006d 1042
4b6ab433
TS
1043 emit_libcall_block (insn, dest, r0, x);
1044 return dest;
1045 }
6f9b006d
RH
1046
1047 case TLS_MODEL_LOCAL_DYNAMIC:
4b6ab433
TS
1048 {
1049 start_sequence ();
6f9b006d 1050
4b6ab433
TS
1051 r0 = gen_rtx_REG (Pmode, 0);
1052 r16 = gen_rtx_REG (Pmode, 16);
1053 tga = get_tls_get_addr ();
1054 scratch = gen_reg_rtx (Pmode);
1055 seq = GEN_INT (alpha_next_sequence_number++);
6f9b006d 1056
4b6ab433
TS
1057 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1058 rtx val = gen_call_value_osf_tlsldm (r0, tga, seq);
1059 insn = emit_call_insn (val);
1060 RTL_CONST_CALL_P (insn) = 1;
1061 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
6f9b006d 1062
4b6ab433
TS
1063 insn = get_insns ();
1064 end_sequence ();
6f9b006d 1065
4b6ab433
TS
1066 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1067 UNSPEC_TLSLDM_CALL);
1068 emit_libcall_block (insn, scratch, r0, eqv);
6f9b006d 1069
4b6ab433
TS
1070 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1071 eqv = gen_rtx_CONST (Pmode, eqv);
6f9b006d 1072
4b6ab433
TS
1073 if (alpha_tls_size == 64)
1074 {
1075 dest = gen_reg_rtx (Pmode);
1076 emit_insn (gen_rtx_SET (dest, eqv));
1077 emit_insn (gen_adddi3 (dest, dest, scratch));
1078 return dest;
1079 }
1080 if (alpha_tls_size == 32)
1081 {
1082 rtx temp = gen_rtx_HIGH (Pmode, eqv);
1083 temp = gen_rtx_PLUS (Pmode, scratch, temp);
1084 scratch = gen_reg_rtx (Pmode);
1085 emit_insn (gen_rtx_SET (scratch, temp));
1086 }
1087 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1088 }
6f9b006d
RH
1089
1090 case TLS_MODEL_INITIAL_EXEC:
1091 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1092 eqv = gen_rtx_CONST (Pmode, eqv);
1093 tp = gen_reg_rtx (Pmode);
1094 scratch = gen_reg_rtx (Pmode);
1095 dest = gen_reg_rtx (Pmode);
1096
f959607b 1097 emit_insn (gen_get_thread_pointerdi (tp));
f7df4a84 1098 emit_insn (gen_rtx_SET (scratch, eqv));
6f9b006d
RH
1099 emit_insn (gen_adddi3 (dest, tp, scratch));
1100 return dest;
1101
1102 case TLS_MODEL_LOCAL_EXEC:
1103 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1104 eqv = gen_rtx_CONST (Pmode, eqv);
1105 tp = gen_reg_rtx (Pmode);
1106
f959607b 1107 emit_insn (gen_get_thread_pointerdi (tp));
6f9b006d
RH
1108 if (alpha_tls_size == 32)
1109 {
4b6ab433
TS
1110 rtx temp = gen_rtx_HIGH (Pmode, eqv);
1111 temp = gen_rtx_PLUS (Pmode, tp, temp);
6f9b006d 1112 tp = gen_reg_rtx (Pmode);
4b6ab433 1113 emit_insn (gen_rtx_SET (tp, temp));
6f9b006d
RH
1114 }
1115 return gen_rtx_LO_SUM (Pmode, tp, eqv);
6cb718e4
RH
1116
1117 default:
1118 gcc_unreachable ();
6f9b006d
RH
1119 }
1120
e2c9fb9b
RH
1121 if (local_symbolic_operand (x, Pmode))
1122 {
1123 if (small_symbolic_operand (x, Pmode))
551cc6fd 1124 return x;
e2c9fb9b
RH
1125 else
1126 {
b3a13419 1127 if (can_create_pseudo_p ())
551cc6fd 1128 scratch = gen_reg_rtx (Pmode);
f7df4a84 1129 emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x)));
551cc6fd 1130 return gen_rtx_LO_SUM (Pmode, scratch, x);
e2c9fb9b 1131 }
133d3133 1132 }
1eb356b9
RH
1133 }
1134
aead1ca3
RH
1135 return NULL;
1136
1137 split_addend:
1138 {
551cc6fd
RH
1139 HOST_WIDE_INT low, high;
1140
1141 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1142 addend -= low;
1143 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1144 addend -= high;
1145
1146 if (addend)
1147 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
b3a13419 1148 (!can_create_pseudo_p () ? scratch : NULL_RTX),
551cc6fd
RH
1149 1, OPTAB_LIB_WIDEN);
1150 if (high)
1151 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
b3a13419 1152 (!can_create_pseudo_p () ? scratch : NULL_RTX),
551cc6fd
RH
1153 1, OPTAB_LIB_WIDEN);
1154
0a81f074 1155 return plus_constant (Pmode, x, low);
aead1ca3
RH
1156 }
1157}
1158
506d7b68
PB
1159
1160/* Try machine-dependent ways of modifying an illegitimate address
1161 to be legitimate. Return X or the new, valid address. */
1162
1163static rtx
1164alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 1165 machine_mode mode)
506d7b68
PB
1166{
1167 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1168 return new_x ? new_x : x;
1169}
1170
b0f6b612
NF
1171/* Return true if ADDR has an effect that depends on the machine mode it
1172 is used for. On the Alpha this is true only for the unaligned modes.
1173 We can simplify the test since we know that the address must be valid. */
1174
1175static bool
5bfed9a9
GJL
1176alpha_mode_dependent_address_p (const_rtx addr,
1177 addr_space_t as ATTRIBUTE_UNUSED)
b0f6b612
NF
1178{
1179 return GET_CODE (addr) == AND;
1180}
1181
04886dc0
RH
1182/* Primarily this is required for TLS symbols, but given that our move
1183 patterns *ought* to be able to handle any symbol at any time, we
1184 should never be spilling symbolic operands to the constant pool, ever. */
1185
1186static bool
ef4bddc2 1187alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
04886dc0
RH
1188{
1189 enum rtx_code code = GET_CODE (x);
1190 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1191}
1192
4977bab6 1193/* We do not allow indirect calls to be optimized into sibling calls, nor
3094247f
RH
1194 can we allow a call to a function with a different GP to be optimized
1195 into a sibcall. */
1196
4977bab6 1197static bool
a5c24926 1198alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4977bab6 1199{
3094247f
RH
1200 /* Can't do indirect tail calls, since we don't know if the target
1201 uses the same GP. */
1202 if (!decl)
1203 return false;
1204
1205 /* Otherwise, we can make a tail call if the target function shares
1206 the same GP. */
1207 return decl_has_samegp (decl);
4977bab6
ZW
1208}
1209
dbb838b7
RS
1210bool
1211some_small_symbolic_operand_int (rtx x)
1e7e480e 1212{
dbb838b7
RS
1213 subrtx_var_iterator::array_type array;
1214 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
1215 {
1216 rtx x = *iter;
1217 /* Don't re-split. */
1218 if (GET_CODE (x) == LO_SUM)
1219 iter.skip_subrtxes ();
1220 else if (small_symbolic_operand (x, Pmode))
1221 return true;
1222 }
1223 return false;
551cc6fd
RH
1224}
1225
a5c24926
RH
1226rtx
1227split_small_symbolic_operand (rtx x)
1228{
1229 x = copy_insn (x);
572e01c7
RS
1230 subrtx_ptr_iterator::array_type array;
1231 FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
1232 {
1233 rtx *ptr = *iter;
1234 rtx x = *ptr;
1235 /* Don't re-split. */
1236 if (GET_CODE (x) == LO_SUM)
1237 iter.skip_subrtxes ();
1238 else if (small_symbolic_operand (x, Pmode))
1239 {
1240 *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1241 iter.skip_subrtxes ();
1242 }
1243 }
a5c24926
RH
1244 return x;
1245}
1246
0b077eac
RH
1247/* Indicate that INSN cannot be duplicated. This is true for any insn
1248 that we've marked with gpdisp relocs, since those have to stay in
1249 1-1 correspondence with one another.
1250
093354e0 1251 Technically we could copy them if we could set up a mapping from one
0b077eac
RH
1252 sequence number to another, across the set of insns to be duplicated.
1253 This seems overly complicated and error-prone since interblock motion
501e79ef
RH
1254 from sched-ebb could move one of the pair of insns to a different block.
1255
1256 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1257 then they'll be in a different block from their ldgp. Which could lead
1258 the bb reorder code to think that it would be ok to copy just the block
1259 containing the call and branch to the block containing the ldgp. */
0b077eac
RH
1260
1261static bool
ac44248e 1262alpha_cannot_copy_insn_p (rtx_insn *insn)
0b077eac 1263{
0b077eac
RH
1264 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1265 return false;
501e79ef
RH
1266 if (recog_memoized (insn) >= 0)
1267 return get_attr_cannot_copy (insn);
1268 else
0b077eac 1269 return false;
0b077eac
RH
1270}
1271
f676971a 1272
aead1ca3
RH
1273/* Try a machine-dependent way of reloading an illegitimate address
1274 operand. If we find one, push the reload and return the new rtx. */
f676971a 1275
aead1ca3 1276rtx
a5c24926 1277alpha_legitimize_reload_address (rtx x,
ef4bddc2 1278 machine_mode mode ATTRIBUTE_UNUSED,
a5c24926
RH
1279 int opnum, int type,
1280 int ind_levels ATTRIBUTE_UNUSED)
aead1ca3
RH
1281{
1282 /* We must recognize output that we have already generated ourselves. */
1283 if (GET_CODE (x) == PLUS
1284 && GET_CODE (XEXP (x, 0)) == PLUS
7d83f4f5
UB
1285 && REG_P (XEXP (XEXP (x, 0), 0))
1286 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1287 && CONST_INT_P (XEXP (x, 1)))
aead1ca3
RH
1288 {
1289 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1290 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
bf758008 1291 opnum, (enum reload_type) type);
aead1ca3
RH
1292 return x;
1293 }
1294
1295 /* We wish to handle large displacements off a base register by
1296 splitting the addend across an ldah and the mem insn. This
1297 cuts number of extra insns needed from 3 to 1. */
1298 if (GET_CODE (x) == PLUS
7d83f4f5 1299 && REG_P (XEXP (x, 0))
aead1ca3
RH
1300 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1301 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
c799797d 1302 && CONST_INT_P (XEXP (x, 1)))
aead1ca3
RH
1303 {
1304 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1305 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1306 HOST_WIDE_INT high
1307 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1308
1309 /* Check for 32-bit overflow. */
1310 if (high + low != val)
1311 return NULL_RTX;
1312
1313 /* Reload the high part into a base reg; leave the low part
1314 in the mem directly. */
1315 x = gen_rtx_PLUS (GET_MODE (x),
1316 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1317 GEN_INT (high)),
1318 GEN_INT (low));
1319
1320 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1321 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
bf758008 1322 opnum, (enum reload_type) type);
aead1ca3
RH
1323 return x;
1324 }
1325
1326 return NULL_RTX;
1327}
1328\f
ef995717
UB
1329/* Return the cost of moving between registers of various classes. Moving
1330 between FLOAT_REGS and anything else except float regs is expensive.
1331 In fact, we make it quite expensive because we really don't want to
1332 do these moves unless it is clearly worth it. Optimizations may
1333 reduce the impact of not being able to allocate a pseudo to a
1334 hard register. */
1335
1336static int
1337alpha_register_move_cost (machine_mode /*mode*/,
1338 reg_class_t from, reg_class_t to)
1339{
1340 if ((from == FLOAT_REGS) == (to == FLOAT_REGS))
1341 return 2;
1342
1343 if (TARGET_FIX)
1344 return (from == FLOAT_REGS) ? 6 : 8;
1345
1346 return 4 + 2 * alpha_memory_latency;
1347}
1348
1349/* Return the cost of moving data of MODE from a register to
1350 or from memory. On the Alpha, bump this up a bit. */
1351
1352static int
1353alpha_memory_move_cost (machine_mode /*mode*/, reg_class_t /*regclass*/,
1354 bool /*in*/)
1355{
1356 return 2 * alpha_memory_latency;
1357}
1358
3c50106f
RH
1359/* Compute a (partial) cost for rtx X. Return true if the complete
1360 cost has been computed, and false if subexpressions should be
1361 scanned. In either case, *TOTAL contains the cost result. */
1362
1363static bool
e548c9df 1364alpha_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
f40751dd 1365 bool speed)
3c50106f 1366{
e548c9df 1367 int code = GET_CODE (x);
3c50106f 1368 bool float_mode_p = FLOAT_MODE_P (mode);
8260c194
RH
1369 const struct alpha_rtx_cost_data *cost_data;
1370
f40751dd 1371 if (!speed)
8260c194
RH
1372 cost_data = &alpha_rtx_cost_size;
1373 else
8bea7f7c 1374 cost_data = &alpha_rtx_cost_data[alpha_tune];
3c50106f
RH
1375
1376 switch (code)
1377 {
8260c194 1378 case CONST_INT:
3c50106f
RH
1379 /* If this is an 8-bit constant, return zero since it can be used
1380 nearly anywhere with no cost. If it is a valid operand for an
1381 ADD or AND, likewise return 0 if we know it will be used in that
1382 context. Otherwise, return 2 since it might be used there later.
1383 All other constants take at least two insns. */
3c50106f
RH
1384 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1385 {
1386 *total = 0;
1387 return true;
1388 }
5efb1046 1389 /* FALLTHRU */
3c50106f
RH
1390
1391 case CONST_DOUBLE:
f06ed650 1392 case CONST_WIDE_INT:
3c50106f
RH
1393 if (x == CONST0_RTX (mode))
1394 *total = 0;
1395 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1396 || (outer_code == AND && and_operand (x, VOIDmode)))
1397 *total = 0;
1398 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1399 *total = 2;
1400 else
1401 *total = COSTS_N_INSNS (2);
1402 return true;
f676971a 1403
3c50106f
RH
1404 case CONST:
1405 case SYMBOL_REF:
1406 case LABEL_REF:
1407 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1408 *total = COSTS_N_INSNS (outer_code != MEM);
1409 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1410 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1411 else if (tls_symbolic_operand_type (x))
1412 /* Estimate of cost for call_pal rduniq. */
8260c194 1413 /* ??? How many insns do we emit here? More than one... */
3c50106f
RH
1414 *total = COSTS_N_INSNS (15);
1415 else
1416 /* Otherwise we do a load from the GOT. */
f40751dd 1417 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
3c50106f 1418 return true;
f676971a 1419
72910a0b
RH
1420 case HIGH:
1421 /* This is effectively an add_operand. */
1422 *total = 2;
1423 return true;
1424
3c50106f
RH
1425 case PLUS:
1426 case MINUS:
1427 if (float_mode_p)
8260c194 1428 *total = cost_data->fp_add;
02ea1c76
UB
1429 else if (GET_CODE (XEXP (x, 0)) == ASHIFT
1430 && const23_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
3c50106f 1431 {
e548c9df 1432 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode,
68f932c4 1433 (enum rtx_code) outer_code, opno, speed)
e548c9df 1434 + rtx_cost (XEXP (x, 1), mode,
68f932c4 1435 (enum rtx_code) outer_code, opno, speed)
bf758008 1436 + COSTS_N_INSNS (1));
3c50106f
RH
1437 return true;
1438 }
1439 return false;
1440
1441 case MULT:
1442 if (float_mode_p)
8260c194 1443 *total = cost_data->fp_mult;
3c50106f 1444 else if (mode == DImode)
8260c194 1445 *total = cost_data->int_mult_di;
3c50106f 1446 else
8260c194 1447 *total = cost_data->int_mult_si;
3c50106f
RH
1448 return false;
1449
1450 case ASHIFT:
7d83f4f5 1451 if (CONST_INT_P (XEXP (x, 1))
3c50106f
RH
1452 && INTVAL (XEXP (x, 1)) <= 3)
1453 {
1454 *total = COSTS_N_INSNS (1);
1455 return false;
1456 }
5efb1046 1457 /* FALLTHRU */
3c50106f
RH
1458
1459 case ASHIFTRT:
1460 case LSHIFTRT:
8260c194 1461 *total = cost_data->int_shift;
3c50106f
RH
1462 return false;
1463
1464 case IF_THEN_ELSE:
1465 if (float_mode_p)
8260c194 1466 *total = cost_data->fp_add;
3c50106f 1467 else
8260c194 1468 *total = cost_data->int_cmov;
3c50106f
RH
1469 return false;
1470
1471 case DIV:
1472 case UDIV:
1473 case MOD:
1474 case UMOD:
1475 if (!float_mode_p)
8260c194 1476 *total = cost_data->int_div;
3c50106f 1477 else if (mode == SFmode)
8260c194 1478 *total = cost_data->fp_div_sf;
3c50106f 1479 else
8260c194 1480 *total = cost_data->fp_div_df;
3c50106f
RH
1481 return false;
1482
1483 case MEM:
f40751dd 1484 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
3c50106f
RH
1485 return true;
1486
1487 case NEG:
1488 if (! float_mode_p)
1489 {
1490 *total = COSTS_N_INSNS (1);
1491 return false;
1492 }
5efb1046 1493 /* FALLTHRU */
3c50106f
RH
1494
1495 case ABS:
1496 if (! float_mode_p)
1497 {
8260c194 1498 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
3c50106f
RH
1499 return false;
1500 }
5efb1046 1501 /* FALLTHRU */
3c50106f
RH
1502
1503 case FLOAT:
1504 case UNSIGNED_FLOAT:
1505 case FIX:
1506 case UNSIGNED_FIX:
3c50106f 1507 case FLOAT_TRUNCATE:
8260c194 1508 *total = cost_data->fp_add;
3c50106f
RH
1509 return false;
1510
a220ee34 1511 case FLOAT_EXTEND:
7d83f4f5 1512 if (MEM_P (XEXP (x, 0)))
a220ee34
RH
1513 *total = 0;
1514 else
1515 *total = cost_data->fp_add;
1516 return false;
1517
3c50106f
RH
1518 default:
1519 return false;
1520 }
1521}
1522\f
a6f12d7c
RK
1523/* REF is an alignable memory location. Place an aligned SImode
1524 reference into *PALIGNED_MEM and the number of bits to shift into
96043e7e
RH
1525 *PBITNUM. SCRATCH is a free register for use in reloading out
1526 of range stack slots. */
a6f12d7c
RK
1527
1528void
a5c24926 1529get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
a6f12d7c
RK
1530{
1531 rtx base;
525e67c1 1532 HOST_WIDE_INT disp, offset;
a6f12d7c 1533
7d83f4f5 1534 gcc_assert (MEM_P (ref));
a6f12d7c 1535
30a5d3e6 1536 if (reload_in_progress)
96043e7e 1537 {
4e46365b 1538 base = find_replacement (&XEXP (ref, 0));
56daab84 1539 gcc_assert (memory_address_p (GET_MODE (ref), base));
96043e7e 1540 }
a6f12d7c 1541 else
56daab84 1542 base = XEXP (ref, 0);
a6f12d7c
RK
1543
1544 if (GET_CODE (base) == PLUS)
525e67c1
RH
1545 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1546 else
1547 disp = 0;
1548
1549 /* Find the byte offset within an aligned word. If the memory itself is
1550 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1551 will have examined the base register and determined it is aligned, and
1552 thus displacements from it are naturally alignable. */
1553 if (MEM_ALIGN (ref) >= 32)
1554 offset = 0;
1555 else
1556 offset = disp & 3;
a6f12d7c 1557
02143d0b
UB
1558 /* The location should not cross aligned word boundary. */
1559 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1560 <= GET_MODE_SIZE (SImode));
1561
525e67c1
RH
1562 /* Access the entire aligned word. */
1563 *paligned_mem = widen_memory_access (ref, SImode, -offset);
a6f12d7c 1564
525e67c1 1565 /* Convert the byte offset within the word to a bit offset. */
0b2a7367 1566 offset *= BITS_PER_UNIT;
525e67c1 1567 *pbitnum = GEN_INT (offset);
a6f12d7c
RK
1568}
1569
f676971a 1570/* Similar, but just get the address. Handle the two reload cases.
adb18b68 1571 Add EXTRA_OFFSET to the address we return. */
a6f12d7c
RK
1572
1573rtx
60e93525 1574get_unaligned_address (rtx ref)
a6f12d7c
RK
1575{
1576 rtx base;
1577 HOST_WIDE_INT offset = 0;
1578
7d83f4f5 1579 gcc_assert (MEM_P (ref));
a6f12d7c 1580
30a5d3e6 1581 if (reload_in_progress)
96043e7e 1582 {
96043e7e 1583 base = find_replacement (&XEXP (ref, 0));
56daab84 1584 gcc_assert (memory_address_p (GET_MODE (ref), base));
96043e7e 1585 }
a6f12d7c 1586 else
56daab84 1587 base = XEXP (ref, 0);
a6f12d7c
RK
1588
1589 if (GET_CODE (base) == PLUS)
1590 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1591
0a81f074 1592 return plus_constant (Pmode, base, offset);
60e93525
RH
1593}
1594
1595/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1596 X is always returned in a register. */
1597
1598rtx
1599get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1600{
1601 if (GET_CODE (addr) == PLUS)
1602 {
1603 ofs += INTVAL (XEXP (addr, 1));
1604 addr = XEXP (addr, 0);
1605 }
1606
1607 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1608 NULL_RTX, 1, OPTAB_LIB_WIDEN);
a6f12d7c 1609}
3611aef0 1610
551cc6fd 1611/* On the Alpha, all (non-symbolic) constants except zero go into
f676971a 1612 a floating-point register via memory. Note that we cannot
0a2aaacc 1613 return anything that is not a subset of RCLASS, and that some
551cc6fd
RH
1614 symbolic constants cannot be dropped to memory. */
1615
1616enum reg_class
0a2aaacc 1617alpha_preferred_reload_class(rtx x, enum reg_class rclass)
551cc6fd
RH
1618{
1619 /* Zero is present in any register class. */
1620 if (x == CONST0_RTX (GET_MODE (x)))
0a2aaacc 1621 return rclass;
551cc6fd
RH
1622
1623 /* These sorts of constants we can easily drop to memory. */
c799797d
UB
1624 if (CONST_SCALAR_INT_P (x)
1625 || CONST_DOUBLE_P (x)
72910a0b 1626 || GET_CODE (x) == CONST_VECTOR)
551cc6fd 1627 {
0a2aaacc 1628 if (rclass == FLOAT_REGS)
551cc6fd 1629 return NO_REGS;
0a2aaacc 1630 if (rclass == ALL_REGS)
551cc6fd 1631 return GENERAL_REGS;
0a2aaacc 1632 return rclass;
551cc6fd
RH
1633 }
1634
1635 /* All other kinds of constants should not (and in the case of HIGH
1636 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1637 secondary reload. */
1638 if (CONSTANT_P (x))
0a2aaacc 1639 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
551cc6fd 1640
0a2aaacc 1641 return rclass;
551cc6fd
RH
1642}
1643
48f46219 1644/* Inform reload about cases where moving X with a mode MODE to a register in
0a2aaacc 1645 RCLASS requires an extra scratch or immediate register. Return the class
48f46219 1646 needed for the immediate register. */
3611aef0 1647
a87cf97e
JR
1648static reg_class_t
1649alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
ef4bddc2 1650 machine_mode mode, secondary_reload_info *sri)
3611aef0 1651{
a87cf97e
JR
1652 enum reg_class rclass = (enum reg_class) rclass_i;
1653
48f46219
RH
1654 /* Loading and storing HImode or QImode values to and from memory
1655 usually requires a scratch register. */
1656 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
35a414df 1657 {
48f46219 1658 if (any_memory_operand (x, mode))
35a414df 1659 {
48f46219
RH
1660 if (in_p)
1661 {
1662 if (!aligned_memory_operand (x, mode))
f9621cc4 1663 sri->icode = direct_optab_handler (reload_in_optab, mode);
48f46219
RH
1664 }
1665 else
f9621cc4 1666 sri->icode = direct_optab_handler (reload_out_optab, mode);
48f46219 1667 return NO_REGS;
35a414df
RH
1668 }
1669 }
3611aef0 1670
48f46219
RH
1671 /* We also cannot do integral arithmetic into FP regs, as might result
1672 from register elimination into a DImode fp register. */
0a2aaacc 1673 if (rclass == FLOAT_REGS)
3611aef0 1674 {
48f46219 1675 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
3611aef0 1676 return GENERAL_REGS;
48f46219
RH
1677 if (in_p && INTEGRAL_MODE_P (mode)
1678 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
3611aef0
RH
1679 return GENERAL_REGS;
1680 }
1681
1682 return NO_REGS;
1683}
94e23f53 1684
f15643d4
RS
1685/* Implement TARGET_SECONDARY_MEMORY_NEEDED.
1686
1687 If we are copying between general and FP registers, we need a memory
1688 location unless the FIX extension is available. */
1689
1690static bool
1691alpha_secondary_memory_needed (machine_mode, reg_class_t class1,
1692 reg_class_t class2)
1693{
1694 return (!TARGET_FIX
1695 && ((class1 == FLOAT_REGS && class2 != FLOAT_REGS)
1696 || (class2 == FLOAT_REGS && class1 != FLOAT_REGS)));
1697}
1698
94e23f53
RS
1699/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. If MODE is
1700 floating-point, use it. Otherwise, widen to a word like the default.
1701 This is needed because we always store integers in FP registers in
1702 quadword format. This whole area is very tricky! */
1703
1704static machine_mode
1705alpha_secondary_memory_needed_mode (machine_mode mode)
1706{
1707 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1708 return mode;
1709 if (GET_MODE_SIZE (mode) >= 4)
1710 return mode;
1711 return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
1712}
a6f12d7c 1713\f
13a4e577 1714/* Given SEQ, which is an INSN list, look for any MEMs in either
2f937369
DM
1715 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1716 volatile flags from REF into each of the MEMs found. If REF is not
1717 a MEM, don't do anything. */
a6f12d7c
RK
1718
1719void
13a4e577 1720alpha_set_memflags (rtx seq, rtx ref)
a6f12d7c 1721{
b32d5189 1722 rtx_insn *insn;
3873d24b 1723
13a4e577 1724 if (!MEM_P (ref))
a6f12d7c
RK
1725 return;
1726
f676971a 1727 /* This is only called from alpha.md, after having had something
3873d24b
RH
1728 generated from one of the insn patterns. So if everything is
1729 zero, the pattern is already up-to-date. */
389fdba0 1730 if (!MEM_VOLATILE_P (ref)
389fdba0
RH
1731 && !MEM_NOTRAP_P (ref)
1732 && !MEM_READONLY_P (ref))
3873d24b
RH
1733 return;
1734
f9a20af0 1735 subrtx_var_iterator::array_type array;
b32d5189 1736 for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
13a4e577 1737 if (INSN_P (insn))
f9a20af0
RS
1738 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1739 {
1740 rtx x = *iter;
1741 if (MEM_P (x))
1742 {
1743 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
1744 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
1745 MEM_READONLY_P (x) = MEM_READONLY_P (ref);
1746 /* Sadly, we cannot use alias sets because the extra
1747 aliasing produced by the AND interferes. Given that
1748 two-byte quantities are the only thing we would be
1749 able to differentiate anyway, there does not seem to
1750 be any point in convoluting the early out of the
1751 alias check. */
1752 iter.skip_subrtxes ();
1753 }
1754 }
13a4e577
UB
1755 else
1756 gcc_unreachable ();
a6f12d7c
RK
1757}
1758\f
ef4bddc2 1759static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
72910a0b
RH
1760 int, bool);
1761
1762/* Internal routine for alpha_emit_set_const to check for N or below insns.
1763 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1764 and return pc_rtx if successful. */
9102cd1f
RK
1765
1766static rtx
ef4bddc2 1767alpha_emit_set_const_1 (rtx target, machine_mode mode,
72910a0b 1768 HOST_WIDE_INT c, int n, bool no_output)
a6f12d7c 1769{
0a2aaacc 1770 HOST_WIDE_INT new_const;
a6f12d7c 1771 int i, bits;
fd94addf
RK
1772 /* Use a pseudo if highly optimizing and still generating RTL. */
1773 rtx subtarget
b3a13419 1774 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
b83b7fa3 1775 rtx temp, insn;
a6f12d7c 1776
a6f12d7c 1777 /* If this is a sign-extended 32-bit constant, we can do this in at most
c37aa43b 1778 three insns, so do it if we have enough insns left. */
a6f12d7c 1779
c37aa43b 1780 if (c >> 31 == -1 || c >> 31 == 0)
a6f12d7c 1781 {
20a4db98 1782 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
a6f12d7c 1783 HOST_WIDE_INT tmp1 = c - low;
20a4db98 1784 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
a6f12d7c
RK
1785 HOST_WIDE_INT extra = 0;
1786
ab034cfc
RK
1787 /* If HIGH will be interpreted as negative but the constant is
1788 positive, we must adjust it to do two ldha insns. */
1789
1790 if ((high & 0x8000) != 0 && c >= 0)
a6f12d7c
RK
1791 {
1792 extra = 0x4000;
1793 tmp1 -= 0x40000000;
1794 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1795 }
1796
1797 if (c == low || (low == 0 && extra == 0))
858e4e8c
RH
1798 {
1799 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1800 but that meant that we can't handle INT_MIN on 32-bit machines
f676971a 1801 (like NT/Alpha), because we recurse indefinitely through
858e4e8c
RH
1802 emit_move_insn to gen_movdi. So instead, since we know exactly
1803 what we want, create it explicitly. */
1804
72910a0b
RH
1805 if (no_output)
1806 return pc_rtx;
858e4e8c
RH
1807 if (target == NULL)
1808 target = gen_reg_rtx (mode);
f7df4a84 1809 emit_insn (gen_rtx_SET (target, GEN_INT (c)));
858e4e8c
RH
1810 return target;
1811 }
9102cd1f 1812 else if (n >= 2 + (extra != 0))
a6f12d7c 1813 {
72910a0b
RH
1814 if (no_output)
1815 return pc_rtx;
b3a13419 1816 if (!can_create_pseudo_p ())
c3cda381 1817 {
f7df4a84 1818 emit_insn (gen_rtx_SET (target, GEN_INT (high << 16)));
c3cda381
FH
1819 temp = target;
1820 }
1821 else
1822 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1823 subtarget, mode);
fd94addf 1824
b83b7fa3
RH
1825 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1826 This means that if we go through expand_binop, we'll try to
1827 generate extensions, etc, which will require new pseudos, which
1828 will fail during some split phases. The SImode add patterns
1829 still exist, but are not named. So build the insns by hand. */
1830
a6f12d7c 1831 if (extra != 0)
b83b7fa3
RH
1832 {
1833 if (! subtarget)
1834 subtarget = gen_reg_rtx (mode);
1835 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
f7df4a84 1836 insn = gen_rtx_SET (subtarget, insn);
b83b7fa3 1837 emit_insn (insn);
1ef9531b 1838 temp = subtarget;
b83b7fa3 1839 }
a6f12d7c 1840
b83b7fa3
RH
1841 if (target == NULL)
1842 target = gen_reg_rtx (mode);
1843 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
f7df4a84 1844 insn = gen_rtx_SET (target, insn);
b83b7fa3
RH
1845 emit_insn (insn);
1846 return target;
a6f12d7c
RK
1847 }
1848 }
1849
0af3ee30 1850 /* If we couldn't do it that way, try some other methods. But if we have
f444f304
RK
1851 no instructions left, don't bother. Likewise, if this is SImode and
1852 we can't make pseudos, we can't do anything since the expand_binop
1853 and expand_unop calls will widen and try to make pseudos. */
a6f12d7c 1854
b3a13419 1855 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
a6f12d7c
RK
1856 return 0;
1857
0af3ee30 1858 /* Next, see if we can load a related constant and then shift and possibly
a6f12d7c
RK
1859 negate it to get the constant we want. Try this once each increasing
1860 numbers of insns. */
1861
1862 for (i = 1; i < n; i++)
1863 {
20a4db98
RH
1864 /* First, see if minus some low bits, we've an easy load of
1865 high bits. */
1866
0a2aaacc
KG
1867 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1868 if (new_const != 0)
72910a0b 1869 {
0a2aaacc 1870 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
72910a0b
RH
1871 if (temp)
1872 {
1873 if (no_output)
1874 return temp;
0a2aaacc 1875 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
72910a0b
RH
1876 target, 0, OPTAB_WIDEN);
1877 }
1878 }
20a4db98
RH
1879
1880 /* Next try complementing. */
72910a0b
RH
1881 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1882 if (temp)
1883 {
1884 if (no_output)
1885 return temp;
1886 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1887 }
a6f12d7c 1888
fd94addf 1889 /* Next try to form a constant and do a left shift. We can do this
a6f12d7c
RK
1890 if some low-order bits are zero; the exact_log2 call below tells
1891 us that information. The bits we are shifting out could be any
1892 value, but here we'll just try the 0- and sign-extended forms of
1893 the constant. To try to increase the chance of having the same
1894 constant in more than one insn, start at the highest number of
1895 bits to shift, but try all possibilities in case a ZAPNOT will
1896 be useful. */
1897
72910a0b
RH
1898 bits = exact_log2 (c & -c);
1899 if (bits > 0)
a6f12d7c 1900 for (; bits > 0; bits--)
72910a0b 1901 {
0a2aaacc
KG
1902 new_const = c >> bits;
1903 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
72910a0b
RH
1904 if (!temp && c < 0)
1905 {
0a2aaacc
KG
1906 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1907 temp = alpha_emit_set_const (subtarget, mode, new_const,
72910a0b
RH
1908 i, no_output);
1909 }
1910 if (temp)
1911 {
1912 if (no_output)
1913 return temp;
1914 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1915 target, 0, OPTAB_WIDEN);
1916 }
1917 }
a6f12d7c
RK
1918
1919 /* Now try high-order zero bits. Here we try the shifted-in bits as
57cfde96
RK
1920 all zero and all ones. Be careful to avoid shifting outside the
1921 mode and to avoid shifting outside the host wide int size. */
a6f12d7c 1922
72910a0b 1923 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
c37aa43b 1924 - floor_log2 (c) - 1);
72910a0b 1925 if (bits > 0)
a6f12d7c 1926 for (; bits > 0; bits--)
72910a0b 1927 {
0a2aaacc
KG
1928 new_const = c << bits;
1929 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
72910a0b
RH
1930 if (!temp)
1931 {
c37aa43b 1932 new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
0a2aaacc 1933 temp = alpha_emit_set_const (subtarget, mode, new_const,
72910a0b
RH
1934 i, no_output);
1935 }
1936 if (temp)
1937 {
1938 if (no_output)
1939 return temp;
1940 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1941 target, 1, OPTAB_WIDEN);
1942 }
1943 }
a6f12d7c
RK
1944
1945 /* Now try high-order 1 bits. We get that with a sign-extension.
57cfde96 1946 But one bit isn't enough here. Be careful to avoid shifting outside
285a5742 1947 the mode and to avoid shifting outside the host wide int size. */
30102605 1948
72910a0b
RH
1949 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1950 - floor_log2 (~ c) - 2);
1951 if (bits > 0)
a6f12d7c 1952 for (; bits > 0; bits--)
72910a0b 1953 {
0a2aaacc
KG
1954 new_const = c << bits;
1955 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
72910a0b
RH
1956 if (!temp)
1957 {
c37aa43b 1958 new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
0a2aaacc 1959 temp = alpha_emit_set_const (subtarget, mode, new_const,
72910a0b
RH
1960 i, no_output);
1961 }
1962 if (temp)
1963 {
1964 if (no_output)
1965 return temp;
1966 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1967 target, 0, OPTAB_WIDEN);
1968 }
1969 }
a6f12d7c
RK
1970 }
1971
20a4db98
RH
1972 /* Finally, see if can load a value into the target that is the same as the
1973 constant except that all bytes that are 0 are changed to be 0xff. If we
1974 can, then we can do a ZAPNOT to obtain the desired constant. */
1975
0a2aaacc 1976 new_const = c;
20a4db98 1977 for (i = 0; i < 64; i += 8)
0a2aaacc
KG
1978 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1979 new_const |= (HOST_WIDE_INT) 0xff << i;
e68c380c 1980
20a4db98
RH
1981 /* We are only called for SImode and DImode. If this is SImode, ensure that
1982 we are sign extended to a full word. */
1983
1984 if (mode == SImode)
0a2aaacc 1985 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
20a4db98 1986
0a2aaacc 1987 if (new_const != c)
72910a0b 1988 {
0a2aaacc 1989 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
72910a0b
RH
1990 if (temp)
1991 {
1992 if (no_output)
1993 return temp;
0a2aaacc 1994 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
72910a0b
RH
1995 target, 0, OPTAB_WIDEN);
1996 }
1997 }
e68c380c 1998
a6f12d7c
RK
1999 return 0;
2000}
758d2c0c 2001
a5c24926
RH
2002/* Try to output insns to set TARGET equal to the constant C if it can be
2003 done in less than N insns. Do all computations in MODE. Returns the place
2004 where the output has been placed if it can be done and the insns have been
2005 emitted. If it would take more than N insns, zero is returned and no
2006 insns and emitted. */
2007
72910a0b 2008static rtx
ef4bddc2 2009alpha_emit_set_const (rtx target, machine_mode mode,
72910a0b 2010 HOST_WIDE_INT c, int n, bool no_output)
a5c24926 2011{
ef4bddc2 2012 machine_mode orig_mode = mode;
a5c24926 2013 rtx orig_target = target;
72910a0b 2014 rtx result = 0;
a5c24926
RH
2015 int i;
2016
2017 /* If we can't make any pseudos, TARGET is an SImode hard register, we
2018 can't load this constant in one insn, do this in DImode. */
b3a13419 2019 if (!can_create_pseudo_p () && mode == SImode
7d83f4f5 2020 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
a5c24926 2021 {
72910a0b
RH
2022 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
2023 if (result)
2024 return result;
2025
2026 target = no_output ? NULL : gen_lowpart (DImode, target);
2027 mode = DImode;
2028 }
2029 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
2030 {
2031 target = no_output ? NULL : gen_lowpart (DImode, target);
a5c24926
RH
2032 mode = DImode;
2033 }
2034
2035 /* Try 1 insn, then 2, then up to N. */
2036 for (i = 1; i <= n; i++)
2037 {
72910a0b 2038 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
a5c24926
RH
2039 if (result)
2040 {
cad003ba
DM
2041 rtx_insn *insn;
2042 rtx set;
72910a0b
RH
2043
2044 if (no_output)
2045 return result;
2046
2047 insn = get_last_insn ();
2048 set = single_set (insn);
a5c24926
RH
2049 if (! CONSTANT_P (SET_SRC (set)))
2050 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2051 break;
2052 }
2053 }
2054
2055 /* Allow for the case where we changed the mode of TARGET. */
72910a0b
RH
2056 if (result)
2057 {
2058 if (result == target)
2059 result = orig_target;
2060 else if (mode != orig_mode)
2061 result = gen_lowpart (orig_mode, result);
2062 }
a5c24926
RH
2063
2064 return result;
2065}
2066
97aea203
RK
2067/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2068 fall back to a straight forward decomposition. We do this to avoid
2069 exponential run times encountered when looking for longer sequences
2070 with alpha_emit_set_const. */
2071
72910a0b 2072static rtx
da80c6b8 2073alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
97aea203 2074{
97aea203 2075 HOST_WIDE_INT d1, d2, d3, d4;
97aea203
RK
2076
2077 /* Decompose the entire word */
c37aa43b 2078
3fe5612d
RH
2079 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2080 c1 -= d1;
2081 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2082 c1 = (c1 - d2) >> 32;
2083 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2084 c1 -= d3;
2085 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
56daab84 2086 gcc_assert (c1 == d4);
97aea203
RK
2087
2088 /* Construct the high word */
3fe5612d
RH
2089 if (d4)
2090 {
2091 emit_move_insn (target, GEN_INT (d4));
2092 if (d3)
2093 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2094 }
97aea203 2095 else
3fe5612d 2096 emit_move_insn (target, GEN_INT (d3));
97aea203
RK
2097
2098 /* Shift it into place */
3fe5612d 2099 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
97aea203 2100
3fe5612d
RH
2101 /* Add in the low bits. */
2102 if (d2)
2103 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2104 if (d1)
2105 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
97aea203 2106
3fe5612d 2107 return target;
97aea203 2108}
97aea203 2109
7eb05850 2110/* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits. */
72910a0b 2111
da80c6b8
UB
2112static HOST_WIDE_INT
2113alpha_extract_integer (rtx x)
72910a0b 2114{
72910a0b
RH
2115 if (GET_CODE (x) == CONST_VECTOR)
2116 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2117
7eb05850
UB
2118 gcc_assert (CONST_INT_P (x));
2119
2120 return INTVAL (x);
72910a0b
RH
2121}
2122
1a627b35
RS
2123/* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
2124 we are willing to load the value into a register via a move pattern.
72910a0b
RH
2125 Normally this is all symbolic constants, integral constants that
2126 take three or fewer instructions, and floating-point zero. */
2127
2128bool
ef4bddc2 2129alpha_legitimate_constant_p (machine_mode mode, rtx x)
72910a0b 2130{
da80c6b8 2131 HOST_WIDE_INT i0;
72910a0b
RH
2132
2133 switch (GET_CODE (x))
2134 {
72910a0b 2135 case LABEL_REF:
72910a0b
RH
2136 case HIGH:
2137 return true;
2138
42a9ba1d
UB
2139 case CONST:
2140 if (GET_CODE (XEXP (x, 0)) == PLUS
c799797d 2141 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
42a9ba1d
UB
2142 x = XEXP (XEXP (x, 0), 0);
2143 else
2144 return true;
2145
2146 if (GET_CODE (x) != SYMBOL_REF)
2147 return true;
42a9ba1d
UB
2148 /* FALLTHRU */
2149
e584065d
RH
2150 case SYMBOL_REF:
2151 /* TLS symbols are never valid. */
2152 return SYMBOL_REF_TLS_MODEL (x) == 0;
2153
f06ed650 2154 case CONST_WIDE_INT:
7eb05850
UB
2155 if (TARGET_BUILD_CONSTANTS)
2156 return true;
72910a0b
RH
2157 if (x == CONST0_RTX (mode))
2158 return true;
7eb05850
UB
2159 mode = DImode;
2160 gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2);
2161 i0 = CONST_WIDE_INT_ELT (x, 1);
2162 if (alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL)
2163 return false;
2164 i0 = CONST_WIDE_INT_ELT (x, 0);
72910a0b
RH
2165 goto do_integer;
2166
f06ed650
UB
2167 case CONST_DOUBLE:
2168 if (x == CONST0_RTX (mode))
2169 return true;
2170 return false;
2171
72910a0b
RH
2172 case CONST_VECTOR:
2173 if (x == CONST0_RTX (mode))
2174 return true;
2175 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2176 return false;
2177 if (GET_MODE_SIZE (mode) != 8)
2178 return false;
7eb05850 2179 /* FALLTHRU */
72910a0b
RH
2180
2181 case CONST_INT:
72910a0b
RH
2182 if (TARGET_BUILD_CONSTANTS)
2183 return true;
da80c6b8 2184 i0 = alpha_extract_integer (x);
7eb05850
UB
2185 do_integer:
2186 return alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL;
72910a0b
RH
2187
2188 default:
2189 return false;
2190 }
2191}
2192
2193/* Operand 1 is known to be a constant, and should require more than one
2194 instruction to load. Emit that multi-part load. */
2195
2196bool
ef4bddc2 2197alpha_split_const_mov (machine_mode mode, rtx *operands)
72910a0b 2198{
da80c6b8 2199 HOST_WIDE_INT i0;
72910a0b
RH
2200 rtx temp = NULL_RTX;
2201
da80c6b8 2202 i0 = alpha_extract_integer (operands[1]);
72910a0b 2203
c37aa43b 2204 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
72910a0b
RH
2205
2206 if (!temp && TARGET_BUILD_CONSTANTS)
da80c6b8 2207 temp = alpha_emit_set_long_const (operands[0], i0);
72910a0b
RH
2208
2209 if (temp)
2210 {
2211 if (!rtx_equal_p (operands[0], temp))
2212 emit_move_insn (operands[0], temp);
2213 return true;
2214 }
2215
2216 return false;
2217}
2218
23296a36
RH
2219/* Expand a move instruction; return true if all work is done.
2220 We don't handle non-bwx subword loads here. */
2221
2222bool
ef4bddc2 2223alpha_expand_mov (machine_mode mode, rtx *operands)
23296a36 2224{
9dadeeb8
UB
2225 rtx tmp;
2226
23296a36 2227 /* If the output is not a register, the input must be. */
7d83f4f5 2228 if (MEM_P (operands[0])
23296a36
RH
2229 && ! reg_or_0_operand (operands[1], mode))
2230 operands[1] = force_reg (mode, operands[1]);
2231
551cc6fd 2232 /* Allow legitimize_address to perform some simplifications. */
d3e98208 2233 if (mode == Pmode && symbolic_operand (operands[1], mode))
1eb356b9 2234 {
506d7b68 2235 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
551cc6fd 2236 if (tmp)
133d3133 2237 {
6f9b006d
RH
2238 if (tmp == operands[0])
2239 return true;
551cc6fd 2240 operands[1] = tmp;
e2c9fb9b
RH
2241 return false;
2242 }
1eb356b9
RH
2243 }
2244
23296a36
RH
2245 /* Early out for non-constants and valid constants. */
2246 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2247 return false;
2248
2249 /* Split large integers. */
7d83f4f5 2250 if (CONST_INT_P (operands[1])
72910a0b 2251 || GET_CODE (operands[1]) == CONST_VECTOR)
23296a36 2252 {
72910a0b
RH
2253 if (alpha_split_const_mov (mode, operands))
2254 return true;
23296a36
RH
2255 }
2256
2257 /* Otherwise we've nothing left but to drop the thing to memory. */
9dadeeb8
UB
2258 tmp = force_const_mem (mode, operands[1]);
2259
2260 if (tmp == NULL_RTX)
2261 return false;
2262
23296a36
RH
2263 if (reload_in_progress)
2264 {
9dadeeb8
UB
2265 emit_move_insn (operands[0], XEXP (tmp, 0));
2266 operands[1] = replace_equiv_address (tmp, operands[0]);
23296a36
RH
2267 }
2268 else
9dadeeb8 2269 operands[1] = validize_mem (tmp);
23296a36
RH
2270 return false;
2271}
2272
2273/* Expand a non-bwx QImode or HImode move instruction;
2274 return true if all work is done. */
2275
2276bool
ef4bddc2 2277alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
23296a36 2278{
48f46219
RH
2279 rtx seq;
2280
23296a36 2281 /* If the output is not a register, the input must be. */
48f46219 2282 if (MEM_P (operands[0]))
23296a36
RH
2283 operands[1] = force_reg (mode, operands[1]);
2284
2285 /* Handle four memory cases, unaligned and aligned for either the input
2286 or the output. The only case where we can be called during reload is
2287 for aligned loads; all other cases require temporaries. */
2288
48f46219 2289 if (any_memory_operand (operands[1], mode))
23296a36
RH
2290 {
2291 if (aligned_memory_operand (operands[1], mode))
2292 {
2293 if (reload_in_progress)
2294 {
145f748f 2295 seq = gen_reload_in_aligned (mode, operands[0], operands[1]);
48f46219 2296 emit_insn (seq);
23296a36
RH
2297 }
2298 else
2299 {
2300 rtx aligned_mem, bitnum;
2301 rtx scratch = gen_reg_rtx (SImode);
62e88293
RH
2302 rtx subtarget;
2303 bool copyout;
23296a36
RH
2304
2305 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2306
62e88293 2307 subtarget = operands[0];
7d83f4f5 2308 if (REG_P (subtarget))
62e88293
RH
2309 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2310 else
2311 subtarget = gen_reg_rtx (DImode), copyout = true;
2312
48f46219
RH
2313 if (mode == QImode)
2314 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2315 bitnum, scratch);
2316 else
2317 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2318 bitnum, scratch);
2319 emit_insn (seq);
62e88293
RH
2320
2321 if (copyout)
2322 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
23296a36
RH
2323 }
2324 }
2325 else
2326 {
2327 /* Don't pass these as parameters since that makes the generated
2328 code depend on parameter evaluation order which will cause
2329 bootstrap failures. */
2330
48f46219 2331 rtx temp1, temp2, subtarget, ua;
62e88293
RH
2332 bool copyout;
2333
2334 temp1 = gen_reg_rtx (DImode);
2335 temp2 = gen_reg_rtx (DImode);
23296a36 2336
62e88293 2337 subtarget = operands[0];
7d83f4f5 2338 if (REG_P (subtarget))
62e88293
RH
2339 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2340 else
2341 subtarget = gen_reg_rtx (DImode), copyout = true;
2342
48f46219
RH
2343 ua = get_unaligned_address (operands[1]);
2344 if (mode == QImode)
2345 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2346 else
2347 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2348
23296a36
RH
2349 alpha_set_memflags (seq, operands[1]);
2350 emit_insn (seq);
62e88293
RH
2351
2352 if (copyout)
2353 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
23296a36
RH
2354 }
2355 return true;
2356 }
2357
48f46219 2358 if (any_memory_operand (operands[0], mode))
23296a36
RH
2359 {
2360 if (aligned_memory_operand (operands[0], mode))
2361 {
2362 rtx aligned_mem, bitnum;
2363 rtx temp1 = gen_reg_rtx (SImode);
2364 rtx temp2 = gen_reg_rtx (SImode);
2365
2366 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2367
2368 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2369 temp1, temp2));
2370 }
2371 else
2372 {
2373 rtx temp1 = gen_reg_rtx (DImode);
2374 rtx temp2 = gen_reg_rtx (DImode);
2375 rtx temp3 = gen_reg_rtx (DImode);
48f46219
RH
2376 rtx ua = get_unaligned_address (operands[0]);
2377
145f748f
UB
2378 seq = gen_unaligned_store
2379 (mode, ua, operands[1], temp1, temp2, temp3);
23296a36
RH
2380
2381 alpha_set_memflags (seq, operands[0]);
2382 emit_insn (seq);
2383 }
2384 return true;
2385 }
2386
2387 return false;
2388}
2389
ad78a663 2390/* Implement the movmisalign patterns. One of the operands is a memory
35c0104b 2391 that is not naturally aligned. Emit instructions to load it. */
ad78a663
RH
2392
2393void
ef4bddc2 2394alpha_expand_movmisalign (machine_mode mode, rtx *operands)
ad78a663
RH
2395{
2396 /* Honor misaligned loads, for those we promised to do so. */
2397 if (MEM_P (operands[1]))
2398 {
2399 rtx tmp;
2400
2401 if (register_operand (operands[0], mode))
2402 tmp = operands[0];
2403 else
2404 tmp = gen_reg_rtx (mode);
2405
2406 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2407 if (tmp != operands[0])
2408 emit_move_insn (operands[0], tmp);
2409 }
2410 else if (MEM_P (operands[0]))
2411 {
2412 if (!reg_or_0_operand (operands[1], mode))
2413 operands[1] = force_reg (mode, operands[1]);
2414 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2415 }
2416 else
2417 gcc_unreachable ();
2418}
2419
01b9e84e
RH
2420/* Generate an unsigned DImode to FP conversion. This is the same code
2421 optabs would emit if we didn't have TFmode patterns.
2422
2423 For SFmode, this is the only construction I've found that can pass
2424 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2425 intermediates will work, because you'll get intermediate rounding
2426 that ruins the end result. Some of this could be fixed by turning
2427 on round-to-positive-infinity, but that requires diddling the fpsr,
2428 which kills performance. I tried turning this around and converting
2429 to a negative number, so that I could turn on /m, but either I did
2430 it wrong or there's something else cause I wound up with the exact
2431 same single-bit error. There is a branch-less form of this same code:
2432
2433 srl $16,1,$1
2434 and $16,1,$2
2435 cmplt $16,0,$3
2436 or $1,$2,$2
2437 cmovge $16,$16,$2
2438 itoft $3,$f10
2439 itoft $2,$f11
2440 cvtqs $f11,$f11
2441 adds $f11,$f11,$f0
2442 fcmoveq $f10,$f11,$f0
2443
2444 I'm not using it because it's the same number of instructions as
2445 this branch-full form, and it has more serialized long latency
2446 instructions on the critical path.
2447
2448 For DFmode, we can avoid rounding errors by breaking up the word
2449 into two pieces, converting them separately, and adding them back:
2450
2451 LC0: .long 0,0x5f800000
2452
2453 itoft $16,$f11
2454 lda $2,LC0
70994f30 2455 cmplt $16,0,$1
01b9e84e
RH
2456 cpyse $f11,$f31,$f10
2457 cpyse $f31,$f11,$f11
2458 s4addq $1,$2,$1
2459 lds $f12,0($1)
2460 cvtqt $f10,$f10
2461 cvtqt $f11,$f11
2462 addt $f12,$f10,$f0
2463 addt $f0,$f11,$f0
2464
2465 This doesn't seem to be a clear-cut win over the optabs form.
2466 It probably all depends on the distribution of numbers being
2467 converted -- in the optabs form, all but high-bit-set has a
2468 much lower minimum execution time. */
2469
2470void
a5c24926 2471alpha_emit_floatuns (rtx operands[2])
01b9e84e
RH
2472{
2473 rtx neglab, donelab, i0, i1, f0, in, out;
ef4bddc2 2474 machine_mode mode;
01b9e84e
RH
2475
2476 out = operands[0];
57014cb9 2477 in = force_reg (DImode, operands[1]);
01b9e84e
RH
2478 mode = GET_MODE (out);
2479 neglab = gen_label_rtx ();
2480 donelab = gen_label_rtx ();
2481 i0 = gen_reg_rtx (DImode);
2482 i1 = gen_reg_rtx (DImode);
2483 f0 = gen_reg_rtx (mode);
2484
d43e0b7d 2485 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
01b9e84e 2486
f7df4a84 2487 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
01b9e84e 2488 emit_jump_insn (gen_jump (donelab));
70994f30 2489 emit_barrier ();
01b9e84e
RH
2490
2491 emit_label (neglab);
2492
2493 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2494 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2495 emit_insn (gen_iordi3 (i0, i0, i1));
f7df4a84
RS
2496 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
2497 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
01b9e84e
RH
2498
2499 emit_label (donelab);
2500}
2501
f283421d
RH
2502/* Generate the comparison for a conditional branch. */
2503
f90b7a5a 2504void
ef4bddc2 2505alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
f283421d
RH
2506{
2507 enum rtx_code cmp_code, branch_code;
ef4bddc2 2508 machine_mode branch_mode = VOIDmode;
f90b7a5a
PB
2509 enum rtx_code code = GET_CODE (operands[0]);
2510 rtx op0 = operands[1], op1 = operands[2];
f283421d
RH
2511 rtx tem;
2512
f90b7a5a 2513 if (cmp_mode == TFmode)
5495cc55 2514 {
0da4e73a 2515 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
5495cc55 2516 op1 = const0_rtx;
f90b7a5a 2517 cmp_mode = DImode;
5495cc55
RH
2518 }
2519
f283421d
RH
2520 /* The general case: fold the comparison code to the types of compares
2521 that we have, choosing the branch as necessary. */
2522 switch (code)
2523 {
2524 case EQ: case LE: case LT: case LEU: case LTU:
1eb8759b 2525 case UNORDERED:
d2b21f20 2526 /* We have these compares. */
f283421d
RH
2527 cmp_code = code, branch_code = NE;
2528 break;
2529
2530 case NE:
1eb8759b 2531 case ORDERED:
285a5742 2532 /* These must be reversed. */
1eb8759b 2533 cmp_code = reverse_condition (code), branch_code = EQ;
f283421d
RH
2534 break;
2535
2536 case GE: case GT: case GEU: case GTU:
2537 /* For FP, we swap them, for INT, we reverse them. */
f90b7a5a 2538 if (cmp_mode == DFmode)
f283421d
RH
2539 {
2540 cmp_code = swap_condition (code);
2541 branch_code = NE;
3f3f5af0 2542 std::swap (op0, op1);
f283421d
RH
2543 }
2544 else
2545 {
2546 cmp_code = reverse_condition (code);
2547 branch_code = EQ;
2548 }
2549 break;
2550
2551 default:
56daab84 2552 gcc_unreachable ();
f283421d
RH
2553 }
2554
f90b7a5a 2555 if (cmp_mode == DFmode)
f283421d 2556 {
ec46190f 2557 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
f283421d
RH
2558 {
2559 /* When we are not as concerned about non-finite values, and we
2560 are comparing against zero, we can branch directly. */
2561 if (op1 == CONST0_RTX (DFmode))
f822d252 2562 cmp_code = UNKNOWN, branch_code = code;
f283421d
RH
2563 else if (op0 == CONST0_RTX (DFmode))
2564 {
2565 /* Undo the swap we probably did just above. */
3f3f5af0 2566 std::swap (op0, op1);
b771b6b4 2567 branch_code = swap_condition (cmp_code);
f822d252 2568 cmp_code = UNKNOWN;
f283421d
RH
2569 }
2570 }
2571 else
2572 {
27d30956 2573 /* ??? We mark the branch mode to be CCmode to prevent the
f676971a 2574 compare and branch from being combined, since the compare
f283421d
RH
2575 insn follows IEEE rules that the branch does not. */
2576 branch_mode = CCmode;
2577 }
2578 }
2579 else
2580 {
f283421d
RH
2581 /* The following optimizations are only for signed compares. */
2582 if (code != LEU && code != LTU && code != GEU && code != GTU)
2583 {
2584 /* Whee. Compare and branch against 0 directly. */
2585 if (op1 == const0_rtx)
f822d252 2586 cmp_code = UNKNOWN, branch_code = code;
f283421d 2587
e006ced2
FH
2588 /* If the constants doesn't fit into an immediate, but can
2589 be generated by lda/ldah, we adjust the argument and
2590 compare against zero, so we can use beq/bne directly. */
4a4f95d9
RH
2591 /* ??? Don't do this when comparing against symbols, otherwise
2592 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2593 be declared false out of hand (at least for non-weak). */
7d83f4f5 2594 else if (CONST_INT_P (op1)
4a4f95d9
RH
2595 && (code == EQ || code == NE)
2596 && !(symbolic_operand (op0, VOIDmode)
7d83f4f5 2597 || (REG_P (op0) && REG_POINTER (op0))))
f283421d 2598 {
dfcbeaa5
RH
2599 rtx n_op1 = GEN_INT (-INTVAL (op1));
2600
2601 if (! satisfies_constraint_I (op1)
2602 && (satisfies_constraint_K (n_op1)
2603 || satisfies_constraint_L (n_op1)))
2604 cmp_code = PLUS, branch_code = code, op1 = n_op1;
f283421d
RH
2605 }
2606 }
f283421d 2607
9e495700
RH
2608 if (!reg_or_0_operand (op0, DImode))
2609 op0 = force_reg (DImode, op0);
2610 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2611 op1 = force_reg (DImode, op1);
2612 }
f283421d
RH
2613
2614 /* Emit an initial compare instruction, if necessary. */
2615 tem = op0;
f822d252 2616 if (cmp_code != UNKNOWN)
f283421d
RH
2617 {
2618 tem = gen_reg_rtx (cmp_mode);
2619 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2620 }
2621
f90b7a5a 2622 /* Emit the branch instruction. */
f7df4a84 2623 tem = gen_rtx_SET (pc_rtx,
f90b7a5a
PB
2624 gen_rtx_IF_THEN_ELSE (VOIDmode,
2625 gen_rtx_fmt_ee (branch_code,
2626 branch_mode, tem,
2627 CONST0_RTX (cmp_mode)),
2628 gen_rtx_LABEL_REF (VOIDmode,
2629 operands[3]),
2630 pc_rtx));
2631 emit_jump_insn (tem);
f283421d
RH
2632}
2633
9e495700
RH
2634/* Certain simplifications can be done to make invalid setcc operations
2635 valid. Return the final comparison, or NULL if we can't work. */
2636
f90b7a5a 2637bool
ef4bddc2 2638alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
9e495700
RH
2639{
2640 enum rtx_code cmp_code;
f90b7a5a
PB
2641 enum rtx_code code = GET_CODE (operands[1]);
2642 rtx op0 = operands[2], op1 = operands[3];
9e495700
RH
2643 rtx tmp;
2644
f90b7a5a 2645 if (cmp_mode == TFmode)
9e495700 2646 {
0da4e73a 2647 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
9e495700 2648 op1 = const0_rtx;
f90b7a5a 2649 cmp_mode = DImode;
9e495700
RH
2650 }
2651
f90b7a5a
PB
2652 if (cmp_mode == DFmode && !TARGET_FIX)
2653 return 0;
9e495700
RH
2654
2655 /* The general case: fold the comparison code to the types of compares
2656 that we have, choosing the branch as necessary. */
2657
f822d252 2658 cmp_code = UNKNOWN;
9e495700
RH
2659 switch (code)
2660 {
2661 case EQ: case LE: case LT: case LEU: case LTU:
2662 case UNORDERED:
2663 /* We have these compares. */
f90b7a5a 2664 if (cmp_mode == DFmode)
9e495700
RH
2665 cmp_code = code, code = NE;
2666 break;
2667
2668 case NE:
f90b7a5a 2669 if (cmp_mode == DImode && op1 == const0_rtx)
9e495700 2670 break;
5efb1046 2671 /* FALLTHRU */
9e495700
RH
2672
2673 case ORDERED:
2674 cmp_code = reverse_condition (code);
2675 code = EQ;
2676 break;
2677
2678 case GE: case GT: case GEU: case GTU:
56f19d92 2679 /* These normally need swapping, but for integer zero we have
c74fa144 2680 special patterns that recognize swapped operands. */
f90b7a5a 2681 if (cmp_mode == DImode && op1 == const0_rtx)
c74fa144 2682 break;
9e495700 2683 code = swap_condition (code);
f90b7a5a 2684 if (cmp_mode == DFmode)
9e495700 2685 cmp_code = code, code = NE;
3f3f5af0 2686 std::swap (op0, op1);
9e495700
RH
2687 break;
2688
2689 default:
56daab84 2690 gcc_unreachable ();
9e495700
RH
2691 }
2692
f90b7a5a 2693 if (cmp_mode == DImode)
9e495700 2694 {
c74fa144 2695 if (!register_operand (op0, DImode))
9e495700
RH
2696 op0 = force_reg (DImode, op0);
2697 if (!reg_or_8bit_operand (op1, DImode))
2698 op1 = force_reg (DImode, op1);
2699 }
2700
2701 /* Emit an initial compare instruction, if necessary. */
f822d252 2702 if (cmp_code != UNKNOWN)
9e495700 2703 {
f90b7a5a 2704 tmp = gen_reg_rtx (cmp_mode);
f7df4a84
RS
2705 emit_insn (gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode,
2706 op0, op1)));
9e495700 2707
7c1db202 2708 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
9e495700
RH
2709 op1 = const0_rtx;
2710 }
2711
f90b7a5a 2712 /* Emit the setcc instruction. */
f7df4a84
RS
2713 emit_insn (gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode,
2714 op0, op1)));
f90b7a5a 2715 return true;
9e495700
RH
2716}
2717
f283421d 2718
758d2c0c
RK
2719/* Rewrite a comparison against zero CMP of the form
2720 (CODE (cc0) (const_int 0)) so it can be written validly in
2721 a conditional move (if_then_else CMP ...).
825dda42 2722 If both of the operands that set cc0 are nonzero we must emit
758d2c0c 2723 an insn to perform the compare (it can't be done within
285a5742 2724 the conditional move). */
a5c24926 2725
758d2c0c 2726rtx
ef4bddc2 2727alpha_emit_conditional_move (rtx cmp, machine_mode mode)
758d2c0c 2728{
1ad2a62d 2729 enum rtx_code code = GET_CODE (cmp);
89b7c471 2730 enum rtx_code cmov_code = NE;
f90b7a5a
PB
2731 rtx op0 = XEXP (cmp, 0);
2732 rtx op1 = XEXP (cmp, 1);
ef4bddc2 2733 machine_mode cmp_mode
1ad2a62d 2734 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
ef4bddc2 2735 machine_mode cmov_mode = VOIDmode;
de6c5979 2736 int local_fast_math = flag_unsafe_math_optimizations;
1ad2a62d 2737 rtx tem;
758d2c0c 2738
387c39e1
UB
2739 if (cmp_mode == TFmode)
2740 {
2741 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2742 op1 = const0_rtx;
2743 cmp_mode = DImode;
2744 }
2745
f90b7a5a 2746 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
6db21c7f 2747
f90b7a5a 2748 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
09fe1c49
RH
2749 {
2750 enum rtx_code cmp_code;
2751
2752 if (! TARGET_FIX)
2753 return 0;
2754
2755 /* If we have fp<->int register move instructions, do a cmov by
2756 performing the comparison in fp registers, and move the
825dda42 2757 zero/nonzero value to integer registers, where we can then
09fe1c49
RH
2758 use a normal cmov, or vice-versa. */
2759
2760 switch (code)
2761 {
2762 case EQ: case LE: case LT: case LEU: case LTU:
d2b21f20 2763 case UNORDERED:
09fe1c49
RH
2764 /* We have these compares. */
2765 cmp_code = code, code = NE;
2766 break;
2767
2768 case NE:
d2b21f20
UB
2769 case ORDERED:
2770 /* These must be reversed. */
2771 cmp_code = reverse_condition (code), code = EQ;
09fe1c49
RH
2772 break;
2773
2774 case GE: case GT: case GEU: case GTU:
56f19d92
RH
2775 /* These normally need swapping, but for integer zero we have
2776 special patterns that recognize swapped operands. */
f90b7a5a 2777 if (cmp_mode == DImode && op1 == const0_rtx)
c53f9f5b
RH
2778 cmp_code = code, code = NE;
2779 else
2780 {
2781 cmp_code = swap_condition (code);
2782 code = NE;
3f3f5af0 2783 std::swap (op0, op1);
c53f9f5b 2784 }
09fe1c49
RH
2785 break;
2786
2787 default:
56daab84 2788 gcc_unreachable ();
09fe1c49
RH
2789 }
2790
d2b21f20
UB
2791 if (cmp_mode == DImode)
2792 {
2793 if (!reg_or_0_operand (op0, DImode))
2794 op0 = force_reg (DImode, op0);
2795 if (!reg_or_8bit_operand (op1, DImode))
2796 op1 = force_reg (DImode, op1);
2797 }
2798
f90b7a5a 2799 tem = gen_reg_rtx (cmp_mode);
f7df4a84
RS
2800 emit_insn (gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode,
2801 op0, op1)));
09fe1c49 2802
0d4a1197 2803 cmp_mode = cmp_mode == DImode ? E_DFmode : E_DImode;
f90b7a5a
PB
2804 op0 = gen_lowpart (cmp_mode, tem);
2805 op1 = CONST0_RTX (cmp_mode);
41dedebd 2806 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
09fe1c49
RH
2807 local_fast_math = 1;
2808 }
758d2c0c 2809
d2b21f20
UB
2810 if (cmp_mode == DImode)
2811 {
2812 if (!reg_or_0_operand (op0, DImode))
2813 op0 = force_reg (DImode, op0);
2814 if (!reg_or_8bit_operand (op1, DImode))
2815 op1 = force_reg (DImode, op1);
2816 }
2817
758d2c0c 2818 /* We may be able to use a conditional move directly.
285a5742 2819 This avoids emitting spurious compares. */
01b9e84e 2820 if (signed_comparison_operator (cmp, VOIDmode)
f90b7a5a 2821 && (cmp_mode == DImode || local_fast_math)
1ad2a62d 2822 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
38a448ca 2823 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
758d2c0c 2824
000ca373 2825 /* We can't put the comparison inside the conditional move;
758d2c0c 2826 emit a compare instruction and put that inside the
1ad2a62d
RK
2827 conditional move. Make sure we emit only comparisons we have;
2828 swap or reverse as necessary. */
758d2c0c 2829
b3a13419 2830 if (!can_create_pseudo_p ())
000ca373
RH
2831 return NULL_RTX;
2832
758d2c0c
RK
2833 switch (code)
2834 {
1ad2a62d 2835 case EQ: case LE: case LT: case LEU: case LTU:
d2b21f20 2836 case UNORDERED:
1ad2a62d 2837 /* We have these compares: */
758d2c0c 2838 break;
1ad2a62d 2839
758d2c0c 2840 case NE:
d2b21f20
UB
2841 case ORDERED:
2842 /* These must be reversed. */
1ad2a62d 2843 code = reverse_condition (code);
89b7c471 2844 cmov_code = EQ;
758d2c0c 2845 break;
1ad2a62d
RK
2846
2847 case GE: case GT: case GEU: case GTU:
c1e183a9
UB
2848 /* These normally need swapping, but for integer zero we have
2849 special patterns that recognize swapped operands. */
2850 if (cmp_mode == DImode && op1 == const0_rtx)
2851 break;
2852 code = swap_condition (code);
3f3f5af0 2853 std::swap (op0, op1);
758d2c0c 2854 break;
1ad2a62d 2855
758d2c0c 2856 default:
56daab84 2857 gcc_unreachable ();
758d2c0c
RK
2858 }
2859
f90b7a5a 2860 if (cmp_mode == DImode)
9e495700
RH
2861 {
2862 if (!reg_or_0_operand (op0, DImode))
2863 op0 = force_reg (DImode, op0);
2864 if (!reg_or_8bit_operand (op1, DImode))
2865 op1 = force_reg (DImode, op1);
2866 }
2867
68aed21b 2868 /* ??? We mark the branch mode to be CCmode to prevent the compare
f283421d
RH
2869 and cmov from being combined, since the compare insn follows IEEE
2870 rules that the cmov does not. */
f90b7a5a 2871 if (cmp_mode == DFmode && !local_fast_math)
f283421d
RH
2872 cmov_mode = CCmode;
2873
f90b7a5a
PB
2874 tem = gen_reg_rtx (cmp_mode);
2875 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2876 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
758d2c0c 2877}
8f4773ea
RH
2878
2879/* Simplify a conditional move of two constants into a setcc with
2880 arithmetic. This is done with a splitter since combine would
2881 just undo the work if done during code generation. It also catches
2882 cases we wouldn't have before cse. */
2883
2884int
a5c24926
RH
2885alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2886 rtx t_rtx, rtx f_rtx)
8f4773ea
RH
2887{
2888 HOST_WIDE_INT t, f, diff;
ef4bddc2 2889 machine_mode mode;
8f4773ea
RH
2890 rtx target, subtarget, tmp;
2891
2892 mode = GET_MODE (dest);
2893 t = INTVAL (t_rtx);
2894 f = INTVAL (f_rtx);
2895 diff = t - f;
2896
2897 if (((code == NE || code == EQ) && diff < 0)
2898 || (code == GE || code == GT))
2899 {
2900 code = reverse_condition (code);
7159f19c
UB
2901 std::swap (t, f);
2902 diff = -diff;
8f4773ea
RH
2903 }
2904
2905 subtarget = target = dest;
2906 if (mode != DImode)
2907 {
2908 target = gen_lowpart (DImode, dest);
b3a13419 2909 if (can_create_pseudo_p ())
8f4773ea
RH
2910 subtarget = gen_reg_rtx (DImode);
2911 else
2912 subtarget = target;
2913 }
a5376276
RH
2914 /* Below, we must be careful to use copy_rtx on target and subtarget
2915 in intermediate insns, as they may be a subreg rtx, which may not
2916 be shared. */
8f4773ea
RH
2917
2918 if (f == 0 && exact_log2 (diff) > 0
9a9f7594 2919 /* On EV6, we've got enough shifters to make non-arithmetic shifts
8f4773ea 2920 viable over a longer latency cmove. On EV5, the E0 slot is a
285a5742 2921 scarce resource, and on EV4 shift has the same latency as a cmove. */
8bea7f7c 2922 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
8f4773ea
RH
2923 {
2924 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
f7df4a84 2925 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
8f4773ea 2926
a5376276
RH
2927 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2928 GEN_INT (exact_log2 (t)));
f7df4a84 2929 emit_insn (gen_rtx_SET (target, tmp));
8f4773ea
RH
2930 }
2931 else if (f == 0 && t == -1)
2932 {
2933 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
f7df4a84 2934 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
8f4773ea 2935
a5376276 2936 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
8f4773ea
RH
2937 }
2938 else if (diff == 1 || diff == 4 || diff == 8)
2939 {
2940 rtx add_op;
2941
2942 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
f7df4a84 2943 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
8f4773ea
RH
2944
2945 if (diff == 1)
a5376276 2946 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
8f4773ea
RH
2947 else
2948 {
2949 add_op = GEN_INT (f);
2950 if (sext_add_operand (add_op, mode))
2951 {
02ea1c76
UB
2952 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2953 GEN_INT (exact_log2 (diff)));
8f4773ea 2954 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
f7df4a84 2955 emit_insn (gen_rtx_SET (target, tmp));
8f4773ea
RH
2956 }
2957 else
2958 return 0;
2959 }
2960 }
2961 else
2962 return 0;
2963
2964 return 1;
2965}
6c174fc0 2966\f
5495cc55
RH
2967/* Look up the function X_floating library function name for the
2968 given operation. */
2969
d1b38208 2970struct GTY(()) xfloating_op
75959f0a
RH
2971{
2972 const enum rtx_code code;
1431042e
ZW
2973 const char *const GTY((skip)) osf_func;
2974 const char *const GTY((skip)) vms_func;
75959f0a
RH
2975 rtx libcall;
2976};
2977
f676971a 2978static GTY(()) struct xfloating_op xfloating_ops[] =
75959f0a
RH
2979{
2980 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2981 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2982 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2983 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2984 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2985 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2986 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2987 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2988 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2989 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2990 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2991 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2992 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2993 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2994 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2995};
2996
2997static GTY(()) struct xfloating_op vax_cvt_ops[] =
2998{
2999 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
3000 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
3001};
3002
3003static rtx
a5c24926 3004alpha_lookup_xfloating_lib_func (enum rtx_code code)
5495cc55 3005{
75959f0a
RH
3006 struct xfloating_op *ops = xfloating_ops;
3007 long n = ARRAY_SIZE (xfloating_ops);
5495cc55
RH
3008 long i;
3009
0da4e73a
RH
3010 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
3011
75959f0a
RH
3012 /* How irritating. Nothing to key off for the main table. */
3013 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
5495cc55 3014 {
75959f0a
RH
3015 ops = vax_cvt_ops;
3016 n = ARRAY_SIZE (vax_cvt_ops);
5495cc55
RH
3017 }
3018
75959f0a
RH
3019 for (i = 0; i < n; ++i, ++ops)
3020 if (ops->code == code)
3021 {
3022 rtx func = ops->libcall;
3023 if (!func)
3024 {
3025 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3026 ? ops->vms_func : ops->osf_func);
3027 ops->libcall = func;
3028 }
3029 return func;
3030 }
5495cc55 3031
56daab84 3032 gcc_unreachable ();
5495cc55
RH
3033}
3034
3035/* Most X_floating operations take the rounding mode as an argument.
3036 Compute that here. */
3037
3038static int
a5c24926
RH
3039alpha_compute_xfloating_mode_arg (enum rtx_code code,
3040 enum alpha_fp_rounding_mode round)
5495cc55
RH
3041{
3042 int mode;
3043
3044 switch (round)
3045 {
3046 case ALPHA_FPRM_NORM:
3047 mode = 2;
3048 break;
3049 case ALPHA_FPRM_MINF:
3050 mode = 1;
3051 break;
3052 case ALPHA_FPRM_CHOP:
3053 mode = 0;
3054 break;
3055 case ALPHA_FPRM_DYN:
3056 mode = 4;
3057 break;
3058 default:
56daab84 3059 gcc_unreachable ();
5495cc55
RH
3060
3061 /* XXX For reference, round to +inf is mode = 3. */
3062 }
3063
3064 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3065 mode |= 0x10000;
3066
3067 return mode;
3068}
3069
3070/* Emit an X_floating library function call.
3071
3072 Note that these functions do not follow normal calling conventions:
3073 TFmode arguments are passed in two integer registers (as opposed to
f676971a 3074 indirect); TFmode return values appear in R16+R17.
5495cc55 3075
75959f0a 3076 FUNC is the function to call.
5495cc55
RH
3077 TARGET is where the output belongs.
3078 OPERANDS are the inputs.
3079 NOPERANDS is the count of inputs.
3080 EQUIV is the expression equivalent for the function.
3081*/
3082
3083static void
75959f0a 3084alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
a5c24926 3085 int noperands, rtx equiv)
5495cc55 3086{
4b6ab433 3087 rtx usage = NULL_RTX, reg;
5495cc55
RH
3088 int regno = 16, i;
3089
3090 start_sequence ();
3091
3092 for (i = 0; i < noperands; ++i)
3093 {
3094 switch (GET_MODE (operands[i]))
3095 {
4e10a5a7 3096 case E_TFmode:
5495cc55
RH
3097 reg = gen_rtx_REG (TFmode, regno);
3098 regno += 2;
3099 break;
3100
4e10a5a7 3101 case E_DFmode:
5495cc55
RH
3102 reg = gen_rtx_REG (DFmode, regno + 32);
3103 regno += 1;
3104 break;
3105
4e10a5a7 3106 case E_VOIDmode:
7d83f4f5 3107 gcc_assert (CONST_INT_P (operands[i]));
5efb1046 3108 /* FALLTHRU */
4e10a5a7 3109 case E_DImode:
5495cc55
RH
3110 reg = gen_rtx_REG (DImode, regno);
3111 regno += 1;
3112 break;
3113
3114 default:
56daab84 3115 gcc_unreachable ();
5495cc55
RH
3116 }
3117
3118 emit_move_insn (reg, operands[i]);
44f370bf 3119 use_reg (&usage, reg);
5495cc55
RH
3120 }
3121
3122 switch (GET_MODE (target))
3123 {
4e10a5a7 3124 case E_TFmode:
5495cc55
RH
3125 reg = gen_rtx_REG (TFmode, 16);
3126 break;
4e10a5a7 3127 case E_DFmode:
5495cc55
RH
3128 reg = gen_rtx_REG (DFmode, 32);
3129 break;
4e10a5a7 3130 case E_DImode:
5495cc55
RH
3131 reg = gen_rtx_REG (DImode, 0);
3132 break;
3133 default:
56daab84 3134 gcc_unreachable ();
5495cc55
RH
3135 }
3136
4b6ab433
TS
3137 rtx mem = gen_rtx_MEM (QImode, func);
3138 rtx_insn *tmp = emit_call_insn (gen_call_value (reg, mem, const0_rtx,
3139 const0_rtx, const0_rtx));
5495cc55 3140 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
becfd6e5 3141 RTL_CONST_CALL_P (tmp) = 1;
5495cc55
RH
3142
3143 tmp = get_insns ();
3144 end_sequence ();
3145
3146 emit_libcall_block (tmp, target, reg, equiv);
3147}
3148
3149/* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3150
3151void
a5c24926 3152alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
5495cc55 3153{
75959f0a 3154 rtx func;
5495cc55 3155 int mode;
c77f46c6 3156 rtx out_operands[3];
5495cc55
RH
3157
3158 func = alpha_lookup_xfloating_lib_func (code);
3159 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3160
c77f46c6
AO
3161 out_operands[0] = operands[1];
3162 out_operands[1] = operands[2];
3163 out_operands[2] = GEN_INT (mode);
f676971a 3164 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
5495cc55
RH
3165 gen_rtx_fmt_ee (code, TFmode, operands[1],
3166 operands[2]));
3167}
3168
3169/* Emit an X_floating library function call for a comparison. */
3170
3171static rtx
0da4e73a 3172alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
5495cc55 3173{
0da4e73a 3174 enum rtx_code cmp_code, res_code;
32891ff6 3175 rtx func, out, operands[2], note;
5495cc55 3176
0da4e73a
RH
3177 /* X_floating library comparison functions return
3178 -1 unordered
3179 0 false
3180 1 true
3181 Convert the compare against the raw return value. */
3182
3183 cmp_code = *pcode;
3184 switch (cmp_code)
3185 {
3186 case UNORDERED:
3187 cmp_code = EQ;
3188 res_code = LT;
3189 break;
3190 case ORDERED:
3191 cmp_code = EQ;
3192 res_code = GE;
3193 break;
3194 case NE:
3195 res_code = NE;
3196 break;
3197 case EQ:
3198 case LT:
3199 case GT:
3200 case LE:
3201 case GE:
3202 res_code = GT;
3203 break;
3204 default:
3205 gcc_unreachable ();
3206 }
3207 *pcode = res_code;
3208
3209 func = alpha_lookup_xfloating_lib_func (cmp_code);
5495cc55
RH
3210
3211 operands[0] = op0;
3212 operands[1] = op1;
3213 out = gen_reg_rtx (DImode);
3214
70cc1536 3215 /* What's actually returned is -1,0,1, not a proper boolean value. */
57b29ca6
UB
3216 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3217 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
32891ff6 3218 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
5495cc55
RH
3219
3220 return out;
3221}
3222
3223/* Emit an X_floating library function call for a conversion. */
3224
3225void
64bb2e1d 3226alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
5495cc55
RH
3227{
3228 int noperands = 1, mode;
c77f46c6 3229 rtx out_operands[2];
75959f0a 3230 rtx func;
64bb2e1d
RH
3231 enum rtx_code code = orig_code;
3232
3233 if (code == UNSIGNED_FIX)
3234 code = FIX;
5495cc55
RH
3235
3236 func = alpha_lookup_xfloating_lib_func (code);
3237
c77f46c6
AO
3238 out_operands[0] = operands[1];
3239
5495cc55
RH
3240 switch (code)
3241 {
3242 case FIX:
3243 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
c77f46c6 3244 out_operands[1] = GEN_INT (mode);
d6cde845 3245 noperands = 2;
5495cc55
RH
3246 break;
3247 case FLOAT_TRUNCATE:
3248 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
c77f46c6 3249 out_operands[1] = GEN_INT (mode);
d6cde845 3250 noperands = 2;
5495cc55
RH
3251 break;
3252 default:
3253 break;
3254 }
3255
c77f46c6 3256 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
64bb2e1d
RH
3257 gen_rtx_fmt_e (orig_code,
3258 GET_MODE (operands[0]),
5495cc55
RH
3259 operands[1]));
3260}
628d74de 3261
b2f39494
EB
3262/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3263 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3264 guarantee that the sequence
3265 set (OP[0] OP[2])
3266 set (OP[1] OP[3])
3267 is valid. Naturally, output operand ordering is little-endian.
3268 This is used by *movtf_internal and *movti_internal. */
3269
628d74de 3270void
ef4bddc2 3271alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
b2f39494 3272 bool fixup_overlap)
628d74de 3273{
56daab84 3274 switch (GET_CODE (operands[1]))
628d74de 3275 {
56daab84 3276 case REG:
628d74de
RH
3277 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3278 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
56daab84
NS
3279 break;
3280
3281 case MEM:
f4ef873c
RK
3282 operands[3] = adjust_address (operands[1], DImode, 8);
3283 operands[2] = adjust_address (operands[1], DImode, 0);
56daab84
NS
3284 break;
3285
c799797d 3286 CASE_CONST_SCALAR_INT:
65ab381c 3287 case CONST_DOUBLE:
b2f39494 3288 gcc_assert (operands[1] == CONST0_RTX (mode));
56daab84
NS
3289 operands[2] = operands[3] = const0_rtx;
3290 break;
3291
3292 default:
3293 gcc_unreachable ();
628d74de 3294 }
628d74de 3295
56daab84 3296 switch (GET_CODE (operands[0]))
628d74de 3297 {
56daab84 3298 case REG:
628d74de
RH
3299 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3300 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
56daab84
NS
3301 break;
3302
3303 case MEM:
f4ef873c
RK
3304 operands[1] = adjust_address (operands[0], DImode, 8);
3305 operands[0] = adjust_address (operands[0], DImode, 0);
56daab84
NS
3306 break;
3307
3308 default:
3309 gcc_unreachable ();
628d74de 3310 }
b2f39494
EB
3311
3312 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3313 {
3f3f5af0
UB
3314 std::swap (operands[0], operands[1]);
3315 std::swap (operands[2], operands[3]);
b2f39494 3316 }
628d74de 3317}
f940c352 3318
f676971a
EC
3319/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3320 op2 is a register containing the sign bit, operation is the
f940c352
RH
3321 logical operation to be performed. */
3322
3323void
a5c24926 3324alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
f940c352
RH
3325{
3326 rtx high_bit = operands[2];
3327 rtx scratch;
3328 int move;
3329
b2f39494 3330 alpha_split_tmode_pair (operands, TFmode, false);
f940c352 3331
825dda42 3332 /* Detect three flavors of operand overlap. */
f940c352
RH
3333 move = 1;
3334 if (rtx_equal_p (operands[0], operands[2]))
3335 move = 0;
3336 else if (rtx_equal_p (operands[1], operands[2]))
3337 {
3338 if (rtx_equal_p (operands[0], high_bit))
3339 move = 2;
3340 else
3341 move = -1;
3342 }
3343
3344 if (move < 0)
3345 emit_move_insn (operands[0], operands[2]);
3346
3347 /* ??? If the destination overlaps both source tf and high_bit, then
3348 assume source tf is dead in its entirety and use the other half
3349 for a scratch register. Otherwise "scratch" is just the proper
3350 destination register. */
3351 scratch = operands[move < 2 ? 1 : 3];
3352
3353 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3354
3355 if (move > 0)
3356 {
3357 emit_move_insn (operands[0], operands[2]);
3358 if (move > 1)
3359 emit_move_insn (operands[1], scratch);
3360 }
3361}
5495cc55 3362\f
6c174fc0
RH
3363/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3364 unaligned data:
3365
3366 unsigned: signed:
3367 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3368 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3369 lda r3,X(r11) lda r3,X+2(r11)
3370 extwl r1,r3,r1 extql r1,r3,r1
3371 extwh r2,r3,r2 extqh r2,r3,r2
3372 or r1.r2.r1 or r1,r2,r1
3373 sra r1,48,r1
3374
3375 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3376 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3377 lda r3,X(r11) lda r3,X(r11)
3378 extll r1,r3,r1 extll r1,r3,r1
3379 extlh r2,r3,r2 extlh r2,r3,r2
3380 or r1.r2.r1 addl r1,r2,r1
3381
3382 quad: ldq_u r1,X(r11)
3383 ldq_u r2,X+7(r11)
3384 lda r3,X(r11)
3385 extql r1,r3,r1
3386 extqh r2,r3,r2
3387 or r1.r2.r1
3388*/
3389
3390void
a5c24926
RH
3391alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3392 HOST_WIDE_INT ofs, int sign)
6c174fc0 3393{
1eb356b9 3394 rtx meml, memh, addr, extl, exth, tmp, mema;
ef4bddc2 3395 machine_mode mode;
6c174fc0 3396
9f7d06d6
RH
3397 if (TARGET_BWX && size == 2)
3398 {
34642493
RH
3399 meml = adjust_address (mem, QImode, ofs);
3400 memh = adjust_address (mem, QImode, ofs+1);
9f7d06d6
RH
3401 extl = gen_reg_rtx (DImode);
3402 exth = gen_reg_rtx (DImode);
3403 emit_insn (gen_zero_extendqidi2 (extl, meml));
3404 emit_insn (gen_zero_extendqidi2 (exth, memh));
3405 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3406 NULL, 1, OPTAB_LIB_WIDEN);
3407 addr = expand_simple_binop (DImode, IOR, extl, exth,
3408 NULL, 1, OPTAB_LIB_WIDEN);
3409
3410 if (sign && GET_MODE (tgt) != HImode)
3411 {
3412 addr = gen_lowpart (HImode, addr);
3413 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3414 }
3415 else
3416 {
3417 if (GET_MODE (tgt) != DImode)
3418 addr = gen_lowpart (GET_MODE (tgt), addr);
3419 emit_move_insn (tgt, addr);
3420 }
3421 return;
3422 }
3423
6c174fc0
RH
3424 meml = gen_reg_rtx (DImode);
3425 memh = gen_reg_rtx (DImode);
3426 addr = gen_reg_rtx (DImode);
3427 extl = gen_reg_rtx (DImode);
3428 exth = gen_reg_rtx (DImode);
3429
1eb356b9
RH
3430 mema = XEXP (mem, 0);
3431 if (GET_CODE (mema) == LO_SUM)
3432 mema = force_reg (Pmode, mema);
3433
e01acbb1 3434 /* AND addresses cannot be in any alias set, since they may implicitly
f676971a 3435 alias surrounding code. Ideally we'd have some alias set that
e01acbb1
RH
3436 covered all types except those with alignment 8 or higher. */
3437
3438 tmp = change_address (mem, DImode,
f676971a 3439 gen_rtx_AND (DImode,
0a81f074 3440 plus_constant (DImode, mema, ofs),
e01acbb1 3441 GEN_INT (-8)));
ba4828e0 3442 set_mem_alias_set (tmp, 0);
e01acbb1
RH
3443 emit_move_insn (meml, tmp);
3444
3445 tmp = change_address (mem, DImode,
f676971a 3446 gen_rtx_AND (DImode,
0a81f074
RS
3447 plus_constant (DImode, mema,
3448 ofs + size - 1),
e01acbb1 3449 GEN_INT (-8)));
ba4828e0 3450 set_mem_alias_set (tmp, 0);
e01acbb1 3451 emit_move_insn (memh, tmp);
6c174fc0 3452
0b2a7367 3453 if (sign && size == 2)
6c174fc0 3454 {
0a81f074 3455 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
6c174fc0 3456
e533b2a4 3457 emit_insn (gen_extql (extl, meml, addr));
0b2a7367 3458 emit_insn (gen_extqh (exth, memh, addr));
6c174fc0 3459
1a7cb241
JW
3460 /* We must use tgt here for the target. Alpha-vms port fails if we use
3461 addr for the target, because addr is marked as a pointer and combine
a50aa827 3462 knows that pointers are always sign-extended 32-bit values. */
1a7cb241 3463 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
f676971a 3464 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
4208b40f 3465 addr, 1, OPTAB_WIDEN);
6c174fc0 3466 }
4208b40f 3467 else
6c174fc0 3468 {
0a81f074 3469 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
0b2a7367
RH
3470 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3471 switch ((int) size)
30102605 3472 {
0b2a7367
RH
3473 case 2:
3474 emit_insn (gen_extwh (exth, memh, addr));
3475 mode = HImode;
3476 break;
0b2a7367
RH
3477 case 4:
3478 emit_insn (gen_extlh (exth, memh, addr));
3479 mode = SImode;
3480 break;
0b2a7367
RH
3481 case 8:
3482 emit_insn (gen_extqh (exth, memh, addr));
3483 mode = DImode;
3484 break;
0b2a7367
RH
3485 default:
3486 gcc_unreachable ();
4208b40f
RH
3487 }
3488
3489 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3490 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3491 sign, OPTAB_WIDEN);
6c174fc0
RH
3492 }
3493
4208b40f 3494 if (addr != tgt)
9f7d06d6 3495 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
6c174fc0
RH
3496}
3497
3498/* Similarly, use ins and msk instructions to perform unaligned stores. */
3499
3500void
a5c24926
RH
3501alpha_expand_unaligned_store (rtx dst, rtx src,
3502 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
6c174fc0 3503{
1eb356b9 3504 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
f676971a 3505
9f7d06d6
RH
3506 if (TARGET_BWX && size == 2)
3507 {
3508 if (src != const0_rtx)
3509 {
3510 dstl = gen_lowpart (QImode, src);
3511 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3512 NULL, 1, OPTAB_LIB_WIDEN);
3513 dsth = gen_lowpart (QImode, dsth);
3514 }
3515 else
3516 dstl = dsth = const0_rtx;
3517
34642493
RH
3518 meml = adjust_address (dst, QImode, ofs);
3519 memh = adjust_address (dst, QImode, ofs+1);
9f7d06d6
RH
3520
3521 emit_move_insn (meml, dstl);
3522 emit_move_insn (memh, dsth);
3523 return;
3524 }
3525
6c174fc0
RH
3526 dstl = gen_reg_rtx (DImode);
3527 dsth = gen_reg_rtx (DImode);
3528 insl = gen_reg_rtx (DImode);
3529 insh = gen_reg_rtx (DImode);
3530
1eb356b9
RH
3531 dsta = XEXP (dst, 0);
3532 if (GET_CODE (dsta) == LO_SUM)
3533 dsta = force_reg (Pmode, dsta);
3534
e01acbb1 3535 /* AND addresses cannot be in any alias set, since they may implicitly
f676971a 3536 alias surrounding code. Ideally we'd have some alias set that
e01acbb1
RH
3537 covered all types except those with alignment 8 or higher. */
3538
6c174fc0 3539 meml = change_address (dst, DImode,
f676971a 3540 gen_rtx_AND (DImode,
0a81f074 3541 plus_constant (DImode, dsta, ofs),
38a448ca 3542 GEN_INT (-8)));
ba4828e0 3543 set_mem_alias_set (meml, 0);
e01acbb1 3544
6c174fc0 3545 memh = change_address (dst, DImode,
f676971a 3546 gen_rtx_AND (DImode,
0a81f074
RS
3547 plus_constant (DImode, dsta,
3548 ofs + size - 1),
38a448ca 3549 GEN_INT (-8)));
ba4828e0 3550 set_mem_alias_set (memh, 0);
6c174fc0
RH
3551
3552 emit_move_insn (dsth, memh);
3553 emit_move_insn (dstl, meml);
30102605 3554
0a81f074 3555 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
0b2a7367
RH
3556
3557 if (src != CONST0_RTX (GET_MODE (src)))
3558 {
3559 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3560 GEN_INT (size*8), addr));
6c174fc0 3561
c8d8ed65 3562 switch ((int) size)
6c174fc0
RH
3563 {
3564 case 2:
0b2a7367 3565 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
6c174fc0
RH
3566 break;
3567 case 4:
0b2a7367
RH
3568 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3569 break;
c4b50f1a 3570 case 8:
0b2a7367 3571 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
6c174fc0 3572 break;
0b2a7367
RH
3573 default:
3574 gcc_unreachable ();
6c174fc0
RH
3575 }
3576 }
30102605 3577
0b2a7367 3578 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
30102605 3579
0b2a7367
RH
3580 switch ((int) size)
3581 {
3582 case 2:
e533b2a4 3583 emit_insn (gen_mskwl (dstl, dstl, addr));
0b2a7367
RH
3584 break;
3585 case 4:
e533b2a4
RH
3586 emit_insn (gen_mskll (dstl, dstl, addr));
3587 break;
0b2a7367 3588 case 8:
e533b2a4 3589 emit_insn (gen_mskql (dstl, dstl, addr));
0b2a7367
RH
3590 break;
3591 default:
3592 gcc_unreachable ();
6c174fc0
RH
3593 }
3594
e2ea71ea 3595 if (src != CONST0_RTX (GET_MODE (src)))
6c174fc0 3596 {
4208b40f
RH
3597 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3598 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
6c174fc0 3599 }
f676971a 3600
0b2a7367
RH
3601 /* Must store high before low for degenerate case of aligned. */
3602 emit_move_insn (memh, dsth);
3603 emit_move_insn (meml, dstl);
6c174fc0
RH
3604}
3605
4208b40f
RH
3606/* The block move code tries to maximize speed by separating loads and
3607 stores at the expense of register pressure: we load all of the data
3608 before we store it back out. There are two secondary effects worth
3609 mentioning, that this speeds copying to/from aligned and unaligned
3610 buffers, and that it makes the code significantly easier to write. */
6c174fc0 3611
4208b40f
RH
3612#define MAX_MOVE_WORDS 8
3613
3614/* Load an integral number of consecutive unaligned quadwords. */
6c174fc0
RH
3615
3616static void
a5c24926
RH
3617alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3618 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
6c174fc0
RH
3619{
3620 rtx const im8 = GEN_INT (-8);
4208b40f 3621 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1eb356b9 3622 rtx sreg, areg, tmp, smema;
6c174fc0
RH
3623 HOST_WIDE_INT i;
3624
1eb356b9
RH
3625 smema = XEXP (smem, 0);
3626 if (GET_CODE (smema) == LO_SUM)
3627 smema = force_reg (Pmode, smema);
3628
6c174fc0
RH
3629 /* Generate all the tmp registers we need. */
3630 for (i = 0; i < words; ++i)
4208b40f
RH
3631 {
3632 data_regs[i] = out_regs[i];
3633 ext_tmps[i] = gen_reg_rtx (DImode);
3634 }
3635 data_regs[words] = gen_reg_rtx (DImode);
3636
3637 if (ofs != 0)
f4ef873c 3638 smem = adjust_address (smem, GET_MODE (smem), ofs);
f676971a 3639
6c174fc0
RH
3640 /* Load up all of the source data. */
3641 for (i = 0; i < words; ++i)
3642 {
e01acbb1
RH
3643 tmp = change_address (smem, DImode,
3644 gen_rtx_AND (DImode,
0a81f074 3645 plus_constant (DImode, smema, 8*i),
e01acbb1 3646 im8));
ba4828e0 3647 set_mem_alias_set (tmp, 0);
e01acbb1 3648 emit_move_insn (data_regs[i], tmp);
6c174fc0 3649 }
e01acbb1
RH
3650
3651 tmp = change_address (smem, DImode,
3652 gen_rtx_AND (DImode,
0a81f074
RS
3653 plus_constant (DImode, smema,
3654 8*words - 1),
e01acbb1 3655 im8));
ba4828e0 3656 set_mem_alias_set (tmp, 0);
e01acbb1 3657 emit_move_insn (data_regs[words], tmp);
6c174fc0
RH
3658
3659 /* Extract the half-word fragments. Unfortunately DEC decided to make
f676971a 3660 extxh with offset zero a noop instead of zeroing the register, so
6c174fc0
RH
3661 we must take care of that edge condition ourselves with cmov. */
3662
1eb356b9 3663 sreg = copy_addr_to_reg (smema);
f676971a 3664 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
4208b40f 3665 1, OPTAB_WIDEN);
6c174fc0
RH
3666 for (i = 0; i < words; ++i)
3667 {
e533b2a4 3668 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
0b2a7367 3669 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
f7df4a84 3670 emit_insn (gen_rtx_SET (ext_tmps[i],
38a448ca 3671 gen_rtx_IF_THEN_ELSE (DImode,
4208b40f
RH
3672 gen_rtx_EQ (DImode, areg,
3673 const0_rtx),
38a448ca 3674 const0_rtx, ext_tmps[i])));
6c174fc0
RH
3675 }
3676
3677 /* Merge the half-words into whole words. */
3678 for (i = 0; i < words; ++i)
3679 {
4208b40f
RH
3680 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3681 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
6c174fc0
RH
3682 }
3683}
3684
3685/* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3686 may be NULL to store zeros. */
3687
3688static void
a5c24926
RH
3689alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3690 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
6c174fc0
RH
3691{
3692 rtx const im8 = GEN_INT (-8);
6c174fc0 3693 rtx ins_tmps[MAX_MOVE_WORDS];
4208b40f 3694 rtx st_tmp_1, st_tmp_2, dreg;
1eb356b9 3695 rtx st_addr_1, st_addr_2, dmema;
6c174fc0
RH
3696 HOST_WIDE_INT i;
3697
1eb356b9
RH
3698 dmema = XEXP (dmem, 0);
3699 if (GET_CODE (dmema) == LO_SUM)
3700 dmema = force_reg (Pmode, dmema);
3701
6c174fc0
RH
3702 /* Generate all the tmp registers we need. */
3703 if (data_regs != NULL)
3704 for (i = 0; i < words; ++i)
3705 ins_tmps[i] = gen_reg_rtx(DImode);
3706 st_tmp_1 = gen_reg_rtx(DImode);
3707 st_tmp_2 = gen_reg_rtx(DImode);
f676971a 3708
4208b40f 3709 if (ofs != 0)
f4ef873c 3710 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
4208b40f
RH
3711
3712 st_addr_2 = change_address (dmem, DImode,
38a448ca 3713 gen_rtx_AND (DImode,
0a81f074
RS
3714 plus_constant (DImode, dmema,
3715 words*8 - 1),
3716 im8));
ba4828e0 3717 set_mem_alias_set (st_addr_2, 0);
e01acbb1 3718
4208b40f 3719 st_addr_1 = change_address (dmem, DImode,
1eb356b9 3720 gen_rtx_AND (DImode, dmema, im8));
ba4828e0 3721 set_mem_alias_set (st_addr_1, 0);
6c174fc0
RH
3722
3723 /* Load up the destination end bits. */
3724 emit_move_insn (st_tmp_2, st_addr_2);
3725 emit_move_insn (st_tmp_1, st_addr_1);
3726
3727 /* Shift the input data into place. */
1eb356b9 3728 dreg = copy_addr_to_reg (dmema);
6c174fc0
RH
3729 if (data_regs != NULL)
3730 {
3731 for (i = words-1; i >= 0; --i)
3732 {
e533b2a4 3733 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
0b2a7367 3734 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
6c174fc0 3735 }
6c174fc0
RH
3736 for (i = words-1; i > 0; --i)
3737 {
4208b40f
RH
3738 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3739 ins_tmps[i-1], ins_tmps[i-1], 1,
3740 OPTAB_WIDEN);
6c174fc0
RH
3741 }
3742 }
3743
3744 /* Split and merge the ends with the destination data. */
e533b2a4
RH
3745 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3746 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
6c174fc0
RH
3747
3748 if (data_regs != NULL)
3749 {
4208b40f
RH
3750 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3751 st_tmp_2, 1, OPTAB_WIDEN);
3752 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3753 st_tmp_1, 1, OPTAB_WIDEN);
6c174fc0
RH
3754 }
3755
3756 /* Store it all. */
0b2a7367 3757 emit_move_insn (st_addr_2, st_tmp_2);
6c174fc0
RH
3758 for (i = words-1; i > 0; --i)
3759 {
e01acbb1
RH
3760 rtx tmp = change_address (dmem, DImode,
3761 gen_rtx_AND (DImode,
0a81f074
RS
3762 plus_constant (DImode,
3763 dmema, i*8),
e01acbb1 3764 im8));
ba4828e0 3765 set_mem_alias_set (tmp, 0);
e01acbb1 3766 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
6c174fc0 3767 }
0b2a7367 3768 emit_move_insn (st_addr_1, st_tmp_1);
6c174fc0
RH
3769}
3770
3771
3772/* Expand string/block move operations.
3773
3774 operands[0] is the pointer to the destination.
3775 operands[1] is the pointer to the source.
3776 operands[2] is the number of bytes to move.
3777 operands[3] is the alignment. */
3778
3779int
a5c24926 3780alpha_expand_block_move (rtx operands[])
6c174fc0
RH
3781{
3782 rtx bytes_rtx = operands[2];
3783 rtx align_rtx = operands[3];
f35cba21 3784 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
c17f08e1
RH
3785 HOST_WIDE_INT bytes = orig_bytes;
3786 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3787 HOST_WIDE_INT dst_align = src_align;
bdb429a5
RK
3788 rtx orig_src = operands[1];
3789 rtx orig_dst = operands[0];
3790 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
4208b40f 3791 rtx tmp;
1eb356b9 3792 unsigned int i, words, ofs, nregs = 0;
f676971a 3793
bdb429a5 3794 if (orig_bytes <= 0)
6c174fc0 3795 return 1;
c17f08e1 3796 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
6c174fc0
RH
3797 return 0;
3798
4208b40f
RH
3799 /* Look for additional alignment information from recorded register info. */
3800
3801 tmp = XEXP (orig_src, 0);
7d83f4f5 3802 if (REG_P (tmp))
bdb429a5 3803 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4208b40f 3804 else if (GET_CODE (tmp) == PLUS
7d83f4f5
UB
3805 && REG_P (XEXP (tmp, 0))
3806 && CONST_INT_P (XEXP (tmp, 1)))
4208b40f 3807 {
bdb429a5
RK
3808 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3809 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4208b40f
RH
3810
3811 if (a > src_align)
3812 {
bdb429a5
RK
3813 if (a >= 64 && c % 8 == 0)
3814 src_align = 64;
3815 else if (a >= 32 && c % 4 == 0)
3816 src_align = 32;
3817 else if (a >= 16 && c % 2 == 0)
3818 src_align = 16;
4208b40f
RH
3819 }
3820 }
f676971a 3821
4208b40f 3822 tmp = XEXP (orig_dst, 0);
7d83f4f5 3823 if (REG_P (tmp))
bdb429a5 3824 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4208b40f 3825 else if (GET_CODE (tmp) == PLUS
7d83f4f5
UB
3826 && REG_P (XEXP (tmp, 0))
3827 && CONST_INT_P (XEXP (tmp, 1)))
4208b40f 3828 {
bdb429a5
RK
3829 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3830 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4208b40f
RH
3831
3832 if (a > dst_align)
3833 {
bdb429a5
RK
3834 if (a >= 64 && c % 8 == 0)
3835 dst_align = 64;
3836 else if (a >= 32 && c % 4 == 0)
3837 dst_align = 32;
3838 else if (a >= 16 && c % 2 == 0)
3839 dst_align = 16;
4208b40f
RH
3840 }
3841 }
3842
4208b40f 3843 ofs = 0;
bdb429a5 3844 if (src_align >= 64 && bytes >= 8)
6c174fc0
RH
3845 {
3846 words = bytes / 8;
3847
6c174fc0 3848 for (i = 0; i < words; ++i)
5197bd50 3849 data_regs[nregs + i] = gen_reg_rtx (DImode);
6c174fc0 3850
6c174fc0 3851 for (i = 0; i < words; ++i)
bdb429a5 3852 emit_move_insn (data_regs[nregs + i],
f4ef873c 3853 adjust_address (orig_src, DImode, ofs + i * 8));
6c174fc0 3854
4208b40f 3855 nregs += words;
6c174fc0 3856 bytes -= words * 8;
cd36edbd 3857 ofs += words * 8;
6c174fc0 3858 }
bdb429a5
RK
3859
3860 if (src_align >= 32 && bytes >= 4)
6c174fc0
RH
3861 {
3862 words = bytes / 4;
3863
6c174fc0 3864 for (i = 0; i < words; ++i)
5197bd50 3865 data_regs[nregs + i] = gen_reg_rtx (SImode);
6c174fc0 3866
6c174fc0 3867 for (i = 0; i < words; ++i)
bdb429a5 3868 emit_move_insn (data_regs[nregs + i],
792760b9 3869 adjust_address (orig_src, SImode, ofs + i * 4));
6c174fc0 3870
4208b40f 3871 nregs += words;
6c174fc0 3872 bytes -= words * 4;
cd36edbd 3873 ofs += words * 4;
6c174fc0 3874 }
bdb429a5 3875
c17f08e1 3876 if (bytes >= 8)
6c174fc0
RH
3877 {
3878 words = bytes / 8;
3879
6c174fc0 3880 for (i = 0; i < words+1; ++i)
5197bd50 3881 data_regs[nregs + i] = gen_reg_rtx (DImode);
6c174fc0 3882
c576fce7
RH
3883 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3884 words, ofs);
6c174fc0 3885
4208b40f 3886 nregs += words;
6c174fc0 3887 bytes -= words * 8;
cd36edbd 3888 ofs += words * 8;
6c174fc0 3889 }
bdb429a5 3890
bdb429a5 3891 if (! TARGET_BWX && bytes >= 4)
6c174fc0 3892 {
4208b40f 3893 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
6c174fc0 3894 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
6c174fc0
RH
3895 bytes -= 4;
3896 ofs += 4;
3897 }
bdb429a5 3898
6c174fc0
RH
3899 if (bytes >= 2)
3900 {
bdb429a5 3901 if (src_align >= 16)
6c174fc0
RH
3902 {
3903 do {
4208b40f 3904 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
f4ef873c 3905 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
6c174fc0
RH
3906 bytes -= 2;
3907 ofs += 2;
3908 } while (bytes >= 2);
3909 }
bdb429a5 3910 else if (! TARGET_BWX)
6c174fc0 3911 {
4208b40f 3912 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
6c174fc0 3913 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
6c174fc0
RH
3914 bytes -= 2;
3915 ofs += 2;
3916 }
3917 }
bdb429a5 3918
6c174fc0
RH
3919 while (bytes > 0)
3920 {
4208b40f 3921 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
f4ef873c 3922 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
6c174fc0
RH
3923 bytes -= 1;
3924 ofs += 1;
3925 }
bdb429a5 3926
56daab84 3927 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
4208b40f 3928
bdb429a5 3929 /* Now save it back out again. */
4208b40f
RH
3930
3931 i = 0, ofs = 0;
3932
4208b40f 3933 /* Write out the data in whatever chunks reading the source allowed. */
bdb429a5 3934 if (dst_align >= 64)
4208b40f
RH
3935 {
3936 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3937 {
f4ef873c 3938 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4208b40f
RH
3939 data_regs[i]);
3940 ofs += 8;
3941 i++;
3942 }
3943 }
bdb429a5
RK
3944
3945 if (dst_align >= 32)
4208b40f
RH
3946 {
3947 /* If the source has remaining DImode regs, write them out in
3948 two pieces. */
3949 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3950 {
3951 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3952 NULL_RTX, 1, OPTAB_WIDEN);
3953
f4ef873c 3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4208b40f 3955 gen_lowpart (SImode, data_regs[i]));
f4ef873c 3956 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4208b40f
RH
3957 gen_lowpart (SImode, tmp));
3958 ofs += 8;
3959 i++;
3960 }
3961
3962 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3963 {
f4ef873c 3964 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4208b40f
RH
3965 data_regs[i]);
3966 ofs += 4;
3967 i++;
3968 }
3969 }
bdb429a5 3970
4208b40f
RH
3971 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3972 {
3973 /* Write out a remaining block of words using unaligned methods. */
3974
bdb429a5
RK
3975 for (words = 1; i + words < nregs; words++)
3976 if (GET_MODE (data_regs[i + words]) != DImode)
4208b40f
RH
3977 break;
3978
3979 if (words == 1)
3980 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3981 else
bdb429a5
RK
3982 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3983 words, ofs);
f676971a 3984
4208b40f
RH
3985 i += words;
3986 ofs += words * 8;
3987 }
3988
3989 /* Due to the above, this won't be aligned. */
3990 /* ??? If we have more than one of these, consider constructing full
3991 words in registers and using alpha_expand_unaligned_store_words. */
3992 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3993 {
3994 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3995 ofs += 4;
3996 i++;
3997 }
3998
bdb429a5 3999 if (dst_align >= 16)
4208b40f
RH
4000 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4001 {
f4ef873c 4002 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4208b40f
RH
4003 i++;
4004 ofs += 2;
4005 }
4006 else
4007 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4008 {
4009 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4010 i++;
4011 ofs += 2;
4012 }
bdb429a5 4013
56daab84
NS
4014 /* The remainder must be byte copies. */
4015 while (i < nregs)
4208b40f 4016 {
56daab84 4017 gcc_assert (GET_MODE (data_regs[i]) == QImode);
f4ef873c 4018 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4208b40f
RH
4019 i++;
4020 ofs += 1;
4021 }
bdb429a5 4022
6c174fc0
RH
4023 return 1;
4024}
4025
4026int
a5c24926 4027alpha_expand_block_clear (rtx operands[])
6c174fc0
RH
4028{
4029 rtx bytes_rtx = operands[1];
57e84f18 4030 rtx align_rtx = operands[3];
bdb429a5 4031 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
c17f08e1
RH
4032 HOST_WIDE_INT bytes = orig_bytes;
4033 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4034 HOST_WIDE_INT alignofs = 0;
bdb429a5 4035 rtx orig_dst = operands[0];
4208b40f 4036 rtx tmp;
c17f08e1 4037 int i, words, ofs = 0;
f676971a 4038
bdb429a5 4039 if (orig_bytes <= 0)
6c174fc0 4040 return 1;
c17f08e1 4041 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
6c174fc0
RH
4042 return 0;
4043
4208b40f 4044 /* Look for stricter alignment. */
4208b40f 4045 tmp = XEXP (orig_dst, 0);
7d83f4f5 4046 if (REG_P (tmp))
bdb429a5 4047 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4208b40f 4048 else if (GET_CODE (tmp) == PLUS
7d83f4f5
UB
4049 && REG_P (XEXP (tmp, 0))
4050 && CONST_INT_P (XEXP (tmp, 1)))
4208b40f 4051 {
c17f08e1
RH
4052 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4053 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4208b40f
RH
4054
4055 if (a > align)
4056 {
c17f08e1
RH
4057 if (a >= 64)
4058 align = a, alignofs = 8 - c % 8;
4059 else if (a >= 32)
4060 align = a, alignofs = 4 - c % 4;
4061 else if (a >= 16)
4062 align = a, alignofs = 2 - c % 2;
4208b40f
RH
4063 }
4064 }
4065
c17f08e1
RH
4066 /* Handle an unaligned prefix first. */
4067
4068 if (alignofs > 0)
4069 {
c17f08e1
RH
4070 /* Given that alignofs is bounded by align, the only time BWX could
4071 generate three stores is for a 7 byte fill. Prefer two individual
4072 stores over a load/mask/store sequence. */
4073 if ((!TARGET_BWX || alignofs == 7)
4074 && align >= 32
4075 && !(alignofs == 4 && bytes >= 4))
4076 {
ef4bddc2 4077 machine_mode mode = (align >= 64 ? DImode : SImode);
c17f08e1
RH
4078 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4079 rtx mem, tmp;
4080 HOST_WIDE_INT mask;
4081
f4ef873c 4082 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
ba4828e0 4083 set_mem_alias_set (mem, 0);
c17f08e1 4084
10d48e7c 4085 mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8));
c17f08e1
RH
4086 if (bytes < alignofs)
4087 {
10d48e7c 4088 mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8);
c17f08e1
RH
4089 ofs += bytes;
4090 bytes = 0;
4091 }
4092 else
4093 {
4094 bytes -= alignofs;
4095 ofs += alignofs;
4096 }
4097 alignofs = 0;
4098
4099 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4100 NULL_RTX, 1, OPTAB_WIDEN);
4101
4102 emit_move_insn (mem, tmp);
4103 }
c17f08e1
RH
4104
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4106 {
f4ef873c 4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
c17f08e1
RH
4108 bytes -= 1;
4109 ofs += 1;
4110 alignofs -= 1;
4111 }
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4113 {
f4ef873c 4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
c17f08e1
RH
4115 bytes -= 2;
4116 ofs += 2;
4117 alignofs -= 2;
4118 }
4119 if (alignofs == 4 && bytes >= 4)
4120 {
f4ef873c 4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
c17f08e1
RH
4122 bytes -= 4;
4123 ofs += 4;
4124 alignofs = 0;
4125 }
4126
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4129 if (alignofs > 0)
4130 {
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4133 }
4134 }
4135
4136 /* Handle a block of contiguous long-words. */
6c174fc0 4137
bdb429a5 4138 if (align >= 64 && bytes >= 8)
6c174fc0
RH
4139 {
4140 words = bytes / 8;
4141
4142 for (i = 0; i < words; ++i)
1eb356b9 4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
f4ef873c 4144 const0_rtx);
6c174fc0
RH
4145
4146 bytes -= words * 8;
cd36edbd 4147 ofs += words * 8;
6c174fc0 4148 }
bdb429a5 4149
c17f08e1
RH
4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4152
4153 if (align >= 32 && bytes > 16)
4154 {
1eb356b9
RH
4155 rtx orig_dsta;
4156
f4ef873c 4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
c17f08e1
RH
4158 bytes -= 4;
4159 ofs += 4;
4160
1eb356b9
RH
4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4164
c17f08e1
RH
4165 words = bytes / 8;
4166 for (i = 0; i < words; ++i)
4167 {
ba4828e0
RK
4168 rtx mem
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
0a81f074
RS
4171 plus_constant (DImode, orig_dsta,
4172 ofs + i*8),
ba4828e0
RK
4173 GEN_INT (-8)));
4174 set_mem_alias_set (mem, 0);
c17f08e1
RH
4175 emit_move_insn (mem, const0_rtx);
4176 }
4177
4178 /* Depending on the alignment, the first stq_u may have overlapped
4179 with the initial stl, which means that the last stq_u didn't
4180 write as much as it would appear. Leave those questionable bytes
4181 unaccounted for. */
4182 bytes -= words * 8 - 4;
4183 ofs += words * 8 - 4;
4184 }
4185
4186 /* Handle a smaller block of aligned words. */
4187
4188 if ((align >= 64 && bytes == 4)
4189 || (align == 32 && bytes >= 4))
6c174fc0
RH
4190 {
4191 words = bytes / 4;
4192
4193 for (i = 0; i < words; ++i)
f4ef873c 4194 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
bdb429a5 4195 const0_rtx);
6c174fc0
RH
4196
4197 bytes -= words * 4;
cd36edbd 4198 ofs += words * 4;
6c174fc0 4199 }
bdb429a5 4200
c17f08e1
RH
4201 /* An unaligned block uses stq_u stores for as many as possible. */
4202
4203 if (bytes >= 8)
6c174fc0
RH
4204 {
4205 words = bytes / 8;
4206
cd36edbd 4207 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
6c174fc0
RH
4208
4209 bytes -= words * 8;
cd36edbd 4210 ofs += words * 8;
6c174fc0
RH
4211 }
4212
c17f08e1 4213 /* Next clean up any trailing pieces. */
6c174fc0 4214
c17f08e1
RH
4215 /* Count the number of bits in BYTES for which aligned stores could
4216 be emitted. */
4217 words = 0;
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4219 if (bytes & i)
4220 words += 1;
4221
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4225 {
4226 if (align >= 64)
4227 {
4228 rtx mem, tmp;
4229 HOST_WIDE_INT mask;
4230
f4ef873c 4231 mem = adjust_address (orig_dst, DImode, ofs);
ba4828e0 4232 set_mem_alias_set (mem, 0);
c17f08e1 4233
10d48e7c 4234 mask = HOST_WIDE_INT_M1U << (bytes * 8);
c17f08e1
RH
4235
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4238
4239 emit_move_insn (mem, tmp);
4240 return 1;
4241 }
4242 else if (align >= 32 && bytes < 4)
4243 {
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4246
f4ef873c 4247 mem = adjust_address (orig_dst, SImode, ofs);
ba4828e0 4248 set_mem_alias_set (mem, 0);
c17f08e1 4249
10d48e7c 4250 mask = HOST_WIDE_INT_M1U << (bytes * 8);
c17f08e1
RH
4251
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4254
4255 emit_move_insn (mem, tmp);
4256 return 1;
4257 }
6c174fc0 4258 }
bdb429a5 4259
6c174fc0
RH
4260 if (!TARGET_BWX && bytes >= 4)
4261 {
4262 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4263 bytes -= 4;
4264 ofs += 4;
4265 }
bdb429a5 4266
6c174fc0
RH
4267 if (bytes >= 2)
4268 {
bdb429a5 4269 if (align >= 16)
6c174fc0
RH
4270 {
4271 do {
f4ef873c 4272 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
6c174fc0
RH
4273 const0_rtx);
4274 bytes -= 2;
4275 ofs += 2;
4276 } while (bytes >= 2);
4277 }
bdb429a5 4278 else if (! TARGET_BWX)
6c174fc0
RH
4279 {
4280 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4281 bytes -= 2;
4282 ofs += 2;
4283 }
4284 }
bdb429a5 4285
6c174fc0
RH
4286 while (bytes > 0)
4287 {
f4ef873c 4288 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
6c174fc0
RH
4289 bytes -= 1;
4290 ofs += 1;
4291 }
4292
4293 return 1;
4294}
6d8fd7bb
RH
4295
4296/* Returns a mask so that zap(x, value) == x & mask. */
4297
4298rtx
a5c24926 4299alpha_expand_zap_mask (HOST_WIDE_INT value)
6d8fd7bb
RH
4300{
4301 rtx result;
4302 int i;
c37aa43b 4303 HOST_WIDE_INT mask = 0;
6d8fd7bb 4304
c37aa43b 4305 for (i = 7; i >= 0; --i)
6d8fd7bb 4306 {
c37aa43b
UB
4307 mask <<= 8;
4308 if (!((value >> i) & 1))
4309 mask |= 0xff;
6d8fd7bb 4310 }
6d8fd7bb 4311
c37aa43b 4312 result = gen_int_mode (mask, DImode);
6d8fd7bb
RH
4313 return result;
4314}
4315
4316void
a5c24926 4317alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
ef4bddc2 4318 machine_mode mode,
a5c24926 4319 rtx op0, rtx op1, rtx op2)
6d8fd7bb
RH
4320{
4321 op0 = gen_lowpart (mode, op0);
4322
4323 if (op1 == const0_rtx)
4324 op1 = CONST0_RTX (mode);
4325 else
4326 op1 = gen_lowpart (mode, op1);
c4b50f1a
RH
4327
4328 if (op2 == const0_rtx)
6d8fd7bb
RH
4329 op2 = CONST0_RTX (mode);
4330 else
4331 op2 = gen_lowpart (mode, op2);
4332
4333 emit_insn ((*gen) (op0, op1, op2));
4334}
0b196b18 4335
b686c48c
RH
4336/* A subroutine of the atomic operation splitters. Jump to LABEL if
4337 COND is true. Mark the jump as unlikely to be taken. */
4338
4339static void
4340emit_unlikely_jump (rtx cond, rtx label)
4341{
f370536c
TS
4342 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4343 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
5fa396ad 4344 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
b686c48c
RH
4345}
4346
2371d1a0
RH
4347/* Subroutines of the atomic operation splitters. Emit barriers
4348 as needed for the memory MODEL. */
4349
4350static void
4351alpha_pre_atomic_barrier (enum memmodel model)
4352{
8930883e
MK
4353 if (need_atomic_barrier_p (model, true))
4354 emit_insn (gen_memory_barrier ());
2371d1a0
RH
4355}
4356
4357static void
4358alpha_post_atomic_barrier (enum memmodel model)
4359{
8930883e
MK
4360 if (need_atomic_barrier_p (model, false))
4361 emit_insn (gen_memory_barrier ());
2371d1a0
RH
4362}
4363
38f31687
RH
4364/* A subroutine of the atomic operation splitters. Emit an insxl
4365 instruction in MODE. */
4366
4367static rtx
ef4bddc2 4368emit_insxl (machine_mode mode, rtx op1, rtx op2)
38f31687
RH
4369{
4370 rtx ret = gen_reg_rtx (DImode);
4371 rtx (*fn) (rtx, rtx, rtx);
4372
e533b2a4
RH
4373 switch (mode)
4374 {
4e10a5a7 4375 case E_QImode:
e533b2a4
RH
4376 fn = gen_insbl;
4377 break;
4e10a5a7 4378 case E_HImode:
e533b2a4
RH
4379 fn = gen_inswl;
4380 break;
4e10a5a7 4381 case E_SImode:
e533b2a4
RH
4382 fn = gen_insll;
4383 break;
4e10a5a7 4384 case E_DImode:
e533b2a4
RH
4385 fn = gen_insql;
4386 break;
4387 default:
4388 gcc_unreachable ();
4389 }
0b2a7367 4390
f2477b06 4391 op1 = force_reg (mode, op1);
38f31687
RH
4392 emit_insn (fn (ret, op1, op2));
4393
4394 return ret;
4395}
4396
ea2c620c 4397/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
0b196b18
RH
4398 to perform. MEM is the memory on which to operate. VAL is the second
4399 operand of the binary operator. BEFORE and AFTER are optional locations to
4400 return the value of MEM either before of after the operation. SCRATCH is
4401 a scratch register. */
4402
4403void
2371d1a0
RH
4404alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4405 rtx after, rtx scratch, enum memmodel model)
0b196b18 4406{
ef4bddc2 4407 machine_mode mode = GET_MODE (mem);
b686c48c 4408 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
0b196b18 4409
2371d1a0 4410 alpha_pre_atomic_barrier (model);
0b196b18
RH
4411
4412 label = gen_label_rtx ();
4413 emit_label (label);
4414 label = gen_rtx_LABEL_REF (DImode, label);
4415
4416 if (before == NULL)
4417 before = scratch;
145f748f 4418 emit_insn (gen_load_locked (mode, before, mem));
0b196b18
RH
4419
4420 if (code == NOT)
d04dceb5
UB
4421 {
4422 x = gen_rtx_AND (mode, before, val);
f7df4a84 4423 emit_insn (gen_rtx_SET (val, x));
d04dceb5
UB
4424
4425 x = gen_rtx_NOT (mode, val);
4426 }
0b196b18
RH
4427 else
4428 x = gen_rtx_fmt_ee (code, mode, before, val);
0b196b18 4429 if (after)
f7df4a84
RS
4430 emit_insn (gen_rtx_SET (after, copy_rtx (x)));
4431 emit_insn (gen_rtx_SET (scratch, x));
0b196b18 4432
145f748f 4433 emit_insn (gen_store_conditional (mode, cond, mem, scratch));
b686c48c
RH
4434
4435 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4436 emit_unlikely_jump (x, label);
4437
2371d1a0 4438 alpha_post_atomic_barrier (model);
b686c48c
RH
4439}
4440
4441/* Expand a compare and swap operation. */
4442
4443void
2371d1a0 4444alpha_split_compare_and_swap (rtx operands[])
b686c48c 4445{
2371d1a0
RH
4446 rtx cond, retval, mem, oldval, newval;
4447 bool is_weak;
4448 enum memmodel mod_s, mod_f;
ef4bddc2 4449 machine_mode mode;
2371d1a0
RH
4450 rtx label1, label2, x;
4451
4452 cond = operands[0];
4453 retval = operands[1];
4454 mem = operands[2];
4455 oldval = operands[3];
4456 newval = operands[4];
4457 is_weak = (operands[5] != const0_rtx);
46b35980
AM
4458 mod_s = memmodel_from_int (INTVAL (operands[6]));
4459 mod_f = memmodel_from_int (INTVAL (operands[7]));
2371d1a0
RH
4460 mode = GET_MODE (mem);
4461
4462 alpha_pre_atomic_barrier (mod_s);
4463
4464 label1 = NULL_RTX;
4465 if (!is_weak)
4466 {
4467 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4468 emit_label (XEXP (label1, 0));
4469 }
b686c48c 4470 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
b686c48c 4471
145f748f 4472 emit_insn (gen_load_locked (mode, retval, mem));
b686c48c
RH
4473
4474 x = gen_lowpart (DImode, retval);
4475 if (oldval == const0_rtx)
2371d1a0
RH
4476 {
4477 emit_move_insn (cond, const0_rtx);
4478 x = gen_rtx_NE (DImode, x, const0_rtx);
4479 }
0b196b18 4480 else
b686c48c
RH
4481 {
4482 x = gen_rtx_EQ (DImode, x, oldval);
f7df4a84 4483 emit_insn (gen_rtx_SET (cond, x));
b686c48c
RH
4484 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4485 }
4486 emit_unlikely_jump (x, label2);
4487
2371d1a0 4488 emit_move_insn (cond, newval);
145f748f
UB
4489 emit_insn (gen_store_conditional
4490 (mode, cond, mem, gen_lowpart (mode, cond)));
0b196b18 4491
2371d1a0
RH
4492 if (!is_weak)
4493 {
4494 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4495 emit_unlikely_jump (x, label1);
4496 }
4497
46b35980 4498 if (!is_mm_relaxed (mod_f))
2371d1a0
RH
4499 emit_label (XEXP (label2, 0));
4500
4501 alpha_post_atomic_barrier (mod_s);
b686c48c 4502
46b35980 4503 if (is_mm_relaxed (mod_f))
2371d1a0 4504 emit_label (XEXP (label2, 0));
b686c48c
RH
4505}
4506
38f31687 4507void
2371d1a0 4508alpha_expand_compare_and_swap_12 (rtx operands[])
38f31687 4509{
2371d1a0 4510 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
ef4bddc2 4511 machine_mode mode;
38f31687 4512 rtx addr, align, wdst;
2371d1a0
RH
4513
4514 cond = operands[0];
4515 dst = operands[1];
4516 mem = operands[2];
4517 oldval = operands[3];
4518 newval = operands[4];
4519 is_weak = operands[5];
4520 mod_s = operands[6];
4521 mod_f = operands[7];
4522 mode = GET_MODE (mem);
4523
4524 /* We forced the address into a register via mem_noofs_operand. */
4525 addr = XEXP (mem, 0);
4526 gcc_assert (register_operand (addr, DImode));
38f31687 4527
38f31687
RH
4528 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4529 NULL_RTX, 1, OPTAB_DIRECT);
4530
4531 oldval = convert_modes (DImode, mode, oldval, 1);
2371d1a0
RH
4532
4533 if (newval != const0_rtx)
4534 newval = emit_insxl (mode, newval, addr);
38f31687
RH
4535
4536 wdst = gen_reg_rtx (DImode);
145f748f
UB
4537 emit_insn (gen_atomic_compare_and_swap_1
4538 (mode, cond, wdst, mem, oldval, newval, align,
4539 is_weak, mod_s, mod_f));
38f31687
RH
4540
4541 emit_move_insn (dst, gen_lowpart (mode, wdst));
4542}
4543
4544void
2371d1a0 4545alpha_split_compare_and_swap_12 (rtx operands[])
38f31687 4546{
2371d1a0 4547 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
ef4bddc2 4548 machine_mode mode;
2371d1a0
RH
4549 bool is_weak;
4550 enum memmodel mod_s, mod_f;
4551 rtx label1, label2, mem, addr, width, mask, x;
4552
4553 cond = operands[0];
4554 dest = operands[1];
4555 orig_mem = operands[2];
4556 oldval = operands[3];
4557 newval = operands[4];
4558 align = operands[5];
4559 is_weak = (operands[6] != const0_rtx);
46b35980
AM
4560 mod_s = memmodel_from_int (INTVAL (operands[7]));
4561 mod_f = memmodel_from_int (INTVAL (operands[8]));
2371d1a0
RH
4562 scratch = operands[9];
4563 mode = GET_MODE (orig_mem);
4564 addr = XEXP (orig_mem, 0);
38f31687
RH
4565
4566 mem = gen_rtx_MEM (DImode, align);
2371d1a0 4567 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4eace304
RH
4568 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4569 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
38f31687 4570
2371d1a0
RH
4571 alpha_pre_atomic_barrier (mod_s);
4572
4573 label1 = NULL_RTX;
4574 if (!is_weak)
4575 {
4576 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4577 emit_label (XEXP (label1, 0));
4578 }
38f31687 4579 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
38f31687 4580
145f748f 4581 emit_insn (gen_load_locked (DImode, scratch, mem));
38f31687
RH
4582
4583 width = GEN_INT (GET_MODE_BITSIZE (mode));
4584 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
0b2a7367 4585 emit_insn (gen_extxl (dest, scratch, width, addr));
38f31687
RH
4586
4587 if (oldval == const0_rtx)
2371d1a0
RH
4588 {
4589 emit_move_insn (cond, const0_rtx);
4590 x = gen_rtx_NE (DImode, dest, const0_rtx);
4591 }
38f31687
RH
4592 else
4593 {
4594 x = gen_rtx_EQ (DImode, dest, oldval);
f7df4a84 4595 emit_insn (gen_rtx_SET (cond, x));
38f31687
RH
4596 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4597 }
4598 emit_unlikely_jump (x, label2);
4599
2371d1a0 4600 emit_insn (gen_mskxl (cond, scratch, mask, addr));
38f31687 4601
2371d1a0
RH
4602 if (newval != const0_rtx)
4603 emit_insn (gen_iordi3 (cond, cond, newval));
38f31687 4604
145f748f 4605 emit_insn (gen_store_conditional (DImode, cond, mem, cond));
38f31687 4606
2371d1a0
RH
4607 if (!is_weak)
4608 {
4609 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4610 emit_unlikely_jump (x, label1);
4611 }
4612
46b35980 4613 if (!is_mm_relaxed (mod_f))
2371d1a0
RH
4614 emit_label (XEXP (label2, 0));
4615
4616 alpha_post_atomic_barrier (mod_s);
4617
46b35980 4618 if (is_mm_relaxed (mod_f))
2371d1a0 4619 emit_label (XEXP (label2, 0));
38f31687
RH
4620}
4621
b686c48c
RH
4622/* Expand an atomic exchange operation. */
4623
4624void
2371d1a0 4625alpha_split_atomic_exchange (rtx operands[])
b686c48c 4626{
2371d1a0
RH
4627 rtx retval, mem, val, scratch;
4628 enum memmodel model;
ef4bddc2 4629 machine_mode mode;
2371d1a0
RH
4630 rtx label, x, cond;
4631
4632 retval = operands[0];
4633 mem = operands[1];
4634 val = operands[2];
4635 model = (enum memmodel) INTVAL (operands[3]);
4636 scratch = operands[4];
4637 mode = GET_MODE (mem);
4638 cond = gen_lowpart (DImode, scratch);
4639
4640 alpha_pre_atomic_barrier (model);
0b196b18 4641
b686c48c
RH
4642 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4643 emit_label (XEXP (label, 0));
4644
145f748f 4645 emit_insn (gen_load_locked (mode, retval, mem));
b686c48c 4646 emit_move_insn (scratch, val);
145f748f 4647 emit_insn (gen_store_conditional (mode, cond, mem, scratch));
b686c48c
RH
4648
4649 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4650 emit_unlikely_jump (x, label);
27738602 4651
2371d1a0 4652 alpha_post_atomic_barrier (model);
0b196b18 4653}
38f31687
RH
4654
4655void
2371d1a0 4656alpha_expand_atomic_exchange_12 (rtx operands[])
38f31687 4657{
2371d1a0 4658 rtx dst, mem, val, model;
ef4bddc2 4659 machine_mode mode;
38f31687 4660 rtx addr, align, wdst;
38f31687 4661
2371d1a0
RH
4662 dst = operands[0];
4663 mem = operands[1];
4664 val = operands[2];
4665 model = operands[3];
4666 mode = GET_MODE (mem);
4667
4668 /* We forced the address into a register via mem_noofs_operand. */
4669 addr = XEXP (mem, 0);
4670 gcc_assert (register_operand (addr, DImode));
38f31687 4671
38f31687
RH
4672 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4673 NULL_RTX, 1, OPTAB_DIRECT);
4674
4675 /* Insert val into the correct byte location within the word. */
2371d1a0
RH
4676 if (val != const0_rtx)
4677 val = emit_insxl (mode, val, addr);
38f31687
RH
4678
4679 wdst = gen_reg_rtx (DImode);
145f748f 4680 emit_insn (gen_atomic_exchange_1 (mode, wdst, mem, val, align, model));
38f31687
RH
4681
4682 emit_move_insn (dst, gen_lowpart (mode, wdst));
4683}
4684
4685void
2371d1a0 4686alpha_split_atomic_exchange_12 (rtx operands[])
38f31687 4687{
2371d1a0 4688 rtx dest, orig_mem, addr, val, align, scratch;
38f31687 4689 rtx label, mem, width, mask, x;
ef4bddc2 4690 machine_mode mode;
2371d1a0
RH
4691 enum memmodel model;
4692
4693 dest = operands[0];
4694 orig_mem = operands[1];
4695 val = operands[2];
4696 align = operands[3];
4697 model = (enum memmodel) INTVAL (operands[4]);
4698 scratch = operands[5];
4699 mode = GET_MODE (orig_mem);
4700 addr = XEXP (orig_mem, 0);
38f31687
RH
4701
4702 mem = gen_rtx_MEM (DImode, align);
2371d1a0 4703 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4eace304
RH
4704 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4705 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
2371d1a0
RH
4706
4707 alpha_pre_atomic_barrier (model);
38f31687 4708
38f31687
RH
4709 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4710 emit_label (XEXP (label, 0));
4711
145f748f 4712 emit_insn (gen_load_locked (DImode, scratch, mem));
38f31687
RH
4713
4714 width = GEN_INT (GET_MODE_BITSIZE (mode));
4715 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
0b2a7367
RH
4716 emit_insn (gen_extxl (dest, scratch, width, addr));
4717 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
2371d1a0
RH
4718 if (val != const0_rtx)
4719 emit_insn (gen_iordi3 (scratch, scratch, val));
38f31687 4720
145f748f 4721 emit_insn (gen_store_conditional (DImode, scratch, mem, scratch));
38f31687
RH
4722
4723 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4724 emit_unlikely_jump (x, label);
27738602 4725
2371d1a0 4726 alpha_post_atomic_barrier (model);
38f31687 4727}
a6f12d7c
RK
4728\f
4729/* Adjust the cost of a scheduling dependency. Return the new cost of
4730 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4731
c237e94a 4732static int
b505225b
TS
4733alpha_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4734 unsigned int)
a6f12d7c 4735{
d58770e7 4736 enum attr_type dep_insn_type;
a6f12d7c
RK
4737
4738 /* If the dependence is an anti-dependence, there is no cost. For an
4739 output dependence, there is sometimes a cost, but it doesn't seem
4740 worth handling those few cases. */
b505225b 4741 if (dep_type != 0)
98791e3a 4742 return cost;
a6f12d7c 4743
26250081
RH
4744 /* If we can't recognize the insns, we can't really do anything. */
4745 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4746 return cost;
4747
26250081
RH
4748 dep_insn_type = get_attr_type (dep_insn);
4749
bcbbac26 4750 /* Bring in the user-defined memory latency. */
71d9b493
RH
4751 if (dep_insn_type == TYPE_ILD
4752 || dep_insn_type == TYPE_FLD
4753 || dep_insn_type == TYPE_LDSYM)
bcbbac26
RH
4754 cost += alpha_memory_latency-1;
4755
98791e3a 4756 /* Everything else handled in DFA bypasses now. */
74835ed8 4757
a6f12d7c
RK
4758 return cost;
4759}
c237e94a 4760
98791e3a
RH
4761/* The number of instructions that can be issued per cycle. */
4762
c237e94a 4763static int
a5c24926 4764alpha_issue_rate (void)
c237e94a 4765{
8bea7f7c 4766 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
c237e94a
ZW
4767}
4768
98791e3a
RH
4769/* How many alternative schedules to try. This should be as wide as the
4770 scheduling freedom in the DFA, but no wider. Making this value too
4771 large results extra work for the scheduler.
4772
4773 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4774 alternative schedules. For EV5, we can choose between E0/E1 and
9a9f7594 4775 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
98791e3a
RH
4776
4777static int
a5c24926 4778alpha_multipass_dfa_lookahead (void)
98791e3a 4779{
8bea7f7c 4780 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
98791e3a 4781}
9ecc37f0 4782\f
6f9b006d
RH
4783/* Machine-specific function data. */
4784
735f469b
TG
4785struct GTY(()) alpha_links;
4786
d1b38208 4787struct GTY(()) machine_function
6f9b006d 4788{
0191520b
RH
4789 unsigned HOST_WIDE_INT sa_mask;
4790 HOST_WIDE_INT sa_size;
4791 HOST_WIDE_INT frame_size;
4792
5c30094f 4793 /* For flag_reorder_blocks_and_partition. */
984514ac 4794 rtx gp_save_rtx;
221cf9ab
OH
4795
4796 /* For VMS condition handlers. */
735f469b
TG
4797 bool uses_condition_handler;
4798
4799 /* Linkage entries. */
fb5c464a 4800 hash_map<nofree_string_hash, alpha_links *> *links;
6f9b006d
RH
4801};
4802
e2500fed 4803/* How to allocate a 'struct machine_function'. */
30102605 4804
e2500fed 4805static struct machine_function *
a5c24926 4806alpha_init_machine_status (void)
30102605 4807{
766090c2 4808 return ggc_cleared_alloc<machine_function> ();
30102605 4809}
30102605 4810
221cf9ab
OH
4811/* Support for frame based VMS condition handlers. */
4812
4813/* A VMS condition handler may be established for a function with a call to
4814 __builtin_establish_vms_condition_handler, and cancelled with a call to
4815 __builtin_revert_vms_condition_handler.
4816
4817 The VMS Condition Handling Facility knows about the existence of a handler
4818 from the procedure descriptor .handler field. As the VMS native compilers,
4819 we store the user specified handler's address at a fixed location in the
4820 stack frame and point the procedure descriptor at a common wrapper which
4821 fetches the real handler's address and issues an indirect call.
4822
4823 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4824
4825 We force the procedure kind to PT_STACK, and the fixed frame location is
4826 fp+8, just before the register save area. We use the handler_data field in
4827 the procedure descriptor to state the fp offset at which the installed
4828 handler address can be found. */
4829
4830#define VMS_COND_HANDLER_FP_OFFSET 8
4831
4832/* Expand code to store the currently installed user VMS condition handler
4833 into TARGET and install HANDLER as the new condition handler. */
4834
4835void
4836alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4837{
0a81f074
RS
4838 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4839 VMS_COND_HANDLER_FP_OFFSET);
221cf9ab
OH
4840
4841 rtx handler_slot
4842 = gen_rtx_MEM (DImode, handler_slot_address);
4843
4844 emit_move_insn (target, handler_slot);
4845 emit_move_insn (handler_slot, handler);
4846
4847 /* Notify the start/prologue/epilogue emitters that the condition handler
4848 slot is needed. In addition to reserving the slot space, this will force
4849 the procedure kind to PT_STACK so ensure that the hard_frame_pointer_rtx
4850 use above is correct. */
4851 cfun->machine->uses_condition_handler = true;
4852}
4853
4854/* Expand code to store the current VMS condition handler into TARGET and
4855 nullify it. */
4856
4857void
4858alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4859{
4860 /* We implement this by establishing a null condition handler, with the tiny
4861 side effect of setting uses_condition_handler. This is a little bit
4862 pessimistic if no actual builtin_establish call is ever issued, which is
4863 not a real problem and expected never to happen anyway. */
4864
4865 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4866}
4867
9ecc37f0
RH
4868/* Functions to save and restore alpha_return_addr_rtx. */
4869
9ecc37f0
RH
4870/* Start the ball rolling with RETURN_ADDR_RTX. */
4871
4872rtx
a5c24926 4873alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
9ecc37f0 4874{
9ecc37f0
RH
4875 if (count != 0)
4876 return const0_rtx;
4877
b91055dd 4878 return get_hard_reg_initial_val (Pmode, REG_RA);
9ecc37f0
RH
4879}
4880
229aa352 4881/* Return or create a memory slot containing the gp value for the current
ccb83cbc
RH
4882 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4883
4884rtx
a5c24926 4885alpha_gp_save_rtx (void)
ccb83cbc 4886{
cad003ba
DM
4887 rtx_insn *seq;
4888 rtx m = cfun->machine->gp_save_rtx;
229aa352
RH
4889
4890 if (m == NULL)
4891 {
4892 start_sequence ();
4893
4894 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4895 m = validize_mem (m);
4896 emit_move_insn (m, pic_offset_table_rtx);
4897
4898 seq = get_insns ();
4899 end_sequence ();
8deb1d31
EB
4900
4901 /* We used to simply emit the sequence after entry_of_function.
4902 However this breaks the CFG if the first instruction in the
4903 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4904 label. Emit the sequence properly on the edge. We are only
4905 invoked from dw2_build_landing_pads and finish_eh_generation
4906 will call commit_edge_insertions thanks to a kludge. */
fefa31b5
DM
4907 insert_insn_on_edge (seq,
4908 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
229aa352
RH
4909
4910 cfun->machine->gp_save_rtx = m;
4911 }
4912
4913 return m;
ccb83cbc
RH
4914}
4915
1e46eb2a
UB
4916static void
4917alpha_instantiate_decls (void)
4918{
4919 if (cfun->machine->gp_save_rtx != NULL_RTX)
4920 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4921}
4922
9ecc37f0 4923static int
a5c24926 4924alpha_ra_ever_killed (void)
9ecc37f0 4925{
cad003ba 4926 rtx_insn *top;
6abc6f40 4927
b91055dd 4928 if (!has_hard_reg_initial_val (Pmode, REG_RA))
6fb5fa3c 4929 return (int)df_regs_ever_live_p (REG_RA);
9ecc37f0 4930
6abc6f40
RH
4931 push_topmost_sequence ();
4932 top = get_insns ();
4933 pop_topmost_sequence ();
4934
a5d567ec 4935 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
9ecc37f0
RH
4936}
4937
a6f12d7c 4938\f
be7560ea 4939/* Return the trap mode suffix applicable to the current
285a5742 4940 instruction, or NULL. */
a6f12d7c 4941
be7560ea 4942static const char *
a5c24926 4943get_trap_mode_suffix (void)
a6f12d7c 4944{
be7560ea 4945 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
a6f12d7c 4946
be7560ea 4947 switch (s)
a6f12d7c 4948 {
be7560ea
RH
4949 case TRAP_SUFFIX_NONE:
4950 return NULL;
6245e3df 4951
be7560ea 4952 case TRAP_SUFFIX_SU:
981a828e 4953 if (alpha_fptm >= ALPHA_FPTM_SU)
be7560ea
RH
4954 return "su";
4955 return NULL;
6245e3df 4956
be7560ea
RH
4957 case TRAP_SUFFIX_SUI:
4958 if (alpha_fptm >= ALPHA_FPTM_SUI)
4959 return "sui";
4960 return NULL;
4961
4962 case TRAP_SUFFIX_V_SV:
e83015a9
RH
4963 switch (alpha_fptm)
4964 {
4965 case ALPHA_FPTM_N:
be7560ea 4966 return NULL;
e83015a9 4967 case ALPHA_FPTM_U:
be7560ea 4968 return "v";
e83015a9
RH
4969 case ALPHA_FPTM_SU:
4970 case ALPHA_FPTM_SUI:
be7560ea 4971 return "sv";
56daab84
NS
4972 default:
4973 gcc_unreachable ();
e83015a9 4974 }
e83015a9 4975
be7560ea 4976 case TRAP_SUFFIX_V_SV_SVI:
0022a940
DMT
4977 switch (alpha_fptm)
4978 {
4979 case ALPHA_FPTM_N:
be7560ea 4980 return NULL;
0022a940 4981 case ALPHA_FPTM_U:
be7560ea 4982 return "v";
0022a940 4983 case ALPHA_FPTM_SU:
be7560ea 4984 return "sv";
0022a940 4985 case ALPHA_FPTM_SUI:
be7560ea 4986 return "svi";
56daab84
NS
4987 default:
4988 gcc_unreachable ();
0022a940
DMT
4989 }
4990 break;
4991
be7560ea 4992 case TRAP_SUFFIX_U_SU_SUI:
6245e3df
RK
4993 switch (alpha_fptm)
4994 {
4995 case ALPHA_FPTM_N:
be7560ea 4996 return NULL;
6245e3df 4997 case ALPHA_FPTM_U:
be7560ea 4998 return "u";
6245e3df 4999 case ALPHA_FPTM_SU:
be7560ea 5000 return "su";
6245e3df 5001 case ALPHA_FPTM_SUI:
be7560ea 5002 return "sui";
56daab84
NS
5003 default:
5004 gcc_unreachable ();
6245e3df
RK
5005 }
5006 break;
56daab84
NS
5007
5008 default:
5009 gcc_unreachable ();
be7560ea 5010 }
56daab84 5011 gcc_unreachable ();
be7560ea 5012}
6245e3df 5013
be7560ea 5014/* Return the rounding mode suffix applicable to the current
285a5742 5015 instruction, or NULL. */
be7560ea
RH
5016
5017static const char *
a5c24926 5018get_round_mode_suffix (void)
be7560ea
RH
5019{
5020 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5021
5022 switch (s)
5023 {
5024 case ROUND_SUFFIX_NONE:
5025 return NULL;
5026 case ROUND_SUFFIX_NORMAL:
5027 switch (alpha_fprm)
6245e3df 5028 {
be7560ea
RH
5029 case ALPHA_FPRM_NORM:
5030 return NULL;
f676971a 5031 case ALPHA_FPRM_MINF:
be7560ea
RH
5032 return "m";
5033 case ALPHA_FPRM_CHOP:
5034 return "c";
5035 case ALPHA_FPRM_DYN:
5036 return "d";
56daab84
NS
5037 default:
5038 gcc_unreachable ();
6245e3df
RK
5039 }
5040 break;
5041
be7560ea
RH
5042 case ROUND_SUFFIX_C:
5043 return "c";
56daab84
NS
5044
5045 default:
5046 gcc_unreachable ();
be7560ea 5047 }
56daab84 5048 gcc_unreachable ();
be7560ea
RH
5049}
5050
f83e2262 5051/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
be7560ea 5052
f83e2262
UB
5053static bool
5054alpha_print_operand_punct_valid_p (unsigned char code)
5055{
5056 return (code == '/' || code == ',' || code == '-' || code == '~'
5057 || code == '#' || code == '*' || code == '&');
5058}
5059
5060/* Implement TARGET_PRINT_OPERAND. The alpha-specific
5061 operand codes are documented below. */
5062
5063static void
5064alpha_print_operand (FILE *file, rtx x, int code)
be7560ea
RH
5065{
5066 int i;
5067
5068 switch (code)
5069 {
5070 case '~':
5071 /* Print the assembler name of the current function. */
5072 assemble_name (file, alpha_fnname);
5073 break;
5074
6f9b006d 5075 case '&':
4fbca4ba
RS
5076 if (const char *name = get_some_local_dynamic_name ())
5077 assemble_name (file, name);
5078 else
5079 output_operand_lossage ("'%%&' used without any "
5080 "local dynamic TLS references");
6f9b006d
RH
5081 break;
5082
be7560ea 5083 case '/':
f83e2262
UB
5084 /* Generates the instruction suffix. The TRAP_SUFFIX and ROUND_SUFFIX
5085 attributes are examined to determine what is appropriate. */
be7560ea
RH
5086 {
5087 const char *trap = get_trap_mode_suffix ();
5088 const char *round = get_round_mode_suffix ();
5089
5090 if (trap || round)
46e1a769 5091 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
be7560ea
RH
5092 break;
5093 }
5094
89cfc2c6 5095 case ',':
f83e2262
UB
5096 /* Generates single precision suffix for floating point
5097 instructions (s for IEEE, f for VAX). */
be7560ea 5098 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
89cfc2c6
RK
5099 break;
5100
5101 case '-':
f83e2262
UB
5102 /* Generates double precision suffix for floating point
5103 instructions (t for IEEE, g for VAX). */
be7560ea 5104 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
89cfc2c6
RK
5105 break;
5106
1eb356b9
RH
5107 case '#':
5108 if (alpha_this_literal_sequence_number == 0)
5109 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5110 fprintf (file, "%d", alpha_this_literal_sequence_number);
5111 break;
5112
5113 case '*':
5114 if (alpha_this_gpdisp_sequence_number == 0)
5115 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5116 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5117 break;
5118
40571d67 5119 case 'J':
6f9b006d
RH
5120 {
5121 const char *lituse;
5122
5123 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5124 {
5125 x = XVECEXP (x, 0, 0);
5126 lituse = "lituse_tlsgd";
5127 }
5128 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5129 {
5130 x = XVECEXP (x, 0, 0);
5131 lituse = "lituse_tlsldm";
5132 }
7d83f4f5 5133 else if (CONST_INT_P (x))
6f9b006d
RH
5134 lituse = "lituse_jsr";
5135 else
5136 {
5137 output_operand_lossage ("invalid %%J value");
5138 break;
5139 }
5140
5141 if (x != const0_rtx)
5142 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5143 }
40571d67
RH
5144 break;
5145
d006f5eb
RH
5146 case 'j':
5147 {
5148 const char *lituse;
5149
5150#ifdef HAVE_AS_JSRDIRECT_RELOCS
5151 lituse = "lituse_jsrdirect";
5152#else
5153 lituse = "lituse_jsr";
5154#endif
5155
5156 gcc_assert (INTVAL (x) != 0);
5157 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5158 }
5159 break;
a6f12d7c
RK
5160 case 'r':
5161 /* If this operand is the constant zero, write it as "$31". */
7d83f4f5 5162 if (REG_P (x))
a6f12d7c
RK
5163 fprintf (file, "%s", reg_names[REGNO (x)]);
5164 else if (x == CONST0_RTX (GET_MODE (x)))
5165 fprintf (file, "$31");
5166 else
5167 output_operand_lossage ("invalid %%r value");
a6f12d7c
RK
5168 break;
5169
5170 case 'R':
5171 /* Similar, but for floating-point. */
7d83f4f5 5172 if (REG_P (x))
a6f12d7c
RK
5173 fprintf (file, "%s", reg_names[REGNO (x)]);
5174 else if (x == CONST0_RTX (GET_MODE (x)))
5175 fprintf (file, "$f31");
5176 else
5177 output_operand_lossage ("invalid %%R value");
a6f12d7c
RK
5178 break;
5179
5180 case 'N':
5181 /* Write the 1's complement of a constant. */
7d83f4f5 5182 if (!CONST_INT_P (x))
a6f12d7c
RK
5183 output_operand_lossage ("invalid %%N value");
5184
0bc8ae6e 5185 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
a6f12d7c
RK
5186 break;
5187
5188 case 'P':
5189 /* Write 1 << C, for a constant C. */
7d83f4f5 5190 if (!CONST_INT_P (x))
a6f12d7c
RK
5191 output_operand_lossage ("invalid %%P value");
5192
c37aa43b 5193 fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x));
a6f12d7c
RK
5194 break;
5195
5196 case 'h':
5197 /* Write the high-order 16 bits of a constant, sign-extended. */
7d83f4f5 5198 if (!CONST_INT_P (x))
a6f12d7c
RK
5199 output_operand_lossage ("invalid %%h value");
5200
0bc8ae6e 5201 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
a6f12d7c
RK
5202 break;
5203
5204 case 'L':
5205 /* Write the low-order 16 bits of a constant, sign-extended. */
7d83f4f5 5206 if (!CONST_INT_P (x))
a6f12d7c
RK
5207 output_operand_lossage ("invalid %%L value");
5208
0bc8ae6e
RK
5209 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5210 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
a6f12d7c
RK
5211 break;
5212
5213 case 'm':
5214 /* Write mask for ZAP insn. */
f06ed650 5215 if (CONST_INT_P (x))
a6f12d7c
RK
5216 {
5217 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5218
5219 for (i = 0; i < 8; i++, value >>= 8)
5220 if (value & 0xff)
5221 mask |= (1 << i);
5222
0bc8ae6e 5223 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
a6f12d7c
RK
5224 }
5225 else
5226 output_operand_lossage ("invalid %%m value");
5227 break;
5228
5229 case 'M':
6c174fc0 5230 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
c799797d 5231 if (!mode_width_operand (x, VOIDmode))
a6f12d7c
RK
5232 output_operand_lossage ("invalid %%M value");
5233
5234 fprintf (file, "%s",
6c174fc0
RH
5235 (INTVAL (x) == 8 ? "b"
5236 : INTVAL (x) == 16 ? "w"
5237 : INTVAL (x) == 32 ? "l"
5238 : "q"));
a6f12d7c
RK
5239 break;
5240
5241 case 'U':
5242 /* Similar, except do it from the mask. */
7d83f4f5 5243 if (CONST_INT_P (x))
c4b50f1a
RH
5244 {
5245 HOST_WIDE_INT value = INTVAL (x);
5246
5247 if (value == 0xff)
5248 {
5249 fputc ('b', file);
5250 break;
5251 }
5252 if (value == 0xffff)
5253 {
5254 fputc ('w', file);
5255 break;
5256 }
5257 if (value == 0xffffffff)
5258 {
5259 fputc ('l', file);
5260 break;
5261 }
5262 if (value == -1)
5263 {
5264 fputc ('q', file);
5265 break;
5266 }
5267 }
c37aa43b 5268
c4b50f1a 5269 output_operand_lossage ("invalid %%U value");
a6f12d7c
RK
5270 break;
5271
5272 case 's':
0b2a7367 5273 /* Write the constant value divided by 8. */
7d83f4f5 5274 if (!CONST_INT_P (x)
0b2a7367 5275 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
30102605 5276 || (INTVAL (x) & 7) != 0)
a6f12d7c
RK
5277 output_operand_lossage ("invalid %%s value");
5278
0b2a7367 5279 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
a6f12d7c
RK
5280 break;
5281
bdd4c95a 5282 case 'C': case 'D': case 'c': case 'd':
a6f12d7c 5283 /* Write out comparison name. */
bdd4c95a
RK
5284 {
5285 enum rtx_code c = GET_CODE (x);
5286
ec8e098d 5287 if (!COMPARISON_P (x))
bdd4c95a
RK
5288 output_operand_lossage ("invalid %%C value");
5289
948068e2 5290 else if (code == 'D')
bdd4c95a
RK
5291 c = reverse_condition (c);
5292 else if (code == 'c')
5293 c = swap_condition (c);
5294 else if (code == 'd')
5295 c = swap_condition (reverse_condition (c));
5296
5297 if (c == LEU)
5298 fprintf (file, "ule");
5299 else if (c == LTU)
5300 fprintf (file, "ult");
1eb8759b
RH
5301 else if (c == UNORDERED)
5302 fprintf (file, "un");
bdd4c95a
RK
5303 else
5304 fprintf (file, "%s", GET_RTX_NAME (c));
5305 }
ab561e66
RK
5306 break;
5307
a6f12d7c
RK
5308 case 'E':
5309 /* Write the divide or modulus operator. */
5310 switch (GET_CODE (x))
5311 {
5312 case DIV:
5313 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5314 break;
5315 case UDIV:
5316 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5317 break;
5318 case MOD:
5319 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5320 break;
5321 case UMOD:
5322 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5323 break;
5324 default:
5325 output_operand_lossage ("invalid %%E value");
5326 break;
5327 }
5328 break;
5329
a6f12d7c
RK
5330 case 'A':
5331 /* Write "_u" for unaligned access. */
7d83f4f5 5332 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
a6f12d7c
RK
5333 fprintf (file, "_u");
5334 break;
5335
5336 case 0:
7d83f4f5 5337 if (REG_P (x))
a6f12d7c 5338 fprintf (file, "%s", reg_names[REGNO (x)]);
7d83f4f5 5339 else if (MEM_P (x))
cc8ca59e 5340 output_address (GET_MODE (x), XEXP (x, 0));
6f9b006d
RH
5341 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5342 {
5343 switch (XINT (XEXP (x, 0), 1))
5344 {
5345 case UNSPEC_DTPREL:
5346 case UNSPEC_TPREL:
5347 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5348 break;
5349 default:
5350 output_operand_lossage ("unknown relocation unspec");
5351 break;
5352 }
5353 }
a6f12d7c
RK
5354 else
5355 output_addr_const (file, x);
5356 break;
5357
5358 default:
5359 output_operand_lossage ("invalid %%xn code");
5360 }
5361}
714b019c 5362
f83e2262
UB
5363/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
5364
5365static void
5366alpha_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr)
714b019c 5367{
e03ec28f 5368 int basereg = 31;
714b019c
RH
5369 HOST_WIDE_INT offset = 0;
5370
5371 if (GET_CODE (addr) == AND)
5372 addr = XEXP (addr, 0);
714b019c 5373
e03ec28f 5374 if (GET_CODE (addr) == PLUS
7d83f4f5 5375 && CONST_INT_P (XEXP (addr, 1)))
714b019c
RH
5376 {
5377 offset = INTVAL (XEXP (addr, 1));
e03ec28f 5378 addr = XEXP (addr, 0);
714b019c 5379 }
1eb356b9
RH
5380
5381 if (GET_CODE (addr) == LO_SUM)
5382 {
6f9b006d
RH
5383 const char *reloc16, *reloclo;
5384 rtx op1 = XEXP (addr, 1);
5385
5386 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5387 {
5388 op1 = XEXP (op1, 0);
5389 switch (XINT (op1, 1))
5390 {
5391 case UNSPEC_DTPREL:
5392 reloc16 = NULL;
5393 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5394 break;
5395 case UNSPEC_TPREL:
5396 reloc16 = NULL;
5397 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5398 break;
5399 default:
5400 output_operand_lossage ("unknown relocation unspec");
5401 return;
5402 }
5403
5404 output_addr_const (file, XVECEXP (op1, 0, 0));
5405 }
5406 else
5407 {
5408 reloc16 = "gprel";
5409 reloclo = "gprellow";
5410 output_addr_const (file, op1);
5411 }
5412
1eb356b9 5413 if (offset)
4a0a75dd 5414 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
f676971a 5415
1eb356b9 5416 addr = XEXP (addr, 0);
56daab84
NS
5417 switch (GET_CODE (addr))
5418 {
5419 case REG:
5420 basereg = REGNO (addr);
5421 break;
5422
5423 case SUBREG:
5424 basereg = subreg_regno (addr);
5425 break;
5426
5427 default:
5428 gcc_unreachable ();
5429 }
133d3133
RH
5430
5431 fprintf (file, "($%d)\t\t!%s", basereg,
6f9b006d 5432 (basereg == 29 ? reloc16 : reloclo));
1eb356b9
RH
5433 return;
5434 }
5435
56daab84
NS
5436 switch (GET_CODE (addr))
5437 {
5438 case REG:
5439 basereg = REGNO (addr);
5440 break;
5441
5442 case SUBREG:
5443 basereg = subreg_regno (addr);
5444 break;
5445
5446 case CONST_INT:
5447 offset = INTVAL (addr);
5448 break;
1330f7d5 5449
56daab84 5450 case SYMBOL_REF:
20a951e6 5451 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
1330f7d5
DR
5452 fprintf (file, "%s", XSTR (addr, 0));
5453 return;
56daab84
NS
5454
5455 case CONST:
20a951e6 5456 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
56daab84
NS
5457 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5458 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
74eda121 5459 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
1330f7d5
DR
5460 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5461 INTVAL (XEXP (XEXP (addr, 0), 1)));
5462 return;
20a951e6 5463
56daab84 5464 default:
20a951e6
RH
5465 output_operand_lossage ("invalid operand address");
5466 return;
56daab84 5467 }
714b019c 5468
4a0a75dd 5469 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
714b019c 5470}
a6f12d7c 5471\f
9ec36da5 5472/* Emit RTL insns to initialize the variable parts of a trampoline at
2d7b663a
RH
5473 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5474 for the static chain value for the function. */
c714f03d 5475
2d7b663a
RH
5476static void
5477alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
9ec36da5 5478{
2d7b663a
RH
5479 rtx fnaddr, mem, word1, word2;
5480
5481 fnaddr = XEXP (DECL_RTL (fndecl), 0);
9ec36da5 5482
d2692ef8 5483#ifdef POINTERS_EXTEND_UNSIGNED
2d7b663a
RH
5484 fnaddr = convert_memory_address (Pmode, fnaddr);
5485 chain_value = convert_memory_address (Pmode, chain_value);
d2692ef8
DT
5486#endif
5487
fe2786f5
DR
5488 if (TARGET_ABI_OPEN_VMS)
5489 {
fe2786f5
DR
5490 const char *fnname;
5491 char *trname;
5492
5493 /* Construct the name of the trampoline entry point. */
5494 fnname = XSTR (fnaddr, 0);
5495 trname = (char *) alloca (strlen (fnname) + 5);
5496 strcpy (trname, fnname);
5497 strcat (trname, "..tr");
2d7b663a
RH
5498 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5499 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
fe2786f5
DR
5500
5501 /* Trampoline (or "bounded") procedure descriptor is constructed from
5502 the function's procedure descriptor with certain fields zeroed IAW
5503 the VMS calling standard. This is stored in the first quadword. */
2d7b663a 5504 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
4522854a 5505 word1 = expand_and (DImode, word1,
fd2d9121
RH
5506 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5507 NULL);
fe2786f5 5508 }
2d7b663a
RH
5509 else
5510 {
5511 /* These 4 instructions are:
5512 ldq $1,24($27)
5513 ldq $27,16($27)
5514 jmp $31,($27),0
5515 nop
5516 We don't bother setting the HINT field of the jump; the nop
5517 is merely there for padding. */
fd2d9121
RH
5518 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5519 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
2d7b663a
RH
5520 }
5521
5522 /* Store the first two words, as computed above. */
5523 mem = adjust_address (m_tramp, DImode, 0);
5524 emit_move_insn (mem, word1);
5525 mem = adjust_address (m_tramp, DImode, 8);
5526 emit_move_insn (mem, word2);
5527
5528 /* Store function address and static chain value. */
5529 mem = adjust_address (m_tramp, Pmode, 16);
5530 emit_move_insn (mem, fnaddr);
5531 mem = adjust_address (m_tramp, Pmode, 24);
5532 emit_move_insn (mem, chain_value);
fe2786f5 5533
42d085c1 5534 if (TARGET_ABI_OSF)
2d7b663a
RH
5535 {
5536 emit_insn (gen_imb ());
10e48e39 5537#ifdef HAVE_ENABLE_EXECUTE_STACK
2d7b663a 5538 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
db69559b 5539 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9ec36da5 5540#endif
2d7b663a 5541 }
9ec36da5
JL
5542}
5543\f
5495cc55
RH
5544/* Determine where to put an argument to a function.
5545 Value is zero to push the argument on the stack,
5546 or a hard register in which to store the argument.
5547
5495cc55
RH
5548 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5549 the preceding args and about the function being called.
6783fdb7 5550 ARG is a description of the argument.
5495cc55
RH
5551
5552 On Alpha the first 6 words of args are normally in registers
5553 and the rest are pushed. */
5554
0c3a9758 5555static rtx
6783fdb7 5556alpha_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
5495cc55 5557{
d5cc9181 5558 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5495cc55 5559 int basereg;
a82c7f05 5560 int num_args;
5495cc55 5561
7e4fb06a 5562 /* Don't get confused and pass small structures in FP registers. */
6783fdb7 5563 if (arg.aggregate_type_p ())
30102605 5564 basereg = 16;
7e4fb06a
RH
5565 else
5566 {
42ba5130 5567 /* With alpha_split_complex_arg, we shouldn't see any raw complex
7e4fb06a 5568 values here. */
6783fdb7 5569 gcc_checking_assert (!COMPLEX_MODE_P (arg.mode));
7e4fb06a
RH
5570
5571 /* Set up defaults for FP operands passed in FP registers, and
5572 integral operands passed in integer registers. */
6783fdb7 5573 if (TARGET_FPREGS && GET_MODE_CLASS (arg.mode) == MODE_FLOAT)
7e4fb06a
RH
5574 basereg = 32 + 16;
5575 else
5576 basereg = 16;
5577 }
30102605
RH
5578
5579 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
0c3a9758 5580 the two platforms, so we can't avoid conditional compilation. */
be7b80f4 5581#if TARGET_ABI_OPEN_VMS
30102605 5582 {
6783fdb7 5583 if (arg.end_marker_p ())
1f5576a8 5584 return alpha_arg_info_reg_val (*cum);
be7b80f4 5585
0c3a9758 5586 num_args = cum->num_args;
fe984136 5587 if (num_args >= 6
6783fdb7 5588 || targetm.calls.must_pass_in_stack (arg.mode, arg.type))
30102605
RH
5589 return NULL_RTX;
5590 }
7e4fb06a 5591#elif TARGET_ABI_OSF
30102605 5592 {
0c3a9758 5593 if (*cum >= 6)
30102605 5594 return NULL_RTX;
0c3a9758 5595 num_args = *cum;
30102605 5596
6783fdb7 5597 if (arg.end_marker_p ())
30102605 5598 basereg = 16;
6783fdb7 5599 else if (targetm.calls.must_pass_in_stack (arg.mode, arg.type))
30102605 5600 return NULL_RTX;
30102605 5601 }
7e4fb06a
RH
5602#else
5603#error Unhandled ABI
5604#endif
5495cc55 5605
6783fdb7 5606 return gen_rtx_REG (arg.mode, num_args + basereg);
5495cc55
RH
5607}
5608
6930c98c 5609/* Update the data in CUM to advance over argument ARG. */
0c3a9758
NF
5610
5611static void
6930c98c
RS
5612alpha_function_arg_advance (cumulative_args_t cum_v,
5613 const function_arg_info &arg)
0c3a9758 5614{
d5cc9181 5615 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6930c98c
RS
5616 bool onstack = targetm.calls.must_pass_in_stack (arg.mode, arg.type);
5617 int increment = onstack ? 6 : ALPHA_ARG_SIZE (arg.mode, arg.type);
0c3a9758
NF
5618
5619#if TARGET_ABI_OSF
5620 *cum += increment;
5621#else
5622 if (!onstack && cum->num_args < 6)
6930c98c 5623 cum->atypes[cum->num_args] = alpha_arg_type (arg.mode);
0c3a9758
NF
5624 cum->num_args += increment;
5625#endif
5626}
5627
78a52f11 5628static int
a7c81bc1 5629alpha_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
78a52f11
RH
5630{
5631 int words = 0;
d5cc9181 5632 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
78a52f11
RH
5633
5634#if TARGET_ABI_OPEN_VMS
5635 if (cum->num_args < 6
a7c81bc1 5636 && 6 < cum->num_args + ALPHA_ARG_SIZE (arg.mode, arg.type))
907f033f 5637 words = 6 - cum->num_args;
78a52f11 5638#elif TARGET_ABI_OSF
a7c81bc1 5639 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (arg.mode, arg.type))
78a52f11
RH
5640 words = 6 - *cum;
5641#else
5642#error Unhandled ABI
5643#endif
5644
5645 return words * UNITS_PER_WORD;
5646}
5647
5648
7e4fb06a
RH
5649/* Return true if TYPE must be returned in memory, instead of in registers. */
5650
f93c2180 5651static bool
586de218 5652alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
7e4fb06a 5653{
ef4bddc2 5654 machine_mode mode = VOIDmode;
7e4fb06a
RH
5655 int size;
5656
5657 if (type)
5658 {
5659 mode = TYPE_MODE (type);
5660
050d3f9d
VF
5661 /* All aggregates are returned in memory, except on OpenVMS where
5662 records that fit 64 bits should be returned by immediate value
5663 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5664 if (TARGET_ABI_OPEN_VMS
5665 && TREE_CODE (type) != ARRAY_TYPE
5666 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5667 return false;
5668
7e4fb06a
RH
5669 if (AGGREGATE_TYPE_P (type))
5670 return true;
5671 }
5672
5673 size = GET_MODE_SIZE (mode);
5674 switch (GET_MODE_CLASS (mode))
5675 {
5676 case MODE_VECTOR_FLOAT:
5677 /* Pass all float vectors in memory, like an aggregate. */
5678 return true;
5679
5680 case MODE_COMPLEX_FLOAT:
5681 /* We judge complex floats on the size of their element,
5682 not the size of the whole type. */
5683 size = GET_MODE_UNIT_SIZE (mode);
5684 break;
5685
5686 case MODE_INT:
5687 case MODE_FLOAT:
5688 case MODE_COMPLEX_INT:
5689 case MODE_VECTOR_INT:
5690 break;
5691
5692 default:
f676971a 5693 /* ??? We get called on all sorts of random stuff from
56daab84
NS
5694 aggregate_value_p. We must return something, but it's not
5695 clear what's safe to return. Pretend it's a struct I
5696 guess. */
7e4fb06a
RH
5697 return true;
5698 }
5699
5700 /* Otherwise types must fit in one register. */
5701 return size > UNITS_PER_WORD;
5702}
5703
52090e4d 5704/* Return true if ARG should be passed by invisible reference. */
8cd5a4e0
RH
5705
5706static bool
52090e4d 5707alpha_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
8cd5a4e0 5708{
809aff74
UB
5709 /* Pass float and _Complex float variable arguments by reference.
5710 This avoids 64-bit store from a FP register to a pretend args save area
5711 and subsequent 32-bit load from the saved location to a FP register.
5712
5713 Note that 32-bit loads and stores to/from a FP register on alpha reorder
5714 bits to form a canonical 64-bit value in the FP register. This fact
5715 invalidates compiler assumption that 32-bit FP value lives in the lower
5716 32-bits of the passed 64-bit FP value, so loading the 32-bit value from
5717 the stored 64-bit location using 32-bit FP load is invalid on alpha.
5718
5719 This introduces sort of ABI incompatibility, but until _Float32 was
5720 introduced, C-family languages promoted 32-bit float variable arg to
5721 a 64-bit double, and it was not allowed to pass float as a varible
5722 argument. Passing _Complex float as a variable argument never
5723 worked on alpha. Thus, we have no backward compatibility issues
5724 to worry about, and passing unpromoted _Float32 and _Complex float
5725 as a variable argument will actually work in the future. */
5726
52090e4d
RS
5727 if (arg.mode == SFmode || arg.mode == SCmode)
5728 return !arg.named;
809aff74 5729
52090e4d 5730 return arg.mode == TFmode || arg.mode == TCmode;
8cd5a4e0
RH
5731}
5732
7e4fb06a
RH
5733/* Define how to find the value returned by a function. VALTYPE is the
5734 data type of the value (as a tree). If the precise function being
5735 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5736 MODE is set instead of VALTYPE for libcalls.
5737
5738 On Alpha the value is found in $0 for integer functions and
5739 $f0 for floating-point functions. */
5740
ef995717
UB
5741static rtx
5742alpha_function_value_1 (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5743 machine_mode mode)
7e4fb06a 5744{
d58770e7 5745 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
0a2aaacc 5746 enum mode_class mclass;
7e4fb06a 5747
56daab84 5748 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
7e4fb06a
RH
5749
5750 if (valtype)
5751 mode = TYPE_MODE (valtype);
5752
0a2aaacc
KG
5753 mclass = GET_MODE_CLASS (mode);
5754 switch (mclass)
7e4fb06a
RH
5755 {
5756 case MODE_INT:
050d3f9d
VF
5757 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5758 where we have them returning both SImode and DImode. */
5759 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5760 PROMOTE_MODE (mode, dummy, valtype);
5efb1046 5761 /* FALLTHRU */
7e4fb06a
RH
5762
5763 case MODE_COMPLEX_INT:
5764 case MODE_VECTOR_INT:
5765 regnum = 0;
5766 break;
5767
5768 case MODE_FLOAT:
5769 regnum = 32;
5770 break;
5771
5772 case MODE_COMPLEX_FLOAT:
5773 {
ef4bddc2 5774 machine_mode cmode = GET_MODE_INNER (mode);
7e4fb06a
RH
5775
5776 return gen_rtx_PARALLEL
5777 (VOIDmode,
5778 gen_rtvec (2,
5779 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
a556fd39 5780 const0_rtx),
7e4fb06a
RH
5781 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5782 GEN_INT (GET_MODE_SIZE (cmode)))));
5783 }
5784
050d3f9d
VF
5785 case MODE_RANDOM:
5786 /* We should only reach here for BLKmode on VMS. */
5787 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5788 regnum = 0;
5789 break;
5790
7e4fb06a 5791 default:
56daab84 5792 gcc_unreachable ();
7e4fb06a
RH
5793 }
5794
5795 return gen_rtx_REG (mode, regnum);
5796}
5797
ef995717
UB
5798/* Implement TARGET_FUNCTION_VALUE. */
5799
5800static rtx
5801alpha_function_value (const_tree valtype, const_tree fn_decl_or_type,
5802 bool /*outgoing*/)
5803{
5804 return alpha_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
5805}
5806
5807/* Implement TARGET_LIBCALL_VALUE. */
5808
5809static rtx
5810alpha_libcall_value (machine_mode mode, const_rtx /*fun*/)
5811{
5812 return alpha_function_value_1 (NULL_TREE, NULL_TREE, mode);
5813}
5814
5815/* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5816
5817 On the Alpha, $0 $1 and $f0 $f1 are the only register thus used. */
5818
5819static bool
5820alpha_function_value_regno_p (const unsigned int regno)
5821{
5822 return (regno == 0 || regno == 1 || regno == 32 || regno == 33);
5823}
5824
f676971a 5825/* TCmode complex values are passed by invisible reference. We
42ba5130
RH
5826 should not split these values. */
5827
5828static bool
3101faab 5829alpha_split_complex_arg (const_tree type)
42ba5130
RH
5830{
5831 return TYPE_MODE (type) != TCmode;
5832}
5833
c35d187f
RH
5834static tree
5835alpha_build_builtin_va_list (void)
a6f12d7c 5836{
5849d27c 5837 tree base, ofs, space, record, type_decl;
a6f12d7c 5838
75db85d8 5839 if (TARGET_ABI_OPEN_VMS)
63966b3b
RH
5840 return ptr_type_node;
5841
f1e639b1 5842 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4c4bde29
AH
5843 type_decl = build_decl (BUILTINS_LOCATION,
5844 TYPE_DECL, get_identifier ("__va_list_tag"), record);
0fd2eac2 5845 TYPE_STUB_DECL (record) = type_decl;
d4b15af9
RH
5846 TYPE_NAME (record) = type_decl;
5847
63966b3b 5848 /* C++? SET_IS_AGGR_TYPE (record, 1); */
a6f12d7c 5849
5849d27c 5850 /* Dummy field to prevent alignment warnings. */
4c4bde29
AH
5851 space = build_decl (BUILTINS_LOCATION,
5852 FIELD_DECL, NULL_TREE, integer_type_node);
5849d27c
RH
5853 DECL_FIELD_CONTEXT (space) = record;
5854 DECL_ARTIFICIAL (space) = 1;
5855 DECL_IGNORED_P (space) = 1;
5856
4c4bde29
AH
5857 ofs = build_decl (BUILTINS_LOCATION,
5858 FIELD_DECL, get_identifier ("__offset"),
63966b3b
RH
5859 integer_type_node);
5860 DECL_FIELD_CONTEXT (ofs) = record;
910ad8de 5861 DECL_CHAIN (ofs) = space;
29587b1c 5862
4c4bde29
AH
5863 base = build_decl (BUILTINS_LOCATION,
5864 FIELD_DECL, get_identifier ("__base"),
63966b3b
RH
5865 ptr_type_node);
5866 DECL_FIELD_CONTEXT (base) = record;
910ad8de 5867 DECL_CHAIN (base) = ofs;
29587b1c 5868
63966b3b
RH
5869 TYPE_FIELDS (record) = base;
5870 layout_type (record);
5871
9d30f3c1 5872 va_list_gpr_counter_field = ofs;
63966b3b
RH
5873 return record;
5874}
5875
3f620b5f 5876#if TARGET_ABI_OSF
9d30f3c1
JJ
5877/* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5878 and constant additions. */
5879
355fe088 5880static gimple *
9d30f3c1
JJ
5881va_list_skip_additions (tree lhs)
5882{
355fe088 5883 gimple *stmt;
9d30f3c1
JJ
5884
5885 for (;;)
5886 {
777b1fbe
JJ
5887 enum tree_code code;
5888
9d30f3c1
JJ
5889 stmt = SSA_NAME_DEF_STMT (lhs);
5890
777b1fbe 5891 if (gimple_code (stmt) == GIMPLE_PHI)
9d30f3c1
JJ
5892 return stmt;
5893
777b1fbe
JJ
5894 if (!is_gimple_assign (stmt)
5895 || gimple_assign_lhs (stmt) != lhs)
5896 return NULL;
9d30f3c1 5897
777b1fbe
JJ
5898 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5899 return stmt;
5900 code = gimple_assign_rhs_code (stmt);
5901 if (!CONVERT_EXPR_CODE_P (code)
5902 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5903 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
cc269bb6 5904 || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
777b1fbe 5905 return stmt;
9d30f3c1 5906
777b1fbe 5907 lhs = gimple_assign_rhs1 (stmt);
9d30f3c1
JJ
5908 }
5909}
5910
5911/* Check if LHS = RHS statement is
5912 LHS = *(ap.__base + ap.__offset + cst)
5913 or
5914 LHS = *(ap.__base
5915 + ((ap.__offset + cst <= 47)
5916 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5917 If the former, indicate that GPR registers are needed,
5918 if the latter, indicate that FPR registers are needed.
138ae41e
RH
5919
5920 Also look for LHS = (*ptr).field, where ptr is one of the forms
5921 listed above.
5922
9d30f3c1 5923 On alpha, cfun->va_list_gpr_size is used as size of the needed
138ae41e
RH
5924 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5925 registers are needed and bit 1 set if FPR registers are needed.
5926 Return true if va_list references should not be scanned for the
5927 current statement. */
9d30f3c1
JJ
5928
5929static bool
355fe088 5930alpha_stdarg_optimize_hook (struct stdarg_info *si, const gimple *stmt)
9d30f3c1 5931{
777b1fbe 5932 tree base, offset, rhs;
9d30f3c1 5933 int offset_arg = 1;
355fe088 5934 gimple *base_stmt;
9d30f3c1 5935
777b1fbe
JJ
5936 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5937 != GIMPLE_SINGLE_RHS)
5938 return false;
5939
5940 rhs = gimple_assign_rhs1 (stmt);
138ae41e
RH
5941 while (handled_component_p (rhs))
5942 rhs = TREE_OPERAND (rhs, 0);
70f34814 5943 if (TREE_CODE (rhs) != MEM_REF
9d30f3c1
JJ
5944 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5945 return false;
5946
777b1fbe
JJ
5947 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5948 if (stmt == NULL
5949 || !is_gimple_assign (stmt)
5950 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
9d30f3c1
JJ
5951 return false;
5952
777b1fbe 5953 base = gimple_assign_rhs1 (stmt);
9d30f3c1 5954 if (TREE_CODE (base) == SSA_NAME)
777b1fbe
JJ
5955 {
5956 base_stmt = va_list_skip_additions (base);
5957 if (base_stmt
5958 && is_gimple_assign (base_stmt)
5959 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5960 base = gimple_assign_rhs1 (base_stmt);
5961 }
9d30f3c1
JJ
5962
5963 if (TREE_CODE (base) != COMPONENT_REF
5964 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5965 {
777b1fbe 5966 base = gimple_assign_rhs2 (stmt);
9d30f3c1 5967 if (TREE_CODE (base) == SSA_NAME)
777b1fbe
JJ
5968 {
5969 base_stmt = va_list_skip_additions (base);
5970 if (base_stmt
5971 && is_gimple_assign (base_stmt)
5972 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5973 base = gimple_assign_rhs1 (base_stmt);
5974 }
9d30f3c1
JJ
5975
5976 if (TREE_CODE (base) != COMPONENT_REF
5977 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5978 return false;
5979
5980 offset_arg = 0;
5981 }
5982
5983 base = get_base_address (base);
5984 if (TREE_CODE (base) != VAR_DECL
fcecf84f 5985 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
9d30f3c1
JJ
5986 return false;
5987
777b1fbe 5988 offset = gimple_op (stmt, 1 + offset_arg);
9d30f3c1 5989 if (TREE_CODE (offset) == SSA_NAME)
9d30f3c1 5990 {
355fe088 5991 gimple *offset_stmt = va_list_skip_additions (offset);
9d30f3c1 5992
777b1fbe
JJ
5993 if (offset_stmt
5994 && gimple_code (offset_stmt) == GIMPLE_PHI)
9d30f3c1 5995 {
777b1fbe 5996 HOST_WIDE_INT sub;
355fe088 5997 gimple *arg1_stmt, *arg2_stmt;
777b1fbe
JJ
5998 tree arg1, arg2;
5999 enum tree_code code1, code2;
9d30f3c1 6000
777b1fbe 6001 if (gimple_phi_num_args (offset_stmt) != 2)
3f620b5f 6002 goto escapes;
9d30f3c1 6003
777b1fbe
JJ
6004 arg1_stmt
6005 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6006 arg2_stmt
6007 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6008 if (arg1_stmt == NULL
6009 || !is_gimple_assign (arg1_stmt)
6010 || arg2_stmt == NULL
6011 || !is_gimple_assign (arg2_stmt))
6012 goto escapes;
9d30f3c1 6013
777b1fbe
JJ
6014 code1 = gimple_assign_rhs_code (arg1_stmt);
6015 code2 = gimple_assign_rhs_code (arg2_stmt);
6016 if (code1 == COMPONENT_REF
6017 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6018 /* Do nothing. */;
6019 else if (code2 == COMPONENT_REF
6020 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6021 {
7159f19c 6022 std::swap (arg1_stmt, arg2_stmt);
777b1fbe 6023 code2 = code1;
777b1fbe
JJ
6024 }
6025 else
6026 goto escapes;
3f620b5f 6027
9541ffee 6028 if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
777b1fbe 6029 goto escapes;
3f620b5f 6030
9439e9a1 6031 sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
777b1fbe
JJ
6032 if (code2 == MINUS_EXPR)
6033 sub = -sub;
6034 if (sub < -48 || sub > -32)
6035 goto escapes;
9d30f3c1 6036
777b1fbe
JJ
6037 arg1 = gimple_assign_rhs1 (arg1_stmt);
6038 arg2 = gimple_assign_rhs1 (arg2_stmt);
6039 if (TREE_CODE (arg2) == SSA_NAME)
6040 {
6041 arg2_stmt = va_list_skip_additions (arg2);
6042 if (arg2_stmt == NULL
6043 || !is_gimple_assign (arg2_stmt)
6044 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6045 goto escapes;
6046 arg2 = gimple_assign_rhs1 (arg2_stmt);
6047 }
6048 if (arg1 != arg2)
6049 goto escapes;
6050
6051 if (TREE_CODE (arg1) != COMPONENT_REF
6052 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6053 || get_base_address (arg1) != base)
6054 goto escapes;
6055
6056 /* Need floating point regs. */
6057 cfun->va_list_fpr_size |= 2;
6058 return false;
6059 }
6060 if (offset_stmt
6061 && is_gimple_assign (offset_stmt)
6062 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6063 offset = gimple_assign_rhs1 (offset_stmt);
6064 }
6065 if (TREE_CODE (offset) != COMPONENT_REF
6066 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6067 || get_base_address (offset) != base)
9d30f3c1
JJ
6068 goto escapes;
6069 else
6070 /* Need general regs. */
6071 cfun->va_list_fpr_size |= 1;
6072 return false;
6073
6074escapes:
6075 si->va_list_escapes = true;
6076 return false;
6077}
3f620b5f 6078#endif
9d30f3c1 6079
35d9c403 6080/* Perform any needed actions needed for a function that is receiving a
f93c2180 6081 variable number of arguments. */
35d9c403 6082
f93c2180 6083static void
e7056ca4
RS
6084alpha_setup_incoming_varargs (cumulative_args_t pcum,
6085 const function_arg_info &arg,
6086 int *pretend_size, int no_rtl)
f93c2180 6087{
d5cc9181 6088 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
bae47977
RH
6089
6090 /* Skip the current argument. */
6930c98c 6091 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), arg);
bae47977 6092
75db85d8 6093#if TARGET_ABI_OPEN_VMS
f93c2180 6094 /* For VMS, we allocate space for all 6 arg registers plus a count.
35d9c403 6095
f93c2180
RH
6096 However, if NO registers need to be saved, don't allocate any space.
6097 This is not only because we won't need the space, but because AP
6098 includes the current_pretend_args_size and we don't want to mess up
6099 any ap-relative addresses already made. */
bae47977 6100 if (cum.num_args < 6)
f93c2180
RH
6101 {
6102 if (!no_rtl)
6103 {
6104 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6105 emit_insn (gen_arg_home ());
6106 }
6107 *pretend_size = 7 * UNITS_PER_WORD;
6108 }
6109#else
6110 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6111 only push those that are remaining. However, if NO registers need to
6112 be saved, don't allocate any space. This is not only because we won't
6113 need the space, but because AP includes the current_pretend_args_size
6114 and we don't want to mess up any ap-relative addresses already made.
6115
6116 If we are not to use the floating-point registers, save the integer
6117 registers where we would put the floating-point registers. This is
6118 not the most efficient way to implement varargs with just one register
6119 class, but it isn't worth doing anything more efficient in this rare
6120 case. */
35d9c403
RH
6121 if (cum >= 6)
6122 return;
6123
6124 if (!no_rtl)
6125 {
4862826d
ILT
6126 int count;
6127 alias_set_type set = get_varargs_alias_set ();
35d9c403
RH
6128 rtx tmp;
6129
3f620b5f
RH
6130 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6131 if (count > 6 - cum)
6132 count = 6 - cum;
35d9c403 6133
3f620b5f
RH
6134 /* Detect whether integer registers or floating-point registers
6135 are needed by the detected va_arg statements. See above for
6136 how these values are computed. Note that the "escape" value
6137 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6138 these bits set. */
6139 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6140
6141 if (cfun->va_list_fpr_size & 1)
6142 {
6143 tmp = gen_rtx_MEM (BLKmode,
0a81f074 6144 plus_constant (Pmode, virtual_incoming_args_rtx,
3f620b5f 6145 (cum + 6) * UNITS_PER_WORD));
8476af98 6146 MEM_NOTRAP_P (tmp) = 1;
3f620b5f
RH
6147 set_mem_alias_set (tmp, set);
6148 move_block_from_reg (16 + cum, tmp, count);
6149 }
6150
6151 if (cfun->va_list_fpr_size & 2)
6152 {
6153 tmp = gen_rtx_MEM (BLKmode,
0a81f074 6154 plus_constant (Pmode, virtual_incoming_args_rtx,
3f620b5f 6155 cum * UNITS_PER_WORD));
8476af98 6156 MEM_NOTRAP_P (tmp) = 1;
3f620b5f
RH
6157 set_mem_alias_set (tmp, set);
6158 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6159 }
6160 }
35d9c403 6161 *pretend_size = 12 * UNITS_PER_WORD;
a5fe455b 6162#endif
f93c2180 6163}
35d9c403 6164
d7bd8aeb 6165static void
a5c24926 6166alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
63966b3b
RH
6167{
6168 HOST_WIDE_INT offset;
6169 tree t, offset_field, base_field;
29587b1c 6170
bdb429a5
RK
6171 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6172 return;
6173
bd5bd7ac 6174 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
63966b3b
RH
6175 up by 48, storing fp arg registers in the first 48 bytes, and the
6176 integer arg registers in the next 48 bytes. This is only done,
6177 however, if any integer registers need to be stored.
6178
6179 If no integer registers need be stored, then we must subtract 48
6180 in order to account for the integer arg registers which are counted
35d9c403
RH
6181 in argsize above, but which are not actually stored on the stack.
6182 Must further be careful here about structures straddling the last
f676971a 6183 integer argument register; that futzes with pretend_args_size,
35d9c403 6184 which changes the meaning of AP. */
63966b3b 6185
bae47977 6186 if (NUM_ARGS < 6)
f7130778 6187 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
89cfc2c6 6188 else
38173d38 6189 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
63966b3b 6190
f7130778
DR
6191 if (TARGET_ABI_OPEN_VMS)
6192 {
050d3f9d 6193 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
5d49b6a7 6194 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
050d3f9d 6195 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
f7130778 6196 TREE_SIDE_EFFECTS (t) = 1;
f7130778
DR
6197 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6198 }
6199 else
6200 {
6201 base_field = TYPE_FIELDS (TREE_TYPE (valist));
910ad8de 6202 offset_field = DECL_CHAIN (base_field);
f7130778 6203
47a25a46
RG
6204 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6205 valist, base_field, NULL_TREE);
6206 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6207 valist, offset_field, NULL_TREE);
f7130778
DR
6208
6209 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
5d49b6a7 6210 t = fold_build_pointer_plus_hwi (t, offset);
726a989a 6211 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
f7130778
DR
6212 TREE_SIDE_EFFECTS (t) = 1;
6213 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6214
7d60be94 6215 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
726a989a 6216 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
f7130778
DR
6217 TREE_SIDE_EFFECTS (t) = 1;
6218 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6219 }
63966b3b
RH
6220}
6221
28245018 6222static tree
777b1fbe 6223alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
726a989a 6224 gimple_seq *pre_p)
28245018 6225{
777b1fbe
JJ
6226 tree type_size, ptr_type, addend, t, addr;
6227 gimple_seq internal_post;
28245018 6228
28245018
RH
6229 /* If the type could not be passed in registers, skip the block
6230 reserved for the registers. */
4f53599c 6231 if (must_pass_va_arg_in_stack (type))
28245018 6232 {
7d60be94 6233 t = build_int_cst (TREE_TYPE (offset), 6*8);
726a989a
RB
6234 gimplify_assign (offset,
6235 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6236 pre_p);
28245018
RH
6237 }
6238
6239 addend = offset;
1f063d10 6240 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
28245018 6241
08b0dc1b 6242 if (TREE_CODE (type) == COMPLEX_TYPE)
28245018
RH
6243 {
6244 tree real_part, imag_part, real_temp;
6245
65da5a20
RH
6246 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6247 offset, pre_p);
6248
6249 /* Copy the value into a new temporary, lest the formal temporary
28245018 6250 be reused out from under us. */
65da5a20 6251 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
28245018 6252
65da5a20
RH
6253 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6254 offset, pre_p);
28245018 6255
47a25a46 6256 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
28245018
RH
6257 }
6258 else if (TREE_CODE (type) == REAL_TYPE)
6259 {
6260 tree fpaddend, cond, fourtyeight;
6261
7d60be94 6262 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
47a25a46
RG
6263 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6264 addend, fourtyeight);
6265 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6266 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6267 fpaddend, addend);
28245018
RH
6268 }
6269
6270 /* Build the final address and force that value into a temporary. */
5d49b6a7 6271 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
65da5a20
RH
6272 internal_post = NULL;
6273 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
777b1fbe 6274 gimple_seq_add_seq (pre_p, internal_post);
28245018
RH
6275
6276 /* Update the offset field. */
65da5a20
RH
6277 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6278 if (type_size == NULL || TREE_OVERFLOW (type_size))
6279 t = size_zero_node;
6280 else
6281 {
6282 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6283 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6284 t = size_binop (MULT_EXPR, t, size_int (8));
6285 }
6286 t = fold_convert (TREE_TYPE (offset), t);
726a989a
RB
6287 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6288 pre_p);
28245018 6289
d6e9821f 6290 return build_va_arg_indirect_ref (addr);
28245018
RH
6291}
6292
23a60a04 6293static tree
726a989a
RB
6294alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6295 gimple_seq *post_p)
28245018 6296{
23a60a04 6297 tree offset_field, base_field, offset, base, t, r;
08b0dc1b 6298 bool indirect;
28245018 6299
75db85d8 6300 if (TARGET_ABI_OPEN_VMS)
23a60a04 6301 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
28245018
RH
6302
6303 base_field = TYPE_FIELDS (va_list_type_node);
910ad8de 6304 offset_field = DECL_CHAIN (base_field);
47a25a46
RG
6305 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6306 valist, base_field, NULL_TREE);
6307 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6308 valist, offset_field, NULL_TREE);
28245018 6309
65da5a20
RH
6310 /* Pull the fields of the structure out into temporaries. Since we never
6311 modify the base field, we can use a formal temporary. Sign-extend the
6312 offset field so that it's the proper width for pointer arithmetic. */
6313 base = get_formal_tmp_var (base_field, pre_p);
28245018 6314
21fa2faf 6315 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
65da5a20 6316 offset = get_initialized_tmp_var (t, pre_p, NULL);
28245018 6317
fde65a89 6318 indirect = pass_va_arg_by_reference (type);
647c5e3e 6319
08b0dc1b 6320 if (indirect)
647c5e3e
UB
6321 {
6322 if (TREE_CODE (type) == COMPLEX_TYPE
6323 && targetm.calls.split_complex_arg (type))
6324 {
6325 tree real_part, imag_part, real_temp;
6326
6327 tree ptr_type = build_pointer_type_for_mode (TREE_TYPE (type),
6328 ptr_mode, true);
6329
6330 real_part = alpha_gimplify_va_arg_1 (ptr_type, base,
6331 offset, pre_p);
6332 real_part = build_va_arg_indirect_ref (real_part);
6333
6334 /* Copy the value into a new temporary, lest the formal temporary
6335 be reused out from under us. */
6336 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6337
6338 imag_part = alpha_gimplify_va_arg_1 (ptr_type, base,
6339 offset, pre_p);
6340 imag_part = build_va_arg_indirect_ref (imag_part);
6341
6342 r = build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6343
6344 /* Stuff the offset temporary back into its field. */
6345 gimplify_assign (unshare_expr (offset_field),
6346 fold_convert (TREE_TYPE (offset_field), offset),
6347 pre_p);
6348 return r;
6349 }
6350 else
6351 type = build_pointer_type_for_mode (type, ptr_mode, true);
6352 }
08b0dc1b 6353
28245018
RH
6354 /* Find the value. Note that this will be a stable indirection, or
6355 a composite of stable indirections in the case of complex. */
65da5a20 6356 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
28245018
RH
6357
6358 /* Stuff the offset temporary back into its field. */
777b1fbe 6359 gimplify_assign (unshare_expr (offset_field),
726a989a 6360 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
23a60a04 6361
08b0dc1b 6362 if (indirect)
d6e9821f 6363 r = build_va_arg_indirect_ref (r);
08b0dc1b 6364
23a60a04 6365 return r;
28245018 6366}
a6f12d7c 6367\f
6d8fd7bb
RH
6368/* Builtins. */
6369
6370enum alpha_builtin
6371{
6372 ALPHA_BUILTIN_CMPBGE,
c4b50f1a
RH
6373 ALPHA_BUILTIN_EXTBL,
6374 ALPHA_BUILTIN_EXTWL,
6375 ALPHA_BUILTIN_EXTLL,
6d8fd7bb 6376 ALPHA_BUILTIN_EXTQL,
c4b50f1a
RH
6377 ALPHA_BUILTIN_EXTWH,
6378 ALPHA_BUILTIN_EXTLH,
6d8fd7bb 6379 ALPHA_BUILTIN_EXTQH,
c4b50f1a
RH
6380 ALPHA_BUILTIN_INSBL,
6381 ALPHA_BUILTIN_INSWL,
6382 ALPHA_BUILTIN_INSLL,
6383 ALPHA_BUILTIN_INSQL,
6384 ALPHA_BUILTIN_INSWH,
6385 ALPHA_BUILTIN_INSLH,
6386 ALPHA_BUILTIN_INSQH,
6387 ALPHA_BUILTIN_MSKBL,
6388 ALPHA_BUILTIN_MSKWL,
6389 ALPHA_BUILTIN_MSKLL,
6390 ALPHA_BUILTIN_MSKQL,
6391 ALPHA_BUILTIN_MSKWH,
6392 ALPHA_BUILTIN_MSKLH,
6393 ALPHA_BUILTIN_MSKQH,
6394 ALPHA_BUILTIN_UMULH,
6d8fd7bb
RH
6395 ALPHA_BUILTIN_ZAP,
6396 ALPHA_BUILTIN_ZAPNOT,
6397 ALPHA_BUILTIN_AMASK,
6398 ALPHA_BUILTIN_IMPLVER,
6399 ALPHA_BUILTIN_RPCC,
221cf9ab
OH
6400 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6401 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6d8fd7bb
RH
6402
6403 /* TARGET_MAX */
6404 ALPHA_BUILTIN_MINUB8,
6405 ALPHA_BUILTIN_MINSB8,
6406 ALPHA_BUILTIN_MINUW4,
6407 ALPHA_BUILTIN_MINSW4,
6408 ALPHA_BUILTIN_MAXUB8,
6409 ALPHA_BUILTIN_MAXSB8,
6410 ALPHA_BUILTIN_MAXUW4,
6411 ALPHA_BUILTIN_MAXSW4,
6412 ALPHA_BUILTIN_PERR,
6413 ALPHA_BUILTIN_PKLB,
6414 ALPHA_BUILTIN_PKWB,
6415 ALPHA_BUILTIN_UNPKBL,
6416 ALPHA_BUILTIN_UNPKBW,
6417
c4b50f1a
RH
6418 /* TARGET_CIX */
6419 ALPHA_BUILTIN_CTTZ,
6420 ALPHA_BUILTIN_CTLZ,
6421 ALPHA_BUILTIN_CTPOP,
6422
6d8fd7bb
RH
6423 ALPHA_BUILTIN_max
6424};
6425
e3136fa2 6426static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
c4b50f1a 6427 CODE_FOR_builtin_cmpbge,
0b2a7367
RH
6428 CODE_FOR_extbl,
6429 CODE_FOR_extwl,
6430 CODE_FOR_extll,
6431 CODE_FOR_extql,
6432 CODE_FOR_extwh,
6433 CODE_FOR_extlh,
6434 CODE_FOR_extqh,
c4b50f1a
RH
6435 CODE_FOR_builtin_insbl,
6436 CODE_FOR_builtin_inswl,
6437 CODE_FOR_builtin_insll,
0b2a7367
RH
6438 CODE_FOR_insql,
6439 CODE_FOR_inswh,
6440 CODE_FOR_inslh,
6441 CODE_FOR_insqh,
6442 CODE_FOR_mskbl,
6443 CODE_FOR_mskwl,
6444 CODE_FOR_mskll,
6445 CODE_FOR_mskql,
6446 CODE_FOR_mskwh,
6447 CODE_FOR_msklh,
6448 CODE_FOR_mskqh,
c4b50f1a
RH
6449 CODE_FOR_umuldi3_highpart,
6450 CODE_FOR_builtin_zap,
6451 CODE_FOR_builtin_zapnot,
6452 CODE_FOR_builtin_amask,
6453 CODE_FOR_builtin_implver,
6454 CODE_FOR_builtin_rpcc,
221cf9ab
OH
6455 CODE_FOR_builtin_establish_vms_condition_handler,
6456 CODE_FOR_builtin_revert_vms_condition_handler,
c4b50f1a
RH
6457
6458 /* TARGET_MAX */
6459 CODE_FOR_builtin_minub8,
6460 CODE_FOR_builtin_minsb8,
6461 CODE_FOR_builtin_minuw4,
6462 CODE_FOR_builtin_minsw4,
6463 CODE_FOR_builtin_maxub8,
6464 CODE_FOR_builtin_maxsb8,
6465 CODE_FOR_builtin_maxuw4,
6466 CODE_FOR_builtin_maxsw4,
6467 CODE_FOR_builtin_perr,
6468 CODE_FOR_builtin_pklb,
6469 CODE_FOR_builtin_pkwb,
6470 CODE_FOR_builtin_unpkbl,
6471 CODE_FOR_builtin_unpkbw,
6472
6473 /* TARGET_CIX */
36013987
RH
6474 CODE_FOR_ctzdi2,
6475 CODE_FOR_clzdi2,
6476 CODE_FOR_popcountdi2
c4b50f1a
RH
6477};
6478
6d8fd7bb
RH
6479struct alpha_builtin_def
6480{
6481 const char *name;
6482 enum alpha_builtin code;
6483 unsigned int target_mask;
36013987 6484 bool is_const;
6d8fd7bb
RH
6485};
6486
6487static struct alpha_builtin_def const zero_arg_builtins[] = {
36013987
RH
6488 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6489 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6d8fd7bb
RH
6490};
6491
6492static struct alpha_builtin_def const one_arg_builtins[] = {
36013987
RH
6493 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6494 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6495 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6496 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6497 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6498 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6499 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6500 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6d8fd7bb
RH
6501};
6502
6503static struct alpha_builtin_def const two_arg_builtins[] = {
36013987
RH
6504 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6505 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6506 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6507 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6508 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6509 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6510 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6511 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6512 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6513 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6514 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6515 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6516 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6517 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6518 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6519 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6520 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6521 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6522 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6523 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6524 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6525 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6526 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6527 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6528 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6529 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6530 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6531 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6532 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6533 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6534 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6535 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6536 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6537 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6d8fd7bb
RH
6538};
6539
64a5dc56 6540static GTY(()) tree alpha_dimode_u;
36013987
RH
6541static GTY(()) tree alpha_v8qi_u;
6542static GTY(()) tree alpha_v8qi_s;
6543static GTY(()) tree alpha_v4hi_u;
6544static GTY(()) tree alpha_v4hi_s;
6545
fd930388
RH
6546static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6547
6548/* Return the alpha builtin for CODE. */
6549
6550static tree
6551alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6552{
6553 if (code >= ALPHA_BUILTIN_max)
6554 return error_mark_node;
6555 return alpha_builtins[code];
6556}
6557
6558/* Helper function of alpha_init_builtins. Add the built-in specified
6559 by NAME, TYPE, CODE, and ECF. */
6560
6561static void
6562alpha_builtin_function (const char *name, tree ftype,
6563 enum alpha_builtin code, unsigned ecf)
6564{
6565 tree decl = add_builtin_function (name, ftype, (int) code,
6566 BUILT_IN_MD, NULL, NULL_TREE);
6567
6568 if (ecf & ECF_CONST)
6569 TREE_READONLY (decl) = 1;
6570 if (ecf & ECF_NOTHROW)
6571 TREE_NOTHROW (decl) = 1;
6572
6573 alpha_builtins [(int) code] = decl;
6574}
6575
b6e46ca1
RS
6576/* Helper function of alpha_init_builtins. Add the COUNT built-in
6577 functions pointed to by P, with function type FTYPE. */
6578
6579static void
6580alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6581 tree ftype)
6582{
b6e46ca1
RS
6583 size_t i;
6584
6585 for (i = 0; i < count; ++i, ++p)
6586 if ((target_flags & p->target_mask) == p->target_mask)
fd930388
RH
6587 alpha_builtin_function (p->name, ftype, p->code,
6588 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
b6e46ca1
RS
6589}
6590
6d8fd7bb 6591static void
a5c24926 6592alpha_init_builtins (void)
6d8fd7bb 6593{
fd930388 6594 tree ftype;
6d8fd7bb 6595
64a5dc56
RH
6596 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6597 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6598 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6599 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6600 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
f001093a 6601
64a5dc56
RH
6602 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6603 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6d8fd7bb 6604
64a5dc56
RH
6605 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6606 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6d8fd7bb 6607
64a5dc56
RH
6608 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6609 alpha_dimode_u, NULL_TREE);
6610 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
116b7a5e 6611
221cf9ab
OH
6612 if (TARGET_ABI_OPEN_VMS)
6613 {
6614 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6615 NULL_TREE);
fd930388
RH
6616 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6617 ftype,
6618 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6619 0);
221cf9ab
OH
6620
6621 ftype = build_function_type_list (ptr_type_node, void_type_node,
6622 NULL_TREE);
fd930388
RH
6623 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6624 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
db8b22ef
TG
6625
6626 vms_patch_builtins ();
221cf9ab 6627 }
6d8fd7bb
RH
6628}
6629
6630/* Expand an expression EXP that calls a built-in function,
6631 with result going to TARGET if that's convenient
6632 (and in mode MODE if that's convenient).
6633 SUBTARGET may be used as the target for computing one of EXP's operands.
6634 IGNORE is nonzero if the value is to be ignored. */
6635
6636static rtx
a5c24926
RH
6637alpha_expand_builtin (tree exp, rtx target,
6638 rtx subtarget ATTRIBUTE_UNUSED,
ef4bddc2 6639 machine_mode mode ATTRIBUTE_UNUSED,
a5c24926 6640 int ignore ATTRIBUTE_UNUSED)
6d8fd7bb 6641{
6d8fd7bb
RH
6642#define MAX_ARGS 2
6643
5039610b 6644 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
4d732405 6645 unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
5039610b
SL
6646 tree arg;
6647 call_expr_arg_iterator iter;
6d8fd7bb
RH
6648 enum insn_code icode;
6649 rtx op[MAX_ARGS], pat;
6650 int arity;
116b7a5e 6651 bool nonvoid;
6d8fd7bb
RH
6652
6653 if (fcode >= ALPHA_BUILTIN_max)
6654 internal_error ("bad builtin fcode");
6655 icode = code_for_builtin[fcode];
6656 if (icode == 0)
6657 internal_error ("bad builtin fcode");
6658
116b7a5e
RH
6659 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6660
5039610b
SL
6661 arity = 0;
6662 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6d8fd7bb
RH
6663 {
6664 const struct insn_operand_data *insn_op;
6665
6d8fd7bb
RH
6666 if (arg == error_mark_node)
6667 return NULL_RTX;
6668 if (arity > MAX_ARGS)
6669 return NULL_RTX;
6670
116b7a5e
RH
6671 insn_op = &insn_data[icode].operand[arity + nonvoid];
6672
bf758008 6673 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6d8fd7bb 6674
6d8fd7bb
RH
6675 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6676 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
5039610b 6677 arity++;
6d8fd7bb
RH
6678 }
6679
116b7a5e
RH
6680 if (nonvoid)
6681 {
ef4bddc2 6682 machine_mode tmode = insn_data[icode].operand[0].mode;
116b7a5e
RH
6683 if (!target
6684 || GET_MODE (target) != tmode
6685 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6686 target = gen_reg_rtx (tmode);
6687 }
6d8fd7bb
RH
6688
6689 switch (arity)
6690 {
6691 case 0:
6692 pat = GEN_FCN (icode) (target);
6693 break;
6694 case 1:
116b7a5e
RH
6695 if (nonvoid)
6696 pat = GEN_FCN (icode) (target, op[0]);
6697 else
6698 pat = GEN_FCN (icode) (op[0]);
6d8fd7bb
RH
6699 break;
6700 case 2:
6701 pat = GEN_FCN (icode) (target, op[0], op[1]);
6702 break;
6703 default:
56daab84 6704 gcc_unreachable ();
6d8fd7bb
RH
6705 }
6706 if (!pat)
6707 return NULL_RTX;
6708 emit_insn (pat);
6709
116b7a5e
RH
6710 if (nonvoid)
6711 return target;
6712 else
6713 return const0_rtx;
6d8fd7bb 6714}
36013987 6715
36013987 6716/* Fold the builtin for the CMPBGE instruction. This is a vector comparison
a50aa827 6717 with an 8-bit output vector. OPINT contains the integer operands; bit N
36013987
RH
6718 of OP_CONST is set if OPINT[N] is valid. */
6719
6720static tree
6721alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6722{
6723 if (op_const == 3)
6724 {
6725 int i, val;
6726 for (i = 0, val = 0; i < 8; ++i)
6727 {
6728 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6729 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6730 if (c0 >= c1)
6731 val |= 1 << i;
6732 }
64a5dc56 6733 return build_int_cst (alpha_dimode_u, val);
36013987 6734 }
18410793 6735 else if (op_const == 2 && opint[1] == 0)
64a5dc56 6736 return build_int_cst (alpha_dimode_u, 0xff);
36013987
RH
6737 return NULL;
6738}
6739
6740/* Fold the builtin for the ZAPNOT instruction. This is essentially a
6741 specialized form of an AND operation. Other byte manipulation instructions
6742 are defined in terms of this instruction, so this is also used as a
6743 subroutine for other builtins.
6744
6745 OP contains the tree operands; OPINT contains the extracted integer values.
6746 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6747 OPINT may be considered. */
6748
6749static tree
6750alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6751 long op_const)
6752{
6753 if (op_const & 2)
6754 {
6755 unsigned HOST_WIDE_INT mask = 0;
6756 int i;
6757
6758 for (i = 0; i < 8; ++i)
6759 if ((opint[1] >> i) & 1)
6760 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6761
6762 if (op_const & 1)
64a5dc56 6763 return build_int_cst (alpha_dimode_u, opint[0] & mask);
36013987
RH
6764
6765 if (op)
64a5dc56
RH
6766 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6767 build_int_cst (alpha_dimode_u, mask));
36013987
RH
6768 }
6769 else if ((op_const & 1) && opint[0] == 0)
64a5dc56 6770 return build_int_cst (alpha_dimode_u, 0);
36013987
RH
6771 return NULL;
6772}
6773
6774/* Fold the builtins for the EXT family of instructions. */
6775
6776static tree
6777alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6778 long op_const, unsigned HOST_WIDE_INT bytemask,
6779 bool is_high)
6780{
6781 long zap_const = 2;
6782 tree *zap_op = NULL;
6783
6784 if (op_const & 2)
6785 {
6786 unsigned HOST_WIDE_INT loc;
6787
6788 loc = opint[1] & 7;
0b2a7367 6789 loc *= BITS_PER_UNIT;
36013987
RH
6790
6791 if (loc != 0)
6792 {
6793 if (op_const & 1)
6794 {
6795 unsigned HOST_WIDE_INT temp = opint[0];
6796 if (is_high)
6797 temp <<= loc;
6798 else
6799 temp >>= loc;
6800 opint[0] = temp;
6801 zap_const = 3;
6802 }
6803 }
6804 else
6805 zap_op = op;
6806 }
6807
6808 opint[1] = bytemask;
6809 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6810}
6811
6812/* Fold the builtins for the INS family of instructions. */
6813
6814static tree
6815alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6816 long op_const, unsigned HOST_WIDE_INT bytemask,
6817 bool is_high)
6818{
6819 if ((op_const & 1) && opint[0] == 0)
64a5dc56 6820 return build_int_cst (alpha_dimode_u, 0);
36013987
RH
6821
6822 if (op_const & 2)
6823 {
6824 unsigned HOST_WIDE_INT temp, loc, byteloc;
6825 tree *zap_op = NULL;
6826
6827 loc = opint[1] & 7;
36013987
RH
6828 bytemask <<= loc;
6829
6830 temp = opint[0];
6831 if (is_high)
6832 {
6833 byteloc = (64 - (loc * 8)) & 0x3f;
6834 if (byteloc == 0)
6835 zap_op = op;
6836 else
6837 temp >>= byteloc;
6838 bytemask >>= 8;
6839 }
6840 else
6841 {
6842 byteloc = loc * 8;
6843 if (byteloc == 0)
6844 zap_op = op;
6845 else
6846 temp <<= byteloc;
6847 }
6848
6849 opint[0] = temp;
6850 opint[1] = bytemask;
6851 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6852 }
6853
6854 return NULL;
6855}
6856
6857static tree
6858alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6859 long op_const, unsigned HOST_WIDE_INT bytemask,
6860 bool is_high)
6861{
6862 if (op_const & 2)
6863 {
6864 unsigned HOST_WIDE_INT loc;
6865
6866 loc = opint[1] & 7;
36013987
RH
6867 bytemask <<= loc;
6868
6869 if (is_high)
6870 bytemask >>= 8;
6871
6872 opint[1] = bytemask ^ 0xff;
6873 }
6874
6875 return alpha_fold_builtin_zapnot (op, opint, op_const);
6876}
6877
36013987
RH
6878static tree
6879alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6880{
6881 tree op0 = fold_convert (vtype, op[0]);
6882 tree op1 = fold_convert (vtype, op[1]);
31ff73b5 6883 tree val = fold_build2 (code, vtype, op0, op1);
64a5dc56 6884 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
36013987
RH
6885}
6886
6887static tree
6888alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6889{
6890 unsigned HOST_WIDE_INT temp = 0;
6891 int i;
6892
6893 if (op_const != 3)
6894 return NULL;
6895
6896 for (i = 0; i < 8; ++i)
6897 {
6898 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6899 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6900 if (a >= b)
6901 temp += a - b;
6902 else
6903 temp += b - a;
6904 }
6905
64a5dc56 6906 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6907}
6908
6909static tree
6910alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6911{
6912 unsigned HOST_WIDE_INT temp;
6913
6914 if (op_const == 0)
6915 return NULL;
6916
6917 temp = opint[0] & 0xff;
6918 temp |= (opint[0] >> 24) & 0xff00;
6919
64a5dc56 6920 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6921}
6922
6923static tree
6924alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6925{
6926 unsigned HOST_WIDE_INT temp;
6927
6928 if (op_const == 0)
6929 return NULL;
6930
6931 temp = opint[0] & 0xff;
6932 temp |= (opint[0] >> 8) & 0xff00;
6933 temp |= (opint[0] >> 16) & 0xff0000;
6934 temp |= (opint[0] >> 24) & 0xff000000;
6935
64a5dc56 6936 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6937}
6938
6939static tree
6940alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6941{
6942 unsigned HOST_WIDE_INT temp;
6943
6944 if (op_const == 0)
6945 return NULL;
6946
6947 temp = opint[0] & 0xff;
6948 temp |= (opint[0] & 0xff00) << 24;
6949
64a5dc56 6950 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6951}
6952
6953static tree
6954alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6955{
6956 unsigned HOST_WIDE_INT temp;
6957
6958 if (op_const == 0)
6959 return NULL;
6960
6961 temp = opint[0] & 0xff;
6962 temp |= (opint[0] & 0x0000ff00) << 8;
6963 temp |= (opint[0] & 0x00ff0000) << 16;
6964 temp |= (opint[0] & 0xff000000) << 24;
6965
64a5dc56 6966 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6967}
6968
6969static tree
6970alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6971{
6972 unsigned HOST_WIDE_INT temp;
6973
6974 if (op_const == 0)
6975 return NULL;
6976
6977 if (opint[0] == 0)
6978 temp = 64;
6979 else
6980 temp = exact_log2 (opint[0] & -opint[0]);
6981
64a5dc56 6982 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6983}
6984
6985static tree
6986alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6987{
6988 unsigned HOST_WIDE_INT temp;
6989
6990 if (op_const == 0)
6991 return NULL;
6992
6993 if (opint[0] == 0)
6994 temp = 64;
6995 else
6996 temp = 64 - floor_log2 (opint[0]) - 1;
6997
64a5dc56 6998 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6999}
7000
7001static tree
7002alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7003{
7004 unsigned HOST_WIDE_INT temp, op;
7005
7006 if (op_const == 0)
7007 return NULL;
7008
7009 op = opint[0];
7010 temp = 0;
7011 while (op)
7012 temp++, op &= op - 1;
7013
64a5dc56 7014 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
7015}
7016
7017/* Fold one of our builtin functions. */
7018
7019static tree
f311c3b4
NF
7020alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7021 bool ignore ATTRIBUTE_UNUSED)
36013987 7022{
36013987 7023 unsigned HOST_WIDE_INT opint[MAX_ARGS];
58a11859 7024 long op_const = 0;
f311c3b4 7025 int i;
36013987 7026
64a5dc56 7027 if (n_args > MAX_ARGS)
f311c3b4
NF
7028 return NULL;
7029
7030 for (i = 0; i < n_args; i++)
36013987 7031 {
f311c3b4 7032 tree arg = op[i];
36013987
RH
7033 if (arg == error_mark_node)
7034 return NULL;
36013987 7035
f311c3b4 7036 opint[i] = 0;
36013987
RH
7037 if (TREE_CODE (arg) == INTEGER_CST)
7038 {
f311c3b4
NF
7039 op_const |= 1L << i;
7040 opint[i] = int_cst_value (arg);
36013987
RH
7041 }
7042 }
7043
4d732405 7044 switch (DECL_MD_FUNCTION_CODE (fndecl))
36013987
RH
7045 {
7046 case ALPHA_BUILTIN_CMPBGE:
7047 return alpha_fold_builtin_cmpbge (opint, op_const);
7048
7049 case ALPHA_BUILTIN_EXTBL:
7050 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7051 case ALPHA_BUILTIN_EXTWL:
7052 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7053 case ALPHA_BUILTIN_EXTLL:
7054 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7055 case ALPHA_BUILTIN_EXTQL:
7056 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7057 case ALPHA_BUILTIN_EXTWH:
7058 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7059 case ALPHA_BUILTIN_EXTLH:
7060 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7061 case ALPHA_BUILTIN_EXTQH:
7062 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7063
7064 case ALPHA_BUILTIN_INSBL:
7065 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7066 case ALPHA_BUILTIN_INSWL:
7067 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7068 case ALPHA_BUILTIN_INSLL:
7069 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7070 case ALPHA_BUILTIN_INSQL:
7071 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7072 case ALPHA_BUILTIN_INSWH:
7073 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7074 case ALPHA_BUILTIN_INSLH:
7075 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7076 case ALPHA_BUILTIN_INSQH:
7077 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7078
7079 case ALPHA_BUILTIN_MSKBL:
7080 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7081 case ALPHA_BUILTIN_MSKWL:
7082 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7083 case ALPHA_BUILTIN_MSKLL:
7084 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7085 case ALPHA_BUILTIN_MSKQL:
7086 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7087 case ALPHA_BUILTIN_MSKWH:
7088 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7089 case ALPHA_BUILTIN_MSKLH:
7090 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7091 case ALPHA_BUILTIN_MSKQH:
7092 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7093
36013987
RH
7094 case ALPHA_BUILTIN_ZAP:
7095 opint[1] ^= 0xff;
7096 /* FALLTHRU */
7097 case ALPHA_BUILTIN_ZAPNOT:
7098 return alpha_fold_builtin_zapnot (op, opint, op_const);
7099
7100 case ALPHA_BUILTIN_MINUB8:
7101 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7102 case ALPHA_BUILTIN_MINSB8:
7103 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7104 case ALPHA_BUILTIN_MINUW4:
7105 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7106 case ALPHA_BUILTIN_MINSW4:
7107 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7108 case ALPHA_BUILTIN_MAXUB8:
7109 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7110 case ALPHA_BUILTIN_MAXSB8:
7111 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7112 case ALPHA_BUILTIN_MAXUW4:
7113 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7114 case ALPHA_BUILTIN_MAXSW4:
7115 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7116
7117 case ALPHA_BUILTIN_PERR:
7118 return alpha_fold_builtin_perr (opint, op_const);
7119 case ALPHA_BUILTIN_PKLB:
7120 return alpha_fold_builtin_pklb (opint, op_const);
7121 case ALPHA_BUILTIN_PKWB:
7122 return alpha_fold_builtin_pkwb (opint, op_const);
7123 case ALPHA_BUILTIN_UNPKBL:
7124 return alpha_fold_builtin_unpkbl (opint, op_const);
7125 case ALPHA_BUILTIN_UNPKBW:
7126 return alpha_fold_builtin_unpkbw (opint, op_const);
7127
7128 case ALPHA_BUILTIN_CTTZ:
7129 return alpha_fold_builtin_cttz (opint, op_const);
7130 case ALPHA_BUILTIN_CTLZ:
7131 return alpha_fold_builtin_ctlz (opint, op_const);
7132 case ALPHA_BUILTIN_CTPOP:
7133 return alpha_fold_builtin_ctpop (opint, op_const);
7134
7135 case ALPHA_BUILTIN_AMASK:
7136 case ALPHA_BUILTIN_IMPLVER:
7137 case ALPHA_BUILTIN_RPCC:
36013987
RH
7138 /* None of these are foldable at compile-time. */
7139 default:
7140 return NULL;
7141 }
7142}
b6db8af6
UB
7143
7144bool
7145alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7146{
7147 bool changed = false;
355fe088 7148 gimple *stmt = gsi_stmt (*gsi);
b6db8af6 7149 tree call = gimple_call_fn (stmt);
355fe088 7150 gimple *new_stmt = NULL;
b6db8af6
UB
7151
7152 if (call)
7153 {
7154 tree fndecl = gimple_call_fndecl (stmt);
7155
7156 if (fndecl)
7157 {
7158 tree arg0, arg1;
7159
4d732405 7160 switch (DECL_MD_FUNCTION_CODE (fndecl))
b6db8af6
UB
7161 {
7162 case ALPHA_BUILTIN_UMULH:
7163 arg0 = gimple_call_arg (stmt, 0);
7164 arg1 = gimple_call_arg (stmt, 1);
7165
0d3d8152
JJ
7166 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
7167 MULT_HIGHPART_EXPR, arg0, arg1);
b6db8af6
UB
7168 break;
7169 default:
7170 break;
7171 }
7172 }
7173 }
7174
7175 if (new_stmt)
7176 {
7177 gsi_replace (gsi, new_stmt, true);
7178 changed = true;
7179 }
7180
7181 return changed;
7182}
6d8fd7bb 7183\f
a6f12d7c
RK
7184/* This page contains routines that are used to determine what the function
7185 prologue and epilogue code will do and write them out. */
7186
7187/* Compute the size of the save area in the stack. */
7188
89cfc2c6
RK
7189/* These variables are used for communication between the following functions.
7190 They indicate various things about the current function being compiled
7191 that are used to tell what kind of prologue, epilogue and procedure
839a4992 7192 descriptor to generate. */
89cfc2c6
RK
7193
7194/* Nonzero if we need a stack procedure. */
c2ea1ac6
DR
7195enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7196static enum alpha_procedure_types alpha_procedure_type;
89cfc2c6
RK
7197
7198/* Register number (either FP or SP) that is used to unwind the frame. */
9c0e94a5 7199static int vms_unwind_regno;
89cfc2c6
RK
7200
7201/* Register number used to save FP. We need not have one for RA since
7202 we don't modify it for register procedures. This is only defined
7203 for register frame procedures. */
9c0e94a5 7204static int vms_save_fp_regno;
89cfc2c6
RK
7205
7206/* Register number used to reference objects off our PV. */
9c0e94a5 7207static int vms_base_regno;
89cfc2c6 7208
0191520b
RH
7209/* Compute register masks for saved registers, register save area size,
7210 and total frame size. */
89cfc2c6 7211static void
0191520b 7212alpha_compute_frame_layout (void)
89cfc2c6 7213{
0191520b
RH
7214 unsigned HOST_WIDE_INT sa_mask = 0;
7215 HOST_WIDE_INT frame_size;
7216 int sa_size;
89cfc2c6 7217
dd292d0a
MM
7218 /* When outputting a thunk, we don't have valid register life info,
7219 but assemble_start_function wants to output .frame and .mask
7220 directives. */
0191520b 7221 if (!cfun->is_thunk)
acd92049 7222 {
0191520b
RH
7223 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7224 sa_mask |= HOST_WIDE_INT_1U << HARD_FRAME_POINTER_REGNUM;
89cfc2c6 7225
0191520b
RH
7226 /* One for every register we have to save. */
7227 for (unsigned i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7228 if (! fixed_regs[i] && ! call_used_regs[i]
7229 && df_regs_ever_live_p (i) && i != REG_RA)
7230 sa_mask |= HOST_WIDE_INT_1U << i;
14691f8d 7231
0191520b
RH
7232 /* We need to restore these for the handler. */
7233 if (crtl->calls_eh_return)
ed80cd68 7234 {
0191520b
RH
7235 for (unsigned i = 0; ; ++i)
7236 {
7237 unsigned regno = EH_RETURN_DATA_REGNO (i);
7238 if (regno == INVALID_REGNUM)
7239 break;
7240 sa_mask |= HOST_WIDE_INT_1U << regno;
7241 }
ed80cd68 7242 }
89cfc2c6 7243
0191520b
RH
7244 /* If any register spilled, then spill the return address also. */
7245 /* ??? This is required by the Digital stack unwind specification
7246 and isn't needed if we're doing Dwarf2 unwinding. */
7247 if (sa_mask || alpha_ra_ever_killed ())
7248 sa_mask |= HOST_WIDE_INT_1U << REG_RA;
7249 }
61334ebe 7250
0191520b
RH
7251 sa_size = popcount_hwi(sa_mask);
7252 frame_size = get_frame_size ();
30102605 7253
75db85d8 7254 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 7255 {
1d3499d8
OH
7256 /* Start with a stack procedure if we make any calls (REG_RA used), or
7257 need a frame pointer, with a register procedure if we otherwise need
7258 at least a slot, and with a null procedure in other cases. */
0191520b 7259 if ((sa_mask >> REG_RA) & 1 || frame_pointer_needed)
c2ea1ac6 7260 alpha_procedure_type = PT_STACK;
0191520b 7261 else if (frame_size != 0)
c2ea1ac6
DR
7262 alpha_procedure_type = PT_REGISTER;
7263 else
7264 alpha_procedure_type = PT_NULL;
61334ebe 7265
cb9a8e97 7266 /* Don't reserve space for saving FP & RA yet. Do that later after we've
61334ebe 7267 made the final decision on stack procedure vs register procedure. */
c2ea1ac6 7268 if (alpha_procedure_type == PT_STACK)
cb9a8e97 7269 sa_size -= 2;
9c0e94a5
RH
7270
7271 /* Decide whether to refer to objects off our PV via FP or PV.
7272 If we need FP for something else or if we receive a nonlocal
7273 goto (which expects PV to contain the value), we must use PV.
7274 Otherwise, start by assuming we can use FP. */
c2ea1ac6
DR
7275
7276 vms_base_regno
7277 = (frame_pointer_needed
e3b5732b 7278 || cfun->has_nonlocal_label
c2ea1ac6 7279 || alpha_procedure_type == PT_STACK
38173d38 7280 || crtl->outgoing_args_size)
c2ea1ac6 7281 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
9c0e94a5
RH
7282
7283 /* If we want to copy PV into FP, we need to find some register
7284 in which to save FP. */
9c0e94a5
RH
7285 vms_save_fp_regno = -1;
7286 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
0191520b
RH
7287 for (unsigned i = 0; i < 32; i++)
7288 if (! fixed_regs[i] && call_used_regs[i]
7289 && ! df_regs_ever_live_p (i))
7290 {
7291 vms_save_fp_regno = i;
7292 break;
7293 }
9c0e94a5 7294
221cf9ab
OH
7295 /* A VMS condition handler requires a stack procedure in our
7296 implementation. (not required by the calling standard). */
7297 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7298 || cfun->machine->uses_condition_handler)
c2ea1ac6
DR
7299 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7300 else if (alpha_procedure_type == PT_NULL)
7301 vms_base_regno = REG_PV;
9c0e94a5
RH
7302
7303 /* Stack unwinding should be done via FP unless we use it for PV. */
7304 vms_unwind_regno = (vms_base_regno == REG_PV
7305 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7306
221cf9ab
OH
7307 /* If this is a stack procedure, allow space for saving FP, RA and
7308 a condition handler slot if needed. */
c2ea1ac6 7309 if (alpha_procedure_type == PT_STACK)
221cf9ab 7310 sa_size += 2 + cfun->machine->uses_condition_handler;
9c0e94a5
RH
7311 }
7312 else
7313 {
9c0e94a5
RH
7314 /* Our size must be even (multiple of 16 bytes). */
7315 if (sa_size & 1)
7316 sa_size++;
7317 }
0191520b
RH
7318 sa_size *= 8;
7319
7320 if (TARGET_ABI_OPEN_VMS)
7321 frame_size = ALPHA_ROUND (sa_size
7322 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7323 + frame_size
7324 + crtl->args.pretend_args_size);
7325 else
7326 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7327 + sa_size
7328 + ALPHA_ROUND (frame_size + crtl->args.pretend_args_size));
89cfc2c6 7329
0191520b
RH
7330 cfun->machine->sa_mask = sa_mask;
7331 cfun->machine->sa_size = sa_size;
7332 cfun->machine->frame_size = frame_size;
7333}
7334
7335#undef TARGET_COMPUTE_FRAME_LAYOUT
7336#define TARGET_COMPUTE_FRAME_LAYOUT alpha_compute_frame_layout
7337
7338/* Return 1 if this function can directly return via $26. */
7339
7340bool
7341direct_return (void)
7342{
7343 return (TARGET_ABI_OSF
7344 && reload_completed
7345 && cfun->machine->frame_size == 0);
89cfc2c6
RK
7346}
7347
35d9c403
RH
7348/* Define the offset between two registers, one to be eliminated,
7349 and the other its replacement, at the start of a routine. */
7350
7351HOST_WIDE_INT
a5c24926
RH
7352alpha_initial_elimination_offset (unsigned int from,
7353 unsigned int to ATTRIBUTE_UNUSED)
35d9c403
RH
7354{
7355 HOST_WIDE_INT ret;
7356
0191520b 7357 ret = cfun->machine->sa_size;
38173d38 7358 ret += ALPHA_ROUND (crtl->outgoing_args_size);
35d9c403 7359
56daab84
NS
7360 switch (from)
7361 {
7362 case FRAME_POINTER_REGNUM:
7363 break;
7364
7365 case ARG_POINTER_REGNUM:
7366 ret += (ALPHA_ROUND (get_frame_size ()
38173d38
JH
7367 + crtl->args.pretend_args_size)
7368 - crtl->args.pretend_args_size);
56daab84
NS
7369 break;
7370
7371 default:
7372 gcc_unreachable ();
7373 }
35d9c403
RH
7374
7375 return ret;
7376}
7377
1d3499d8
OH
7378#if TARGET_ABI_OPEN_VMS
7379
7b5cbb57
AS
7380/* Worker function for TARGET_CAN_ELIMINATE. */
7381
7382static bool
7383alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
89cfc2c6 7384{
1d3499d8
OH
7385 switch (alpha_procedure_type)
7386 {
7387 case PT_NULL:
7388 /* NULL procedures have no frame of their own and we only
7389 know how to resolve from the current stack pointer. */
7390 return to == STACK_POINTER_REGNUM;
7391
7392 case PT_REGISTER:
7393 case PT_STACK:
7394 /* We always eliminate except to the stack pointer if there is no
7395 usable frame pointer at hand. */
7396 return (to != STACK_POINTER_REGNUM
7397 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7398 }
7399
7400 gcc_unreachable ();
89cfc2c6
RK
7401}
7402
1d3499d8
OH
7403/* FROM is to be eliminated for TO. Return the offset so that TO+offset
7404 designates the same location as FROM. */
7405
7406HOST_WIDE_INT
7407alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7408{
7409 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7410 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7411 on the proper computations and will need the register save area size
7412 in most cases. */
7413
0191520b 7414 HOST_WIDE_INT sa_size = cfun->machine->sa_size;
1d3499d8
OH
7415
7416 /* PT_NULL procedures have no frame of their own and we only allow
7417 elimination to the stack pointer. This is the argument pointer and we
7418 resolve the soft frame pointer to that as well. */
7419
7420 if (alpha_procedure_type == PT_NULL)
7421 return 0;
7422
7423 /* For a PT_STACK procedure the frame layout looks as follows
7424
7425 -----> decreasing addresses
7426
7427 < size rounded up to 16 | likewise >
7428 --------------#------------------------------+++--------------+++-------#
7429 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7430 --------------#---------------------------------------------------------#
7431 ^ ^ ^ ^
7432 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7433
7434
7435 PT_REGISTER procedures are similar in that they may have a frame of their
7436 own. They have no regs-sa/pv/outgoing-args area.
7437
7438 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7439 to STACK_PTR if need be. */
7440
7441 {
7442 HOST_WIDE_INT offset;
7443 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7444
7445 switch (from)
7446 {
7447 case FRAME_POINTER_REGNUM:
7448 offset = ALPHA_ROUND (sa_size + pv_save_size);
7449 break;
7450 case ARG_POINTER_REGNUM:
7451 offset = (ALPHA_ROUND (sa_size + pv_save_size
7452 + get_frame_size ()
7453 + crtl->args.pretend_args_size)
7454 - crtl->args.pretend_args_size);
7455 break;
7456 default:
7457 gcc_unreachable ();
7458 }
7459
7460 if (to == STACK_POINTER_REGNUM)
7461 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7462
7463 return offset;
7464 }
89cfc2c6
RK
7465}
7466
18fd5621
EB
7467#define COMMON_OBJECT "common_object"
7468
7469static tree
7470common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7471 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7472 bool *no_add_attrs ATTRIBUTE_UNUSED)
7473{
7474 tree decl = *node;
7475 gcc_assert (DECL_P (decl));
7476
7477 DECL_COMMON (decl) = 1;
7478 return NULL_TREE;
7479}
8289c43b 7480
6bc7bc14 7481static const struct attribute_spec vms_attribute_table[] =
a6f12d7c 7482{
4849deb1
JJ
7483 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
7484 affects_type_identity, handler, exclude } */
7485 { COMMON_OBJECT, 0, 1, true, false, false, false, common_object_handler,
5d9ae53d 7486 NULL },
4849deb1 7487 { NULL, 0, 0, false, false, false, false, NULL, NULL }
91d231cb 7488};
a6f12d7c 7489
18fd5621
EB
7490void
7491vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7492 unsigned HOST_WIDE_INT size,
7493 unsigned int align)
7494{
7495 tree attr = DECL_ATTRIBUTES (decl);
7496 fprintf (file, "%s", COMMON_ASM_OP);
7497 assemble_name (file, name);
7498 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7499 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7500 fprintf (file, ",%u", align / BITS_PER_UNIT);
7501 if (attr)
7502 {
7503 attr = lookup_attribute (COMMON_OBJECT, attr);
7504 if (attr)
7505 fprintf (file, ",%s",
7506 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7507 }
7508 fputc ('\n', file);
7509}
7510
7511#undef COMMON_OBJECT
7512
8289c43b
NB
7513#endif
7514
4dba3553 7515bool
a5c24926 7516alpha_find_lo_sum_using_gp (rtx insn)
77480b0b 7517{
4dba3553
RS
7518 subrtx_iterator::array_type array;
7519 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7520 {
7521 const_rtx x = *iter;
7522 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7523 return true;
7524 }
7525 return false;
1eb356b9
RH
7526}
7527
9c0e94a5 7528static int
a5c24926 7529alpha_does_function_need_gp (void)
9c0e94a5 7530{
cad003ba 7531 rtx_insn *insn;
a6f12d7c 7532
30102605
RH
7533 /* The GP being variable is an OSF abi thing. */
7534 if (! TARGET_ABI_OSF)
9c0e94a5 7535 return 0;
a6f12d7c 7536
b64de1fe 7537 /* We need the gp to load the address of __mcount. */
e3b5732b 7538 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
9c0e94a5 7539 return 1;
d60a05a1 7540
b64de1fe 7541 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
3c072c6b 7542 if (cfun->is_thunk)
acd92049 7543 return 1;
acd92049 7544
b64de1fe
RH
7545 /* The nonlocal receiver pattern assumes that the gp is valid for
7546 the nested function. Reasonable because it's almost always set
7547 correctly already. For the cases where that's wrong, make sure
7548 the nested function loads its gp on entry. */
e3b5732b 7549 if (crtl->has_nonlocal_goto)
b64de1fe
RH
7550 return 1;
7551
f676971a 7552 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
9c0e94a5
RH
7553 Even if we are a static function, we still need to do this in case
7554 our address is taken and passed to something like qsort. */
a6f12d7c 7555
9c0e94a5
RH
7556 push_topmost_sequence ();
7557 insn = get_insns ();
7558 pop_topmost_sequence ();
89cfc2c6 7559
9c0e94a5 7560 for (; insn; insn = NEXT_INSN (insn))
14e58be0 7561 if (NONDEBUG_INSN_P (insn)
9c0e94a5 7562 && GET_CODE (PATTERN (insn)) != USE
77480b0b
RH
7563 && GET_CODE (PATTERN (insn)) != CLOBBER
7564 && get_attr_usegp (insn))
7565 return 1;
a6f12d7c 7566
9c0e94a5 7567 return 0;
a6f12d7c
RK
7568}
7569
ec6840c1 7570\f
6abc6f40
RH
7571/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7572 sequences. */
7573
cad003ba 7574static rtx_insn *
a5c24926 7575set_frame_related_p (void)
6abc6f40 7576{
cad003ba
DM
7577 rtx_insn *seq = get_insns ();
7578 rtx_insn *insn;
2f937369 7579
6abc6f40
RH
7580 end_sequence ();
7581
2f937369 7582 if (!seq)
cad003ba 7583 return NULL;
2f937369
DM
7584
7585 if (INSN_P (seq))
6abc6f40 7586 {
2f937369
DM
7587 insn = seq;
7588 while (insn != NULL_RTX)
7589 {
7590 RTX_FRAME_RELATED_P (insn) = 1;
7591 insn = NEXT_INSN (insn);
7592 }
7593 seq = emit_insn (seq);
6abc6f40
RH
7594 }
7595 else
7596 {
7597 seq = emit_insn (seq);
7598 RTX_FRAME_RELATED_P (seq) = 1;
6abc6f40 7599 }
2f937369 7600 return seq;
6abc6f40
RH
7601}
7602
7603#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7604
45f413e4 7605/* Generates a store with the proper unwind info attached. VALUE is
0e40b5f2 7606 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
45f413e4
RH
7607 contains SP+FRAME_BIAS, and that is the unwind info that should be
7608 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7609 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7610
7611static void
7612emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7613 HOST_WIDE_INT base_ofs, rtx frame_reg)
7614{
cad003ba
DM
7615 rtx addr, mem;
7616 rtx_insn *insn;
45f413e4 7617
0a81f074 7618 addr = plus_constant (Pmode, base_reg, base_ofs);
7a81008b 7619 mem = gen_frame_mem (DImode, addr);
45f413e4
RH
7620
7621 insn = emit_move_insn (mem, value);
7622 RTX_FRAME_RELATED_P (insn) = 1;
7623
7624 if (frame_bias || value != frame_reg)
7625 {
7626 if (frame_bias)
7627 {
0a81f074
RS
7628 addr = plus_constant (Pmode, stack_pointer_rtx,
7629 frame_bias + base_ofs);
45f413e4
RH
7630 mem = gen_rtx_MEM (DImode, addr);
7631 }
7632
bf758008 7633 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
f7df4a84 7634 gen_rtx_SET (mem, frame_reg));
45f413e4
RH
7635 }
7636}
7637
7638static void
7639emit_frame_store (unsigned int regno, rtx base_reg,
7640 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7641{
7642 rtx reg = gen_rtx_REG (DImode, regno);
7643 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7644}
7645
a6f12d7c
RK
7646/* Write function prologue. */
7647
89cfc2c6
RK
7648/* On vms we have two kinds of functions:
7649
7650 - stack frame (PROC_STACK)
7651 these are 'normal' functions with local vars and which are
7652 calling other functions
7653 - register frame (PROC_REGISTER)
7654 keeps all data in registers, needs no stack
7655
7656 We must pass this to the assembler so it can generate the
7657 proper pdsc (procedure descriptor)
7658 This is done with the '.pdesc' command.
7659
9c0e94a5
RH
7660 On not-vms, we don't really differentiate between the two, as we can
7661 simply allocate stack without saving registers. */
89cfc2c6
RK
7662
7663void
a5c24926 7664alpha_expand_prologue (void)
89cfc2c6 7665{
9c0e94a5 7666 /* Registers to save. */
0191520b 7667 unsigned HOST_WIDE_INT sa_mask = cfun->machine->sa_mask;
89cfc2c6 7668 /* Stack space needed for pushing registers clobbered by us. */
0191520b 7669 HOST_WIDE_INT sa_size = cfun->machine->sa_size;
89cfc2c6 7670 /* Complete stack size needed. */
0191520b 7671 HOST_WIDE_INT frame_size = cfun->machine->frame_size;
10937190
EB
7672 /* Probed stack size; it additionally includes the size of
7673 the "reserve region" if any. */
0191520b 7674 HOST_WIDE_INT probed_size, sa_bias;
89cfc2c6 7675 /* Offset from base reg to register save area. */
9c0e94a5 7676 HOST_WIDE_INT reg_offset;
45f413e4 7677 rtx sa_reg;
89cfc2c6 7678
a11e0df4 7679 if (flag_stack_usage_info)
d3c12306 7680 current_function_static_stack_size = frame_size;
89cfc2c6 7681
be7b80f4 7682 if (TARGET_ABI_OPEN_VMS)
221cf9ab 7683 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
9c0e94a5 7684 else
38173d38 7685 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
89cfc2c6 7686
941cc05a 7687 /* Emit an insn to reload GP, if needed. */
be7b80f4 7688 if (TARGET_ABI_OSF)
941cc05a
RK
7689 {
7690 alpha_function_needs_gp = alpha_does_function_need_gp ();
7691 if (alpha_function_needs_gp)
7692 emit_insn (gen_prologue_ldgp ());
7693 }
7694
4f1c5cce
RH
7695 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7696 the call to mcount ourselves, rather than having the linker do it
7697 magically in response to -pg. Since _mcount has special linkage,
7698 don't represent the call as a call. */
e3b5732b 7699 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
4f1c5cce 7700 emit_insn (gen_prologue_mcount ());
30102605 7701
89cfc2c6
RK
7702 /* Adjust the stack by the frame size. If the frame size is > 4096
7703 bytes, we need to be sure we probe somewhere in the first and last
7704 4096 bytes (we can probably get away without the latter test) and
7705 every 8192 bytes in between. If the frame size is > 32768, we
7706 do this in a loop. Otherwise, we generate the explicit probe
f676971a 7707 instructions.
89cfc2c6
RK
7708
7709 Note that we are only allowed to adjust sp once in the prologue. */
7710
10937190 7711 probed_size = frame_size;
9c1b56c4 7712 if (flag_stack_check || flag_stack_clash_protection)
8c1dd970 7713 probed_size += get_stack_check_protect ();
10937190
EB
7714
7715 if (probed_size <= 32768)
89cfc2c6 7716 {
10937190 7717 if (probed_size > 4096)
89cfc2c6 7718 {
11eef578 7719 int probed;
89cfc2c6 7720
10937190 7721 for (probed = 4096; probed < probed_size; probed += 8192)
0d136a30 7722 emit_insn (gen_stack_probe_internal (GEN_INT (-probed)));
89cfc2c6 7723
10937190
EB
7724 /* We only have to do this probe if we aren't saving registers or
7725 if we are probing beyond the frame because of -fstack-check. */
7726 if ((sa_size == 0 && probed_size > probed - 4096)
9c1b56c4 7727 || flag_stack_check || flag_stack_clash_protection)
0d136a30 7728 emit_insn (gen_stack_probe_internal (GEN_INT (-probed_size)));
89cfc2c6
RK
7729 }
7730
7731 if (frame_size != 0)
8207e7c6 7732 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
75db85d8 7733 GEN_INT (-frame_size))));
89cfc2c6
RK
7734 }
7735 else
7736 {
9c0e94a5 7737 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
89cfc2c6
RK
7738 number of 8192 byte blocks to probe. We then probe each block
7739 in the loop and then set SP to the proper location. If the
7740 amount remaining is > 4096, we have to do one more probe if we
10937190
EB
7741 are not saving any registers or if we are probing beyond the
7742 frame because of -fstack-check. */
89cfc2c6 7743
10937190
EB
7744 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7745 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
9c0e94a5
RH
7746 rtx ptr = gen_rtx_REG (DImode, 22);
7747 rtx count = gen_rtx_REG (DImode, 23);
37679e06 7748 rtx seq;
89cfc2c6 7749
9c0e94a5 7750 emit_move_insn (count, GEN_INT (blocks));
75db85d8 7751 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
89cfc2c6 7752
9c0e94a5
RH
7753 /* Because of the difficulty in emitting a new basic block this
7754 late in the compilation, generate the loop as a single insn. */
7755 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
89cfc2c6 7756
9c1b56c4
JL
7757 if ((leftover > 4096 && sa_size == 0)
7758 || flag_stack_check || flag_stack_clash_protection)
9c0e94a5 7759 {
0a81f074
RS
7760 rtx last = gen_rtx_MEM (DImode,
7761 plus_constant (Pmode, ptr, -leftover));
9c0e94a5
RH
7762 MEM_VOLATILE_P (last) = 1;
7763 emit_move_insn (last, const0_rtx);
7764 }
89cfc2c6 7765
9c1b56c4 7766 if (flag_stack_check || flag_stack_clash_protection)
f9d7e5cd 7767 {
800d1de1
RH
7768 /* If -fstack-check is specified we have to load the entire
7769 constant into a register and subtract from the sp in one go,
7770 because the probed stack size is not equal to the frame size. */
f9d7e5cd 7771 HOST_WIDE_INT lo, hi;
14eecd34
RH
7772 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7773 hi = frame_size - lo;
6abc6f40 7774
37679e06 7775 emit_move_insn (ptr, GEN_INT (hi));
5c9948f4 7776 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
37679e06
RH
7777 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7778 ptr));
f9d7e5cd
RH
7779 }
7780 else
7781 {
f9d7e5cd
RH
7782 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7783 GEN_INT (-leftover)));
f9d7e5cd 7784 }
37679e06
RH
7785
7786 /* This alternative is special, because the DWARF code cannot
7787 possibly intuit through the loop above. So we invent this
7788 note it looks at instead. */
7789 RTX_FRAME_RELATED_P (seq) = 1;
bf758008 7790 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
f7df4a84 7791 gen_rtx_SET (stack_pointer_rtx,
0a81f074 7792 plus_constant (Pmode, stack_pointer_rtx,
75db85d8 7793 -frame_size)));
89cfc2c6
RK
7794 }
7795
75db85d8
RH
7796 /* Cope with very large offsets to the register save area. */
7797 sa_bias = 0;
7798 sa_reg = stack_pointer_rtx;
7799 if (reg_offset + sa_size > 0x8000)
89cfc2c6 7800 {
75db85d8
RH
7801 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7802 rtx sa_bias_rtx;
89cfc2c6 7803
75db85d8
RH
7804 if (low + sa_size <= 0x8000)
7805 sa_bias = reg_offset - low, reg_offset = low;
7806 else
7807 sa_bias = reg_offset, reg_offset = 0;
f676971a 7808
75db85d8
RH
7809 sa_reg = gen_rtx_REG (DImode, 24);
7810 sa_bias_rtx = GEN_INT (sa_bias);
89cfc2c6 7811
75db85d8
RH
7812 if (add_operand (sa_bias_rtx, DImode))
7813 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7814 else
30102605 7815 {
75db85d8
RH
7816 emit_move_insn (sa_reg, sa_bias_rtx);
7817 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
30102605 7818 }
75db85d8 7819 }
89cfc2c6 7820
75db85d8
RH
7821 /* Save regs in stack order. Beginning with VMS PV. */
7822 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7823 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
89cfc2c6 7824
0191520b
RH
7825 /* Save register RA next, followed by any other registers
7826 that need to be saved. */
7827 for (unsigned i = REG_RA; sa_mask != 0; i = ctz_hwi(sa_mask))
30102605 7828 {
0191520b 7829 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
75db85d8 7830 reg_offset += 8;
0191520b 7831 sa_mask &= ~(HOST_WIDE_INT_1U << i);
30102605 7832 }
89cfc2c6 7833
be7b80f4 7834 if (TARGET_ABI_OPEN_VMS)
89cfc2c6 7835 {
15cb981a 7836 /* Register frame procedures save the fp. */
c2ea1ac6 7837 if (alpha_procedure_type == PT_REGISTER)
15cb981a 7838 {
cad003ba
DM
7839 rtx_insn *insn =
7840 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7841 hard_frame_pointer_rtx);
15cb981a
RH
7842 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7843 RTX_FRAME_RELATED_P (insn) = 1;
7844 }
89cfc2c6 7845
c2ea1ac6 7846 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
54aaa4ea
RH
7847 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7848 gen_rtx_REG (DImode, REG_PV)));
89cfc2c6 7849
c2ea1ac6
DR
7850 if (alpha_procedure_type != PT_NULL
7851 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8207e7c6 7852 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
89cfc2c6 7853
9c0e94a5 7854 /* If we have to allocate space for outgoing args, do it now. */
38173d38 7855 if (crtl->outgoing_args_size != 0)
c1238896 7856 {
cad003ba 7857 rtx_insn *seq
f676971a 7858 = emit_move_insn (stack_pointer_rtx,
c1238896 7859 plus_constant
0a81f074 7860 (Pmode, hard_frame_pointer_rtx,
c1238896 7861 - (ALPHA_ROUND
38173d38 7862 (crtl->outgoing_args_size))));
f676971a 7863
c1238896
OH
7864 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7865 if ! frame_pointer_needed. Setting the bit will change the CFA
7866 computation rule to use sp again, which would be wrong if we had
7867 frame_pointer_needed, as this means sp might move unpredictably
7868 later on.
7869
7870 Also, note that
7871 frame_pointer_needed
7872 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7873 and
38173d38 7874 crtl->outgoing_args_size != 0
c1238896
OH
7875 => alpha_procedure_type != PT_NULL,
7876
7877 so when we are not setting the bit here, we are guaranteed to
093354e0 7878 have emitted an FRP frame pointer update just before. */
c1238896
OH
7879 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7880 }
9c0e94a5 7881 }
75db85d8 7882 else
9c0e94a5
RH
7883 {
7884 /* If we need a frame pointer, set it from the stack pointer. */
7885 if (frame_pointer_needed)
7886 {
7887 if (TARGET_CAN_FAULT_IN_PROLOGUE)
6abc6f40 7888 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
89cfc2c6 7889 else
8207e7c6
RK
7890 /* This must always be the last instruction in the
7891 prologue, thus we emit a special move + clobber. */
6abc6f40
RH
7892 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7893 stack_pointer_rtx, sa_reg)));
89cfc2c6 7894 }
89cfc2c6
RK
7895 }
7896
9c0e94a5
RH
7897 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7898 the prologue, for exception handling reasons, we cannot do this for
7899 any insn that might fault. We could prevent this for mems with a
7900 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7901 have to prevent all such scheduling with a blockage.
89cfc2c6 7902
f676971a 7903 Linux, on the other hand, never bothered to implement OSF/1's
9c0e94a5
RH
7904 exception handling, and so doesn't care about such things. Anyone
7905 planning to use dwarf2 frame-unwind info can also omit the blockage. */
89cfc2c6 7906
9c0e94a5
RH
7907 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7908 emit_insn (gen_blockage ());
ef86d2ee
WL
7909}
7910
3e487b21 7911/* Count the number of .file directives, so that .loc is up to date. */
93a27b7b 7912int num_source_filenames = 0;
3e487b21 7913
acd92049 7914/* Output the textual info surrounding the prologue. */
89cfc2c6 7915
9c0e94a5 7916void
a5c24926
RH
7917alpha_start_function (FILE *file, const char *fnname,
7918 tree decl ATTRIBUTE_UNUSED)
9ecc37f0 7919{
0191520b 7920 unsigned long imask, fmask;
9c0e94a5 7921 /* Complete stack size needed. */
0191520b 7922 HOST_WIDE_INT frame_size = cfun->machine->frame_size;
5c30094f 7923 /* The maximum debuggable frame size. */
0191520b 7924 const HOST_WIDE_INT max_frame_size = HOST_WIDE_INT_1 << 31;
9c0e94a5
RH
7925 /* Offset from base reg to register save area. */
7926 HOST_WIDE_INT reg_offset;
acd92049 7927 char *entry_label = (char *) alloca (strlen (fnname) + 6);
fe2786f5 7928 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
9c0e94a5 7929 int i;
9ecc37f0 7930
5ea8f977 7931#if TARGET_ABI_OPEN_VMS
4b12e93d 7932 vms_start_function (fnname);
5ea8f977
DR
7933#endif
7934
941cc05a 7935 alpha_fnname = fnname;
9ecc37f0 7936
be7b80f4 7937 if (TARGET_ABI_OPEN_VMS)
221cf9ab 7938 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
9c0e94a5 7939 else
38173d38 7940 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
9ecc37f0 7941
0191520b
RH
7942 imask = cfun->machine->sa_mask & 0xffffffffu;
7943 fmask = cfun->machine->sa_mask >> 32;
a6f12d7c 7944
9c0e94a5 7945 /* Issue function start and label. */
75db85d8 7946 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
33d01c33 7947 {
9c0e94a5 7948 fputs ("\t.ent ", file);
acd92049 7949 assemble_name (file, fnname);
9c0e94a5 7950 putc ('\n', file);
941cc05a
RK
7951
7952 /* If the function needs GP, we'll write the "..ng" label there.
7953 Otherwise, do it here. */
14691f8d
RH
7954 if (TARGET_ABI_OSF
7955 && ! alpha_function_needs_gp
3c072c6b 7956 && ! cfun->is_thunk)
941cc05a
RK
7957 {
7958 putc ('$', file);
7959 assemble_name (file, fnname);
7960 fputs ("..ng:\n", file);
7961 }
33d01c33 7962 }
fe2786f5
DR
7963 /* Nested functions on VMS that are potentially called via trampoline
7964 get a special transfer entry point that loads the called functions
7965 procedure descriptor and static chain. */
7966 if (TARGET_ABI_OPEN_VMS
7967 && !TREE_PUBLIC (decl)
7968 && DECL_CONTEXT (decl)
cf45cd09
TG
7969 && !TYPE_P (DECL_CONTEXT (decl))
7970 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
fe2786f5
DR
7971 {
7972 strcpy (tramp_label, fnname);
7973 strcat (tramp_label, "..tr");
7974 ASM_OUTPUT_LABEL (file, tramp_label);
7975 fprintf (file, "\tldq $1,24($27)\n");
7976 fprintf (file, "\tldq $27,16($27)\n");
7977 }
48f6bfac 7978
acd92049 7979 strcpy (entry_label, fnname);
be7b80f4 7980 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 7981 strcat (entry_label, "..en");
30102605 7982
9c0e94a5
RH
7983 ASM_OUTPUT_LABEL (file, entry_label);
7984 inside_function = TRUE;
48f6bfac 7985
be7b80f4 7986 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 7987 fprintf (file, "\t.base $%d\n", vms_base_regno);
a6f12d7c 7988
42d085c1
RH
7989 if (TARGET_ABI_OSF
7990 && TARGET_IEEE_CONFORMANT
9c0e94a5 7991 && !flag_inhibit_size_directive)
9973f4a2 7992 {
9c0e94a5
RH
7993 /* Set flags in procedure descriptor to request IEEE-conformant
7994 math-library routines. The value we set it to is PDSC_EXC_IEEE
285a5742 7995 (/usr/include/pdsc.h). */
9c0e94a5 7996 fputs ("\t.eflag 48\n", file);
9973f4a2 7997 }
a6f12d7c 7998
9c0e94a5 7999 /* Set up offsets to alpha virtual arg/local debugging pointer. */
38173d38 8000 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
9c0e94a5 8001 alpha_arg_offset = -frame_size + 48;
c97e3db7 8002
9c0e94a5
RH
8003 /* Describe our frame. If the frame size is larger than an integer,
8004 print it as zero to avoid an assembler error. We won't be
8005 properly describing such a frame, but that's the best we can do. */
75db85d8 8006 if (TARGET_ABI_OPEN_VMS)
4a0a75dd
KG
8007 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8008 HOST_WIDE_INT_PRINT_DEC "\n",
8009 vms_unwind_regno,
0191520b 8010 frame_size >= max_frame_size ? 0 : frame_size,
4a0a75dd 8011 reg_offset);
9c0e94a5 8012 else if (!flag_inhibit_size_directive)
4a0a75dd
KG
8013 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8014 (frame_pointer_needed
8015 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
b598cb38 8016 frame_size >= max_frame_size ? 0 : frame_size,
38173d38 8017 crtl->args.pretend_args_size);
0d24ff5d 8018
9c0e94a5 8019 /* Describe which registers were spilled. */
75db85d8 8020 if (TARGET_ABI_OPEN_VMS)
0d24ff5d 8021 {
9c0e94a5 8022 if (imask)
30102605 8023 /* ??? Does VMS care if mask contains ra? The old code didn't
9c0e94a5 8024 set it, so I don't here. */
409f52d3 8025 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
9c0e94a5 8026 if (fmask)
3c303f52 8027 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
c2ea1ac6 8028 if (alpha_procedure_type == PT_REGISTER)
9c0e94a5
RH
8029 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8030 }
8031 else if (!flag_inhibit_size_directive)
8032 {
8033 if (imask)
0d24ff5d 8034 {
4a0a75dd 8035 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
b598cb38 8036 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
9c0e94a5
RH
8037
8038 for (i = 0; i < 32; ++i)
409f52d3 8039 if (imask & (1UL << i))
9c0e94a5 8040 reg_offset += 8;
0d24ff5d 8041 }
9c0e94a5
RH
8042
8043 if (fmask)
4a0a75dd 8044 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
b598cb38 8045 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
a6f12d7c
RK
8046 }
8047
be7b80f4 8048#if TARGET_ABI_OPEN_VMS
221cf9ab
OH
8049 /* If a user condition handler has been installed at some point, emit
8050 the procedure descriptor bits to point the Condition Handling Facility
8051 at the indirection wrapper, and state the fp offset at which the user
8052 handler may be found. */
8053 if (cfun->machine->uses_condition_handler)
8054 {
8055 fprintf (file, "\t.handler __gcc_shell_handler\n");
8056 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8057 }
8058
735f469b
TG
8059#ifdef TARGET_VMS_CRASH_DEBUG
8060 /* Support of minimal traceback info. */
d6b5193b 8061 switch_to_section (readonly_data_section);
9c0e94a5 8062 fprintf (file, "\t.align 3\n");
acd92049 8063 assemble_name (file, fnname); fputs ("..na:\n", file);
9c0e94a5 8064 fputs ("\t.ascii \"", file);
acd92049 8065 assemble_name (file, fnname);
9c0e94a5 8066 fputs ("\\0\"\n", file);
d6b5193b 8067 switch_to_section (text_section);
9c0e94a5 8068#endif
735f469b 8069#endif /* TARGET_ABI_OPEN_VMS */
9c0e94a5 8070}
a6f12d7c 8071
9c0e94a5 8072/* Emit the .prologue note at the scheduled end of the prologue. */
0f33506c 8073
b4c25db2 8074static void
a5c24926 8075alpha_output_function_end_prologue (FILE *file)
9c0e94a5 8076{
75db85d8 8077 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 8078 fputs ("\t.prologue\n", file);
9c0e94a5 8079 else if (!flag_inhibit_size_directive)
14691f8d 8080 fprintf (file, "\t.prologue %d\n",
3c072c6b 8081 alpha_function_needs_gp || cfun->is_thunk);
a6f12d7c
RK
8082}
8083
8084/* Write function epilogue. */
8085
8086void
a5c24926 8087alpha_expand_epilogue (void)
a6f12d7c 8088{
9c0e94a5 8089 /* Registers to save. */
0191520b 8090 unsigned HOST_WIDE_INT sa_mask = cfun->machine->sa_mask;
9c0e94a5 8091 /* Stack space needed for pushing registers clobbered by us. */
0191520b 8092 HOST_WIDE_INT sa_size = cfun->machine->sa_size;
9c0e94a5 8093 /* Complete stack size needed. */
0191520b 8094 HOST_WIDE_INT frame_size = cfun->machine->frame_size;
9c0e94a5
RH
8095 /* Offset from base reg to register save area. */
8096 HOST_WIDE_INT reg_offset;
8097 int fp_is_frame_pointer, fp_offset;
8098 rtx sa_reg, sa_reg_exp = NULL;
15cb981a 8099 rtx sp_adj1, sp_adj2, mem, reg, insn;
01439aee 8100 rtx eh_ofs;
15cb981a 8101 rtx cfa_restores = NULL_RTX;
a6f12d7c 8102
be7b80f4 8103 if (TARGET_ABI_OPEN_VMS)
c2ea1ac6
DR
8104 {
8105 if (alpha_procedure_type == PT_STACK)
221cf9ab 8106 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
c2ea1ac6
DR
8107 else
8108 reg_offset = 0;
8109 }
9c0e94a5 8110 else
38173d38 8111 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
9c0e94a5 8112
c2ea1ac6 8113 fp_is_frame_pointer
42d085c1
RH
8114 = (TARGET_ABI_OPEN_VMS
8115 ? alpha_procedure_type == PT_STACK
8116 : frame_pointer_needed);
c8d8ed65
RK
8117 fp_offset = 0;
8118 sa_reg = stack_pointer_rtx;
9c0e94a5 8119
e3b5732b 8120 if (crtl->calls_eh_return)
4573b4de
RH
8121 eh_ofs = EH_RETURN_STACKADJ_RTX;
8122 else
8123 eh_ofs = NULL_RTX;
8124
75db85d8 8125 if (sa_size)
9c0e94a5
RH
8126 {
8127 /* If we have a frame pointer, restore SP from it. */
42d085c1
RH
8128 if (TARGET_ABI_OPEN_VMS
8129 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8130 : frame_pointer_needed)
15cb981a 8131 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
0d24ff5d 8132
9c0e94a5 8133 /* Cope with very large offsets to the register save area. */
9c0e94a5 8134 if (reg_offset + sa_size > 0x8000)
a6f12d7c 8135 {
9c0e94a5
RH
8136 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8137 HOST_WIDE_INT bias;
8138
8139 if (low + sa_size <= 0x8000)
8140 bias = reg_offset - low, reg_offset = low;
f676971a 8141 else
9c0e94a5
RH
8142 bias = reg_offset, reg_offset = 0;
8143
8144 sa_reg = gen_rtx_REG (DImode, 22);
0a81f074 8145 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
9c0e94a5 8146
15cb981a 8147 emit_move_insn (sa_reg, sa_reg_exp);
a6f12d7c 8148 }
f676971a 8149
285a5742 8150 /* Restore registers in order, excepting a true frame pointer. */
0191520b
RH
8151 for (unsigned i = REG_RA; sa_mask != 0; i = ctz_hwi(sa_mask))
8152 {
8153 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8154 fp_offset = reg_offset;
8155 else
8156 {
8157 mem = gen_frame_mem (DImode,
8158 plus_constant (Pmode, sa_reg,
8159 reg_offset));
8160 reg = gen_rtx_REG (DImode, i);
8161 emit_move_insn (reg, mem);
8162 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8163 cfa_restores);
8164 }
8165 reg_offset += 8;
8166 sa_mask &= ~(HOST_WIDE_INT_1U << i);
8167 }
9c0e94a5 8168 }
a6f12d7c 8169
01439aee 8170 if (frame_size || eh_ofs)
9c0e94a5 8171 {
71038426
RH
8172 sp_adj1 = stack_pointer_rtx;
8173
01439aee 8174 if (eh_ofs)
71038426
RH
8175 {
8176 sp_adj1 = gen_rtx_REG (DImode, 23);
8177 emit_move_insn (sp_adj1,
01439aee 8178 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
71038426
RH
8179 }
8180
9c0e94a5
RH
8181 /* If the stack size is large, begin computation into a temporary
8182 register so as not to interfere with a potential fp restore,
8183 which must be consecutive with an SP restore. */
75db85d8 8184 if (frame_size < 32768 && !cfun->calls_alloca)
71038426 8185 sp_adj2 = GEN_INT (frame_size);
9c0e94a5
RH
8186 else if (frame_size < 0x40007fffL)
8187 {
8188 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8189
0a81f074 8190 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
9c0e94a5
RH
8191 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8192 sp_adj1 = sa_reg;
8193 else
8194 {
8195 sp_adj1 = gen_rtx_REG (DImode, 23);
15cb981a 8196 emit_move_insn (sp_adj1, sp_adj2);
9c0e94a5
RH
8197 }
8198 sp_adj2 = GEN_INT (low);
8199 }
d60a05a1 8200 else
9c0e94a5 8201 {
71038426 8202 rtx tmp = gen_rtx_REG (DImode, 23);
15cb981a 8203 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
71038426 8204 if (!sp_adj2)
9c0e94a5
RH
8205 {
8206 /* We can't drop new things to memory this late, afaik,
8207 so build it up by pieces. */
da80c6b8 8208 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size);
56daab84 8209 gcc_assert (sp_adj2);
9c0e94a5 8210 }
9c0e94a5 8211 }
a6f12d7c 8212
9c0e94a5
RH
8213 /* From now on, things must be in order. So emit blockages. */
8214
8215 /* Restore the frame pointer. */
75db85d8 8216 if (fp_is_frame_pointer)
9c0e94a5
RH
8217 {
8218 emit_insn (gen_blockage ());
0a81f074
RS
8219 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8220 fp_offset));
15cb981a
RH
8221 emit_move_insn (hard_frame_pointer_rtx, mem);
8222 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8223 hard_frame_pointer_rtx, cfa_restores);
9c0e94a5 8224 }
be7b80f4 8225 else if (TARGET_ABI_OPEN_VMS)
9c0e94a5
RH
8226 {
8227 emit_insn (gen_blockage ());
15cb981a
RH
8228 emit_move_insn (hard_frame_pointer_rtx,
8229 gen_rtx_REG (DImode, vms_save_fp_regno));
8230 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8231 hard_frame_pointer_rtx, cfa_restores);
9c0e94a5
RH
8232 }
8233
8234 /* Restore the stack pointer. */
8235 emit_insn (gen_blockage ());
30102605 8236 if (sp_adj2 == const0_rtx)
15cb981a 8237 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
30102605 8238 else
15cb981a
RH
8239 insn = emit_move_insn (stack_pointer_rtx,
8240 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8241 REG_NOTES (insn) = cfa_restores;
8242 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8243 RTX_FRAME_RELATED_P (insn) = 1;
9c0e94a5 8244 }
f676971a 8245 else
9c0e94a5 8246 {
15cb981a
RH
8247 gcc_assert (cfa_restores == NULL);
8248
c2ea1ac6 8249 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
9c0e94a5
RH
8250 {
8251 emit_insn (gen_blockage ());
15cb981a
RH
8252 insn = emit_move_insn (hard_frame_pointer_rtx,
8253 gen_rtx_REG (DImode, vms_save_fp_regno));
8254 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8255 RTX_FRAME_RELATED_P (insn) = 1;
9c0e94a5 8256 }
a6f12d7c 8257 }
9c0e94a5 8258}
1330f7d5 8259\f
9c0e94a5
RH
8260/* Output the rest of the textual info surrounding the epilogue. */
8261
8262void
a5c24926 8263alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
9c0e94a5 8264{
cad003ba 8265 rtx_insn *insn;
e4bec638
RH
8266
8267 /* We output a nop after noreturn calls at the very end of the function to
8268 ensure that the return address always remains in the caller's code range,
8269 as not doing so might confuse unwinding engines. */
8270 insn = get_last_insn ();
8271 if (!INSN_P (insn))
8272 insn = prev_active_insn (insn);
3eb96d01 8273 if (insn && CALL_P (insn))
e4bec638
RH
8274 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8275
7053a0e2 8276#if TARGET_ABI_OPEN_VMS
735f469b
TG
8277 /* Write the linkage entries. */
8278 alpha_write_linkage (file, fnname);
7053a0e2
BG
8279#endif
8280
a6f12d7c 8281 /* End the function. */
b213221d
TG
8282 if (TARGET_ABI_OPEN_VMS
8283 || !flag_inhibit_size_directive)
33d01c33 8284 {
9c0e94a5 8285 fputs ("\t.end ", file);
acd92049 8286 assemble_name (file, fnname);
9c0e94a5 8287 putc ('\n', file);
33d01c33 8288 }
48f6bfac 8289 inside_function = FALSE;
a6f12d7c 8290}
14691f8d 8291
c590b625
RH
8292#if TARGET_ABI_OSF
8293/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
14691f8d
RH
8294
8295 In order to avoid the hordes of differences between generated code
8296 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8297 lots of code loading up large constants, generate rtl and emit it
8298 instead of going straight to text.
8299
8300 Not sure why this idea hasn't been explored before... */
8301
c590b625 8302static void
a5c24926
RH
8303alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8304 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8305 tree function)
14691f8d 8306{
f7430263 8307 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
14691f8d 8308 HOST_WIDE_INT hi, lo;
cad003ba
DM
8309 rtx this_rtx, funexp;
8310 rtx_insn *insn;
14691f8d
RH
8311
8312 /* We always require a valid GP. */
8313 emit_insn (gen_prologue_ldgp ());
2e040219 8314 emit_note (NOTE_INSN_PROLOGUE_END);
14691f8d
RH
8315
8316 /* Find the "this" pointer. If the function returns a structure,
8317 the structure return pointer is in $16. */
61f71b34 8318 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
0a2aaacc 8319 this_rtx = gen_rtx_REG (Pmode, 17);
14691f8d 8320 else
0a2aaacc 8321 this_rtx = gen_rtx_REG (Pmode, 16);
14691f8d
RH
8322
8323 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8324 entire constant for the add. */
8325 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8326 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8327 if (hi + lo == delta)
8328 {
8329 if (hi)
0a2aaacc 8330 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
14691f8d 8331 if (lo)
0a2aaacc 8332 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
14691f8d
RH
8333 }
8334 else
8335 {
da80c6b8 8336 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta);
0a2aaacc 8337 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
14691f8d
RH
8338 }
8339
e2358068
RH
8340 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8341 if (vcall_offset)
8342 {
8343 rtx tmp, tmp2;
8344
8345 tmp = gen_rtx_REG (Pmode, 0);
0a2aaacc 8346 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
e2358068
RH
8347
8348 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8349 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8350 if (hi + lo == vcall_offset)
8351 {
8352 if (hi)
8353 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8354 }
8355 else
8356 {
8357 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
da80c6b8 8358 vcall_offset);
e2358068
RH
8359 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8360 lo = 0;
8361 }
8362 if (lo)
8363 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8364 else
8365 tmp2 = tmp;
8366 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8367
0a2aaacc 8368 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
e2358068
RH
8369 }
8370
14691f8d
RH
8371 /* Generate a tail call to the target function. */
8372 if (! TREE_USED (function))
8373 {
8374 assemble_external (function);
8375 TREE_USED (function) = 1;
8376 }
8377 funexp = XEXP (DECL_RTL (function), 0);
8378 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8379 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8380 SIBLING_CALL_P (insn) = 1;
8381
8382 /* Run just enough of rest_of_compilation to get the insns emitted.
8383 There's not really enough bulk here to make other passes such as
8b4e7143 8384 instruction scheduling worth while. */
14691f8d
RH
8385 insn = get_insns ();
8386 shorten_branches (insn);
f7430263 8387 assemble_start_function (thunk_fndecl, fnname);
14691f8d 8388 final_start_function (insn, file, 1);
c9d691e9 8389 final (insn, file, 1);
14691f8d 8390 final_end_function ();
f7430263 8391 assemble_end_function (thunk_fndecl, fnname);
14691f8d 8392}
c590b625 8393#endif /* TARGET_ABI_OSF */
48f6bfac
RK
8394\f
8395/* Debugging support. */
8396
8397#include "gstab.h"
8398
48f6bfac
RK
8399/* Name of the file containing the current function. */
8400
df45c7ea 8401static const char *current_function_file = "";
48f6bfac
RK
8402
8403/* Offsets to alpha virtual arg/local debugging pointers. */
8404
8405long alpha_arg_offset;
8406long alpha_auto_offset;
8407\f
8408/* Emit a new filename to a stream. */
8409
8410void
a5c24926 8411alpha_output_filename (FILE *stream, const char *name)
48f6bfac
RK
8412{
8413 static int first_time = TRUE;
48f6bfac
RK
8414
8415 if (first_time)
8416 {
8417 first_time = FALSE;
8418 ++num_source_filenames;
8419 current_function_file = name;
8420 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8421 output_quoted_string (stream, name);
8422 fprintf (stream, "\n");
48f6bfac
RK
8423 }
8424
48f6bfac 8425 else if (name != current_function_file
5665caa2 8426 && strcmp (name, current_function_file) != 0)
48f6bfac 8427 {
46e1a769
RO
8428 ++num_source_filenames;
8429 current_function_file = name;
8430 fprintf (stream, "\t.file\t%d ", num_source_filenames);
48f6bfac
RK
8431
8432 output_quoted_string (stream, name);
8433 fprintf (stream, "\n");
8434 }
8435}
6245e3df
RK
8436\f
8437/* Structure to show the current status of registers and memory. */
8438
8439struct shadow_summary
8440{
8441 struct {
1d11bf18
RH
8442 unsigned int i : 31; /* Mask of int regs */
8443 unsigned int fp : 31; /* Mask of fp regs */
8444 unsigned int mem : 1; /* mem == imem | fpmem */
6245e3df
RK
8445 } used, defd;
8446};
8447
8448/* Summary the effects of expression X on the machine. Update SUM, a pointer
8449 to the summary structure. SET is nonzero if the insn is setting the
8450 object, otherwise zero. */
8451
8452static void
a5c24926 8453summarize_insn (rtx x, struct shadow_summary *sum, int set)
6245e3df 8454{
6f7d635c 8455 const char *format_ptr;
6245e3df
RK
8456 int i, j;
8457
8458 if (x == 0)
8459 return;
8460
8461 switch (GET_CODE (x))
8462 {
8463 /* ??? Note that this case would be incorrect if the Alpha had a
8464 ZERO_EXTRACT in SET_DEST. */
8465 case SET:
8466 summarize_insn (SET_SRC (x), sum, 0);
8467 summarize_insn (SET_DEST (x), sum, 1);
8468 break;
8469
8470 case CLOBBER:
8471 summarize_insn (XEXP (x, 0), sum, 1);
8472 break;
8473
8474 case USE:
8475 summarize_insn (XEXP (x, 0), sum, 0);
8476 break;
8477
f4e31cf5
RH
8478 case ASM_OPERANDS:
8479 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8480 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8481 break;
8482
6245e3df 8483 case PARALLEL:
8fed04e5 8484 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
6245e3df
RK
8485 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8486 break;
8487
f4e31cf5 8488 case SUBREG:
9c0e94a5
RH
8489 summarize_insn (SUBREG_REG (x), sum, 0);
8490 break;
f4e31cf5 8491
6245e3df
RK
8492 case REG:
8493 {
8494 int regno = REGNO (x);
948068e2 8495 unsigned long mask = ((unsigned long) 1) << (regno % 32);
6245e3df
RK
8496
8497 if (regno == 31 || regno == 63)
8498 break;
8499
8500 if (set)
8501 {
8502 if (regno < 32)
8503 sum->defd.i |= mask;
8504 else
8505 sum->defd.fp |= mask;
8506 }
8507 else
8508 {
8509 if (regno < 32)
8510 sum->used.i |= mask;
8511 else
8512 sum->used.fp |= mask;
8513 }
8514 }
8515 break;
8516
8517 case MEM:
8518 if (set)
8519 sum->defd.mem = 1;
8520 else
8521 sum->used.mem = 1;
8522
8523 /* Find the regs used in memory address computation: */
8524 summarize_insn (XEXP (x, 0), sum, 0);
8525 break;
8526
f06ed650
UB
8527 case CONST_INT: case CONST_WIDE_INT: case CONST_DOUBLE:
8528 case SYMBOL_REF: case LABEL_REF: case CONST:
368a1647 8529 case SCRATCH: case ASM_INPUT:
8ba46994
RK
8530 break;
8531
6245e3df
RK
8532 /* Handle common unary and binary ops for efficiency. */
8533 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8534 case MOD: case UDIV: case UMOD: case AND: case IOR:
8535 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8536 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8537 case NE: case EQ: case GE: case GT: case LE:
8538 case LT: case GEU: case GTU: case LEU: case LTU:
8539 summarize_insn (XEXP (x, 0), sum, 0);
8540 summarize_insn (XEXP (x, 1), sum, 0);
8541 break;
8542
8543 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8544 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8545 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
f676971a 8546 case SQRT: case FFS:
6245e3df
RK
8547 summarize_insn (XEXP (x, 0), sum, 0);
8548 break;
8549
8550 default:
8551 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8fed04e5 8552 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
bed95fa1 8553 switch (format_ptr[i])
6245e3df
RK
8554 {
8555 case 'e':
8556 summarize_insn (XEXP (x, i), sum, 0);
8557 break;
8558
8559 case 'E':
8fed04e5 8560 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6245e3df
RK
8561 summarize_insn (XVECEXP (x, i, j), sum, 0);
8562 break;
8563
2b01d264
RH
8564 case 'i':
8565 break;
8566
6245e3df 8567 default:
56daab84 8568 gcc_unreachable ();
6245e3df
RK
8569 }
8570 }
8571}
6245e3df 8572
9c0e94a5
RH
8573/* Ensure a sufficient number of `trapb' insns are in the code when
8574 the user requests code with a trap precision of functions or
8575 instructions.
8576
8577 In naive mode, when the user requests a trap-precision of
8578 "instruction", a trapb is needed after every instruction that may
8579 generate a trap. This ensures that the code is resumption safe but
8580 it is also slow.
8581
8582 When optimizations are turned on, we delay issuing a trapb as long
8583 as possible. In this context, a trap shadow is the sequence of
8584 instructions that starts with a (potentially) trap generating
8585 instruction and extends to the next trapb or call_pal instruction
8586 (but GCC never generates call_pal by itself). We can delay (and
8587 therefore sometimes omit) a trapb subject to the following
8588 conditions:
8589
8590 (a) On entry to the trap shadow, if any Alpha register or memory
8591 location contains a value that is used as an operand value by some
8592 instruction in the trap shadow (live on entry), then no instruction
8593 in the trap shadow may modify the register or memory location.
8594
8595 (b) Within the trap shadow, the computation of the base register
8596 for a memory load or store instruction may not involve using the
8597 result of an instruction that might generate an UNPREDICTABLE
8598 result.
8599
8600 (c) Within the trap shadow, no register may be used more than once
8601 as a destination register. (This is to make life easier for the
8602 trap-handler.)
6245e3df 8603
2ea844d3 8604 (d) The trap shadow may not include any branch instructions. */
6245e3df 8605
2ea844d3 8606static void
a5c24926 8607alpha_handle_trap_shadows (void)
6245e3df 8608{
2ea844d3
RH
8609 struct shadow_summary shadow;
8610 int trap_pending, exception_nesting;
b32d5189 8611 rtx_insn *i, *n;
6245e3df 8612
2ea844d3
RH
8613 trap_pending = 0;
8614 exception_nesting = 0;
8615 shadow.used.i = 0;
8616 shadow.used.fp = 0;
8617 shadow.used.mem = 0;
8618 shadow.defd = shadow.used;
f676971a 8619
18dbd950 8620 for (i = get_insns (); i ; i = NEXT_INSN (i))
2ea844d3 8621 {
7d83f4f5 8622 if (NOTE_P (i))
2ea844d3 8623 {
a38e7aa5 8624 switch (NOTE_KIND (i))
2ea844d3
RH
8625 {
8626 case NOTE_INSN_EH_REGION_BEG:
8627 exception_nesting++;
8628 if (trap_pending)
8629 goto close_shadow;
8630 break;
8631
8632 case NOTE_INSN_EH_REGION_END:
8633 exception_nesting--;
8634 if (trap_pending)
8635 goto close_shadow;
8636 break;
8637
8638 case NOTE_INSN_EPILOGUE_BEG:
8639 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8640 goto close_shadow;
8641 break;
8642 }
8643 }
8644 else if (trap_pending)
8645 {
8646 if (alpha_tp == ALPHA_TP_FUNC)
8647 {
7d83f4f5 8648 if (JUMP_P (i)
2ea844d3
RH
8649 && GET_CODE (PATTERN (i)) == RETURN)
8650 goto close_shadow;
8651 }
8652 else if (alpha_tp == ALPHA_TP_INSN)
8653 {
8654 if (optimize > 0)
8655 {
8656 struct shadow_summary sum;
8657
8658 sum.used.i = 0;
8659 sum.used.fp = 0;
8660 sum.used.mem = 0;
f4e31cf5 8661 sum.defd = sum.used;
2ea844d3
RH
8662
8663 switch (GET_CODE (i))
8664 {
8665 case INSN:
56daab84 8666 /* Annoyingly, get_attr_trap will die on these. */
bb02e7ea
RH
8667 if (GET_CODE (PATTERN (i)) == USE
8668 || GET_CODE (PATTERN (i)) == CLOBBER)
2ea844d3
RH
8669 break;
8670
8671 summarize_insn (PATTERN (i), &sum, 0);
8672
8673 if ((sum.defd.i & shadow.defd.i)
8674 || (sum.defd.fp & shadow.defd.fp))
8675 {
8676 /* (c) would be violated */
8677 goto close_shadow;
8678 }
8679
8680 /* Combine shadow with summary of current insn: */
8681 shadow.used.i |= sum.used.i;
8682 shadow.used.fp |= sum.used.fp;
8683 shadow.used.mem |= sum.used.mem;
8684 shadow.defd.i |= sum.defd.i;
8685 shadow.defd.fp |= sum.defd.fp;
8686 shadow.defd.mem |= sum.defd.mem;
8687
8688 if ((sum.defd.i & shadow.used.i)
8689 || (sum.defd.fp & shadow.used.fp)
8690 || (sum.defd.mem & shadow.used.mem))
8691 {
8692 /* (a) would be violated (also takes care of (b)) */
56daab84
NS
8693 gcc_assert (get_attr_trap (i) != TRAP_YES
8694 || (!(sum.defd.i & sum.used.i)
8695 && !(sum.defd.fp & sum.used.fp)));
2ea844d3
RH
8696
8697 goto close_shadow;
8698 }
8699 break;
8700
dd5e7837
UB
8701 case BARRIER:
8702 /* __builtin_unreachable can expand to no code at all,
8703 leaving (barrier) RTXes in the instruction stream. */
8704 goto close_shadow_notrapb;
8705
2ea844d3
RH
8706 case JUMP_INSN:
8707 case CALL_INSN:
8708 case CODE_LABEL:
8709 goto close_shadow;
8710
8711 default:
56daab84 8712 gcc_unreachable ();
2ea844d3
RH
8713 }
8714 }
8715 else
8716 {
8717 close_shadow:
68aed21b
RH
8718 n = emit_insn_before (gen_trapb (), i);
8719 PUT_MODE (n, TImode);
8720 PUT_MODE (i, TImode);
dd5e7837 8721 close_shadow_notrapb:
2ea844d3
RH
8722 trap_pending = 0;
8723 shadow.used.i = 0;
8724 shadow.used.fp = 0;
8725 shadow.used.mem = 0;
8726 shadow.defd = shadow.used;
8727 }
8728 }
8729 }
6245e3df 8730
4f3f5e9f 8731 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
7d83f4f5 8732 && NONJUMP_INSN_P (i)
4f3f5e9f
RH
8733 && GET_CODE (PATTERN (i)) != USE
8734 && GET_CODE (PATTERN (i)) != CLOBBER
8735 && get_attr_trap (i) == TRAP_YES)
8736 {
8737 if (optimize && !trap_pending)
8738 summarize_insn (PATTERN (i), &shadow, 0);
8739 trap_pending = 1;
8740 }
6245e3df
RK
8741 }
8742}
68aed21b 8743\f
68aed21b 8744/* Alpha can only issue instruction groups simultaneously if they are
093354e0 8745 suitably aligned. This is very processor-specific. */
4ead2a39
RH
8746/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8747 that are marked "fake". These instructions do not exist on that target,
8748 but it is possible to see these insns with deranged combinations of
8749 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8750 choose a result at random. */
68aed21b 8751
3873d24b
RH
8752enum alphaev4_pipe {
8753 EV4_STOP = 0,
8754 EV4_IB0 = 1,
8755 EV4_IB1 = 2,
8756 EV4_IBX = 4
8757};
8758
68aed21b
RH
8759enum alphaev5_pipe {
8760 EV5_STOP = 0,
8761 EV5_NONE = 1,
8762 EV5_E01 = 2,
8763 EV5_E0 = 4,
8764 EV5_E1 = 8,
8765 EV5_FAM = 16,
8766 EV5_FA = 32,
8767 EV5_FM = 64
8768};
8769
3873d24b 8770static enum alphaev4_pipe
cad003ba 8771alphaev4_insn_pipe (rtx_insn *insn)
3873d24b
RH
8772{
8773 if (recog_memoized (insn) < 0)
8774 return EV4_STOP;
8775 if (get_attr_length (insn) != 4)
8776 return EV4_STOP;
8777
8778 switch (get_attr_type (insn))
8779 {
8780 case TYPE_ILD:
0b196b18 8781 case TYPE_LDSYM:
3873d24b 8782 case TYPE_FLD:
0b196b18 8783 case TYPE_LD_L:
3873d24b
RH
8784 return EV4_IBX;
8785
3873d24b
RH
8786 case TYPE_IADD:
8787 case TYPE_ILOG:
8788 case TYPE_ICMOV:
8789 case TYPE_ICMP:
3873d24b
RH
8790 case TYPE_FST:
8791 case TYPE_SHIFT:
8792 case TYPE_IMUL:
8793 case TYPE_FBR:
4ead2a39 8794 case TYPE_MVI: /* fake */
3873d24b
RH
8795 return EV4_IB0;
8796
0b196b18 8797 case TYPE_IST:
3873d24b
RH
8798 case TYPE_MISC:
8799 case TYPE_IBR:
8800 case TYPE_JSR:
d5909a79 8801 case TYPE_CALLPAL:
3873d24b
RH
8802 case TYPE_FCPYS:
8803 case TYPE_FCMOV:
8804 case TYPE_FADD:
8805 case TYPE_FDIV:
8806 case TYPE_FMUL:
0b196b18
RH
8807 case TYPE_ST_C:
8808 case TYPE_MB:
4ead2a39
RH
8809 case TYPE_FSQRT: /* fake */
8810 case TYPE_FTOI: /* fake */
8811 case TYPE_ITOF: /* fake */
3873d24b
RH
8812 return EV4_IB1;
8813
8814 default:
56daab84 8815 gcc_unreachable ();
3873d24b
RH
8816 }
8817}
8818
68aed21b 8819static enum alphaev5_pipe
cad003ba 8820alphaev5_insn_pipe (rtx_insn *insn)
68aed21b
RH
8821{
8822 if (recog_memoized (insn) < 0)
8823 return EV5_STOP;
8824 if (get_attr_length (insn) != 4)
8825 return EV5_STOP;
8826
8827 switch (get_attr_type (insn))
8828 {
8829 case TYPE_ILD:
8830 case TYPE_FLD:
8831 case TYPE_LDSYM:
8832 case TYPE_IADD:
8833 case TYPE_ILOG:
8834 case TYPE_ICMOV:
8835 case TYPE_ICMP:
8836 return EV5_E01;
8837
8838 case TYPE_IST:
8839 case TYPE_FST:
8840 case TYPE_SHIFT:
8841 case TYPE_IMUL:
8842 case TYPE_MISC:
8843 case TYPE_MVI:
0b196b18
RH
8844 case TYPE_LD_L:
8845 case TYPE_ST_C:
8846 case TYPE_MB:
4ead2a39
RH
8847 case TYPE_FTOI: /* fake */
8848 case TYPE_ITOF: /* fake */
68aed21b
RH
8849 return EV5_E0;
8850
8851 case TYPE_IBR:
8852 case TYPE_JSR:
d5909a79 8853 case TYPE_CALLPAL:
68aed21b
RH
8854 return EV5_E1;
8855
8856 case TYPE_FCPYS:
8857 return EV5_FAM;
8858
8859 case TYPE_FBR:
8860 case TYPE_FCMOV:
8861 case TYPE_FADD:
8862 case TYPE_FDIV:
4ead2a39 8863 case TYPE_FSQRT: /* fake */
68aed21b
RH
8864 return EV5_FA;
8865
8866 case TYPE_FMUL:
8867 return EV5_FM;
2c01018f
RH
8868
8869 default:
56daab84 8870 gcc_unreachable ();
68aed21b 8871 }
68aed21b
RH
8872}
8873
f676971a 8874/* IN_USE is a mask of the slots currently filled within the insn group.
3873d24b 8875 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
f676971a 8876 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
3873d24b
RH
8877
8878 LEN is, of course, the length of the group in bytes. */
8879
cad003ba
DM
8880static rtx_insn *
8881alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
3873d24b
RH
8882{
8883 int len, in_use;
8884
8885 len = in_use = 0;
8886
2c3c49de 8887 if (! INSN_P (insn)
3873d24b
RH
8888 || GET_CODE (PATTERN (insn)) == CLOBBER
8889 || GET_CODE (PATTERN (insn)) == USE)
8890 goto next_and_done;
8891
8892 while (1)
8893 {
8894 enum alphaev4_pipe pipe;
8895
8896 pipe = alphaev4_insn_pipe (insn);
8897 switch (pipe)
8898 {
8899 case EV4_STOP:
8900 /* Force complex instructions to start new groups. */
8901 if (in_use)
8902 goto done;
8903
f3b569ca 8904 /* If this is a completely unrecognized insn, it's an asm.
3873d24b
RH
8905 We don't know how long it is, so record length as -1 to
8906 signal a needed realignment. */
8907 if (recog_memoized (insn) < 0)
8908 len = -1;
8909 else
8910 len = get_attr_length (insn);
8911 goto next_and_done;
8912
8913 case EV4_IBX:
8914 if (in_use & EV4_IB0)
8915 {
8916 if (in_use & EV4_IB1)
8917 goto done;
8918 in_use |= EV4_IB1;
8919 }
8920 else
8921 in_use |= EV4_IB0 | EV4_IBX;
8922 break;
8923
8924 case EV4_IB0:
8925 if (in_use & EV4_IB0)
8926 {
8927 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8928 goto done;
8929 in_use |= EV4_IB1;
8930 }
8931 in_use |= EV4_IB0;
8932 break;
8933
8934 case EV4_IB1:
8935 if (in_use & EV4_IB1)
8936 goto done;
8937 in_use |= EV4_IB1;
8938 break;
8939
8940 default:
56daab84 8941 gcc_unreachable ();
3873d24b
RH
8942 }
8943 len += 4;
f676971a 8944
3873d24b 8945 /* Haifa doesn't do well scheduling branches. */
7d83f4f5 8946 if (JUMP_P (insn))
3873d24b
RH
8947 goto next_and_done;
8948
8949 next:
8950 insn = next_nonnote_insn (insn);
8951
2c3c49de 8952 if (!insn || ! INSN_P (insn))
3873d24b
RH
8953 goto done;
8954
8955 /* Let Haifa tell us where it thinks insn group boundaries are. */
8956 if (GET_MODE (insn) == TImode)
8957 goto done;
8958
8959 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8960 goto next;
8961 }
8962
8963 next_and_done:
8964 insn = next_nonnote_insn (insn);
8965
8966 done:
8967 *plen = len;
8968 *pin_use = in_use;
8969 return insn;
8970}
8971
f676971a 8972/* IN_USE is a mask of the slots currently filled within the insn group.
3873d24b 8973 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
f676971a 8974 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
68aed21b
RH
8975
8976 LEN is, of course, the length of the group in bytes. */
8977
cad003ba
DM
8978static rtx_insn *
8979alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
68aed21b
RH
8980{
8981 int len, in_use;
8982
8983 len = in_use = 0;
8984
2c3c49de 8985 if (! INSN_P (insn)
2c01018f
RH
8986 || GET_CODE (PATTERN (insn)) == CLOBBER
8987 || GET_CODE (PATTERN (insn)) == USE)
8988 goto next_and_done;
68aed21b 8989
2c01018f 8990 while (1)
68aed21b
RH
8991 {
8992 enum alphaev5_pipe pipe;
68aed21b
RH
8993
8994 pipe = alphaev5_insn_pipe (insn);
8995 switch (pipe)
8996 {
8997 case EV5_STOP:
8998 /* Force complex instructions to start new groups. */
8999 if (in_use)
9000 goto done;
9001
f3b569ca 9002 /* If this is a completely unrecognized insn, it's an asm.
68aed21b
RH
9003 We don't know how long it is, so record length as -1 to
9004 signal a needed realignment. */
9005 if (recog_memoized (insn) < 0)
9006 len = -1;
9007 else
9008 len = get_attr_length (insn);
2c01018f 9009 goto next_and_done;
68aed21b 9010
56daab84
NS
9011 /* ??? Most of the places below, we would like to assert never
9012 happen, as it would indicate an error either in Haifa, or
9013 in the scheduling description. Unfortunately, Haifa never
9014 schedules the last instruction of the BB, so we don't have
9015 an accurate TI bit to go off. */
68aed21b
RH
9016 case EV5_E01:
9017 if (in_use & EV5_E0)
9018 {
9019 if (in_use & EV5_E1)
9020 goto done;
9021 in_use |= EV5_E1;
9022 }
9023 else
9024 in_use |= EV5_E0 | EV5_E01;
9025 break;
9026
9027 case EV5_E0:
9028 if (in_use & EV5_E0)
9029 {
3873d24b 9030 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
68aed21b
RH
9031 goto done;
9032 in_use |= EV5_E1;
9033 }
9034 in_use |= EV5_E0;
9035 break;
9036
9037 case EV5_E1:
9038 if (in_use & EV5_E1)
9039 goto done;
9040 in_use |= EV5_E1;
9041 break;
9042
9043 case EV5_FAM:
9044 if (in_use & EV5_FA)
9045 {
9046 if (in_use & EV5_FM)
9047 goto done;
9048 in_use |= EV5_FM;
9049 }
9050 else
9051 in_use |= EV5_FA | EV5_FAM;
9052 break;
9053
9054 case EV5_FA:
9055 if (in_use & EV5_FA)
9056 goto done;
9057 in_use |= EV5_FA;
9058 break;
9059
9060 case EV5_FM:
9061 if (in_use & EV5_FM)
9062 goto done;
9063 in_use |= EV5_FM;
9064 break;
9065
9066 case EV5_NONE:
9067 break;
9068
9069 default:
56daab84 9070 gcc_unreachable ();
68aed21b
RH
9071 }
9072 len += 4;
f676971a 9073
68aed21b
RH
9074 /* Haifa doesn't do well scheduling branches. */
9075 /* ??? If this is predicted not-taken, slotting continues, except
9076 that no more IBR, FBR, or JSR insns may be slotted. */
7d83f4f5 9077 if (JUMP_P (insn))
2c01018f 9078 goto next_and_done;
68aed21b 9079
2c01018f 9080 next:
68aed21b
RH
9081 insn = next_nonnote_insn (insn);
9082
2c3c49de 9083 if (!insn || ! INSN_P (insn))
68aed21b 9084 goto done;
a874dd18 9085
68aed21b
RH
9086 /* Let Haifa tell us where it thinks insn group boundaries are. */
9087 if (GET_MODE (insn) == TImode)
9088 goto done;
9089
2c01018f
RH
9090 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9091 goto next;
68aed21b 9092 }
2c01018f
RH
9093
9094 next_and_done:
9095 insn = next_nonnote_insn (insn);
68aed21b
RH
9096
9097 done:
9098 *plen = len;
9099 *pin_use = in_use;
9100 return insn;
68aed21b
RH
9101}
9102
3873d24b 9103static rtx
a5c24926 9104alphaev4_next_nop (int *pin_use)
3873d24b
RH
9105{
9106 int in_use = *pin_use;
9107 rtx nop;
9108
9109 if (!(in_use & EV4_IB0))
9110 {
9111 in_use |= EV4_IB0;
9112 nop = gen_nop ();
9113 }
9114 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9115 {
9116 in_use |= EV4_IB1;
9117 nop = gen_nop ();
9118 }
9119 else if (TARGET_FP && !(in_use & EV4_IB1))
9120 {
9121 in_use |= EV4_IB1;
9122 nop = gen_fnop ();
9123 }
9124 else
9125 nop = gen_unop ();
9126
9127 *pin_use = in_use;
9128 return nop;
9129}
9130
9131static rtx
a5c24926 9132alphaev5_next_nop (int *pin_use)
3873d24b
RH
9133{
9134 int in_use = *pin_use;
9135 rtx nop;
9136
9137 if (!(in_use & EV5_E1))
9138 {
9139 in_use |= EV5_E1;
9140 nop = gen_nop ();
9141 }
9142 else if (TARGET_FP && !(in_use & EV5_FA))
9143 {
9144 in_use |= EV5_FA;
9145 nop = gen_fnop ();
9146 }
9147 else if (TARGET_FP && !(in_use & EV5_FM))
9148 {
9149 in_use |= EV5_FM;
9150 nop = gen_fnop ();
9151 }
9152 else
9153 nop = gen_unop ();
9154
9155 *pin_use = in_use;
9156 return nop;
9157}
9158
9159/* The instruction group alignment main loop. */
9160
68aed21b 9161static void
4bdf6418 9162alpha_align_insns_1 (unsigned int max_align,
cad003ba 9163 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
4bdf6418 9164 rtx (*next_nop) (int *))
68aed21b
RH
9165{
9166 /* ALIGN is the known alignment for the insn group. */
b81f53a1 9167 unsigned int align;
68aed21b
RH
9168 /* OFS is the offset of the current insn in the insn group. */
9169 int ofs;
0f1341c7 9170 int prev_in_use, in_use, len, ldgp;
cad003ba 9171 rtx_insn *i, *next;
68aed21b
RH
9172
9173 /* Let shorten branches care for assigning alignments to code labels. */
18dbd950 9174 shorten_branches (get_insns ());
68aed21b 9175
e6de5335 9176 unsigned int option_alignment = align_functions.levels[0].get_value ();
c518c102 9177 if (option_alignment < 4)
30864e14 9178 align = 4;
c518c102
ML
9179 else if ((unsigned int) option_alignment < max_align)
9180 align = option_alignment;
30864e14
RH
9181 else
9182 align = max_align;
80db34d8 9183
68aed21b 9184 ofs = prev_in_use = 0;
18dbd950 9185 i = get_insns ();
7d83f4f5 9186 if (NOTE_P (i))
68aed21b
RH
9187 i = next_nonnote_insn (i);
9188
0f1341c7
RH
9189 ldgp = alpha_function_needs_gp ? 8 : 0;
9190
68aed21b
RH
9191 while (i)
9192 {
b81f53a1 9193 next = (*next_group) (i, &in_use, &len);
68aed21b
RH
9194
9195 /* When we see a label, resync alignment etc. */
7d83f4f5 9196 if (LABEL_P (i))
68aed21b 9197 {
e6de5335
ML
9198 unsigned int new_align
9199 = label_to_alignment (i).levels[0].get_value ();
b81f53a1 9200
68aed21b
RH
9201 if (new_align >= align)
9202 {
3873d24b 9203 align = new_align < max_align ? new_align : max_align;
68aed21b
RH
9204 ofs = 0;
9205 }
b81f53a1 9206
68aed21b
RH
9207 else if (ofs & (new_align-1))
9208 ofs = (ofs | (new_align-1)) + 1;
56daab84 9209 gcc_assert (!len);
68aed21b
RH
9210 }
9211
9212 /* Handle complex instructions special. */
9213 else if (in_use == 0)
9214 {
9215 /* Asms will have length < 0. This is a signal that we have
9216 lost alignment knowledge. Assume, however, that the asm
9217 will not mis-align instructions. */
9218 if (len < 0)
9219 {
9220 ofs = 0;
9221 align = 4;
9222 len = 0;
9223 }
9224 }
9225
9226 /* If the known alignment is smaller than the recognized insn group,
9227 realign the output. */
1eb356b9 9228 else if ((int) align < len)
68aed21b 9229 {
b81f53a1 9230 unsigned int new_log_align = len > 8 ? 4 : 3;
cad003ba 9231 rtx_insn *prev, *where;
68aed21b 9232
11cb1475 9233 where = prev = prev_nonnote_insn (i);
7d83f4f5 9234 if (!where || !LABEL_P (where))
68aed21b
RH
9235 where = i;
9236
11cb1475
RH
9237 /* Can't realign between a call and its gp reload. */
9238 if (! (TARGET_EXPLICIT_RELOCS
7d83f4f5 9239 && prev && CALL_P (prev)))
11cb1475
RH
9240 {
9241 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9242 align = 1 << new_log_align;
9243 ofs = 0;
9244 }
68aed21b
RH
9245 }
9246
0f1341c7
RH
9247 /* We may not insert padding inside the initial ldgp sequence. */
9248 else if (ldgp > 0)
9249 ldgp -= len;
9250
68aed21b
RH
9251 /* If the group won't fit in the same INT16 as the previous,
9252 we need to add padding to keep the group together. Rather
9253 than simply leaving the insn filling to the assembler, we
9254 can make use of the knowledge of what sorts of instructions
9255 were issued in the previous group to make sure that all of
9256 the added nops are really free. */
1eb356b9 9257 else if (ofs + len > (int) align)
68aed21b
RH
9258 {
9259 int nop_count = (align - ofs) / 4;
cad003ba 9260 rtx_insn *where;
68aed21b 9261
839a4992 9262 /* Insert nops before labels, branches, and calls to truly merge
11cb1475 9263 the execution of the nops with the previous instruction group. */
68aed21b 9264 where = prev_nonnote_insn (i);
3873d24b 9265 if (where)
68aed21b 9266 {
7d83f4f5 9267 if (LABEL_P (where))
68aed21b 9268 {
cad003ba 9269 rtx_insn *where2 = prev_nonnote_insn (where);
7d83f4f5 9270 if (where2 && JUMP_P (where2))
3873d24b 9271 where = where2;
68aed21b 9272 }
7d83f4f5 9273 else if (NONJUMP_INSN_P (where))
3873d24b 9274 where = i;
68aed21b 9275 }
3873d24b
RH
9276 else
9277 where = i;
9278
f676971a 9279 do
3873d24b 9280 emit_insn_before ((*next_nop)(&prev_in_use), where);
68aed21b
RH
9281 while (--nop_count);
9282 ofs = 0;
9283 }
9284
9285 ofs = (ofs + len) & (align - 1);
9286 prev_in_use = in_use;
9287 i = next;
9288 }
9289}
76a4a1bd 9290
4bdf6418
UB
9291static void
9292alpha_align_insns (void)
9293{
9294 if (alpha_tune == PROCESSOR_EV4)
9295 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9296 else if (alpha_tune == PROCESSOR_EV5)
9297 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9298 else
9299 gcc_unreachable ();
9300}
9301
3eda5123 9302/* Insert an unop between sibcall or noreturn function call and GP load. */
76a4a1bd
UB
9303
9304static void
3eda5123 9305alpha_pad_function_end (void)
76a4a1bd 9306{
cad003ba 9307 rtx_insn *insn, *next;
76a4a1bd
UB
9308
9309 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9310 {
e1df0477
RH
9311 if (!CALL_P (insn)
9312 || !(SIBLING_CALL_P (insn)
9313 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
76a4a1bd
UB
9314 continue;
9315
9316 next = next_active_insn (insn);
76a4a1bd
UB
9317 if (next)
9318 {
9319 rtx pat = PATTERN (next);
9320
9321 if (GET_CODE (pat) == SET
9322 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9323 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9324 emit_insn_after (gen_unop (), insn);
9325 }
9326 }
9327}
68aed21b 9328\f
f5143c46 9329/* Machine dependent reorg pass. */
2ea844d3 9330
18dbd950 9331static void
a5c24926 9332alpha_reorg (void)
2ea844d3 9333{
3eda5123
UB
9334 /* Workaround for a linker error that triggers when an exception
9335 handler immediatelly follows a sibcall or a noreturn function.
9336
9337In the sibcall case:
9338
9339 The instruction stream from an object file:
9340
9341 1d8: 00 00 fb 6b jmp (t12)
9342 1dc: 00 00 ba 27 ldah gp,0(ra)
9343 1e0: 00 00 bd 23 lda gp,0(gp)
9344 1e4: 00 00 7d a7 ldq t12,0(gp)
9345 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9346
9347 was converted in the final link pass to:
9348
9349 12003aa88: 67 fa ff c3 br 120039428 <...>
9350 12003aa8c: 00 00 fe 2f unop
9351 12003aa90: 00 00 fe 2f unop
9352 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9353 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9354
9355And in the noreturn case:
76a4a1bd
UB
9356
9357 The instruction stream from an object file:
9358
9359 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9360 58: 00 00 ba 27 ldah gp,0(ra)
9361 5c: 00 00 bd 23 lda gp,0(gp)
9362 60: 00 00 7d a7 ldq t12,0(gp)
9363 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9364
9365 was converted in the final link pass to:
9366
9367 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9368 fdb28: 00 00 fe 2f unop
9369 fdb2c: 00 00 fe 2f unop
9370 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9371 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9372
9373 GP load instructions were wrongly cleared by the linker relaxation
9374 pass. This workaround prevents removal of GP loads by inserting
3eda5123 9375 an unop instruction between a sibcall or noreturn function call and
76a4a1bd
UB
9376 exception handler prologue. */
9377
9378 if (current_function_has_exception_handlers ())
3eda5123 9379 alpha_pad_function_end ();
cc8a9b87
UB
9380
9381 /* CALL_PAL that implements trap insn, updates program counter to point
9382 after the insn. In case trap is the last insn in the function,
9383 emit NOP to guarantee that PC remains inside function boundaries.
9384 This workaround is needed to get reliable backtraces. */
9385
9386 rtx_insn *insn = prev_active_insn (get_last_insn ());
9387
9388 if (insn && NONJUMP_INSN_P (insn))
9389 {
9390 rtx pat = PATTERN (insn);
9391 if (GET_CODE (pat) == PARALLEL)
9392 {
9393 rtx vec = XVECEXP (pat, 0, 0);
9394 if (GET_CODE (vec) == TRAP_IF
9395 && XEXP (vec, 0) == const1_rtx)
9396 emit_insn_after (gen_unop (), insn);
9397 }
9398 }
2ea844d3 9399}
2ea844d3 9400\f
1bc7c5b6
ZW
9401static void
9402alpha_file_start (void)
9403{
9404 default_file_start ();
1bc7c5b6
ZW
9405
9406 fputs ("\t.set noreorder\n", asm_out_file);
9407 fputs ("\t.set volatile\n", asm_out_file);
42d085c1 9408 if (TARGET_ABI_OSF)
1bc7c5b6
ZW
9409 fputs ("\t.set noat\n", asm_out_file);
9410 if (TARGET_EXPLICIT_RELOCS)
9411 fputs ("\t.set nomacro\n", asm_out_file);
9412 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
8bea7f7c
RH
9413 {
9414 const char *arch;
9415
9416 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9417 arch = "ev6";
9418 else if (TARGET_MAX)
9419 arch = "pca56";
9420 else if (TARGET_BWX)
9421 arch = "ev56";
9422 else if (alpha_cpu == PROCESSOR_EV5)
9423 arch = "ev5";
9424 else
9425 arch = "ev4";
9426
9427 fprintf (asm_out_file, "\t.arch %s\n", arch);
9428 }
1bc7c5b6 9429}
1bc7c5b6 9430
9b580a0b
RH
9431/* Since we don't have a .dynbss section, we should not allow global
9432 relocations in the .rodata section. */
9433
9434static int
9435alpha_elf_reloc_rw_mask (void)
9436{
9437 return flag_pic ? 3 : 2;
9438}
b64a1b53 9439
d6b5193b
RS
9440/* Return a section for X. The only special thing we do here is to
9441 honor small data. */
b64a1b53 9442
d6b5193b 9443static section *
ef4bddc2 9444alpha_elf_select_rtx_section (machine_mode mode, rtx x,
a5c24926 9445 unsigned HOST_WIDE_INT align)
b64a1b53
RH
9446{
9447 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
093354e0 9448 /* ??? Consider using mergeable sdata sections. */
d6b5193b 9449 return sdata_section;
b64a1b53 9450 else
d6b5193b 9451 return default_elf_select_rtx_section (mode, x, align);
b64a1b53
RH
9452}
9453
ae069803
RH
9454static unsigned int
9455alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9456{
9457 unsigned int flags = 0;
9458
9459 if (strcmp (name, ".sdata") == 0
9460 || strncmp (name, ".sdata.", 7) == 0
9461 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9462 || strcmp (name, ".sbss") == 0
9463 || strncmp (name, ".sbss.", 6) == 0
9464 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9465 flags = SECTION_SMALL;
9466
9467 flags |= default_section_type_flags (decl, name, reloc);
9468 return flags;
9469}
b64a1b53 9470\f
f030826a
RH
9471/* Structure to collect function names for final output in link section. */
9472/* Note that items marked with GTY can't be ifdef'ed out. */
17211ab5 9473
735f469b
TG
9474enum reloc_kind
9475{
9476 KIND_LINKAGE,
9477 KIND_CODEADDR
9478};
17211ab5 9479
d1b38208 9480struct GTY(()) alpha_links
17211ab5 9481{
735f469b 9482 rtx func;
17211ab5 9483 rtx linkage;
f030826a
RH
9484 enum reloc_kind rkind;
9485};
9486
be7b80f4 9487#if TARGET_ABI_OPEN_VMS
89cfc2c6 9488
e9a25f70 9489/* Return the VMS argument type corresponding to MODE. */
89cfc2c6 9490
e9a25f70 9491enum avms_arg_type
ef4bddc2 9492alpha_arg_type (machine_mode mode)
e9a25f70
JL
9493{
9494 switch (mode)
89cfc2c6 9495 {
4e10a5a7 9496 case E_SFmode:
e9a25f70 9497 return TARGET_FLOAT_VAX ? FF : FS;
4e10a5a7 9498 case E_DFmode:
e9a25f70
JL
9499 return TARGET_FLOAT_VAX ? FD : FT;
9500 default:
9501 return I64;
89cfc2c6 9502 }
e9a25f70 9503}
89cfc2c6 9504
e9a25f70
JL
9505/* Return an rtx for an integer representing the VMS Argument Information
9506 register value. */
89cfc2c6 9507
aa388f29 9508rtx
a5c24926 9509alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
e9a25f70
JL
9510{
9511 unsigned HOST_WIDE_INT regval = cum.num_args;
9512 int i;
89cfc2c6 9513
e9a25f70
JL
9514 for (i = 0; i < 6; i++)
9515 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
89cfc2c6 9516
e9a25f70
JL
9517 return GEN_INT (regval);
9518}
9519\f
89cfc2c6 9520
b714133e
EB
9521/* Return a SYMBOL_REF representing the reference to the .linkage entry
9522 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9523 this is the reference to the linkage pointer value, 0 if this is the
9524 reference to the function entry value. RFLAG is 1 if this a reduced
9525 reference (code address only), 0 if this is a full reference. */
9526
1330f7d5 9527rtx
735f469b 9528alpha_use_linkage (rtx func, bool lflag, bool rflag)
1330f7d5 9529{
735f469b 9530 struct alpha_links *al = NULL;
b714133e 9531 const char *name = XSTR (func, 0);
1330f7d5 9532
735f469b 9533 if (cfun->machine->links)
1330f7d5 9534 {
1330f7d5 9535 /* Is this name already defined? */
a6330e85 9536 alpha_links **slot = cfun->machine->links->get (name);
de144fb2
TS
9537 if (slot)
9538 al = *slot;
1330f7d5
DR
9539 }
9540 else
de144fb2 9541 cfun->machine->links
fb5c464a 9542 = hash_map<nofree_string_hash, alpha_links *>::create_ggc (64);
1330f7d5 9543
735f469b 9544 if (al == NULL)
1330f7d5 9545 {
735f469b 9546 size_t buf_len;
1330f7d5 9547 char *linksym;
39420b1a 9548 tree id;
1330f7d5
DR
9549
9550 if (name[0] == '*')
9551 name++;
9552
39420b1a
TG
9553 /* Follow transparent alias, as this is used for CRTL translations. */
9554 id = maybe_get_identifier (name);
9555 if (id)
9556 {
9557 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9558 id = TREE_CHAIN (id);
9559 name = IDENTIFIER_POINTER (id);
9560 }
9561
735f469b
TG
9562 buf_len = strlen (name) + 8 + 9;
9563 linksym = (char *) alloca (buf_len);
9564 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
1330f7d5 9565
766090c2 9566 al = ggc_alloc<alpha_links> ();
735f469b
TG
9567 al->func = func;
9568 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
1330f7d5 9569
de144fb2 9570 cfun->machine->links->put (ggc_strdup (name), al);
1330f7d5
DR
9571 }
9572
735f469b 9573 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
f676971a 9574
1330f7d5 9575 if (lflag)
0a81f074 9576 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
1330f7d5
DR
9577 else
9578 return al->linkage;
9579}
9580
a82c7f05 9581static int
a6330e85 9582alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
a82c7f05 9583{
735f469b 9584 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
1330f7d5 9585 if (link->rkind == KIND_CODEADDR)
a82c7f05 9586 {
735f469b 9587 /* External and used, request code address. */
39420b1a 9588 fprintf (stream, "\t.code_address ");
a82c7f05
RH
9589 }
9590 else
9591 {
735f469b
TG
9592 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9593 && SYMBOL_REF_LOCAL_P (link->func))
1330f7d5 9594 {
735f469b 9595 /* Locally defined, build linkage pair. */
1330f7d5 9596 fprintf (stream, "\t.quad %s..en\n", name);
39420b1a 9597 fprintf (stream, "\t.quad ");
1330f7d5
DR
9598 }
9599 else
9600 {
735f469b 9601 /* External, request linkage pair. */
39420b1a 9602 fprintf (stream, "\t.linkage ");
1330f7d5 9603 }
a82c7f05 9604 }
39420b1a
TG
9605 assemble_name (stream, name);
9606 fputs ("\n", stream);
a82c7f05
RH
9607
9608 return 0;
9609}
89cfc2c6 9610
1330f7d5 9611static void
735f469b 9612alpha_write_linkage (FILE *stream, const char *funname)
89cfc2c6 9613{
d6b5193b 9614 fprintf (stream, "\t.link\n");
1330f7d5 9615 fprintf (stream, "\t.align 3\n");
d6b5193b
RS
9616 in_section = NULL;
9617
735f469b 9618#ifdef TARGET_VMS_CRASH_DEBUG
1330f7d5
DR
9619 fputs ("\t.name ", stream);
9620 assemble_name (stream, funname);
9621 fputs ("..na\n", stream);
735f469b
TG
9622#endif
9623
1330f7d5
DR
9624 ASM_OUTPUT_LABEL (stream, funname);
9625 fprintf (stream, "\t.pdesc ");
9626 assemble_name (stream, funname);
9627 fprintf (stream, "..en,%s\n",
9628 alpha_procedure_type == PT_STACK ? "stack"
9629 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9630
735f469b 9631 if (cfun->machine->links)
c1bd46a8 9632 {
fb5c464a 9633 hash_map<nofree_string_hash, alpha_links *>::iterator iter
de144fb2
TS
9634 = cfun->machine->links->begin ();
9635 for (; iter != cfun->machine->links->end (); ++iter)
9636 alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
c1bd46a8 9637 }
89cfc2c6
RK
9638}
9639
7c262518
RH
9640/* Switch to an arbitrary section NAME with attributes as specified
9641 by FLAGS. ALIGN specifies any known alignment requirements for
9642 the section; 0 if the default should be used. */
9643
9644static void
c18a5b6c
MM
9645vms_asm_named_section (const char *name, unsigned int flags,
9646 tree decl ATTRIBUTE_UNUSED)
7c262518 9647{
c1bd46a8
DR
9648 fputc ('\n', asm_out_file);
9649 fprintf (asm_out_file, ".section\t%s", name);
7c262518 9650
c1bd46a8
DR
9651 if (flags & SECTION_DEBUG)
9652 fprintf (asm_out_file, ",NOWRT");
9653
9654 fputc ('\n', asm_out_file);
7c262518
RH
9655}
9656
2cc07db4
RH
9657/* Record an element in the table of global constructors. SYMBOL is
9658 a SYMBOL_REF of the function to be called; PRIORITY is a number
f676971a 9659 between 0 and MAX_INIT_PRIORITY.
2cc07db4
RH
9660
9661 Differs from default_ctors_section_asm_out_constructor in that the
9662 width of the .ctors entry is always 64 bits, rather than the 32 bits
9663 used by a normal pointer. */
9664
9665static void
a5c24926 9666vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
2cc07db4 9667{
d6b5193b 9668 switch_to_section (ctors_section);
c8af3574
RH
9669 assemble_align (BITS_PER_WORD);
9670 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
2cc07db4
RH
9671}
9672
9673static void
a5c24926 9674vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
2cc07db4 9675{
d6b5193b 9676 switch_to_section (dtors_section);
c8af3574
RH
9677 assemble_align (BITS_PER_WORD);
9678 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
2cc07db4 9679}
89cfc2c6 9680#else
1330f7d5 9681rtx
b714133e 9682alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
735f469b
TG
9683 bool lflag ATTRIBUTE_UNUSED,
9684 bool rflag ATTRIBUTE_UNUSED)
1330f7d5
DR
9685{
9686 return NULL_RTX;
9687}
9688
be7b80f4 9689#endif /* TARGET_ABI_OPEN_VMS */
30102605 9690\f
c15c90bb
ZW
9691static void
9692alpha_init_libfuncs (void)
9693{
75db85d8 9694 if (TARGET_ABI_OPEN_VMS)
c15c90bb
ZW
9695 {
9696 /* Use the VMS runtime library functions for division and
9697 remainder. */
9698 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9699 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9700 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9701 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9702 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9703 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9704 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9705 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
5e3fef6c
DR
9706#ifdef MEM_LIBFUNCS_INIT
9707 MEM_LIBFUNCS_INIT;
9708#endif
c15c90bb
ZW
9709 }
9710}
9711
5efd84c5
NF
9712/* On the Alpha, we use this to disable the floating-point registers
9713 when they don't exist. */
9714
9715static void
9716alpha_conditional_register_usage (void)
9717{
9718 int i;
9719 if (! TARGET_FPREGS)
9720 for (i = 32; i < 63; i++)
9721 fixed_regs[i] = call_used_regs[i] = 1;
9722}
c354951b
AK
9723
9724/* Canonicalize a comparison from one we don't have to one we do have. */
9725
9726static void
9727alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9728 bool op0_preserve_value)
9729{
9730 if (!op0_preserve_value
9731 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9732 && (REG_P (*op1) || *op1 == const0_rtx))
9733 {
7159f19c 9734 std::swap (*op0, *op1);
c354951b
AK
9735 *code = (int)swap_condition ((enum rtx_code)*code);
9736 }
9737
9738 if ((*code == LT || *code == LTU)
9739 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9740 {
9741 *code = *code == LT ? LE : LEU;
9742 *op1 = GEN_INT (255);
9743 }
9744}
286934b4
UB
9745
9746/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
9747
9748static void
9749alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
9750{
9751 const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);
9752
9753 tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
9754 tree new_fenv_var, reload_fenv, restore_fnenv;
9755 tree update_call, atomic_feraiseexcept, hold_fnclex;
9756
9757 /* Assume OSF/1 compatible interfaces. */
9758 if (!TARGET_ABI_OSF)
9759 return;
9760
9761 /* Generate the equivalent of :
9762 unsigned long fenv_var;
9763 fenv_var = __ieee_get_fp_control ();
9764
9765 unsigned long masked_fenv;
9766 masked_fenv = fenv_var & mask;
9767
9768 __ieee_set_fp_control (masked_fenv); */
9769
07119921 9770 fenv_var = create_tmp_var_raw (long_unsigned_type_node);
286934b4
UB
9771 get_fpscr
9772 = build_fn_decl ("__ieee_get_fp_control",
9773 build_function_type_list (long_unsigned_type_node, NULL));
9774 set_fpscr
9775 = build_fn_decl ("__ieee_set_fp_control",
9776 build_function_type_list (void_type_node, NULL));
9777 mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
9778 ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
9779 fenv_var, build_call_expr (get_fpscr, 0));
9780 masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
9781 hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
9782 *hold = build2 (COMPOUND_EXPR, void_type_node,
9783 build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
9784 hold_fnclex);
9785
9786 /* Store the value of masked_fenv to clear the exceptions:
9787 __ieee_set_fp_control (masked_fenv); */
9788
9789 *clear = build_call_expr (set_fpscr, 1, masked_fenv);
9790
9791 /* Generate the equivalent of :
9792 unsigned long new_fenv_var;
9793 new_fenv_var = __ieee_get_fp_control ();
9794
9795 __ieee_set_fp_control (fenv_var);
9796
9797 __atomic_feraiseexcept (new_fenv_var); */
9798
07119921 9799 new_fenv_var = create_tmp_var_raw (long_unsigned_type_node);
286934b4
UB
9800 reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
9801 build_call_expr (get_fpscr, 0));
9802 restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
9803 atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
9804 update_call
9805 = build_call_expr (atomic_feraiseexcept, 1,
9806 fold_convert (integer_type_node, new_fenv_var));
9807 *update = build2 (COMPOUND_EXPR, void_type_node,
9808 build2 (COMPOUND_EXPR, void_type_node,
9809 reload_fenv, restore_fnenv), update_call);
9810}
f939c3e6
RS
9811
9812/* Implement TARGET_HARD_REGNO_MODE_OK. On Alpha, the integer registers
9813 can hold any mode. The floating-point registers can hold 64-bit
9814 integers as well, but not smaller values. */
9815
9816static bool
9817alpha_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9818{
9819 if (IN_RANGE (regno, 32, 62))
9820 return (mode == SFmode
9821 || mode == DFmode
9822 || mode == DImode
9823 || mode == SCmode
9824 || mode == DCmode);
9825 return true;
9826}
99e1629f
RS
9827
9828/* Implement TARGET_MODES_TIEABLE_P. This asymmetric test is true when
9829 MODE1 could be put in an FP register but MODE2 could not. */
9830
9831static bool
9832alpha_modes_tieable_p (machine_mode mode1, machine_mode mode2)
9833{
9834 return (alpha_hard_regno_mode_ok (32, mode1)
9835 ? alpha_hard_regno_mode_ok (32, mode2)
9836 : true);
9837}
0d803030
RS
9838
9839/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
9840
9841static bool
9842alpha_can_change_mode_class (machine_mode from, machine_mode to,
9843 reg_class_t rclass)
9844{
9845 return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
9846 || !reg_classes_intersect_p (FLOAT_REGS, rclass));
9847}
a5c24926
RH
9848\f
9849/* Initialize the GCC target structure. */
9850#if TARGET_ABI_OPEN_VMS
9851# undef TARGET_ATTRIBUTE_TABLE
9852# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
7b5cbb57
AS
9853# undef TARGET_CAN_ELIMINATE
9854# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
a5c24926
RH
9855#endif
9856
9857#undef TARGET_IN_SMALL_DATA_P
9858#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9859
a5c24926
RH
9860#undef TARGET_ASM_ALIGNED_HI_OP
9861#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9862#undef TARGET_ASM_ALIGNED_DI_OP
9863#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9864
9865/* Default unaligned ops are provided for ELF systems. To get unaligned
9866 data for non-ELF systems, we have to turn off auto alignment. */
46e1a769 9867#if TARGET_ABI_OPEN_VMS
a5c24926
RH
9868#undef TARGET_ASM_UNALIGNED_HI_OP
9869#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9870#undef TARGET_ASM_UNALIGNED_SI_OP
9871#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9872#undef TARGET_ASM_UNALIGNED_DI_OP
9873#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9874#endif
9875
9b580a0b
RH
9876#undef TARGET_ASM_RELOC_RW_MASK
9877#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
a5c24926
RH
9878#undef TARGET_ASM_SELECT_RTX_SECTION
9879#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
ae069803
RH
9880#undef TARGET_SECTION_TYPE_FLAGS
9881#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
a5c24926
RH
9882
9883#undef TARGET_ASM_FUNCTION_END_PROLOGUE
9884#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9885
c15c90bb
ZW
9886#undef TARGET_INIT_LIBFUNCS
9887#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9888
506d7b68
PB
9889#undef TARGET_LEGITIMIZE_ADDRESS
9890#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
b0f6b612
NF
9891#undef TARGET_MODE_DEPENDENT_ADDRESS_P
9892#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
506d7b68 9893
1bc7c5b6
ZW
9894#undef TARGET_ASM_FILE_START
9895#define TARGET_ASM_FILE_START alpha_file_start
1bc7c5b6 9896
a5c24926
RH
9897#undef TARGET_SCHED_ADJUST_COST
9898#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9899#undef TARGET_SCHED_ISSUE_RATE
9900#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
a5c24926
RH
9901#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9902#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9903 alpha_multipass_dfa_lookahead
9904
9905#undef TARGET_HAVE_TLS
9906#define TARGET_HAVE_TLS HAVE_AS_TLS
9907
fd930388
RH
9908#undef TARGET_BUILTIN_DECL
9909#define TARGET_BUILTIN_DECL alpha_builtin_decl
a5c24926
RH
9910#undef TARGET_INIT_BUILTINS
9911#define TARGET_INIT_BUILTINS alpha_init_builtins
9912#undef TARGET_EXPAND_BUILTIN
9913#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
36013987
RH
9914#undef TARGET_FOLD_BUILTIN
9915#define TARGET_FOLD_BUILTIN alpha_fold_builtin
b6db8af6
UB
9916#undef TARGET_GIMPLE_FOLD_BUILTIN
9917#define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin
a5c24926
RH
9918
9919#undef TARGET_FUNCTION_OK_FOR_SIBCALL
9920#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9921#undef TARGET_CANNOT_COPY_INSN_P
9922#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
1a627b35
RS
9923#undef TARGET_LEGITIMATE_CONSTANT_P
9924#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
04886dc0
RH
9925#undef TARGET_CANNOT_FORCE_CONST_MEM
9926#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
a5c24926
RH
9927
9928#if TARGET_ABI_OSF
9929#undef TARGET_ASM_OUTPUT_MI_THUNK
9930#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9931#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 9932#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
3f620b5f
RH
9933#undef TARGET_STDARG_OPTIMIZE_HOOK
9934#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
a5c24926
RH
9935#endif
9936
f83e2262
UB
9937#undef TARGET_PRINT_OPERAND
9938#define TARGET_PRINT_OPERAND alpha_print_operand
9939#undef TARGET_PRINT_OPERAND_ADDRESS
9940#define TARGET_PRINT_OPERAND_ADDRESS alpha_print_operand_address
9941#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
9942#define TARGET_PRINT_OPERAND_PUNCT_VALID_P alpha_print_operand_punct_valid_p
9943
f7a57cdc
TG
9944/* Use 16-bits anchor. */
9945#undef TARGET_MIN_ANCHOR_OFFSET
9946#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
9947#undef TARGET_MAX_ANCHOR_OFFSET
9948#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
9949#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
9950#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
9951
ef995717
UB
9952#undef TARGET_REGISTER_MOVE_COST
9953#define TARGET_REGISTER_MOVE_COST alpha_register_move_cost
9954#undef TARGET_MEMORY_MOVE_COST
9955#define TARGET_MEMORY_MOVE_COST alpha_memory_move_cost
a5c24926
RH
9956#undef TARGET_RTX_COSTS
9957#define TARGET_RTX_COSTS alpha_rtx_costs
9958#undef TARGET_ADDRESS_COST
b413068c 9959#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
a5c24926
RH
9960
9961#undef TARGET_MACHINE_DEPENDENT_REORG
9962#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9963
cde0f3fd
PB
9964#undef TARGET_PROMOTE_FUNCTION_MODE
9965#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
f93c2180 9966#undef TARGET_PROMOTE_PROTOTYPES
586de218 9967#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
ef995717
UB
9968
9969#undef TARGET_FUNCTION_VALUE
9970#define TARGET_FUNCTION_VALUE alpha_function_value
9971#undef TARGET_LIBCALL_VALUE
9972#define TARGET_LIBCALL_VALUE alpha_libcall_value
9973#undef TARGET_FUNCTION_VALUE_REGNO_P
9974#define TARGET_FUNCTION_VALUE_REGNO_P alpha_function_value_regno_p
f93c2180
RH
9975#undef TARGET_RETURN_IN_MEMORY
9976#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
8cd5a4e0
RH
9977#undef TARGET_PASS_BY_REFERENCE
9978#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
f93c2180
RH
9979#undef TARGET_SETUP_INCOMING_VARARGS
9980#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9981#undef TARGET_STRICT_ARGUMENT_NAMING
9982#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9983#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9984#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
42ba5130
RH
9985#undef TARGET_SPLIT_COMPLEX_ARG
9986#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
28245018
RH
9987#undef TARGET_GIMPLIFY_VA_ARG_EXPR
9988#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
78a52f11
RH
9989#undef TARGET_ARG_PARTIAL_BYTES
9990#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
0c3a9758
NF
9991#undef TARGET_FUNCTION_ARG
9992#define TARGET_FUNCTION_ARG alpha_function_arg
9993#undef TARGET_FUNCTION_ARG_ADVANCE
9994#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
2d7b663a
RH
9995#undef TARGET_TRAMPOLINE_INIT
9996#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
6dd53648 9997
1e46eb2a
UB
9998#undef TARGET_INSTANTIATE_DECLS
9999#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
10000
48f46219
RH
10001#undef TARGET_SECONDARY_RELOAD
10002#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
f15643d4
RS
10003#undef TARGET_SECONDARY_MEMORY_NEEDED
10004#define TARGET_SECONDARY_MEMORY_NEEDED alpha_secondary_memory_needed
94e23f53
RS
10005#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
10006#define TARGET_SECONDARY_MEMORY_NEEDED_MODE alpha_secondary_memory_needed_mode
48f46219 10007
6dd53648
RH
10008#undef TARGET_SCALAR_MODE_SUPPORTED_P
10009#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
f676971a
EC
10010#undef TARGET_VECTOR_MODE_SUPPORTED_P
10011#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
f93c2180 10012
c35d187f
RH
10013#undef TARGET_BUILD_BUILTIN_VA_LIST
10014#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10015
d7bd8aeb
JJ
10016#undef TARGET_EXPAND_BUILTIN_VA_START
10017#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10018
c5387660
JM
10019#undef TARGET_OPTION_OVERRIDE
10020#define TARGET_OPTION_OVERRIDE alpha_option_override
10021
ad2c39af
UB
10022#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
10023#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
10024 alpha_override_options_after_change
10025
7269aee7 10026#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
608063c3
JB
10027#undef TARGET_MANGLE_TYPE
10028#define TARGET_MANGLE_TYPE alpha_mangle_type
7269aee7
AH
10029#endif
10030
d81db636
SB
10031#undef TARGET_LRA_P
10032#define TARGET_LRA_P hook_bool_void_false
10033
c6c3dba9
PB
10034#undef TARGET_LEGITIMATE_ADDRESS_P
10035#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10036
5efd84c5
NF
10037#undef TARGET_CONDITIONAL_REGISTER_USAGE
10038#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
10039
c354951b
AK
10040#undef TARGET_CANONICALIZE_COMPARISON
10041#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
10042
286934b4
UB
10043#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
10044#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv
10045
f939c3e6
RS
10046#undef TARGET_HARD_REGNO_MODE_OK
10047#define TARGET_HARD_REGNO_MODE_OK alpha_hard_regno_mode_ok
10048
99e1629f
RS
10049#undef TARGET_MODES_TIEABLE_P
10050#define TARGET_MODES_TIEABLE_P alpha_modes_tieable_p
10051
0d803030
RS
10052#undef TARGET_CAN_CHANGE_MODE_CLASS
10053#define TARGET_CAN_CHANGE_MODE_CLASS alpha_can_change_mode_class
10054
a5c24926
RH
10055struct gcc_target targetm = TARGET_INITIALIZER;
10056
10057\f
e2500fed 10058#include "gt-alpha.h"