/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "df.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "debug.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "tree-pass.h"
#include "context.h"
#include "pass_manager.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
#include "libfuncs.h"
#include "opts.h"
#include "params.h"
#include "builtins.h"
#include "rtl-iter.h"

/* This file should be included last.  */
#include "target-def.h"

/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;

/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),          /* fp_add */
    COSTS_N_INSNS (6),          /* fp_mult */
    COSTS_N_INSNS (34),         /* fp_div_sf */
    COSTS_N_INSNS (63),         /* fp_div_df */
    COSTS_N_INSNS (23),         /* int_mult_si */
    COSTS_N_INSNS (23),         /* int_mult_di */
    COSTS_N_INSNS (2),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (97),         /* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (15),         /* fp_div_sf */
    COSTS_N_INSNS (22),         /* fp_div_df */
    COSTS_N_INSNS (8),          /* int_mult_si */
    COSTS_N_INSNS (12),         /* int_mult_di */
    COSTS_N_INSNS (1) + 1,      /* int_shift */
    COSTS_N_INSNS (1),          /* int_cmov */
    COSTS_N_INSNS (83),         /* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (12),         /* fp_div_sf */
    COSTS_N_INSNS (15),         /* fp_div_df */
    COSTS_N_INSNS (7),          /* int_mult_si */
    COSTS_N_INSNS (7),          /* int_mult_di */
    COSTS_N_INSNS (1),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (86),         /* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1) + 1,        /* fp_div_df */
  COSTS_N_INSNS (1) + 1,        /* int_mult_si */
  COSTS_N_INSNS (1) + 2,        /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_shift */
  COSTS_N_INSNS (1),            /* int_cmov */
  COSTS_N_INSNS (6),            /* int_div */
};

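/* Editor's illustration of the fractional tuning above (not part of the
   original source; assumes COSTS_N_INSNS (N) is (N) * 4 as in rtl.h):
   in the size table an integer DImode multiply costs
   COSTS_N_INSNS (1) + 2 == 6 while a shift costs COSTS_N_INSNS (1) == 4.
   So a single shift (4) is preferred over one multiply (6), but a
   two-shift replacement (8) is not -- which is exactly the "only if
   there's just one of them" behavior described in the comment.  */
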
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif

#define REG_PV 27
#define REG_RA 26

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
static void alpha_handle_trap_shadows (void);
static void alpha_align_insns (void);
static void alpha_override_options_after_change (void);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (machine_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif

static unsigned int
rest_of_handle_trap_shadows (void)
{
  alpha_handle_trap_shadows ();
  return 0;
}

namespace {

const pass_data pass_data_handle_trap_shadows =
{
  RTL_PASS,
  "trap_shadows",               /* name */
  OPTGROUP_NONE,                /* optinfo_flags */
  TV_NONE,                      /* tv_id */
  0,                            /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_df_finish,               /* todo_flags_finish */
};

class pass_handle_trap_shadows : public rtl_opt_pass
{
public:
  pass_handle_trap_shadows(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_handle_trap_shadows ();
    }

}; // class pass_handle_trap_shadows

} // anon namespace

rtl_opt_pass *
make_pass_handle_trap_shadows (gcc::context *ctxt)
{
  return new pass_handle_trap_shadows (ctxt);
}

static unsigned int
rest_of_align_insns (void)
{
  alpha_align_insns ();
  return 0;
}

namespace {

const pass_data pass_data_align_insns =
{
  RTL_PASS,
  "align_insns",                /* name */
  OPTGROUP_NONE,                /* optinfo_flags */
  TV_NONE,                      /* tv_id */
  0,                            /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_df_finish,               /* todo_flags_finish */
};

class pass_align_insns : public rtl_opt_pass
{
public:
  pass_align_insns(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_align_insns, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* Due to the number of extra trapb insns, don't bother fixing up
         alignment when trap precision is instruction.  Moreover, we can
         only do our job when sched2 is run.  */
      return ((alpha_tune == PROCESSOR_EV4
               || alpha_tune == PROCESSOR_EV5)
              && optimize && !optimize_size
              && alpha_tp != ALPHA_TP_INSN
              && flag_schedule_insns_after_reload);
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_align_insns ();
    }

}; // class pass_align_insns

} // anon namespace

rtl_opt_pass *
make_pass_align_insns (gcc::context *ctxt)
{
  return new pass_align_insns (ctxt);
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
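
/* Editor's illustration (an assumption for exposition, not from the
   original file): under TARGET_LONG_DOUBLE_128 the hook above makes
   128-bit "long double" mangle as "g" -- the Itanium C++ ABI code for
   __float128 -- instead of the default "e", so a declaration such as

     void f (long double);

   would be emitted as _Z1fg rather than _Z1fe.  */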

/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
    const unsigned short line_size; /* in bytes */
    const unsigned short l1_size;   /* in kb.  */
    const unsigned short l2_size;   /* in kb.  */
  } cpu_table[] = {
    /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
       EV4/EV45 had 128k to 16M 32-byte direct Bcache.  LCA45
       had 64k to 8M 8-byte direct Bcache.  */
    { "ev4",     PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "21064",   PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "ev45",    PROCESSOR_EV4, 0, 32, 16, 16*1024 },

    /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
       and 1M to 16M 64 byte L3 (not modeled).
       PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
       PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache.  */
    { "ev5",     PROCESSOR_EV5, 0, 32, 8, 96 },
    { "21164",   PROCESSOR_EV5, 0, 32, 8, 96 },
    { "ev56",    PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "21164a",  PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "pca56",   PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164PC", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164pc", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },

    /* EV6 had 64k 64 byte L1, 1M to 16M Bcache.  */
    { "ev6",     PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "21264",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "ev67",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
    { "21264a",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 }
  };

  opt_pass *pass_handle_trap_shadows = make_pass_handle_trap_shadows (g);
  struct register_pass_info handle_trap_shadows_info
    = { pass_handle_trap_shadows, "eh_ranges",
        1, PASS_POS_INSERT_AFTER
      };

  opt_pass *pass_align_insns = make_pass_align_insns (g);
  struct register_pass_info align_insns_info
    = { pass_align_insns, "shorten",
        1, PASS_POS_INSERT_BEFORE
      };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int line_size = 0, l1_size = 0, l2_size = 0;
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_tune = alpha_cpu = cpu_table[i].processor;
            line_size = cpu_table[i].line_size;
            l1_size = cpu_table[i].l1_size;
            l2_size = cpu_table[i].l2_size;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table[i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_tune = cpu_table[i].processor;
            line_size = cpu_table[i].line_size;
            l1_size = cpu_table[i].l1_size;
            l2_size = cpu_table[i].l2_size;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mtune switch", alpha_tune_string);
    }

  if (line_size)
    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  if (l1_size)
    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  if (l2_size)
    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);

  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }
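
  /* Editor's summary of the block above (examples are illustrative,
     not from the original source): the accepted forms are

       -mmemory-latency=5     an explicit cycle count,
       -mmemory-latency=L2    the latency of a cache level (L1..L3),
       -mmemory-latency=main  a guess at main-memory latency,

     with anything else warning and falling back to a latency of 3.  */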

  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  alpha_override_options_after_change ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  /* This needs to be done at start up.  It's convenient to do it here.  */
  register_pass (&handle_trap_shadows_info);
  register_pass (&align_insns_info);
}

/* Implement targetm.override_options_after_change.  */

static void
alpha_override_options_after_change (void)
{
  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize.  */
  if (optimize > 0)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
}

/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}

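/* Editor's illustration (not part of the original source): zap_mask
   accepts exactly the byte-granular masks realizable by the ZAP/ZAPNOT
   instructions, e.g.

     zap_mask (0xffffffff00000000)  == 1   whole bytes of ones and zeros
     zap_mask (0x00000000000000ff)  == 1   a single full byte
     zap_mask (0x0000000000000f00)  == 0   a partially-set byte  */
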
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}

/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc (REGNO (tmp));
          if (op == 0)
            return 0;
        }
    }
  return op;
}

/* The set of scalar modes supported differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}

/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && crtl->outgoing_args_size == 0
          && crtl->args.pretend_args_size == 0);
}

/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}

/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}

/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* COMMON symbols are never small data.  */
  if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
        return true;
    }

  return false;
}
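
/* Editor's example (illustrative, not from the original source): with
   the default -G 8 threshold set above, a definition such as

     int counter;      4 bytes  -> .sdata/.sbss (gp-relative access)

   qualifies as small data and is reachable from $gp with a single
   16-bit displacement, whereas

     char buf[64];    64 bytes  -> ordinary .data/.bss

   exceeds g_switch_value and stays in the normal sections.  */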

#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif

/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since e.g. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
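
/* Editor's summary of the accepted forms (illustrative RTL, not from
   the original source):

     (reg $1)                                        base register
     (plus (reg $1) (const_int 64))                  register + 16-bit offset
     (and (plus (reg $1) (const_int N)) (const_int -8))
                                                     DImode ldq_u-style access
     (lo_sum (reg $1) (symbol_ref local))            explicit-reloc low part

   plus bare constant addresses within +/- 32k.  */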

/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          dest = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
          insn = gen_call_value_osf_tlsgd (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          emit_libcall_block (insn, dest, r0, x);
          return dest;

        case TLS_MODEL_LOCAL_DYNAMIC:
          start_sequence ();

          r0 = gen_rtx_REG (Pmode, 0);
          r16 = gen_rtx_REG (Pmode, 16);
          tga = get_tls_get_addr ();
          scratch = gen_reg_rtx (Pmode);
          seq = GEN_INT (alpha_next_sequence_number++);

          emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
          insn = gen_call_value_osf_tlsldm (r0, tga, seq);
          insn = emit_call_insn (insn);
          RTL_CONST_CALL_P (insn) = 1;
          use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

          insn = get_insns ();
          end_sequence ();

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_TLSLDM_CALL);
          emit_libcall_block (insn, scratch, r0, eqv);

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);

          if (alpha_tls_size == 64)
            {
              dest = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (dest, eqv));
              emit_insn (gen_adddi3 (dest, dest, scratch));
              return dest;
            }
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, scratch, insn);
              scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (scratch, insn));
            }
          return gen_rtx_LO_SUM (Pmode, scratch, eqv);

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_get_thread_pointerdi (tp));
          emit_insn (gen_rtx_SET (scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_get_thread_pointerdi (tp));
          if (alpha_tls_size == 32)
            {
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, tp, insn);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (tp, insn));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }

      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (Pmode, x, low);
  }
}
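
/* Editor's worked example for split_addend (not in the original file):
   for addend 0x9000, low = ((0x9000 ^ 0x8000) - 0x8000) = -0x7000, so
   the remaining addend becomes 0x10000 and high = 0x10000.  The address
   is then materialized as an "ldah 1" high adjustment plus an lda
   displacement of -0x7000; the sign-extending ^/- trick keeps each
   16-bit piece within the signed range of ldah/lda.  */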

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}

/* Return true if ADDR has an effect that depends on the machine mode it
   is used for.  On the Alpha this is true only for the unaligned modes.
   We can simplify the test since we know that the address must be valid.  */

static bool
alpha_mode_dependent_address_p (const_rtx addr,
                                addr_space_t as ATTRIBUTE_UNUSED)
{
  return GET_CODE (addr) == AND;
}

/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}

bool
some_small_symbolic_operand_int (rtx x)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
    {
      rtx x = *iter;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
        iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
        return true;
    }
  return false;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
    {
      rtx *ptr = *iter;
      rtx x = *ptr;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
        iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
        {
          *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
          iter.skip_subrtxes ();
        }
    }
  return x;
}

/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx_insn *insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}


/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && CONST_INT_P (XEXP (x, 1)))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
                 bool speed)
{
  int code = GET_CODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode,
                              (enum rtx_code) outer_code, opno, speed)
                    + rtx_cost (XEXP (x, 1), mode,
                                (enum rtx_code) outer_code, opno, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}

/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}

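/* Editor's worked example for get_aligned_mem (illustrative, not from
   the original source): for an HImode REF at $9+6 with MEM_ALIGN < 32,
   offset = 6 & 3 = 2, so *PALIGNED_MEM becomes the SImode word at $9+4
   and *PBITNUM is 16: the halfword is recovered by loading the aligned
   longword and extracting from bit 16.  */
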
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (Pmode, base, offset);
}

/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
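
/* Editor's example for get_unaligned_offset (not in the original file):
   for ADDR == (plus (reg $9) (const_int 13)) and OFS == 2, the call
   computes $9 + ((13 + 2) & 7) == $9 + 7, whose low three bits match
   those of ADDR + OFS -- which is all the unaligned-access expanders
   need in order to locate the byte position within a quadword.  */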
3611aef0 1610
551cc6fd 1611/* On the Alpha, all (non-symbolic) constants except zero go into
f676971a 1612 a floating-point register via memory. Note that we cannot
0a2aaacc 1613 return anything that is not a subset of RCLASS, and that some
551cc6fd
RH
1614 symbolic constants cannot be dropped to memory. */
1615
1616enum reg_class
0a2aaacc 1617alpha_preferred_reload_class(rtx x, enum reg_class rclass)
551cc6fd
RH
1618{
1619 /* Zero is present in any register class. */
1620 if (x == CONST0_RTX (GET_MODE (x)))
0a2aaacc 1621 return rclass;
551cc6fd
RH
1622
1623 /* These sorts of constants we can easily drop to memory. */
c799797d
UB
1624 if (CONST_SCALAR_INT_P (x)
1625 || CONST_DOUBLE_P (x)
72910a0b 1626 || GET_CODE (x) == CONST_VECTOR)
551cc6fd 1627 {
0a2aaacc 1628 if (rclass == FLOAT_REGS)
551cc6fd 1629 return NO_REGS;
0a2aaacc 1630 if (rclass == ALL_REGS)
551cc6fd 1631 return GENERAL_REGS;
0a2aaacc 1632 return rclass;
551cc6fd
RH
1633 }
1634
1635 /* All other kinds of constants should not (and in the case of HIGH
1636 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1637 secondary reload. */
1638 if (CONSTANT_P (x))
0a2aaacc 1639 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
551cc6fd 1640
0a2aaacc 1641 return rclass;
551cc6fd
RH
1642}
1643
48f46219 1644/* Inform reload about cases where moving X with a mode MODE to a register in
0a2aaacc 1645 RCLASS requires an extra scratch or immediate register. Return the class
48f46219 1646 needed for the immediate register. */
3611aef0 1647
a87cf97e
JR
1648static reg_class_t
1649alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
ef4bddc2 1650 machine_mode mode, secondary_reload_info *sri)
3611aef0 1651{
a87cf97e
JR
1652 enum reg_class rclass = (enum reg_class) rclass_i;
1653
48f46219
RH
1654 /* Loading and storing HImode or QImode values to and from memory
1655 usually requires a scratch register. */
1656 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
35a414df 1657 {
48f46219 1658 if (any_memory_operand (x, mode))
35a414df 1659 {
48f46219
RH
1660 if (in_p)
1661 {
1662 if (!aligned_memory_operand (x, mode))
f9621cc4 1663 sri->icode = direct_optab_handler (reload_in_optab, mode);
48f46219
RH
1664 }
1665 else
f9621cc4 1666 sri->icode = direct_optab_handler (reload_out_optab, mode);
48f46219 1667 return NO_REGS;
35a414df
RH
1668 }
1669 }
3611aef0 1670
48f46219
RH
1671 /* We also cannot do integral arithmetic into FP regs, as might result
1672 from register elimination into a DImode fp register. */
0a2aaacc 1673 if (rclass == FLOAT_REGS)
3611aef0 1674 {
48f46219 1675 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
3611aef0 1676 return GENERAL_REGS;
48f46219
RH
1677 if (in_p && INTEGRAL_MODE_P (mode)
1678 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
3611aef0
RH
1679 return GENERAL_REGS;
1680 }
1681
1682 return NO_REGS;
1683}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the volatile, no-trap, and
   read-only flags from REF into each of the MEMs found.  If REF is
   not a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx_insn *insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  subrtx_var_iterator::array_type array;
  for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx x = *iter;
          if (MEM_P (x))
            {
              MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
              MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
              MEM_READONLY_P (x) = MEM_READONLY_P (ref);
              /* Sadly, we cannot use alias sets because the extra
                 aliasing produced by the AND interferes.  Given that
                 two-byte quantities are the only thing we would be
                 able to differentiate anyway, there does not seem to
                 be any point in convoluting the early out of the
                 alias check.  */
              iter.skip_subrtxes ();
            }
        }
    else
      gcc_unreachable ();
}

static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
                                 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or fewer insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  */

  if (c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }

      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (!can_create_pseudo_p ())
            {
              emit_insn (gen_rtx_SET (target, GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (target, insn);
          emit_insn (insn);
          return target;
        }
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once for each
     increasing number of insns allowed.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if, minus some low bits, we have an easy load of
         the high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
                                   target, 0, OPTAB_WIDEN);
            }
        }

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp && c < 0)
              {
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                     target, 1, OPTAB_WIDEN);
              }
          }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
    }

  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
                               target, 0, OPTAB_WIDEN);
        }
    }

  return 0;
}
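
/* Worked example (illustrative commentary, not in the original sources):
   the ((x & 0xffff) ^ 0x8000) - 0x8000 idiom above sign-extends the low
   16 bits, matching the sign-extending 16-bit displacements of lda/ldah.
   For c = 0x12345678 we get low = 0x5678 and high = 0x1234, so with
   n >= 2 the emitted sequence is

        ldah    $t,0x1234($31)          ; $t = 0x12340000
        lda     $t,0x5678($t)           ; $t = 0x12345678  */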

/* Try to output insns to set TARGET equal to the constant C if it can be
   done in at most N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
{
  machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do it in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
        return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
        {
          rtx_insn *insn;
          rtx set;

          if (no_output)
            return result;

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
          break;
        }
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
        result = orig_target;
      else if (mode != orig_mode)
        result = gen_lowpart (orig_mode, result);
    }

  return result;
}

/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */

  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
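
/* Worked example (illustrative commentary, not in the original sources):
   for c1 = 0x1234567887654321 the decomposition yields d1 = 0x4321,
   d2 = -0x789b0000, d3 = 0x5679, d4 = 0x12340000, producing

        ldah    $t,0x1234($31)          ; 0x12340000
        lda     $t,0x5679($t)           ; 0x12345679
        sll     $t,32,$t                ; 0x1234567900000000
        ldah    $t,-0x789b($t)          ; 0x1234567887650000
        lda     $t,0x4321($t)           ; 0x1234567887654321  */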

/* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits.  */

static HOST_WIDE_INT
alpha_extract_integer (rtx x)
{
  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  gcc_assert (CONST_INT_P (x));

  return INTVAL (x);
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
        x = XEXP (XEXP (x, 0), 0);
      else
        return true;

      if (GET_CODE (x) != SYMBOL_REF)
        return true;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_WIDE_INT:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      if (x == CONST0_RTX (mode))
        return true;
      mode = DImode;
      gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2);
      i0 = CONST_WIDE_INT_ELT (x, 1);
      if (alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL)
        return false;
      i0 = CONST_WIDE_INT_ELT (x, 0);
      goto do_integer;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        return true;
      return false;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
        return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
        return false;
      if (GET_MODE_SIZE (mode) != 8)
        return false;
      /* FALLTHRU */

    case CONST_INT:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      i0 = alpha_extract_integer (x);
    do_integer:
      return alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL;

    default:
      return false;
    }
}
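
/* For illustration (added commentary): with TARGET_BUILD_CONSTANTS off,
   0x12345678 is legitimate here (an ldah+lda pair), whereas a full 64-bit
   constant such as 0x1234567887654321 is rejected and ends up being
   dropped to memory by alpha_expand_mov -- or, under -mbuild-constants,
   synthesized inline by alpha_emit_set_long_const.  */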

/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0;
  rtx temp = NULL_RTX;

  i0 = alpha_extract_integer (operands[1]);

  temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
        emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
2189
23296a36
RH
2190/* Expand a move instruction; return true if all work is done.
2191 We don't handle non-bwx subword loads here. */
2192
2193bool
ef4bddc2 2194alpha_expand_mov (machine_mode mode, rtx *operands)
23296a36 2195{
9dadeeb8
UB
2196 rtx tmp;
2197
23296a36 2198 /* If the output is not a register, the input must be. */
7d83f4f5 2199 if (MEM_P (operands[0])
23296a36
RH
2200 && ! reg_or_0_operand (operands[1], mode))
2201 operands[1] = force_reg (mode, operands[1]);
2202
551cc6fd 2203 /* Allow legitimize_address to perform some simplifications. */
d3e98208 2204 if (mode == Pmode && symbolic_operand (operands[1], mode))
1eb356b9 2205 {
506d7b68 2206 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
551cc6fd 2207 if (tmp)
133d3133 2208 {
6f9b006d
RH
2209 if (tmp == operands[0])
2210 return true;
551cc6fd 2211 operands[1] = tmp;
e2c9fb9b
RH
2212 return false;
2213 }
1eb356b9
RH
2214 }
2215
23296a36
RH
2216 /* Early out for non-constants and valid constants. */
2217 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2218 return false;
2219
2220 /* Split large integers. */
7d83f4f5 2221 if (CONST_INT_P (operands[1])
72910a0b 2222 || GET_CODE (operands[1]) == CONST_VECTOR)
23296a36 2223 {
72910a0b
RH
2224 if (alpha_split_const_mov (mode, operands))
2225 return true;
23296a36
RH
2226 }
2227
2228 /* Otherwise we've nothing left but to drop the thing to memory. */
9dadeeb8
UB
2229 tmp = force_const_mem (mode, operands[1]);
2230
2231 if (tmp == NULL_RTX)
2232 return false;
2233
23296a36
RH
2234 if (reload_in_progress)
2235 {
9dadeeb8
UB
2236 emit_move_insn (operands[0], XEXP (tmp, 0));
2237 operands[1] = replace_equiv_address (tmp, operands[0]);
23296a36
RH
2238 }
2239 else
9dadeeb8 2240 operands[1] = validize_mem (tmp);
23296a36
RH
2241 return false;
2242}

/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
        {
          if (reload_in_progress)
            {
              if (mode == QImode)
                seq = gen_reload_inqi_aligned (operands[0], operands[1]);
              else
                seq = gen_reload_inhi_aligned (operands[0], operands[1]);
              emit_insn (seq);
            }
          else
            {
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);
              rtx subtarget;
              bool copyout;

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (REG_P (subtarget))
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
              else
                subtarget = gen_reg_rtx (DImode), copyout = true;

              if (mode == QImode)
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
                                          bitnum, scratch);
              else
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
                                          bitnum, scratch);
              emit_insn (seq);

              if (copyout)
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
            }
        }
      else
        {
          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, subtarget, ua;
          bool copyout;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (REG_P (subtarget))
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
          else
            subtarget = gen_reg_rtx (DImode), copyout = true;

          ua = get_unaligned_address (operands[1]);
          if (mode == QImode)
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
          else
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

          alpha_set_memflags (seq, operands[1]);
          emit_insn (seq);

          if (copyout)
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
        }
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
        {
          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
                                        temp1, temp2));
        }
      else
        {
          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx ua = get_unaligned_address (operands[0]);

          if (mode == QImode)
            seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
          else
            seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

          alpha_set_memflags (seq, operands[0]);
          emit_insn (seq);
        }
      return true;
    }

  return false;
}

/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to move the data.  */

void
alpha_expand_movmisalign (machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads; we promised to handle them.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
        tmp = operands[0];
      else
        tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
        emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
        operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}

/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

        srl     $16,1,$1
        and     $16,1,$2
        cmplt   $16,0,$3
        or      $1,$2,$2
        cmovge  $16,$16,$2
        itoft   $3,$f10
        itoft   $2,$f11
        cvtqs   $f11,$f11
        adds    $f11,$f11,$f0
        fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

        LC0: .long 0,0x5f800000

        itoft   $16,$f11
        lda     $2,LC0
        cmplt   $16,0,$1
        cpyse   $f11,$f31,$f10
        cpyse   $f31,$f11,$f11
        s4addq  $1,$2,$1
        lds     $f12,0($1)
        cvtqt   $f10,$f10
        cvtqt   $f11,$f11
        addt    $f12,$f10,$f0
        addt    $f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */

void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
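
/* Note (added commentary, not in the original sources): the negative
   path computes i0 = (in >> 1) | (in & 1) and returns float(i0) + float(i0).
   OR-ing the discarded low bit back into i0 makes the halved value round
   to odd, so the final doubling reproduces the correctly rounded result
   of converting the full unsigned 64-bit value.  */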

/* Generate the comparison for a conditional branch.  */

void
alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
{
  enum rtx_code cmp_code, branch_code;
  machine_mode branch_mode = VOIDmode;
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1], op1 = operands[2];
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (cmp_mode == DFmode)
        {
          cmp_code = swap_condition (code);
          branch_code = NE;
          std::swap (op0, op1);
        }
      else
        {
          cmp_code = reverse_condition (code);
          branch_code = EQ;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DFmode)
    {
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
        {
          /* When we are not as concerned about non-finite values, and we
             are comparing against zero, we can branch directly.  */
          if (op1 == CONST0_RTX (DFmode))
            cmp_code = UNKNOWN, branch_code = code;
          else if (op0 == CONST0_RTX (DFmode))
            {
              /* Undo the swap we probably did just above.  */
              std::swap (op0, op1);
              branch_code = swap_condition (cmp_code);
              cmp_code = UNKNOWN;
            }
        }
      else
        {
          /* ??? We mark the branch mode to be CCmode to prevent the
             compare and branch from being combined, since the compare
             insn follows IEEE rules that the branch does not.  */
          branch_mode = CCmode;
        }
    }
  else
    {
      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
        {
          /* Whee.  Compare and branch against 0 directly.  */
          if (op1 == const0_rtx)
            cmp_code = UNKNOWN, branch_code = code;

          /* If the constant doesn't fit into an immediate, but can
             be generated by lda/ldah, we adjust the argument and
             compare against zero, so we can use beq/bne directly.  */
          /* ??? Don't do this when comparing against symbols, otherwise
             we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
             be declared false out of hand (at least for non-weak).  */
          else if (CONST_INT_P (op1)
                   && (code == EQ || code == NE)
                   && !(symbolic_operand (op0, VOIDmode)
                        || (REG_P (op0) && REG_POINTER (op0))))
            {
              rtx n_op1 = GEN_INT (-INTVAL (op1));

              if (! satisfies_constraint_I (op1)
                  && (satisfies_constraint_K (n_op1)
                      || satisfies_constraint_L (n_op1)))
                cmp_code = PLUS, branch_code = code, op1 = n_op1;
            }
        }

      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Emit the branch instruction.  */
  tem = gen_rtx_SET (pc_rtx,
                     gen_rtx_IF_THEN_ELSE (VOIDmode,
                                           gen_rtx_fmt_ee (branch_code,
                                                           branch_mode, tem,
                                                           CONST0_RTX (cmp_mode)),
                                           gen_rtx_LABEL_REF (VOIDmode,
                                                              operands[3]),
                                           pc_rtx));
  emit_jump_insn (tem);
}
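
/* For illustration (added commentary): a DFmode (GT a b) branch takes
   the swap case above, so what is finally emitted is roughly

        cmptlt  $f_b,$f_a,$f_t          ; tem = (b < a)
        fbne    $f_t,label              ; branch if tem is nonzero

   while an integer (GT a b) is reversed instead: cmple a,b,t; beq t.  */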

/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return true if the final comparison was emitted, or false if
   we can't work.  */

bool
alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
{
  enum rtx_code cmp_code;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2], op1 = operands[3];
  rtx tmp;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  if (cmp_mode == DFmode && !TARGET_FIX)
    return 0;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (cmp_mode == DFmode)
        cmp_code = code, code = NE;
      break;

    case NE:
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      code = swap_condition (code);
      if (cmp_mode == DFmode)
        cmp_code = code, code = NE;
      std::swap (op0, op1);
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!register_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      tmp = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode,
                                                   op0, op1)));

      op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Emit the setcc instruction.  */
  emit_insn (gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode,
                                                       op0, op1)));
  return true;
}
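
/* For illustration (added commentary): (set dst (geu a b)) in DImode is
   rewritten by the swap case above into (leu b a), so a single

        cmpule  $b,$a,$dst

   is emitted with no extra compare, since cmp_code stays UNKNOWN.  */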

/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);

  if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
        return 0;

      /* If we have fp<->int register move instructions, do a cmov by
         performing the comparison in fp registers, and move the
         zero/nonzero value to integer registers, where we can then
         use a normal cmov, or vice-versa.  */

      switch (code)
        {
        case EQ: case LE: case LT: case LEU: case LTU:
        case UNORDERED:
          /* We have these compares.  */
          cmp_code = code, code = NE;
          break;

        case NE:
        case ORDERED:
          /* These must be reversed.  */
          cmp_code = reverse_condition (code), code = EQ;
          break;

        case GE: case GT: case GEU: case GTU:
          /* These normally need swapping, but for integer zero we have
             special patterns that recognize swapped operands.  */
          if (cmp_mode == DImode && op1 == const0_rtx)
            cmp_code = code, code = NE;
          else
            {
              cmp_code = swap_condition (code);
              code = NE;
              std::swap (op0, op1);
            }
          break;

        default:
          gcc_unreachable ();
        }

      if (cmp_mode == DImode)
        {
          if (!reg_or_0_operand (op0, DImode))
            op0 = force_reg (DImode, op0);
          if (!reg_or_8bit_operand (op1, DImode))
            op1 = force_reg (DImode, op1);
        }

      tem = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode,
                                                   op0, op1)));

      cmp_mode = cmp_mode == DImode ? DFmode : DImode;
      op0 = gen_lowpart (cmp_mode, tem);
      op1 = CONST0_RTX (cmp_mode);
      cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
      local_fast_math = 1;
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (cmp_mode == DImode || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (!can_create_pseudo_p ())
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
        break;
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (cmp_mode == DFmode && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}

/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
                              rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      diff = t, t = f, f = diff;
      diff = t - f;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (can_create_pseudo_p ())
        subtarget = gen_reg_rtx (DImode);
      else
        subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
         viable over a longer latency cmove.  On EV5, the E0 slot is a
         scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
                            GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      if (diff == 1)
        emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
        {
          add_op = GEN_INT (f);
          if (sext_add_operand (add_op, mode))
            {
              tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
                                  GEN_INT (diff));
              tmp = gen_rtx_PLUS (DImode, tmp, add_op);
              emit_insn (gen_rtx_SET (target, tmp));
            }
          else
            return 0;
        }
    }
  else
    return 0;

  return 1;
}
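
/* Worked example (illustrative commentary): (dest = cond == 0 ? 8 : 0)
   has t = 8, f = 0, diff = 8, so the first arm fires and emits

        cmpeq   $cond,0,$t              ; setcc
        sll     $t,3,$dest              ; 0 or 8

   avoiding the conditional move entirely.  */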

/* Look up the X_floating library function name for the
   given operation.  */

struct GTY(()) xfloating_op
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,               "_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,              "_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,               "_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,                "_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,                 "_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,                 "_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,                 "_OtsLssX", "OTS$LSS_X", 0 },
  { LE,                 "_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,                 "_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,                 "_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,                "_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,              "_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,     "_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,       "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,       "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
        rtx func = ops->libcall;
        if (!func)
          {
            func = init_one_libfunc (TARGET_ABI_OPEN_VMS
                                     ? ops->vms_func : ops->osf_func);
            ops->libcall = func;
          }
        return func;
      }

  gcc_unreachable ();
}

/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
                                  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
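
/* For illustration (added commentary, hedged): with the default
   -mfp-rounding-mode=n and -mfp-trap-mode=n, a FLOAT_TRUNCATE call thus
   passes mode = 2 | 0x10000 -- normal rounding with the extra flag bit
   that tells the library trap mode 'n' is in effect.  */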

/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.
*/

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
                              int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, tmp, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
        {
        case TFmode:
          reg = gen_rtx_REG (TFmode, regno);
          regno += 2;
          break;

        case DFmode:
          reg = gen_rtx_REG (DFmode, regno + 32);
          regno += 1;
          break;

        case VOIDmode:
          gcc_assert (CONST_INT_P (operands[i]));
          /* FALLTHRU */
        case DImode:
          reg = gen_rtx_REG (DImode, regno);
          regno += 1;
          break;

        default:
          gcc_unreachable ();
        }

      emit_move_insn (reg, operands[i]);
      use_reg (&usage, reg);
    }

  switch (GET_MODE (target))
    {
    case TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  tmp = gen_rtx_MEM (QImode, func);
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
                                        const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  RTL_CONST_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}

/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
                                gen_rtx_fmt_ee (code, TFmode, operands[1],
                                                operands[2]));
}
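
/* For illustration (added commentary): a TFmode add thus becomes a call
   to _OtsAddX (OTS$ADD_X on VMS) with the first operand in $16:$17, the
   second in $18:$19, the rounding-mode argument in $20, and the TFmode
   result coming back in $16:$17, per the nonstandard convention noted
   above alpha_emit_xfloating_libcall.  */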

/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2], note;

  /* X_floating library comparison functions return
           -1  unordered
            0  false
            1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* What's actually returned is -1,0,1, not a proper boolean value.  */
  note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
  note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);

  return out;
}
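
/* For illustration (added commentary): an UNORDERED test is rewritten
   as a call to the EQ routine (_OtsEqlX), which returns -1 when the
   operands are unordered; the caller then tests the raw result with LT,
   so "result < 0" is exactly the unordered case.  */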

/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
                                gen_rtx_fmt_e (orig_code,
                                               GET_MODE (operands[0]),
                                               operands[1]));
}

628d74de 3237
b2f39494
EB
3238/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3239 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3240 guarantee that the sequence
3241 set (OP[0] OP[2])
3242 set (OP[1] OP[3])
3243 is valid. Naturally, output operand ordering is little-endian.
3244 This is used by *movtf_internal and *movti_internal. */
3245
628d74de 3246void
ef4bddc2 3247alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
b2f39494 3248 bool fixup_overlap)
628d74de 3249{
56daab84 3250 switch (GET_CODE (operands[1]))
628d74de 3251 {
56daab84 3252 case REG:
628d74de
RH
3253 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3254 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
56daab84
NS
3255 break;
3256
3257 case MEM:
f4ef873c
RK
3258 operands[3] = adjust_address (operands[1], DImode, 8);
3259 operands[2] = adjust_address (operands[1], DImode, 0);
56daab84
NS
3260 break;
3261
c799797d 3262 CASE_CONST_SCALAR_INT:
65ab381c 3263 case CONST_DOUBLE:
b2f39494 3264 gcc_assert (operands[1] == CONST0_RTX (mode));
56daab84
NS
3265 operands[2] = operands[3] = const0_rtx;
3266 break;
3267
3268 default:
3269 gcc_unreachable ();
628d74de 3270 }
628d74de 3271
56daab84 3272 switch (GET_CODE (operands[0]))
628d74de 3273 {
56daab84 3274 case REG:
628d74de
RH
3275 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3276 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
56daab84
NS
3277 break;
3278
3279 case MEM:
f4ef873c
RK
3280 operands[1] = adjust_address (operands[0], DImode, 8);
3281 operands[0] = adjust_address (operands[0], DImode, 0);
56daab84
NS
3282 break;
3283
3284 default:
3285 gcc_unreachable ();
628d74de 3286 }
b2f39494
EB
3287
3288 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3289 {
3f3f5af0
UB
3290 std::swap (operands[0], operands[1]);
3291 std::swap (operands[2], operands[3]);
b2f39494 3292 }
628d74de 3293}
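
/* For illustration (added commentary): moving a TImode value from $2:$3
   into $3:$4 would split to "set $3,$2; set $4,$3", clobbering $3 before
   it is read; with FIXUP_OVERLAP the two moves are swapped to
   "set $4,$3; set $3,$2", which is safe.  */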

/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
        move = 2;
      else
        move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
        emit_move_insn (operands[1], scratch);
    }
}

/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

   unsigned:                            signed:
   word:   ldq_u  r1,X(r11)             ldq_u  r1,X(r11)
           ldq_u  r2,X+1(r11)           ldq_u  r2,X+1(r11)
           lda    r3,X(r11)             lda    r3,X+2(r11)
           extwl  r1,r3,r1              extql  r1,r3,r1
           extwh  r2,r3,r2              extqh  r2,r3,r2
           or     r1,r2,r1              or     r1,r2,r1
                                        sra    r1,48,r1

   long:   ldq_u  r1,X(r11)             ldq_u  r1,X(r11)
           ldq_u  r2,X+3(r11)           ldq_u  r2,X+3(r11)
           lda    r3,X(r11)             lda    r3,X(r11)
           extll  r1,r3,r1              extll  r1,r3,r1
           extlh  r2,r3,r2              extlh  r2,r3,r2
           or     r1,r2,r1              addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
           ldq_u  r2,X+7(r11)
           lda    r3,X(r11)
           extql  r1,r3,r1
           extqh  r2,r3,r2
           or     r1,r2,r1
*/

3366void
a5c24926
RH
3367alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3368 HOST_WIDE_INT ofs, int sign)
6c174fc0 3369{
1eb356b9 3370 rtx meml, memh, addr, extl, exth, tmp, mema;
ef4bddc2 3371 machine_mode mode;
6c174fc0 3372
9f7d06d6
RH
3373 if (TARGET_BWX && size == 2)
3374 {
34642493
RH
3375 meml = adjust_address (mem, QImode, ofs);
3376 memh = adjust_address (mem, QImode, ofs+1);
9f7d06d6
RH
3377 extl = gen_reg_rtx (DImode);
3378 exth = gen_reg_rtx (DImode);
3379 emit_insn (gen_zero_extendqidi2 (extl, meml));
3380 emit_insn (gen_zero_extendqidi2 (exth, memh));
3381 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3382 NULL, 1, OPTAB_LIB_WIDEN);
3383 addr = expand_simple_binop (DImode, IOR, extl, exth,
3384 NULL, 1, OPTAB_LIB_WIDEN);
3385
3386 if (sign && GET_MODE (tgt) != HImode)
3387 {
3388 addr = gen_lowpart (HImode, addr);
3389 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3390 }
3391 else
3392 {
3393 if (GET_MODE (tgt) != DImode)
3394 addr = gen_lowpart (GET_MODE (tgt), addr);
3395 emit_move_insn (tgt, addr);
3396 }
3397 return;
3398 }
3399
6c174fc0
RH
3400 meml = gen_reg_rtx (DImode);
3401 memh = gen_reg_rtx (DImode);
3402 addr = gen_reg_rtx (DImode);
3403 extl = gen_reg_rtx (DImode);
3404 exth = gen_reg_rtx (DImode);
3405
1eb356b9
RH
3406 mema = XEXP (mem, 0);
3407 if (GET_CODE (mema) == LO_SUM)
3408 mema = force_reg (Pmode, mema);
3409
e01acbb1 3410 /* AND addresses cannot be in any alias set, since they may implicitly
f676971a 3411 alias surrounding code. Ideally we'd have some alias set that
e01acbb1
RH
3412 covered all types except those with alignment 8 or higher. */
3413
3414 tmp = change_address (mem, DImode,
f676971a 3415 gen_rtx_AND (DImode,
0a81f074 3416 plus_constant (DImode, mema, ofs),
e01acbb1 3417 GEN_INT (-8)));
ba4828e0 3418 set_mem_alias_set (tmp, 0);
e01acbb1
RH
3419 emit_move_insn (meml, tmp);
3420
3421 tmp = change_address (mem, DImode,
f676971a 3422 gen_rtx_AND (DImode,
0a81f074
RS
3423 plus_constant (DImode, mema,
3424 ofs + size - 1),
e01acbb1 3425 GEN_INT (-8)));
ba4828e0 3426 set_mem_alias_set (tmp, 0);
e01acbb1 3427 emit_move_insn (memh, tmp);
6c174fc0 3428
0b2a7367 3429 if (sign && size == 2)
6c174fc0 3430 {
0a81f074 3431 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
6c174fc0 3432
e533b2a4 3433 emit_insn (gen_extql (extl, meml, addr));
0b2a7367 3434 emit_insn (gen_extqh (exth, memh, addr));
6c174fc0 3435
1a7cb241
JW
3436 /* We must use tgt here for the target. Alpha-vms port fails if we use
3437 addr for the target, because addr is marked as a pointer and combine
a50aa827 3438 knows that pointers are always sign-extended 32-bit values. */
1a7cb241 3439 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
f676971a 3440 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
4208b40f 3441 addr, 1, OPTAB_WIDEN);
6c174fc0 3442 }
4208b40f 3443 else
6c174fc0 3444 {
0a81f074 3445 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
0b2a7367
RH
3446 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3447 switch ((int) size)
30102605 3448 {
0b2a7367
RH
3449 case 2:
3450 emit_insn (gen_extwh (exth, memh, addr));
3451 mode = HImode;
3452 break;
0b2a7367
RH
3453 case 4:
3454 emit_insn (gen_extlh (exth, memh, addr));
3455 mode = SImode;
3456 break;
0b2a7367
RH
3457 case 8:
3458 emit_insn (gen_extqh (exth, memh, addr));
3459 mode = DImode;
3460 break;
0b2a7367
RH
3461 default:
3462 gcc_unreachable ();
4208b40f
RH
3463 }
3464
3465 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3466 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3467 sign, OPTAB_WIDEN);
6c174fc0
RH
3468 }
3469
4208b40f 3470 if (addr != tgt)
9f7d06d6 3471 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
6c174fc0
RH
3472}
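
/* A minimal caller-side sketch (ours, not taken from the md file): to
   load a zero-extended unaligned longword at BASE into a fresh pseudo,
   a hypothetical expander could do

     rtx tgt = gen_reg_rtx (SImode);
     alpha_expand_unaligned_load (tgt, gen_rtx_MEM (SImode, base),
				  4, 0, 0);

   which emits the two covering ldq_u loads, extll/extlh extracts and
   the merging OR shown in the table above; passing SIGN == 1 selects
   the sign-extending variant instead.  */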

/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
			      HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
	{
	  dstl = gen_lowpart (QImode, src);
	  dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
				      NULL, 1, OPTAB_LIB_WIDEN);
	  dsth = gen_lowpart (QImode, dsth);
	}
      else
	dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (DImode, dsta, ofs),
				      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (DImode, dsta,
						     ofs + size - 1),
				      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);

  addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
			    GEN_INT (size*8), addr));

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
	  break;
	case 4:
	  emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
	  break;
	case 8:
	  emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

  switch ((int) size)
    {
    case 2:
      emit_insn (gen_mskwl (dstl, dstl, addr));
      break;
    case 4:
      emit_insn (gen_mskll (dstl, dstl, addr));
      break;
    case 8:
      emit_insn (gen_mskql (dstl, dstl, addr));
      break;
    default:
      gcc_unreachable ();
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  /* Must store high before low for degenerate case of aligned.  */
  emit_move_insn (memh, dsth);
  emit_move_insn (meml, dstl);
}
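
/* Mirror-image sketch for stores (our illustration, not from the md
   file): clearing an unaligned 16-bit word at BASE on a non-BWX target,

     alpha_expand_unaligned_store (gen_rtx_MEM (HImode, base),
				   const0_rtx, 2, 0);

   loads both covering quadwords, msk's the two target bytes out of
   them, skips the ins/or step since the source is zero, and stores
   high before low so the degenerate aligned case, where both covering
   quadwords are the same location, still ends with the correct bits.  */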

/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS 8

/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
				   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;
  HOST_WIDE_INT i;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      tmp = change_address (smem, DImode,
			    gen_rtx_AND (DImode,
					 plus_constant (DImode, smema, 8*i),
					 im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);
    }

  tmp = change_address (smem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, smema,
						    8*words - 1),
				     im8));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
		       1, OPTAB_WIDEN);
  for (i = 0; i < words; ++i)
    {
      emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
      emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
      emit_insn (gen_rtx_SET (ext_tmps[i],
			      gen_rtx_IF_THEN_ELSE (DImode,
						    gen_rtx_EQ (DImode, areg,
								const0_rtx),
						    const0_rtx, ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
				  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}
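
/* Illustrative shape of the emitted code for WORDS == 1 (a sketch
   under our own register naming, not literal output):

	ldq_u   d0,0(s)		; data_regs[0], covers the low bytes
	ldq_u   d1,7(s)		; data_regs[1], covers the high bytes
	and     s,7,a
	extql   d0,s,d0
	extqh   d1,s,e0
	cmoveq  a,0,e0		; if (a == 0) e0 = 0; see comment above
	bis     d0,e0,out0
*/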

/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
				    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx (DImode);
  st_tmp_1 = gen_reg_rtx (DImode);
  st_tmp_2 = gen_reg_rtx (DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (DImode, dmema,
							  words*8 - 1),
					   im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
	{
	  emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
	  emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
	}
      for (i = words-1; i > 0; --i)
	{
	  ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
					ins_tmps[i-1], ins_tmps[i-1], 1,
					OPTAB_WIDEN);
	}
    }

  /* Split and merge the ends with the destination data.  */
  emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
  emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
			       st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
			       st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
				gen_rtx_AND (DImode,
					     plus_constant (DImode,
							    dmema, i*8),
					     im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  emit_move_insn (st_addr_1, st_tmp_1);
}


/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (REG_P (tmp))
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    src_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    src_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    src_align = 16;
	}
    }

  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    dst_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    dst_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    dst_align = 16;
	}
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
					 words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
	{
	  do {
	    data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	    emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	  alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  emit_move_insn (adjust_address (orig_dst, DImode, ofs),
			  data_regs[i]);
	  ofs += 8;
	  i++;
	}
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
	 two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  gen_lowpart (SImode, data_regs[i]));
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
			  gen_lowpart (SImode, tmp));
	  ofs += 8;
	  i++;
	}

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  data_regs[i]);
	  ofs += 4;
	  i++;
	}
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
	if (GET_MODE (data_regs[i + words]) != DImode)
	  break;

      if (words == 1)
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
	alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
					    words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
	i++;
	ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
	i++;
	ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
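
/* A worked example (ours): a 12-byte copy whose source is known 32-bit
   aligned but whose destination is not loads three SImode pseudos with
   plain ldl's, then falls into the unaligned SImode store loop above,
   emitting an ins/msk/stq_u triple per word.  Copies longer than
   MAX_MOVE_WORDS * UNITS_PER_WORD == 64 bytes make the expander return
   0, punting to the generic block-move path.  */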

int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
	{
	  if (a >= 64)
	    align = a, alignofs = 8 - c % 8;
	  else if (a >= 32)
	    align = a, alignofs = 4 - c % 4;
	  else if (a >= 16)
	    align = a, alignofs = 2 - c % 2;
	}
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
      /* Given that alignofs is bounded by align, the only time BWX could
	 generate three stores is for a 7 byte fill.  Prefer two individual
	 stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
	  && align >= 32
	  && !(alignofs == 4 && bytes >= 4))
	{
	  machine_mode mode = (align >= 64 ? DImode : SImode);
	  int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
	  if (bytes < alignofs)
	    {
	      mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
	      ofs += bytes;
	      bytes = 0;
	    }
	  else
	    {
	      bytes -= alignofs;
	      ofs += alignofs;
	    }
	  alignofs = 0;

	  tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	}

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
	{
	  emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
	  bytes -= 1;
	  ofs += 1;
	  alignofs -= 1;
	}
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
	{
	  emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
	  bytes -= 2;
	  ofs += 2;
	  alignofs -= 2;
	}
      if (alignofs == 4 && bytes >= 4)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
	  bytes -= 4;
	  ofs += 4;
	  alignofs = 0;
	}

      /* If we've not used the extra lead alignment information by now,
	 we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
	{
	  alignofs = alignofs & -alignofs;
	  align = MIN (align, alignofs * BITS_PER_UNIT);
	}
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
			const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
	orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
	{
	  rtx mem
	    = change_address (orig_dst, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (DImode, orig_dsta,
							  ofs + i*8),
					   GEN_INT (-8)));
	  set_mem_alias_set (mem, 0);
	  emit_move_insn (mem, const0_rtx);
	}

      /* Depending on the alignment, the first stq_u may have overlapped
	 with the initial stl, which means that the last stq_u didn't
	 write as much as it would appear.  Leave those questionable bytes
	 unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
			const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, DImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
      else if (align >= 32 && bytes < 4)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, SImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
    }

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
	{
	  do {
	    emit_move_insn (adjust_address (orig_dst, HImode, ofs),
			    const0_rtx);
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
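
/* A worked example (ours): clearing 9 bytes at a pointer known 32-bit
   aligned on a non-BWX target emits two stl's of $31 for the first
   eight bytes, then, since one byte remains at 32-bit alignment, a
   single ldl/and/stl read-modify-write with mask ~0xff rather than the
   full unaligned byte sequence.  */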

/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;
  HOST_WIDE_INT mask = 0;

  for (i = 7; i >= 0; --i)
    {
      mask <<= 8;
      if (!((value >> i) & 1))
	mask |= 0xff;
    }

  result = gen_int_mode (mask, DImode);
  return result;
}
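
/* A worked example (ours): alpha_expand_zap_mask (0x0f) returns
   0xffffffff00000000 -- each set bit in VALUE selects one byte to be
   zapped, and the returned AND-mask keeps exactly the other bytes.  */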

void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
				   machine_mode mode,
				   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}

/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_int_reg_note (x, REG_BR_PROB, very_unlikely);
}

/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}

/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;
  emit_insn (fn (res, mem, val));
}

/* Subroutines of the atomic operation splitters.  Emit barriers
   as needed for the memory MODEL.  */

static void
alpha_pre_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, true))
    emit_insn (gen_memory_barrier ());
}

static void
alpha_post_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, false))
    emit_insn (gen_memory_barrier ());
}

/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  switch (mode)
    {
    case QImode:
      fn = gen_insbl;
      break;
    case HImode:
      fn = gen_inswl;
      break;
    case SImode:
      fn = gen_insll;
      break;
    case DImode:
      fn = gen_insql;
      break;
    default:
      gcc_unreachable ();
    }

  op1 = force_reg (mode, op1);
  emit_insn (fn (ret, op1, op2));

  return ret;
}

/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
		       rtx after, rtx scratch, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  alpha_pre_atomic_barrier (model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = gen_rtx_AND (mode, before, val);
      emit_insn (gen_rtx_SET (val, x));

      x = gen_rtx_NOT (mode, val);
    }
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
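
/* For illustration (a sketch on our part, not literal md output):
   splitting an SImode atomic fetch-and-add with a seq-cst model yields
   a loop along these lines:

	mb
     1:	ldl_l	before,0(mem)
	addl	before,val,scratch
	stl_c	scratch,0(mem)
	beq	scratch,1b		; unlikely branch back on failure
	mb

   The store-conditional clears its register on failure, hence the
   EQ-with-zero test feeding emit_unlikely_jump above.  */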

/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx operands[])
{
  rtx cond, retval, mem, oldval, newval;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  machine_mode mode;
  rtx label1, label2, x;

  cond = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (operands[5] != const0_rtx);
  mod_s = memmodel_from_int (INTVAL (operands[6]));
  mod_f = memmodel_from_int (INTVAL (operands[7]));
  mode = GET_MODE (mem);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, x, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (cond, newval);
  emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));
}
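
/* For illustration (our sketch, not literal output): a strong DImode
   compare-and-swap with a seq-cst success model expands to roughly

	mb
     1:	ldq_l	retval,0(mem)
	cmpeq	retval,oldval,cond
	beq	cond,2f			; values differ, fail
	mov	newval,cond
	stq_c	cond,0(mem)
	beq	cond,1b			; lost the reservation, retry
     2:	mb

   The weak form omits the retry branch, letting a failed stq_c report
   failure through COND instead.  */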

void
alpha_expand_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
  machine_mode mode;
  rtx addr, align, wdst;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);

  cond = operands[0];
  dst = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = operands[5];
  mod_s = operands[6];
  mod_f = operands[7];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);

  if (newval != const0_rtx)
    newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    gen = gen_atomic_compare_and_swapqi_1;
  else
    gen = gen_atomic_compare_and_swaphi_1;
  emit_insn (gen (cond, wdst, mem, oldval, newval, align,
		  is_weak, mod_s, mod_f));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}

void
alpha_split_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dest, orig_mem, oldval, newval, align, scratch;
  machine_mode mode;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  rtx label1, label2, mem, addr, width, mask, x;

  cond = operands[0];
  dest = operands[1];
  orig_mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  align = operands[5];
  is_weak = (operands[6] != const0_rtx);
  mod_s = memmodel_from_int (INTVAL (operands[7]));
  mod_f = memmodel_from_int (INTVAL (operands[8]));
  scratch = operands[9];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, dest, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_insn (gen_mskxl (cond, scratch, mask, addr));

  if (newval != const0_rtx)
    emit_insn (gen_iordi3 (cond, cond, newval));

  emit_store_conditional (DImode, cond, mem, cond);

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));
}

/* Expand an atomic exchange operation.  */

void
alpha_split_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, scratch;
  enum memmodel model;
  machine_mode mode;
  rtx label, x, cond;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  scratch = operands[4];
  mode = GET_MODE (mem);
  cond = gen_lowpart (DImode, scratch);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}

void
alpha_expand_atomic_exchange_12 (rtx operands[])
{
  rtx dst, mem, val, model;
  machine_mode mode;
  rtx addr, align, wdst;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx);

  dst = operands[0];
  mem = operands[1];
  val = operands[2];
  model = operands[3];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  if (val != const0_rtx)
    val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    gen = gen_atomic_exchangeqi_1;
  else
    gen = gen_atomic_exchangehi_1;
  emit_insn (gen (wdst, mem, val, align, model));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}

void
alpha_split_atomic_exchange_12 (rtx operands[])
{
  rtx dest, orig_mem, addr, val, align, scratch;
  rtx label, mem, width, mask, x;
  machine_mode mode;
  enum memmodel model;

  dest = operands[0];
  orig_mem = operands[1];
  val = operands[2];
  align = operands[3];
  model = (enum memmodel) INTVAL (operands[4]);
  scratch = operands[5];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));
  emit_insn (gen_mskxl (scratch, scratch, mask, addr));
  if (val != const0_rtx)
    emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
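
/* To make the _12 splitters concrete, a hypothetical byte exchange at
   address A works on the containing aligned quadword (A & ~7), roughly
   (our sketch, not literal output):

     1:	ldq_l	scratch,0(A & ~7)
	extbl	scratch,A,dest		; old byte, zero-extended
	mskbl	scratch,A,scratch	; punch a hole for the new byte
	bis	scratch,val,scratch	; val was pre-shifted by insbl
	stq_c	scratch,0(A & ~7)
	beq	scratch,1b

   with barriers added around the loop as the memory model requires.  */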
\f
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
  enum attr_type dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (REG_NOTE_KIND (link) != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency-1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}

/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}
\f
/* Machine-specific function data.  */

struct GTY(()) alpha_links;

struct GTY(()) machine_function
{
  /* For flag_reorder_blocks_and_partition.  */
  rtx gp_save_rtx;

  /* For VMS condition handlers.  */
  bool uses_condition_handler;

  /* Linkage entries.  */
  hash_map<nofree_string_hash, alpha_links *> *links;
};

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Support for frame based VMS condition handlers.  */

/* A VMS condition handler may be established for a function with a call to
   __builtin_establish_vms_condition_handler, and cancelled with a call to
   __builtin_revert_vms_condition_handler.

   The VMS Condition Handling Facility knows about the existence of a handler
   from the procedure descriptor .handler field.  As the VMS native compilers
   do, we store the user specified handler's address at a fixed location in
   the stack frame and point the procedure descriptor at a common wrapper
   which fetches the real handler's address and issues an indirect call.

   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.

   We force the procedure kind to PT_STACK, and the fixed frame location is
   fp+8, just before the register save area.  We use the handler_data field in
   the procedure descriptor to state the fp offset at which the installed
   handler address can be found.  */

#define VMS_COND_HANDLER_FP_OFFSET 8

/* Expand code to store the currently installed user VMS condition handler
   into TARGET and install HANDLER as the new condition handler.  */

void
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
{
  rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
					    VMS_COND_HANDLER_FP_OFFSET);

  rtx handler_slot
    = gen_rtx_MEM (DImode, handler_slot_address);

  emit_move_insn (target, handler_slot);
  emit_move_insn (handler_slot, handler);

  /* Notify the start/prologue/epilogue emitters that the condition handler
     slot is needed.  In addition to reserving the slot space, this will
     force the procedure kind to PT_STACK, ensuring that the
     hard_frame_pointer_rtx use above is correct.  */
  cfun->machine->uses_condition_handler = true;
}

/* Expand code to store the current VMS condition handler into TARGET and
   nullify it.  */

void
alpha_expand_builtin_revert_vms_condition_handler (rtx target)
{
  /* We implement this by establishing a null condition handler, with the tiny
     side effect of setting uses_condition_handler.  This is a little bit
     pessimistic if no actual builtin_establish call is ever issued, which is
     not a real problem and expected never to happen anyway.  */

  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
}
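
/* A source-level usage sketch (hypothetical user code, not part of this
   file): on VMS a function can save and later drop its handler with

     void *prev = __builtin_establish_vms_condition_handler (my_handler);
     ...
     __builtin_revert_vms_condition_handler ();

   where my_handler is a user routine matching the VMS condition handler
   signature; the previous handler's address comes back in PREV.  */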

/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}

/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx_insn *seq;
  rtx m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();

      /* We used to simply emit the sequence after entry_of_function.
	 However this breaks the CFG if the first instruction in the
	 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
	 label.  Emit the sequence properly on the edge.  We are only
	 invoked from dw2_build_landing_pads and finish_eh_generation
	 will call commit_edge_insertions thanks to a kludge.  */
      insert_insn_on_edge (seq,
			   single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}

static void
alpha_instantiate_decls (void)
{
  if (cfun->machine->gp_save_rtx != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->gp_save_rtx);
}

static int
alpha_ra_ever_killed (void)
{
  rtx_insn *top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return (int)df_regs_ever_live_p (REG_RA);

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
}

\f
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
	return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
	return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "v";
	case ALPHA_FPTM_SU:
	case ALPHA_FPTM_SUI:
	  return "sv";
	default:
	  gcc_unreachable ();
	}

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "v";
	case ALPHA_FPTM_SU:
	  return "sv";
	case ALPHA_FPTM_SUI:
	  return "svi";
	default:
	  gcc_unreachable ();
	}
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "u";
	case ALPHA_FPTM_SU:
	  return "su";
	case ALPHA_FPTM_SUI:
	  return "sui";
	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}

/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;
    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
	{
	case ALPHA_FPRM_NORM:
	  return NULL;
	case ALPHA_FPRM_MINF:
	  return "m";
	case ALPHA_FPRM_CHOP:
	  return "c";
	case ALPHA_FPRM_DYN:
	  return "d";
	default:
	  gcc_unreachable ();
	}
      break;

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
5060
5061/* Print an operand. Recognize special options, documented below. */
5062
5063void
a5c24926 5064print_operand (FILE *file, rtx x, int code)
be7560ea
RH
5065{
5066 int i;
5067
5068 switch (code)
5069 {
5070 case '~':
5071 /* Print the assembler name of the current function. */
5072 assemble_name (file, alpha_fnname);
5073 break;
5074
6f9b006d 5075 case '&':
4fbca4ba
RS
5076 if (const char *name = get_some_local_dynamic_name ())
5077 assemble_name (file, name);
5078 else
5079 output_operand_lossage ("'%%&' used without any "
5080 "local dynamic TLS references");
6f9b006d
RH
5081 break;
5082
be7560ea
RH
5083 case '/':
5084 {
5085 const char *trap = get_trap_mode_suffix ();
5086 const char *round = get_round_mode_suffix ();
5087
5088 if (trap || round)
46e1a769 5089 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
be7560ea
RH
5090 break;
5091 }
5092
89cfc2c6
RK
5093 case ',':
5094 /* Generates single precision instruction suffix. */
be7560ea 5095 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
89cfc2c6
RK
5096 break;
5097
5098 case '-':
5099 /* Generates double precision instruction suffix. */
be7560ea 5100 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
89cfc2c6
RK
5101 break;
5102
1eb356b9
RH
5103 case '#':
5104 if (alpha_this_literal_sequence_number == 0)
5105 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5106 fprintf (file, "%d", alpha_this_literal_sequence_number);
5107 break;
5108
5109 case '*':
5110 if (alpha_this_gpdisp_sequence_number == 0)
5111 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5112 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5113 break;
5114
40571d67 5115 case 'J':
6f9b006d
RH
5116 {
5117 const char *lituse;
5118
5119 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5120 {
5121 x = XVECEXP (x, 0, 0);
5122 lituse = "lituse_tlsgd";
5123 }
5124 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5125 {
5126 x = XVECEXP (x, 0, 0);
5127 lituse = "lituse_tlsldm";
5128 }
7d83f4f5 5129 else if (CONST_INT_P (x))
6f9b006d
RH
5130 lituse = "lituse_jsr";
5131 else
5132 {
5133 output_operand_lossage ("invalid %%J value");
5134 break;
5135 }
5136
5137 if (x != const0_rtx)
5138 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5139 }
40571d67
RH
5140 break;
5141
d006f5eb
RH
5142 case 'j':
5143 {
5144 const char *lituse;
5145
5146#ifdef HAVE_AS_JSRDIRECT_RELOCS
5147 lituse = "lituse_jsrdirect";
5148#else
5149 lituse = "lituse_jsr";
5150#endif
5151
5152 gcc_assert (INTVAL (x) != 0);
5153 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5154 }
5155 break;
    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;

    case 'm':
      /* Write mask for ZAP insn.  */
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;

    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (!mode_width_operand (x, VOIDmode))
        output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
               (INTVAL (x) == 8 ? "b"
                : INTVAL (x) == 16 ? "w"
                : INTVAL (x) == 32 ? "l"
                : "q"));
      break;

    case 'U':
      /* Similar, except do it from the mask.  */
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT value = INTVAL (x);

          if (value == 0xff)
            {
              fputc ('b', file);
              break;
            }
          if (value == 0xffff)
            {
              fputc ('w', file);
              break;
            }
          if (value == 0xffffffff)
            {
              fputc ('l', file);
              break;
            }
          if (value == -1)
            {
              fputc ('q', file);
              break;
            }
        }

      output_operand_lossage ("invalid %%U value");
      break;

    case 's':
      /* Write the constant value divided by 8.  */
      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
      break;

    case 'S':
      /* Same, except compute (64 - c) / 8.  */
      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%S value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
      break;

    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
        enum rtx_code c = GET_CODE (x);

        if (!COMPARISON_P (x))
          output_operand_lossage ("invalid %%C value");

        else if (code == 'D')
          c = reverse_condition (c);
        else if (code == 'c')
          c = swap_condition (c);
        else if (code == 'd')
          c = swap_condition (reverse_condition (c));

        if (c == LEU)
          fprintf (file, "ule");
        else if (c == LTU)
          fprintf (file, "ult");
        else if (c == UNORDERED)
          fprintf (file, "un");
        else
          fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;

    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
        {
        case DIV:
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UDIV:
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case MOD:
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UMOD:
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        default:
          output_operand_lossage ("invalid %%E value");
          break;
        }
      break;

    case 'A':
      /* Write "_u" for unaligned access.  */
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        fprintf (file, "_u");
      break;

    case 0:
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
        output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
        {
          switch (XINT (XEXP (x, 0), 1))
            {
            case UNSPEC_DTPREL:
            case UNSPEC_TPREL:
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              break;
            }
        }
      else
        output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
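
/* Usage sketch (illustrative, not from the sources): in an insn template,
   "%r1" prints "$31" when operand 1 is the constant zero and the register
   name otherwise, while "%R1" does the same with "$f31", letting patterns
   substitute the hard zero registers for zero constants.  */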

void
print_operand_address (FILE *file, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && CONST_INT_P (XEXP (addr, 1)))
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
        {
          op1 = XEXP (op1, 0);
          switch (XINT (op1, 1))
            {
            case UNSPEC_DTPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
              break;
            case UNSPEC_TPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              return;
            }

          output_addr_const (file, XVECEXP (op1, 0, 0));
        }
      else
        {
          reloc16 = "gprel";
          reloclo = "gprellow";
          output_addr_const (file, op1);
        }

      if (offset)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
        {
        case REG:
          basereg = REGNO (addr);
          break;

        case SUBREG:
          basereg = subreg_regno (addr);
          break;

        default:
          gcc_unreachable ();
        }

      fprintf (file, "($%d)\t\t!%s", basereg,
               (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

    case SYMBOL_REF:
      gcc_assert (TARGET_ABI_OPEN_VMS || this_is_asm_operands);
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert (TARGET_ABI_OPEN_VMS || this_is_asm_operands);
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
               INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;

    default:
      output_operand_lossage ("invalid operand address");
      return;
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}
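
/* Illustratively, the cases above produce operand syntax such as "16($1)"
   for a register-plus-offset address, "sym($29)\t\t!gprel" for a LO_SUM
   against the GP register, or a bare symbol name on VMS (examples only;
   the relocation names come from the code above).  */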
\f
/* Emit RTL insns to initialize the variable parts of a trampoline at
   M_TRAMP.  FNDECL is target function's decl.  CHAIN_VALUE is an rtx
   for the static chain value for the function.  */

static void
alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr, mem, word1, word2;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (Pmode, fnaddr);
  chain_value = convert_memory_address (Pmode, chain_value);
#endif

  if (TARGET_ABI_OPEN_VMS)
    {
      const char *fnname;
      char *trname;

      /* Construct the name of the trampoline entry point.  */
      fnname = XSTR (fnaddr, 0);
      trname = (char *) alloca (strlen (fnname) + 5);
      strcpy (trname, fnname);
      strcat (trname, "..tr");
      fnname = ggc_alloc_string (trname, strlen (trname) + 1);
      word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);

      /* The trampoline (or "bounded") procedure descriptor is constructed
         from the function's procedure descriptor with certain fields zeroed
         in accordance with the VMS calling standard.  This is stored in the
         first quadword.  */
      word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
      word1 = expand_and (DImode, word1,
                          GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
                          NULL);
    }
  else
    {
      /* These 4 instructions are:
           ldq $1,24($27)
           ldq $27,16($27)
           jmp $31,($27),0
           nop
         We don't bother setting the HINT field of the jump; the nop
         is merely there for padding.  */
      word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
      word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
    }

  /* Store the first two words, as computed above.  */
  mem = adjust_address (m_tramp, DImode, 0);
  emit_move_insn (mem, word1);
  mem = adjust_address (m_tramp, DImode, 8);
  emit_move_insn (mem, word2);

  /* Store function address and static chain value.  */
  mem = adjust_address (m_tramp, Pmode, 16);
  emit_move_insn (mem, fnaddr);
  mem = adjust_address (m_tramp, Pmode, 24);
  emit_move_insn (mem, chain_value);

  if (TARGET_ABI_OSF)
    {
      emit_insn (gen_imb ());
#ifdef HAVE_ENABLE_EXECUTE_STACK
      emit_library_call (init_one_libfunc ("__enable_execute_stack"),
                         LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
    }
}
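
/* For reference, the 32-byte OSF trampoline built above is laid out as
   follows (a sketch derived from the constants in the code):

     +0   0xa77b0010a43b0018   ldq $1,24($27); ldq $27,16($27)
     +8   0x47ff041f6bfb0000   jmp $31,($27),0; nop
     +16  address of the nested function (loaded into $27)
     +24  static chain value (loaded into $1)  */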
\f
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

static rtx
alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
                    const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
  else
    {
#ifdef ENABLE_CHECKING
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
         values here.  */
      gcc_assert (!COMPLEX_MODE_P (mode));
#endif

      /* Set up defaults for FP operands passed in FP registers, and
         integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
        basereg = 32 + 16;
      else
        basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the two platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
  {
    if (mode == VOIDmode)
      return alpha_arg_info_reg_val (*cum);

    num_args = cum->num_args;
    if (num_args >= 6
        || targetm.calls.must_pass_in_stack (mode, type))
      return NULL_RTX;
  }
#elif TARGET_ABI_OSF
  {
    if (*cum >= 6)
      return NULL_RTX;
    num_args = *cum;

    /* VOID is passed as a special flag for "last argument".  */
    if (type == void_type_node)
      basereg = 16;
    else if (targetm.calls.must_pass_in_stack (mode, type))
      return NULL_RTX;
  }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
}

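/* Illustration (OSF ABI, a sketch): with BASEREG 16 for integer arguments
   and 32 + 16 for FP arguments, argument word N lands in $(16+N) or
   $f(16+N); e.g. the third one-word integer argument is passed in $18,
   and the third FP argument in $f18.  */
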
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                            const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool onstack = targetm.calls.must_pass_in_stack (mode, type);
  int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);

#if TARGET_ABI_OSF
  *cum += increment;
#else
  if (!onstack && cum->num_args < 6)
    cum->atypes[cum->num_args] = alpha_arg_type (mode);
  cum->num_args += increment;
#endif
}

static int
alpha_arg_partial_bytes (cumulative_args_t cum_v,
                         machine_mode mode ATTRIBUTE_UNUSED,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  int words = 0;
  CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - cum->num_args;
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}
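
/* Example (OSF, a sketch): a two-word argument arriving when five of the
   six register words are already used satisfies 6 < *cum + ALPHA_ARG_SIZE,
   so one word goes in the last register and 8 bytes are reported here as
   the partial, in-register portion; the rest goes on the stack.  */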

/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory, except on OpenVMS where
         records that fit 64 bits should be returned by immediate value
         as required by section 3.8.7.1 of the OpenVMS Calling Standard.  */
      if (TARGET_ABI_OPEN_VMS
          && TREE_CODE (type) != ARRAY_TYPE
          && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
        return false;

      if (AGGREGATE_TYPE_P (type))
        return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
         not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
         aggregate_value_p.  We must return something, but it's not
         clear what's safe to return.  Pretend it's a struct I
         guess.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}

/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
                         machine_mode mode,
                         const_tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  return mode == TFmode || mode == TCmode;
}

/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

rtx
function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
                machine_mode mode)
{
  unsigned int regnum, dummy ATTRIBUTE_UNUSED;
  enum mode_class mclass;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  mclass = GET_MODE_CLASS (mode);
  switch (mclass)
    {
    case MODE_INT:
      /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
         where we have them returning both SImode and DImode.  */
      if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
        PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
        machine_mode cmode = GET_MODE_INNER (mode);

        return gen_rtx_PARALLEL
          (VOIDmode,
           gen_rtvec (2,
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
                                         const0_rtx),
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
                                         GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    case MODE_RANDOM:
      /* We should only reach here for BLKmode on VMS.  */
      gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
      regnum = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}
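
/* E.g. a "double _Complex" return uses the PARALLEL above: the real part
   in $f0 (hard register 32) at offset 0 and the imaginary part in $f1
   (hard register 33) at offset GET_MODE_SIZE (DFmode) == 8.  */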

/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (const_tree type)
{
  return TYPE_MODE (type) != TCmode;
}

static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
                          TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++?  SET_IS_AGGR_TYPE (record, 1);  */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (BUILTINS_LOCATION,
                    FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  DECL_CHAIN (ofs) = space;

  base = build_decl (BUILTINS_LOCATION,
                     FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  DECL_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
  return record;
}
5849
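/* The record built above corresponds to this C-level sketch (the last
   member is the unnamed dummy field added to avoid alignment warnings):

     struct __va_list_tag {
       void *__base;     -- start of the argument save area
       int __offset;     -- bytes of argument data consumed so far
       int __dummy;      -- artificial padding field
     };  */
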
#if TARGET_ABI_OSF
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static gimple
va_list_skip_additions (tree lhs)
{
  gimple stmt;

  for (;;)
    {
      enum tree_code code;

      stmt = SSA_NAME_DEF_STMT (lhs);

      if (gimple_code (stmt) == GIMPLE_PHI)
        return stmt;

      if (!is_gimple_assign (stmt)
          || gimple_assign_lhs (stmt) != lhs)
        return NULL;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
        return stmt;
      code = gimple_assign_rhs_code (stmt);
      if (!CONVERT_EXPR_CODE_P (code)
          && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
              || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
              || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
        return stmt;

      lhs = gimple_assign_rhs1 (stmt);
    }
}

/* Check if LHS = RHS statement is
     LHS = *(ap.__base + ap.__offset + cst)
   or
     LHS = *(ap.__base
             + ((ap.__offset + cst <= 47)
                ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed;
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
   registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
{
  tree base, offset, rhs;
  int offset_arg = 1;
  gimple base_stmt;

  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
      != GIMPLE_SINGLE_RHS)
    return false;

  rhs = gimple_assign_rhs1 (stmt);
  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (stmt == NULL
      || !is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return false;

  base = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (base) == SSA_NAME)
    {
      base_stmt = va_list_skip_additions (base);
      if (base_stmt
          && is_gimple_assign (base_stmt)
          && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
        base = gimple_assign_rhs1 (base_stmt);
    }

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (base) == SSA_NAME)
        {
          base_stmt = va_list_skip_additions (base);
          if (base_stmt
              && is_gimple_assign (base_stmt)
              && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
            base = gimple_assign_rhs1 (base_stmt);
        }

      if (TREE_CODE (base) != COMPONENT_REF
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
        return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
    return false;

  offset = gimple_op (stmt, 1 + offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    {
      gimple offset_stmt = va_list_skip_additions (offset);

      if (offset_stmt
          && gimple_code (offset_stmt) == GIMPLE_PHI)
        {
          HOST_WIDE_INT sub;
          gimple arg1_stmt, arg2_stmt;
          tree arg1, arg2;
          enum tree_code code1, code2;

          if (gimple_phi_num_args (offset_stmt) != 2)
            goto escapes;

          arg1_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
          arg2_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
          if (arg1_stmt == NULL
              || !is_gimple_assign (arg1_stmt)
              || arg2_stmt == NULL
              || !is_gimple_assign (arg2_stmt))
            goto escapes;

          code1 = gimple_assign_rhs_code (arg1_stmt);
          code2 = gimple_assign_rhs_code (arg2_stmt);
          if (code1 == COMPONENT_REF
              && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
            /* Do nothing.  */;
          else if (code2 == COMPONENT_REF
                   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
            {
              gimple tem = arg1_stmt;
              code2 = code1;
              arg1_stmt = arg2_stmt;
              arg2_stmt = tem;
            }
          else
            goto escapes;

          if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
            goto escapes;

          sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
          if (code2 == MINUS_EXPR)
            sub = -sub;
          if (sub < -48 || sub > -32)
            goto escapes;

          arg1 = gimple_assign_rhs1 (arg1_stmt);
          arg2 = gimple_assign_rhs1 (arg2_stmt);
          if (TREE_CODE (arg2) == SSA_NAME)
            {
              arg2_stmt = va_list_skip_additions (arg2);
              if (arg2_stmt == NULL
                  || !is_gimple_assign (arg2_stmt)
                  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
                goto escapes;
              arg2 = gimple_assign_rhs1 (arg2_stmt);
            }
          if (arg1 != arg2)
            goto escapes;

          if (TREE_CODE (arg1) != COMPONENT_REF
              || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
              || get_base_address (arg1) != base)
            goto escapes;

          /* Need floating point regs.  */
          cfun->va_list_fpr_size |= 2;
          return false;
        }
      if (offset_stmt
          && is_gimple_assign (offset_stmt)
          && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
        offset = gimple_assign_rhs1 (offset_stmt);
    }
  if (TREE_CODE (offset) != COMPONENT_REF
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
      || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

escapes:
  si->va_list_escapes = true;
  return false;
}
#endif

/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
                              tree type, int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);

  /* Skip the current argument.  */
  targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
                                      true);

#if TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
        {
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
          emit_insn (gen_arg_home ());
        }
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count;
      alias_set_type set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
        count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
         are needed by the detected va_arg statements.  See above for
         how these values are computed.  Note that the "escape" value
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
         these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (Pmode, virtual_incoming_args_rtx,
                                            (cum + 6) * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum, tmp, count);
        }

      if (cfun->va_list_fpr_size & 2)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (Pmode, virtual_incoming_args_rtx,
                                            cum * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
        }
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}

static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = DECL_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                           valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                             valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
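
/* Concretely (a sketch): on OSF in a function with two named one-word
   arguments, the code above sets __offset to 16 (NUM_ARGS * 8) and points
   __base at the save area established by TARGET_SETUP_INCOMING_VARARGS,
   so the first va_arg fetch reads argument slot 2.  */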
6197
static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
                         gimple_seq *pre_p)
{
  tree type_size, ptr_type, addend, t, addr;
  gimple_seq internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6 * 8);
      gimplify_assign (offset,
                       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
                       pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
         be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6 * 8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
                              addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
                            fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  gimple_seq_add_seq (pre_p, internal_post);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }
  t = fold_convert (TREE_TYPE (offset), t);
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
                   pre_p);

  return build_va_arg_indirect_ref (addr);
}
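
/* The rounding above keeps __offset a multiple of 8: for example, a
   12-byte type advances the offset by (12 + 7) / 8 * 8 == 16 bytes.  */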

static tree
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                       gimple_seq *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = DECL_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                         valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type_for_mode (type, ptr_mode, true);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  gimplify_assign (unshare_expr (offset_field),
                   fold_convert (TREE_TYPE (offset_field), offset), pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
\f
/* Builtins.  */

enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};

static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_extbl,
  CODE_FOR_extwl,
  CODE_FOR_extll,
  CODE_FOR_extql,
  CODE_FOR_extwh,
  CODE_FOR_extlh,
  CODE_FOR_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_insql,
  CODE_FOR_inswh,
  CODE_FOR_inslh,
  CODE_FOR_insqh,
  CODE_FOR_mskbl,
  CODE_FOR_mskwl,
  CODE_FOR_mskll,
  CODE_FOR_mskql,
  CODE_FOR_mskwh,
  CODE_FOR_msklh,
  CODE_FOR_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_builtin_establish_vms_condition_handler,
  CODE_FOR_builtin_revert_vms_condition_handler,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};
6d8fd7bb
RH
6423struct alpha_builtin_def
6424{
6425 const char *name;
6426 enum alpha_builtin code;
6427 unsigned int target_mask;
36013987 6428 bool is_const;
6d8fd7bb
RH
6429};
6430
6431static struct alpha_builtin_def const zero_arg_builtins[] = {
36013987
RH
6432 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6433 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6d8fd7bb
RH
6434};
6435
6436static struct alpha_builtin_def const one_arg_builtins[] = {
36013987
RH
6437 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6438 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6439 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6440 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6441 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6442 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6443 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6444 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6d8fd7bb
RH
6445};
6446
6447static struct alpha_builtin_def const two_arg_builtins[] = {
36013987
RH
6448 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6449 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6450 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6451 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6452 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6453 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6454 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6455 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6456 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6457 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6458 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6459 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6460 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6461 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6462 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6463 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6464 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6465 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6466 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6467 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6468 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6469 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6470 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6471 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6472 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6473 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6474 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6475 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6476 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6477 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6478 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6479 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6480 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6481 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6d8fd7bb
RH
6482};
6483
static GTY(()) tree alpha_dimode_u;
static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;

static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];

/* Return the alpha builtin for CODE.  */

static tree
alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ALPHA_BUILTIN_max)
    return error_mark_node;
  return alpha_builtins[code];
}

/* Helper function of alpha_init_builtins.  Add the built-in specified
   by NAME, FTYPE, CODE, and ECF.  */

static void
alpha_builtin_function (const char *name, tree ftype,
                        enum alpha_builtin code, unsigned ecf)
{
  tree decl = add_builtin_function (name, ftype, (int) code,
                                    BUILT_IN_MD, NULL, NULL_TREE);

  if (ecf & ECF_CONST)
    TREE_READONLY (decl) = 1;
  if (ecf & ECF_NOTHROW)
    TREE_NOTHROW (decl) = 1;

  alpha_builtins[(int) code] = decl;
}

/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

static void
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
                    tree ftype)
{
  size_t i;

  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      alpha_builtin_function (p->name, ftype, p->code,
                              (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
}

static void
alpha_init_builtins (void)
{
  tree ftype;

  alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);

  ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
                                    alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);

  if (TARGET_ABI_OPEN_VMS)
    {
      ftype = build_function_type_list (ptr_type_node, ptr_type_node,
                                        NULL_TREE);
      alpha_builtin_function ("__builtin_establish_vms_condition_handler",
                              ftype,
                              ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
                              0);

      ftype = build_function_type_list (ptr_type_node, void_type_node,
                                        NULL_TREE);
      alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
                              ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);

      vms_patch_builtins ();
    }
}

/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg;
  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity >= MAX_ARGS)
        return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      arity++;
    }

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}

/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
        {
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
          if (c0 >= c1)
            val |= 1 << i;
        }
      return build_int_cst (alpha_dimode_u, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (alpha_dimode_u, 0xff);
  return NULL;
}

/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
                           long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
        if ((opint[1] >> i) & 1)
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
        return build_int_cst (alpha_dimode_u, opint[0] & mask);

      if (op)
        return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
                            build_int_cst (alpha_dimode_u, mask));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);
  return NULL;
}

/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      loc *= BITS_PER_UNIT;

      if (loc != 0)
        {
          if (op_const & 1)
            {
              unsigned HOST_WIDE_INT temp = opint[0];
              if (is_high)
                temp <<= loc;
              else
                temp >>= loc;
              opint[0] = temp;
              zap_const = 3;
            }
        }
      else
        zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}

/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
        {
          byteloc = (64 - (loc * 8)) & 0x3f;
          if (byteloc == 0)
            zap_op = op;
          else
            temp >>= byteloc;
          bytemask >>= 8;
        }
      else
        {
          byteloc = loc * 8;
          if (byteloc == 0)
            zap_op = op;
          else
            temp <<= byteloc;
        }

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}

/* Fold the builtins for the MSK family of instructions.  */

static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      bytemask <<= loc;

      if (is_high)
        bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}

/* Fold one of the vector min/max builtins by applying CODE elementwise
   in the vector type VTYPE.  */

static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
}

/* Fold the PERR builtin: the sum of absolute differences of the
   corresponding bytes of the two operands.  */

static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
        temp += a - b;
      else
        temp += b - a;
    }

  return build_int_cst (alpha_dimode_u, temp);
}
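
/* Worked example (illustrative): for opint[0] == 0x0005 and
   opint[1] == 0x0102 the fold above yields |0x05 - 0x02| + |0x00 - 0x01|
   == 4, all other byte lanes being equal.  */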
6852
6853static tree
6854alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6855{
6856 unsigned HOST_WIDE_INT temp;
6857
6858 if (op_const == 0)
6859 return NULL;
6860
6861 temp = opint[0] & 0xff;
6862 temp |= (opint[0] >> 24) & 0xff00;
6863
64a5dc56 6864 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6865}
6866
6867static tree
6868alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6869{
6870 unsigned HOST_WIDE_INT temp;
6871
6872 if (op_const == 0)
6873 return NULL;
6874
6875 temp = opint[0] & 0xff;
6876 temp |= (opint[0] >> 8) & 0xff00;
6877 temp |= (opint[0] >> 16) & 0xff0000;
6878 temp |= (opint[0] >> 24) & 0xff000000;
6879
64a5dc56 6880 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6881}
6882
6883static tree
6884alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6885{
6886 unsigned HOST_WIDE_INT temp;
6887
6888 if (op_const == 0)
6889 return NULL;
6890
6891 temp = opint[0] & 0xff;
6892 temp |= (opint[0] & 0xff00) << 24;
6893
64a5dc56 6894 return build_int_cst (alpha_dimode_u, temp);
36013987
RH
6895}
6896
6897static tree
6898alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6899{
6900 unsigned HOST_WIDE_INT temp;
6901
6902 if (op_const == 0)
6903 return NULL;
6904
6905 temp = opint[0] & 0xff;
6906 temp |= (opint[0] & 0x0000ff00) << 8;
6907 temp |= (opint[0] & 0x00ff0000) << 16;
6908 temp |= (opint[0] & 0xff000000) << 24;
6909
64a5dc56 6910 return build_int_cst (alpha_dimode_u, temp);
6911}
6912
6913static tree
6914alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6915{
6916 unsigned HOST_WIDE_INT temp;
6917
6918 if (op_const == 0)
6919 return NULL;
6920
6921 if (opint[0] == 0)
6922 temp = 64;
6923 else
6924 temp = exact_log2 (opint[0] & -opint[0]);
6925
64a5dc56 6926 return build_int_cst (alpha_dimode_u, temp);
6927}
6928
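/* Worked sketch of the bit trick used above: X & -X isolates the
   lowest set bit, and the base-2 log of that bit is the trailing
   zero count.  E.g. 0x28 & -0x28 == 0x08 and log2 (0x08) == 3, so
   cttz (0x28) == 3.  Alpha defines CTTZ of zero as 64.  */
static int
cttz_model (unsigned long x)
{
  int n = 0;
  if (x == 0)
    return 64;
  x &= -x;			/* keep only the lowest set bit */
  while ((x & 1) == 0)
    n++, x >>= 1;
  return n;
}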
6929static tree
6930alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6931{
6932 unsigned HOST_WIDE_INT temp;
6933
6934 if (op_const == 0)
6935 return NULL;
6936
6937 if (opint[0] == 0)
6938 temp = 64;
6939 else
6940 temp = 64 - floor_log2 (opint[0]) - 1;
6941
64a5dc56 6942 return build_int_cst (alpha_dimode_u, temp);
6943}
6944
6945static tree
6946alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6947{
6948 unsigned HOST_WIDE_INT temp, op;
6949
6950 if (op_const == 0)
6951 return NULL;
6952
6953 op = opint[0];
6954 temp = 0;
6955 while (op)
6956 temp++, op &= op - 1;
6957
64a5dc56 6958 return build_int_cst (alpha_dimode_u, temp);
6959}
6960
6961/* Fold one of our builtin functions. */
6962
6963static tree
6964alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6965 bool ignore ATTRIBUTE_UNUSED)
36013987 6966{
36013987 6967 unsigned HOST_WIDE_INT opint[MAX_ARGS];
58a11859 6968 long op_const = 0;
f311c3b4 6969 int i;
36013987 6970
64a5dc56 6971 if (n_args > MAX_ARGS)
6972 return NULL;
6973
6974 for (i = 0; i < n_args; i++)
36013987 6975 {
f311c3b4 6976 tree arg = op[i];
6977 if (arg == error_mark_node)
6978 return NULL;
36013987 6979
f311c3b4 6980 opint[i] = 0;
6981 if (TREE_CODE (arg) == INTEGER_CST)
6982 {
6983 op_const |= 1L << i;
6984 opint[i] = int_cst_value (arg);
6985 }
6986 }
6987
6988 switch (DECL_FUNCTION_CODE (fndecl))
6989 {
6990 case ALPHA_BUILTIN_CMPBGE:
6991 return alpha_fold_builtin_cmpbge (opint, op_const);
6992
6993 case ALPHA_BUILTIN_EXTBL:
6994 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6995 case ALPHA_BUILTIN_EXTWL:
6996 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6997 case ALPHA_BUILTIN_EXTLL:
6998 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6999 case ALPHA_BUILTIN_EXTQL:
7000 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7001 case ALPHA_BUILTIN_EXTWH:
7002 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7003 case ALPHA_BUILTIN_EXTLH:
7004 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7005 case ALPHA_BUILTIN_EXTQH:
7006 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7007
7008 case ALPHA_BUILTIN_INSBL:
7009 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7010 case ALPHA_BUILTIN_INSWL:
7011 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7012 case ALPHA_BUILTIN_INSLL:
7013 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7014 case ALPHA_BUILTIN_INSQL:
7015 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7016 case ALPHA_BUILTIN_INSWH:
7017 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7018 case ALPHA_BUILTIN_INSLH:
7019 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7020 case ALPHA_BUILTIN_INSQH:
7021 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7022
7023 case ALPHA_BUILTIN_MSKBL:
7024 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7025 case ALPHA_BUILTIN_MSKWL:
7026 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7027 case ALPHA_BUILTIN_MSKLL:
7028 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7029 case ALPHA_BUILTIN_MSKQL:
7030 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7031 case ALPHA_BUILTIN_MSKWH:
7032 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7033 case ALPHA_BUILTIN_MSKLH:
7034 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7035 case ALPHA_BUILTIN_MSKQH:
7036 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7037
7038 case ALPHA_BUILTIN_ZAP:
7039 opint[1] ^= 0xff;
7040 /* FALLTHRU */
7041 case ALPHA_BUILTIN_ZAPNOT:
7042 return alpha_fold_builtin_zapnot (op, opint, op_const);
7043
7044 case ALPHA_BUILTIN_MINUB8:
7045 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7046 case ALPHA_BUILTIN_MINSB8:
7047 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7048 case ALPHA_BUILTIN_MINUW4:
7049 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7050 case ALPHA_BUILTIN_MINSW4:
7051 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7052 case ALPHA_BUILTIN_MAXUB8:
7053 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7054 case ALPHA_BUILTIN_MAXSB8:
7055 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7056 case ALPHA_BUILTIN_MAXUW4:
7057 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7058 case ALPHA_BUILTIN_MAXSW4:
7059 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7060
7061 case ALPHA_BUILTIN_PERR:
7062 return alpha_fold_builtin_perr (opint, op_const);
7063 case ALPHA_BUILTIN_PKLB:
7064 return alpha_fold_builtin_pklb (opint, op_const);
7065 case ALPHA_BUILTIN_PKWB:
7066 return alpha_fold_builtin_pkwb (opint, op_const);
7067 case ALPHA_BUILTIN_UNPKBL:
7068 return alpha_fold_builtin_unpkbl (opint, op_const);
7069 case ALPHA_BUILTIN_UNPKBW:
7070 return alpha_fold_builtin_unpkbw (opint, op_const);
7071
7072 case ALPHA_BUILTIN_CTTZ:
7073 return alpha_fold_builtin_cttz (opint, op_const);
7074 case ALPHA_BUILTIN_CTLZ:
7075 return alpha_fold_builtin_ctlz (opint, op_const);
7076 case ALPHA_BUILTIN_CTPOP:
7077 return alpha_fold_builtin_ctpop (opint, op_const);
7078
7079 case ALPHA_BUILTIN_AMASK:
7080 case ALPHA_BUILTIN_IMPLVER:
7081 case ALPHA_BUILTIN_RPCC:
7082 /* None of these are foldable at compile-time. */
7083 default:
7084 return NULL;
7085 }
7086}
7087
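/* Illustrative model (names ours) of the OP_CONST protocol used
   above: bit I of OP_CONST is set when argument I is a compile-time
   constant.  OP_CONST == 3 means both operands of a binary builtin
   are known and the whole call folds; OP_CONST == 2 still enables
   the shift-count-only folds seen in the ext/ins/msk helpers.  */
static long
op_const_model (int n_args, const int arg_is_const[])
{
  long op_const = 0;
  int i;

  for (i = 0; i < n_args; i++)
    if (arg_is_const[i])
      op_const |= 1L << i;
  return op_const;
}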
7088bool
7089alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7090{
7091 bool changed = false;
7092 gimple stmt = gsi_stmt (*gsi);
7093 tree call = gimple_call_fn (stmt);
7094 gimple new_stmt = NULL;
7095
7096 if (call)
7097 {
7098 tree fndecl = gimple_call_fndecl (stmt);
7099
7100 if (fndecl)
7101 {
7102 tree arg0, arg1;
7103
7104 switch (DECL_FUNCTION_CODE (fndecl))
7105 {
7106 case ALPHA_BUILTIN_UMULH:
7107 arg0 = gimple_call_arg (stmt, 0);
7108 arg1 = gimple_call_arg (stmt, 1);
7109
7110 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
7111 MULT_HIGHPART_EXPR, arg0, arg1);
7112 break;
7113 default:
7114 break;
7115 }
7116 }
7117 }
7118
7119 if (new_stmt)
7120 {
7121 gsi_replace (gsi, new_stmt, true);
7122 changed = true;
7123 }
7124
7125 return changed;
7126}
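/* Reference model (illustrative; assumes a compiler that provides
   unsigned __int128) of the UMULH semantics folded above to
   MULT_HIGHPART_EXPR: the high 64 bits of the full 128-bit unsigned
   product.  */
static unsigned long
umulh_model (unsigned long a, unsigned long b)
{
  return (unsigned long) (((unsigned __int128) a * b) >> 64);
}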
6d8fd7bb 7127\f
7128/* This page contains routines that are used to determine what the function
7129 prologue and epilogue code will do and write them out. */
7130
7131/* Compute the size of the save area in the stack. */
7132
7133/* These variables are used for communication between the following functions.
7134 They indicate various things about the current function being compiled
7135 that are used to tell what kind of prologue, epilogue and procedure
839a4992 7136 descriptor to generate. */
7137
7138/* The kind of procedure (null, register, or stack) that we need. */
7139enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7140static enum alpha_procedure_types alpha_procedure_type;
7141
7142/* Register number (either FP or SP) that is used to unwind the frame. */
9c0e94a5 7143static int vms_unwind_regno;
7144
7145/* Register number used to save FP. We need not have one for RA since
7146 we don't modify it for register procedures. This is only defined
7147 for register frame procedures. */
9c0e94a5 7148static int vms_save_fp_regno;
7149
7150/* Register number used to reference objects off our PV. */
9c0e94a5 7151static int vms_base_regno;
89cfc2c6 7152
acd92049 7153/* Compute register masks for saved registers. */
7154
7155static void
a5c24926 7156alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7157{
7158 unsigned long imask = 0;
7159 unsigned long fmask = 0;
1eb356b9 7160 unsigned int i;
89cfc2c6 7161
7162 /* When outputting a thunk, we don't have valid register life info,
7163 but assemble_start_function wants to output .frame and .mask
7164 directives. */
3c072c6b 7165 if (cfun->is_thunk)
acd92049 7166 {
7167 *imaskP = 0;
7168 *fmaskP = 0;
7169 return;
7170 }
89cfc2c6 7171
c2ea1ac6 7172 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
409f52d3 7173 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
89cfc2c6 7174
7175 /* One for every register we have to save. */
7176 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7177 if (! fixed_regs[i] && ! call_used_regs[i]
75db85d8 7178 && df_regs_ever_live_p (i) && i != REG_RA)
7179 {
7180 if (i < 32)
409f52d3 7181 imask |= (1UL << i);
14691f8d 7182 else
409f52d3 7183 fmask |= (1UL << (i - 32));
7184 }
7185
7186 /* We need to restore these for the handler. */
e3b5732b 7187 if (crtl->calls_eh_return)
7188 {
7189 for (i = 0; ; ++i)
7190 {
7191 unsigned regno = EH_RETURN_DATA_REGNO (i);
7192 if (regno == INVALID_REGNUM)
7193 break;
7194 imask |= 1UL << regno;
7195 }
ed80cd68 7196 }
f676971a 7197
7198 /* If any register spilled, then spill the return address also. */
7199 /* ??? This is required by the Digital stack unwind specification
7200 and isn't needed if we're doing Dwarf2 unwinding. */
7201 if (imask || fmask || alpha_ra_ever_killed ())
409f52d3 7202 imask |= (1UL << REG_RA);
9c0e94a5 7203
7204 *imaskP = imask;
7205 *fmaskP = fmask;
7206}
7207
7208int
a5c24926 7209alpha_sa_size (void)
89cfc2c6 7210{
61334ebe 7211 unsigned long mask[2];
89cfc2c6 7212 int sa_size = 0;
61334ebe 7213 int i, j;
89cfc2c6 7214
7215 alpha_sa_mask (&mask[0], &mask[1]);
7216
7217 for (j = 0; j < 2; ++j)
7218 for (i = 0; i < 32; ++i)
7219 if ((mask[j] >> i) & 1)
7220 sa_size++;
30102605 7221
75db85d8 7222 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 7223 {
7224 /* Start with a stack procedure if we make any calls (REG_RA used), or
7225 need a frame pointer, with a register procedure if we otherwise need
7226 at least a slot, and with a null procedure in other cases. */
7227 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7228 alpha_procedure_type = PT_STACK;
7229 else if (get_frame_size() != 0)
7230 alpha_procedure_type = PT_REGISTER;
7231 else
7232 alpha_procedure_type = PT_NULL;
61334ebe 7233
cb9a8e97 7234 /* Don't reserve space for saving FP & RA yet. Do that later after we've
61334ebe 7235 made the final decision on stack procedure vs register procedure. */
c2ea1ac6 7236 if (alpha_procedure_type == PT_STACK)
cb9a8e97 7237 sa_size -= 2;
7238
7239 /* Decide whether to refer to objects off our PV via FP or PV.
7240 If we need FP for something else or if we receive a nonlocal
7241 goto (which expects PV to contain the value), we must use PV.
7242 Otherwise, start by assuming we can use FP. */
7243
7244 vms_base_regno
7245 = (frame_pointer_needed
e3b5732b 7246 || cfun->has_nonlocal_label
c2ea1ac6 7247 || alpha_procedure_type == PT_STACK
38173d38 7248 || crtl->outgoing_args_size)
c2ea1ac6 7249 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7250
7251 /* If we want to copy PV into FP, we need to find some register
7252 in which to save FP. */
7253
7254 vms_save_fp_regno = -1;
7255 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7256 for (i = 0; i < 32; i++)
6fb5fa3c 7257 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7258 vms_save_fp_regno = i;
7259
7260 /* A VMS condition handler requires a stack procedure in our
7261 implementation (this is not required by the calling standard). */
7262 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7263 || cfun->machine->uses_condition_handler)
7264 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7265 else if (alpha_procedure_type == PT_NULL)
7266 vms_base_regno = REG_PV;
7267
7268 /* Stack unwinding should be done via FP unless we use it for PV. */
7269 vms_unwind_regno = (vms_base_regno == REG_PV
7270 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7271
7272 /* If this is a stack procedure, allow space for saving FP, RA and
7273 a condition handler slot if needed. */
c2ea1ac6 7274 if (alpha_procedure_type == PT_STACK)
221cf9ab 7275 sa_size += 2 + cfun->machine->uses_condition_handler;
7276 }
7277 else
7278 {
7279 /* Our size must be even (multiple of 16 bytes). */
7280 if (sa_size & 1)
7281 sa_size++;
7282 }
7283
7284 return sa_size * 8;
7285}
7286
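/* Back-of-the-envelope model (ours) of the OSF sizing above: the
   save area is 8 bytes per mask bit, padded to an even slot count so
   the frame stays a multiple of 16 bytes.  VMS instead adjusts for
   the FP/RA/condition-handler slots as shown in the function.  */
static int
sa_size_model_osf (unsigned long imask, unsigned long fmask)
{
  int slots = 0;
  unsigned long m;

  for (m = imask; m != 0; m &= m - 1)
    slots++;
  for (m = fmask; m != 0; m &= m - 1)
    slots++;
  if (slots & 1)
    slots++;
  return slots * 8;
}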
7287/* Define the offset between two registers, one to be eliminated,
7288 and the other its replacement, at the start of a routine. */
7289
7290HOST_WIDE_INT
7291alpha_initial_elimination_offset (unsigned int from,
7292 unsigned int to ATTRIBUTE_UNUSED)
7293{
7294 HOST_WIDE_INT ret;
7295
7296 ret = alpha_sa_size ();
38173d38 7297 ret += ALPHA_ROUND (crtl->outgoing_args_size);
35d9c403 7298
7299 switch (from)
7300 {
7301 case FRAME_POINTER_REGNUM:
7302 break;
7303
7304 case ARG_POINTER_REGNUM:
7305 ret += (ALPHA_ROUND (get_frame_size ()
7306 + crtl->args.pretend_args_size)
7307 - crtl->args.pretend_args_size);
7308 break;
7309
7310 default:
7311 gcc_unreachable ();
7312 }
7313
7314 return ret;
7315}
7316
7317#if TARGET_ABI_OPEN_VMS
7318
7319/* Worker function for TARGET_CAN_ELIMINATE. */
7320
7321static bool
7322alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
89cfc2c6 7323{
1d3499d8 7324 /* We need the alpha_procedure_type to decide. Evaluate it now. */
89cfc2c6 7325 alpha_sa_size ();
7326
7327 switch (alpha_procedure_type)
7328 {
7329 case PT_NULL:
7330 /* NULL procedures have no frame of their own and we only
7331 know how to resolve from the current stack pointer. */
7332 return to == STACK_POINTER_REGNUM;
7333
7334 case PT_REGISTER:
7335 case PT_STACK:
7336 /* We always eliminate except to the stack pointer if there is no
7337 usable frame pointer at hand. */
7338 return (to != STACK_POINTER_REGNUM
7339 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7340 }
7341
7342 gcc_unreachable ();
7343}
7344
7345/* FROM is to be eliminated for TO. Return the offset so that TO+offset
7346 designates the same location as FROM. */
7347
7348HOST_WIDE_INT
7349alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7350{
7351 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7352 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7353 on the proper computations and will need the register save area size
7354 in most cases. */
7355
7356 HOST_WIDE_INT sa_size = alpha_sa_size ();
7357
7358 /* PT_NULL procedures have no frame of their own and we only allow
7359 elimination to the stack pointer. This is the argument pointer and we
7360 resolve the soft frame pointer to that as well. */
7361
7362 if (alpha_procedure_type == PT_NULL)
7363 return 0;
7364
7365 /* For a PT_STACK procedure the frame layout looks as follows
7366
7367 -----> decreasing addresses
7368
7369 < size rounded up to 16 | likewise >
7370 --------------#------------------------------+++--------------+++-------#
7371 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7372 --------------#---------------------------------------------------------#
7373 ^ ^ ^ ^
7374 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7375
7376
7377 PT_REGISTER procedures are similar in that they may have a frame of their
7378 own. They have no regs-sa/pv/outgoing-args area.
7379
7380 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7381 to STACK_PTR if need be. */
7382
7383 {
7384 HOST_WIDE_INT offset;
7385 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7386
7387 switch (from)
7388 {
7389 case FRAME_POINTER_REGNUM:
7390 offset = ALPHA_ROUND (sa_size + pv_save_size);
7391 break;
7392 case ARG_POINTER_REGNUM:
7393 offset = (ALPHA_ROUND (sa_size + pv_save_size
7394 + get_frame_size ()
7395 + crtl->args.pretend_args_size)
7396 - crtl->args.pretend_args_size);
7397 break;
7398 default:
7399 gcc_unreachable ();
7400 }
7401
7402 if (to == STACK_POINTER_REGNUM)
7403 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7404
7405 return offset;
7406 }
7407}
7408
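/* Worked model (ours, with rounding to 16 standing in for
   ALPHA_ROUND and pretend args ignored) of the PT_STACK arithmetic
   above.  With sa_size == 24 and the 8-byte PV slot, FRAME == 0
   reproduces the FRAME_POINTER case: offset 32.  A 48-byte frame
   gives the ARG_POINTER case: offset 80.  */
static long
vms_elim_offset_model (long sa_size, long frame, long outgoing, int to_sp)
{
  long pv_save_size = 8;	/* PT_STACK */
  long offset = (sa_size + pv_save_size + frame + 15) & ~15L;

  if (to_sp)
    offset += (outgoing + 15) & ~15L;
  return offset;
}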
7409#define COMMON_OBJECT "common_object"
7410
7411static tree
7412common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7413 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7414 bool *no_add_attrs ATTRIBUTE_UNUSED)
7415{
7416 tree decl = *node;
7417 gcc_assert (DECL_P (decl));
7418
7419 DECL_COMMON (decl) = 1;
7420 return NULL_TREE;
7421}
8289c43b 7422
6bc7bc14 7423static const struct attribute_spec vms_attribute_table[] =
a6f12d7c 7424{
7425 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7426 affects_type_identity } */
7427 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7428 { NULL, 0, 0, false, false, false, NULL, false }
91d231cb 7429};
a6f12d7c 7430
7431void
7432vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7433 unsigned HOST_WIDE_INT size,
7434 unsigned int align)
7435{
7436 tree attr = DECL_ATTRIBUTES (decl);
7437 fprintf (file, "%s", COMMON_ASM_OP);
7438 assemble_name (file, name);
7439 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7440 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7441 fprintf (file, ",%u", align / BITS_PER_UNIT);
7442 if (attr)
7443 {
7444 attr = lookup_attribute (COMMON_OBJECT, attr);
7445 if (attr)
7446 fprintf (file, ",%s",
7447 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7448 }
7449 fputc ('\n', file);
7450}
7451
7452#undef COMMON_OBJECT
7453
7454#endif
7455
4dba3553 7456bool
a5c24926 7457alpha_find_lo_sum_using_gp (rtx insn)
77480b0b 7458{
7459 subrtx_iterator::array_type array;
7460 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7461 {
7462 const_rtx x = *iter;
7463 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7464 return true;
7465 }
7466 return false;
7467}
7468
9c0e94a5 7469static int
a5c24926 7470alpha_does_function_need_gp (void)
9c0e94a5 7471{
cad003ba 7472 rtx_insn *insn;
a6f12d7c 7473
7474 /* The GP being variable is an OSF ABI thing. */
7475 if (! TARGET_ABI_OSF)
9c0e94a5 7476 return 0;
a6f12d7c 7477
b64de1fe 7478 /* We need the gp to load the address of __mcount. */
e3b5732b 7479 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
9c0e94a5 7480 return 1;
d60a05a1 7481
b64de1fe 7482 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
3c072c6b 7483 if (cfun->is_thunk)
acd92049 7484 return 1;
acd92049 7485
7486 /* The nonlocal receiver pattern assumes that the gp is valid for
7487 the nested function. Reasonable because it's almost always set
7488 correctly already. For the cases where that's wrong, make sure
7489 the nested function loads its gp on entry. */
e3b5732b 7490 if (crtl->has_nonlocal_goto)
7491 return 1;
7492
f676971a 7493 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7494 Even if we are a static function, we still need to do this in case
7495 our address is taken and passed to something like qsort. */
a6f12d7c 7496
7497 push_topmost_sequence ();
7498 insn = get_insns ();
7499 pop_topmost_sequence ();
89cfc2c6 7500
9c0e94a5 7501 for (; insn; insn = NEXT_INSN (insn))
14e58be0 7502 if (NONDEBUG_INSN_P (insn)
9c0e94a5 7503 && GET_CODE (PATTERN (insn)) != USE
7504 && GET_CODE (PATTERN (insn)) != CLOBBER
7505 && get_attr_usegp (insn))
7506 return 1;
a6f12d7c 7507
9c0e94a5 7508 return 0;
7509}
7510
ec6840c1 7511\f
7512/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7513 sequences. */
7514
cad003ba 7515static rtx_insn *
a5c24926 7516set_frame_related_p (void)
6abc6f40 7517{
7518 rtx_insn *seq = get_insns ();
7519 rtx_insn *insn;
2f937369 7520
7521 end_sequence ();
7522
2f937369 7523 if (!seq)
cad003ba 7524 return NULL;
7525
7526 if (INSN_P (seq))
6abc6f40 7527 {
7528 insn = seq;
7529 while (insn != NULL_RTX)
7530 {
7531 RTX_FRAME_RELATED_P (insn) = 1;
7532 insn = NEXT_INSN (insn);
7533 }
7534 seq = emit_insn (seq);
7535 }
7536 else
7537 {
7538 seq = emit_insn (seq);
7539 RTX_FRAME_RELATED_P (seq) = 1;
6abc6f40 7540 }
2f937369 7541 return seq;
7542}
7543
7544#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7545
45f413e4 7546/* Generates a store with the proper unwind info attached. VALUE is
0e40b5f2 7547 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7548 contains SP+FRAME_BIAS, and that is the unwind info that should be
7549 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7550 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7551
7552static void
7553emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7554 HOST_WIDE_INT base_ofs, rtx frame_reg)
7555{
7556 rtx addr, mem;
7557 rtx_insn *insn;
45f413e4 7558
0a81f074 7559 addr = plus_constant (Pmode, base_reg, base_ofs);
7a81008b 7560 mem = gen_frame_mem (DImode, addr);
7561
7562 insn = emit_move_insn (mem, value);
7563 RTX_FRAME_RELATED_P (insn) = 1;
7564
7565 if (frame_bias || value != frame_reg)
7566 {
7567 if (frame_bias)
7568 {
7569 addr = plus_constant (Pmode, stack_pointer_rtx,
7570 frame_bias + base_ofs);
7571 mem = gen_rtx_MEM (DImode, addr);
7572 }
7573
bf758008 7574 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
f7df4a84 7575 gen_rtx_SET (mem, frame_reg));
7576 }
7577}
7578
7579static void
7580emit_frame_store (unsigned int regno, rtx base_reg,
7581 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7582{
7583 rtx reg = gen_rtx_REG (DImode, regno);
7584 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7585}
7586
7587/* Compute the frame size. SIZE is the size of the "naked" frame
7588 and SA_SIZE is the size of the register save area. */
7589
7590static HOST_WIDE_INT
7591compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7592{
7593 if (TARGET_ABI_OPEN_VMS)
7594 return ALPHA_ROUND (sa_size
7595 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7596 + size
7597 + crtl->args.pretend_args_size);
7598 else
7599 return ALPHA_ROUND (crtl->outgoing_args_size)
7600 + sa_size
7601 + ALPHA_ROUND (size
7602 + crtl->args.pretend_args_size);
7603}
7604
7605/* Write function prologue. */
7606
7607/* On vms we have two kinds of functions:
7608
7609 - stack frame (PROC_STACK)
7610 these are 'normal' functions with local vars, and which
7611 call other functions
7612 - register frame (PROC_REGISTER)
7613 keeps all data in registers, needs no stack
7614
7615 We must pass this to the assembler so it can generate the
7616 proper pdsc (procedure descriptor)
7617 This is done with the '.pdesc' command.
7618
7619 On non-VMS targets, we don't really differentiate between the two, as we can
7620 simply allocate stack without saving registers. */
7621
7622void
a5c24926 7623alpha_expand_prologue (void)
89cfc2c6 7624{
9c0e94a5 7625 /* Registers to save. */
7626 unsigned long imask = 0;
7627 unsigned long fmask = 0;
7628 /* Stack space needed for pushing registers clobbered by us. */
75db85d8 7629 HOST_WIDE_INT sa_size, sa_bias;
7630 /* Complete stack size needed. */
7631 HOST_WIDE_INT frame_size;
7632 /* Probed stack size; it additionally includes the size of
7633 the "reserve region" if any. */
7634 HOST_WIDE_INT probed_size;
89cfc2c6 7635 /* Offset from base reg to register save area. */
9c0e94a5 7636 HOST_WIDE_INT reg_offset;
45f413e4 7637 rtx sa_reg;
7638 int i;
7639
7640 sa_size = alpha_sa_size ();
d3c12306 7641 frame_size = compute_frame_size (get_frame_size (), sa_size);
89cfc2c6 7642
a11e0df4 7643 if (flag_stack_usage_info)
d3c12306 7644 current_function_static_stack_size = frame_size;
89cfc2c6 7645
be7b80f4 7646 if (TARGET_ABI_OPEN_VMS)
221cf9ab 7647 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
9c0e94a5 7648 else
38173d38 7649 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
89cfc2c6 7650
9c0e94a5 7651 alpha_sa_mask (&imask, &fmask);
89cfc2c6 7652
941cc05a 7653 /* Emit an insn to reload GP, if needed. */
be7b80f4 7654 if (TARGET_ABI_OSF)
7655 {
7656 alpha_function_needs_gp = alpha_does_function_need_gp ();
7657 if (alpha_function_needs_gp)
7658 emit_insn (gen_prologue_ldgp ());
7659 }
7660
7661 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7662 the call to mcount ourselves, rather than having the linker do it
7663 magically in response to -pg. Since _mcount has special linkage,
7664 don't represent the call as a call. */
e3b5732b 7665 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
4f1c5cce 7666 emit_insn (gen_prologue_mcount ());
30102605 7667
7668 /* Adjust the stack by the frame size. If the frame size is > 4096
7669 bytes, we need to be sure we probe somewhere in the first and last
7670 4096 bytes (we can probably get away without the latter test) and
7671 every 8192 bytes in between. If the frame size is > 32768, we
7672 do this in a loop. Otherwise, we generate the explicit probe
f676971a 7673 instructions.
7674
7675 Note that we are only allowed to adjust sp once in the prologue. */
7676
7677 probed_size = frame_size;
7678 if (flag_stack_check)
7679 probed_size += STACK_CHECK_PROTECT;
7680
7681 if (probed_size <= 32768)
89cfc2c6 7682 {
10937190 7683 if (probed_size > 4096)
89cfc2c6 7684 {
11eef578 7685 int probed;
89cfc2c6 7686
10937190 7687 for (probed = 4096; probed < probed_size; probed += 8192)
75db85d8 7688 emit_insn (gen_probe_stack (GEN_INT (-probed)));
89cfc2c6 7689
7690 /* We only have to do this probe if we aren't saving registers or
7691 if we are probing beyond the frame because of -fstack-check. */
7692 if ((sa_size == 0 && probed_size > probed - 4096)
7693 || flag_stack_check)
7694 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7695 }
7696
7697 if (frame_size != 0)
8207e7c6 7698 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
75db85d8 7699 GEN_INT (-frame_size))));
7700 }
7701 else
7702 {
9c0e94a5 7703 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7704 number of 8192 byte blocks to probe. We then probe each block
7705 in the loop and then set SP to the proper location. If the
7706 amount remaining is > 4096, we have to do one more probe if we
7707 are not saving any registers or if we are probing beyond the
7708 frame because of -fstack-check. */
89cfc2c6 7709
7710 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7711 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7712 rtx ptr = gen_rtx_REG (DImode, 22);
7713 rtx count = gen_rtx_REG (DImode, 23);
37679e06 7714 rtx seq;
89cfc2c6 7715
9c0e94a5 7716 emit_move_insn (count, GEN_INT (blocks));
75db85d8 7717 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
89cfc2c6 7718
7719 /* Because of the difficulty in emitting a new basic block this
7720 late in the compilation, generate the loop as a single insn. */
7721 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
89cfc2c6 7722
10937190 7723 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
9c0e94a5 7724 {
7725 rtx last = gen_rtx_MEM (DImode,
7726 plus_constant (Pmode, ptr, -leftover));
7727 MEM_VOLATILE_P (last) = 1;
7728 emit_move_insn (last, const0_rtx);
7729 }
89cfc2c6 7730
800d1de1 7731 if (flag_stack_check)
f9d7e5cd 7732 {
7733 /* If -fstack-check is specified we have to load the entire
7734 constant into a register and subtract from the sp in one go,
7735 because the probed stack size is not equal to the frame size. */
f9d7e5cd 7736 HOST_WIDE_INT lo, hi;
7737 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7738 hi = frame_size - lo;
6abc6f40 7739
37679e06 7740 emit_move_insn (ptr, GEN_INT (hi));
5c9948f4 7741 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7742 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7743 ptr));
7744 }
7745 else
7746 {
7747 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7748 GEN_INT (-leftover)));
f9d7e5cd 7749 }
7750
7751 /* This alternative is special, because the DWARF code cannot
7752 possibly intuit through the loop above. So we invent this
7753 note, which it looks at instead. */
7754 RTX_FRAME_RELATED_P (seq) = 1;
bf758008 7755 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
f7df4a84 7756 gen_rtx_SET (stack_pointer_rtx,
0a81f074 7757 plus_constant (Pmode, stack_pointer_rtx,
75db85d8 7758 -frame_size)));
7759 }
7760
7761 /* Cope with very large offsets to the register save area. */
7762 sa_bias = 0;
7763 sa_reg = stack_pointer_rtx;
7764 if (reg_offset + sa_size > 0x8000)
89cfc2c6 7765 {
7766 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7767 rtx sa_bias_rtx;
89cfc2c6 7768
7769 if (low + sa_size <= 0x8000)
7770 sa_bias = reg_offset - low, reg_offset = low;
7771 else
7772 sa_bias = reg_offset, reg_offset = 0;
f676971a 7773
7774 sa_reg = gen_rtx_REG (DImode, 24);
7775 sa_bias_rtx = GEN_INT (sa_bias);
89cfc2c6 7776
7777 if (add_operand (sa_bias_rtx, DImode))
7778 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7779 else
30102605 7780 {
7781 emit_move_insn (sa_reg, sa_bias_rtx);
7782 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
30102605 7783 }
75db85d8 7784 }
89cfc2c6 7785
7786 /* Save regs in stack order. Beginning with VMS PV. */
7787 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7788 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
89cfc2c6 7789
7790 /* Save register RA next. */
7791 if (imask & (1UL << REG_RA))
30102605 7792 {
7793 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7794 imask &= ~(1UL << REG_RA);
7795 reg_offset += 8;
30102605 7796 }
89cfc2c6 7797
7798 /* Now save any other registers required to be saved. */
7799 for (i = 0; i < 31; i++)
7800 if (imask & (1UL << i))
7801 {
7802 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7803 reg_offset += 8;
7804 }
7805
7806 for (i = 0; i < 31; i++)
7807 if (fmask & (1UL << i))
7808 {
7809 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7810 reg_offset += 8;
7811 }
7812
be7b80f4 7813 if (TARGET_ABI_OPEN_VMS)
89cfc2c6 7814 {
15cb981a 7815 /* Register frame procedures save the fp. */
c2ea1ac6 7816 if (alpha_procedure_type == PT_REGISTER)
15cb981a 7817 {
7818 rtx_insn *insn =
7819 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7820 hard_frame_pointer_rtx);
7821 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7822 RTX_FRAME_RELATED_P (insn) = 1;
7823 }
89cfc2c6 7824
c2ea1ac6 7825 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7826 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7827 gen_rtx_REG (DImode, REG_PV)));
89cfc2c6 7828
7829 if (alpha_procedure_type != PT_NULL
7830 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8207e7c6 7831 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
89cfc2c6 7832
9c0e94a5 7833 /* If we have to allocate space for outgoing args, do it now. */
38173d38 7834 if (crtl->outgoing_args_size != 0)
c1238896 7835 {
cad003ba 7836 rtx_insn *seq
f676971a 7837 = emit_move_insn (stack_pointer_rtx,
c1238896 7838 plus_constant
0a81f074 7839 (Pmode, hard_frame_pointer_rtx,
c1238896 7840 - (ALPHA_ROUND
38173d38 7841 (crtl->outgoing_args_size))));
f676971a 7842
7843 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7844 if ! frame_pointer_needed. Setting the bit will change the CFA
7845 computation rule to use sp again, which would be wrong if we had
7846 frame_pointer_needed, as this means sp might move unpredictably
7847 later on.
7848
7849 Also, note that
7850 frame_pointer_needed
7851 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7852 and
38173d38 7853 crtl->outgoing_args_size != 0
7854 => alpha_procedure_type != PT_NULL,
7855
7856 so when we are not setting the bit here, we are guaranteed to
093354e0 7857 have emitted an FRP frame pointer update just before. */
7858 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7859 }
9c0e94a5 7860 }
75db85d8 7861 else
7862 {
7863 /* If we need a frame pointer, set it from the stack pointer. */
7864 if (frame_pointer_needed)
7865 {
7866 if (TARGET_CAN_FAULT_IN_PROLOGUE)
6abc6f40 7867 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
89cfc2c6 7868 else
7869 /* This must always be the last instruction in the
7870 prologue, thus we emit a special move + clobber. */
7871 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7872 stack_pointer_rtx, sa_reg)));
89cfc2c6 7873 }
7874 }
7875
7876 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7877 the prologue, for exception handling reasons, we cannot do this for
7878 any insn that might fault. We could prevent this for mems with a
7879 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7880 have to prevent all such scheduling with a blockage.
89cfc2c6 7881
f676971a 7882 Linux, on the other hand, never bothered to implement OSF/1's
7883 exception handling, and so doesn't care about such things. Anyone
7884 planning to use dwarf2 frame-unwind info can also omit the blockage. */
89cfc2c6 7885
7886 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7887 emit_insn (gen_blockage ());
7888}
7889
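/* Sketch (illustrative) of the probe schedule the prologue above
   implements for frames of at most 32768 bytes: touch the guard area
   at 4096, then every 8192 bytes, with a final probe at the full
   probed size when no register save would otherwise touch the last
   page.  OFFSETS must have room for probed_size/8192 + 2 entries.  */
static void
probe_offsets_model (long probed_size, long offsets[], int *n)
{
  long probed;

  *n = 0;
  for (probed = 4096; probed < probed_size; probed += 8192)
    offsets[(*n)++] = -probed;
  offsets[(*n)++] = -probed_size;
}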
3e487b21 7890/* Count the number of .file directives, so that .loc is up to date. */
93a27b7b 7891int num_source_filenames = 0;
3e487b21 7892
acd92049 7893/* Output the textual info surrounding the prologue. */
89cfc2c6 7894
9c0e94a5 7895void
7896alpha_start_function (FILE *file, const char *fnname,
7897 tree decl ATTRIBUTE_UNUSED)
9ecc37f0 7898{
7899 unsigned long imask = 0;
7900 unsigned long fmask = 0;
7901 /* Stack space needed for pushing registers clobbered by us. */
7902 HOST_WIDE_INT sa_size;
7903 /* Complete stack size needed. */
3ee10665 7904 unsigned HOST_WIDE_INT frame_size;
7905 /* The maximum debuggable frame size. */
7906 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
7907 /* Offset from base reg to register save area. */
7908 HOST_WIDE_INT reg_offset;
acd92049 7909 char *entry_label = (char *) alloca (strlen (fnname) + 6);
fe2786f5 7910 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
9c0e94a5 7911 int i;
9ecc37f0 7912
5ea8f977 7913#if TARGET_ABI_OPEN_VMS
4b12e93d 7914 vms_start_function (fnname);
7915#endif
7916
941cc05a 7917 alpha_fnname = fnname;
9c0e94a5 7918 sa_size = alpha_sa_size ();
d3c12306 7919 frame_size = compute_frame_size (get_frame_size (), sa_size);
9ecc37f0 7920
be7b80f4 7921 if (TARGET_ABI_OPEN_VMS)
221cf9ab 7922 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
9c0e94a5 7923 else
38173d38 7924 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
9ecc37f0 7925
9c0e94a5 7926 alpha_sa_mask (&imask, &fmask);
a6f12d7c 7927
9c0e94a5 7928 /* Issue function start and label. */
75db85d8 7929 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
33d01c33 7930 {
9c0e94a5 7931 fputs ("\t.ent ", file);
acd92049 7932 assemble_name (file, fnname);
9c0e94a5 7933 putc ('\n', file);
7934
7935 /* If the function needs GP, we'll write the "..ng" label there.
7936 Otherwise, do it here. */
7937 if (TARGET_ABI_OSF
7938 && ! alpha_function_needs_gp
3c072c6b 7939 && ! cfun->is_thunk)
7940 {
7941 putc ('$', file);
7942 assemble_name (file, fnname);
7943 fputs ("..ng:\n", file);
7944 }
33d01c33 7945 }
7946 /* Nested functions on VMS that are potentially called via trampoline
7947 get a special transfer entry point that loads the called function's
7948 procedure descriptor and static chain. */
7949 if (TARGET_ABI_OPEN_VMS
7950 && !TREE_PUBLIC (decl)
7951 && DECL_CONTEXT (decl)
7952 && !TYPE_P (DECL_CONTEXT (decl))
7953 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
7954 {
7955 strcpy (tramp_label, fnname);
7956 strcat (tramp_label, "..tr");
7957 ASM_OUTPUT_LABEL (file, tramp_label);
7958 fprintf (file, "\tldq $1,24($27)\n");
7959 fprintf (file, "\tldq $27,16($27)\n");
7960 }
48f6bfac 7961
acd92049 7962 strcpy (entry_label, fnname);
be7b80f4 7963 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 7964 strcat (entry_label, "..en");
30102605 7965
7966 ASM_OUTPUT_LABEL (file, entry_label);
7967 inside_function = TRUE;
48f6bfac 7968
be7b80f4 7969 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 7970 fprintf (file, "\t.base $%d\n", vms_base_regno);
a6f12d7c 7971
7972 if (TARGET_ABI_OSF
7973 && TARGET_IEEE_CONFORMANT
9c0e94a5 7974 && !flag_inhibit_size_directive)
9973f4a2 7975 {
7976 /* Set flags in procedure descriptor to request IEEE-conformant
7977 math-library routines. The value we set it to is PDSC_EXC_IEEE
285a5742 7978 (/usr/include/pdsc.h). */
9c0e94a5 7979 fputs ("\t.eflag 48\n", file);
9973f4a2 7980 }
a6f12d7c 7981
9c0e94a5 7982 /* Set up offsets to alpha virtual arg/local debugging pointer. */
38173d38 7983 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
9c0e94a5 7984 alpha_arg_offset = -frame_size + 48;
c97e3db7 7985
7986 /* Describe our frame. If the frame size does not fit in an integer,
7987 print it as zero to avoid an assembler error. We won't be
7988 properly describing such a frame, but that's the best we can do. */
75db85d8 7989 if (TARGET_ABI_OPEN_VMS)
7990 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7991 HOST_WIDE_INT_PRINT_DEC "\n",
7992 vms_unwind_regno,
7993 frame_size >= (1UL << 31) ? 0 : frame_size,
7994 reg_offset);
9c0e94a5 7995 else if (!flag_inhibit_size_directive)
7996 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7997 (frame_pointer_needed
7998 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
b598cb38 7999 frame_size >= max_frame_size ? 0 : frame_size,
38173d38 8000 crtl->args.pretend_args_size);
0d24ff5d 8001
9c0e94a5 8002 /* Describe which registers were spilled. */
75db85d8 8003 if (TARGET_ABI_OPEN_VMS)
0d24ff5d 8004 {
9c0e94a5 8005 if (imask)
30102605 8006 /* ??? Does VMS care if mask contains ra? The old code didn't
9c0e94a5 8007 set it, so I don't here. */
409f52d3 8008 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
9c0e94a5 8009 if (fmask)
3c303f52 8010 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
c2ea1ac6 8011 if (alpha_procedure_type == PT_REGISTER)
8012 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8013 }
8014 else if (!flag_inhibit_size_directive)
8015 {
8016 if (imask)
0d24ff5d 8017 {
4a0a75dd 8018 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
b598cb38 8019 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8020
8021 for (i = 0; i < 32; ++i)
409f52d3 8022 if (imask & (1UL << i))
9c0e94a5 8023 reg_offset += 8;
0d24ff5d 8024 }
8025
8026 if (fmask)
4a0a75dd 8027 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
b598cb38 8028 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8029 }
8030
be7b80f4 8031#if TARGET_ABI_OPEN_VMS
8032 /* If a user condition handler has been installed at some point, emit
8033 the procedure descriptor bits to point the Condition Handling Facility
8034 at the indirection wrapper, and state the fp offset at which the user
8035 handler may be found. */
8036 if (cfun->machine->uses_condition_handler)
8037 {
8038 fprintf (file, "\t.handler __gcc_shell_handler\n");
8039 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8040 }
8041
8042#ifdef TARGET_VMS_CRASH_DEBUG
8043 /* Support of minimal traceback info. */
d6b5193b 8044 switch_to_section (readonly_data_section);
9c0e94a5 8045 fprintf (file, "\t.align 3\n");
acd92049 8046 assemble_name (file, fnname); fputs ("..na:\n", file);
9c0e94a5 8047 fputs ("\t.ascii \"", file);
acd92049 8048 assemble_name (file, fnname);
9c0e94a5 8049 fputs ("\\0\"\n", file);
d6b5193b 8050 switch_to_section (text_section);
9c0e94a5 8051#endif
735f469b 8052#endif /* TARGET_ABI_OPEN_VMS */
9c0e94a5 8053}
a6f12d7c 8054
9c0e94a5 8055/* Emit the .prologue note at the scheduled end of the prologue. */
0f33506c 8056
b4c25db2 8057static void
a5c24926 8058alpha_output_function_end_prologue (FILE *file)
9c0e94a5 8059{
75db85d8 8060 if (TARGET_ABI_OPEN_VMS)
9c0e94a5 8061 fputs ("\t.prologue\n", file);
9c0e94a5 8062 else if (!flag_inhibit_size_directive)
14691f8d 8063 fprintf (file, "\t.prologue %d\n",
3c072c6b 8064 alpha_function_needs_gp || cfun->is_thunk);
8065}
8066
8067/* Write function epilogue. */
8068
8069void
a5c24926 8070alpha_expand_epilogue (void)
a6f12d7c 8071{
8072 /* Registers to save. */
8073 unsigned long imask = 0;
8074 unsigned long fmask = 0;
8075 /* Stack space needed for pushing registers clobbered by us. */
8076 HOST_WIDE_INT sa_size;
8077 /* Complete stack size needed. */
8078 HOST_WIDE_INT frame_size;
8079 /* Offset from base reg to register save area. */
8080 HOST_WIDE_INT reg_offset;
8081 int fp_is_frame_pointer, fp_offset;
8082 rtx sa_reg, sa_reg_exp = NULL;
15cb981a 8083 rtx sp_adj1, sp_adj2, mem, reg, insn;
01439aee 8084 rtx eh_ofs;
15cb981a 8085 rtx cfa_restores = NULL_RTX;
8086 int i;
8087
9c0e94a5 8088 sa_size = alpha_sa_size ();
d3c12306 8089 frame_size = compute_frame_size (get_frame_size (), sa_size);
a6f12d7c 8090
be7b80f4 8091 if (TARGET_ABI_OPEN_VMS)
8092 {
8093 if (alpha_procedure_type == PT_STACK)
221cf9ab 8094 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8095 else
8096 reg_offset = 0;
8097 }
9c0e94a5 8098 else
38173d38 8099 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8100
8101 alpha_sa_mask (&imask, &fmask);
8102
c2ea1ac6 8103 fp_is_frame_pointer
8104 = (TARGET_ABI_OPEN_VMS
8105 ? alpha_procedure_type == PT_STACK
8106 : frame_pointer_needed);
8107 fp_offset = 0;
8108 sa_reg = stack_pointer_rtx;
9c0e94a5 8109
e3b5732b 8110 if (crtl->calls_eh_return)
8111 eh_ofs = EH_RETURN_STACKADJ_RTX;
8112 else
8113 eh_ofs = NULL_RTX;
8114
75db85d8 8115 if (sa_size)
8116 {
8117 /* If we have a frame pointer, restore SP from it. */
8118 if (TARGET_ABI_OPEN_VMS
8119 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8120 : frame_pointer_needed)
15cb981a 8121 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
0d24ff5d 8122
9c0e94a5 8123 /* Cope with very large offsets to the register save area. */
9c0e94a5 8124 if (reg_offset + sa_size > 0x8000)
a6f12d7c 8125 {
8126 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8127 HOST_WIDE_INT bias;
8128
8129 if (low + sa_size <= 0x8000)
8130 bias = reg_offset - low, reg_offset = low;
f676971a 8131 else
8132 bias = reg_offset, reg_offset = 0;
8133
8134 sa_reg = gen_rtx_REG (DImode, 22);
0a81f074 8135 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
9c0e94a5 8136
15cb981a 8137 emit_move_insn (sa_reg, sa_reg_exp);
a6f12d7c 8138 }
f676971a 8139
285a5742 8140 /* Restore registers in order, excepting a true frame pointer. */
a6f12d7c 8141
0a81f074 8142 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8143 reg = gen_rtx_REG (DImode, REG_RA);
8144 emit_move_insn (reg, mem);
8145 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
4573b4de 8146
9c0e94a5 8147 reg_offset += 8;
409f52d3 8148 imask &= ~(1UL << REG_RA);
0f33506c 8149
ed80cd68 8150 for (i = 0; i < 31; ++i)
409f52d3 8151 if (imask & (1UL << i))
a6f12d7c 8152 {
9c0e94a5 8153 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8154 fp_offset = reg_offset;
8155 else
9c0e94a5 8156 {
7a81008b 8157 mem = gen_frame_mem (DImode,
8158 plus_constant (Pmode, sa_reg,
8159 reg_offset));
8160 reg = gen_rtx_REG (DImode, i);
8161 emit_move_insn (reg, mem);
8162 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8163 cfa_restores);
9c0e94a5 8164 }
8165 reg_offset += 8;
8166 }
8167
ed80cd68 8168 for (i = 0; i < 31; ++i)
409f52d3 8169 if (fmask & (1UL << i))
a6f12d7c 8170 {
8171 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8172 reg_offset));
8173 reg = gen_rtx_REG (DFmode, i+32);
8174 emit_move_insn (reg, mem);
8175 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8176 reg_offset += 8;
8177 }
9c0e94a5 8178 }
a6f12d7c 8179
01439aee 8180 if (frame_size || eh_ofs)
9c0e94a5 8181 {
8182 sp_adj1 = stack_pointer_rtx;
8183
01439aee 8184 if (eh_ofs)
8185 {
8186 sp_adj1 = gen_rtx_REG (DImode, 23);
8187 emit_move_insn (sp_adj1,
01439aee 8188 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8189 }
8190
8191 /* If the stack size is large, begin computation into a temporary
8192 register so as not to interfere with a potential fp restore,
8193 which must be consecutive with an SP restore. */
75db85d8 8194 if (frame_size < 32768 && !cfun->calls_alloca)
71038426 8195 sp_adj2 = GEN_INT (frame_size);
8196 else if (frame_size < 0x40007fffL)
8197 {
8198 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8199
0a81f074 8200 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8201 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8202 sp_adj1 = sa_reg;
8203 else
8204 {
8205 sp_adj1 = gen_rtx_REG (DImode, 23);
15cb981a 8206 emit_move_insn (sp_adj1, sp_adj2);
8207 }
8208 sp_adj2 = GEN_INT (low);
8209 }
d60a05a1 8210 else
9c0e94a5 8211 {
71038426 8212 rtx tmp = gen_rtx_REG (DImode, 23);
15cb981a 8213 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
71038426 8214 if (!sp_adj2)
8215 {
8216 /* We can't drop new things to memory this late, afaik,
8217 so build it up by pieces. */
da80c6b8 8218 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size);
56daab84 8219 gcc_assert (sp_adj2);
9c0e94a5 8220 }
9c0e94a5 8221 }
a6f12d7c 8222
8223 /* From now on, things must be in order. So emit blockages. */
8224
8225 /* Restore the frame pointer. */
75db85d8 8226 if (fp_is_frame_pointer)
8227 {
8228 emit_insn (gen_blockage ());
8229 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8230 fp_offset));
8231 emit_move_insn (hard_frame_pointer_rtx, mem);
8232 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8233 hard_frame_pointer_rtx, cfa_restores);
9c0e94a5 8234 }
be7b80f4 8235 else if (TARGET_ABI_OPEN_VMS)
8236 {
8237 emit_insn (gen_blockage ());
8238 emit_move_insn (hard_frame_pointer_rtx,
8239 gen_rtx_REG (DImode, vms_save_fp_regno));
8240 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8241 hard_frame_pointer_rtx, cfa_restores);
8242 }
8243
8244 /* Restore the stack pointer. */
8245 emit_insn (gen_blockage ());
30102605 8246 if (sp_adj2 == const0_rtx)
15cb981a 8247 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
30102605 8248 else
8249 insn = emit_move_insn (stack_pointer_rtx,
8250 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8251 REG_NOTES (insn) = cfa_restores;
8252 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8253 RTX_FRAME_RELATED_P (insn) = 1;
9c0e94a5 8254 }
f676971a 8255 else
9c0e94a5 8256 {
8257 gcc_assert (cfa_restores == NULL);
8258
c2ea1ac6 8259 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8260 {
8261 emit_insn (gen_blockage ());
8262 insn = emit_move_insn (hard_frame_pointer_rtx,
8263 gen_rtx_REG (DImode, vms_save_fp_regno));
8264 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8265 RTX_FRAME_RELATED_P (insn) = 1;
9c0e94a5 8266 }
a6f12d7c 8267 }
9c0e94a5 8268}
1330f7d5 8269\f
8270/* Output the rest of the textual info surrounding the epilogue. */
8271
8272void
a5c24926 8273alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
9c0e94a5 8274{
cad003ba 8275 rtx_insn *insn;
8276
8277 /* We output a nop after noreturn calls at the very end of the function to
8278 ensure that the return address always remains in the caller's code range,
8279 as not doing so might confuse unwinding engines. */
8280 insn = get_last_insn ();
8281 if (!INSN_P (insn))
8282 insn = prev_active_insn (insn);
3eb96d01 8283 if (insn && CALL_P (insn))
8284 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8285
7053a0e2 8286#if TARGET_ABI_OPEN_VMS
8287 /* Write the linkage entries. */
8288 alpha_write_linkage (file, fnname);
8289#endif
8290
a6f12d7c 8291 /* End the function. */
8292 if (TARGET_ABI_OPEN_VMS
8293 || !flag_inhibit_size_directive)
33d01c33 8294 {
9c0e94a5 8295 fputs ("\t.end ", file);
acd92049 8296 assemble_name (file, fnname);
9c0e94a5 8297 putc ('\n', file);
33d01c33 8298 }
48f6bfac 8299 inside_function = FALSE;
a6f12d7c 8300}
14691f8d 8301
8302#if TARGET_ABI_OSF
8303/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8304
8305 In order to avoid the hordes of differences between generated code
8306 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8307 lots of code loading up large constants, generate rtl and emit it
8308 instead of going straight to text.
8309
8310 Not sure why this idea hasn't been explored before... */
8311
c590b625 8312static void
8313alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8314 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8315 tree function)
8316{
8317 HOST_WIDE_INT hi, lo;
8318 rtx this_rtx, funexp;
8319 rtx_insn *insn;
8320
8321 /* We always require a valid GP. */
8322 emit_insn (gen_prologue_ldgp ());
2e040219 8323 emit_note (NOTE_INSN_PROLOGUE_END);
8324
8325 /* Find the "this" pointer. If the function returns a structure,
8326 the structure return pointer is in $16. */
61f71b34 8327 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
0a2aaacc 8328 this_rtx = gen_rtx_REG (Pmode, 17);
14691f8d 8329 else
0a2aaacc 8330 this_rtx = gen_rtx_REG (Pmode, 16);
8331
8332 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8333 entire constant for the add. */
8334 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8335 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8336 if (hi + lo == delta)
8337 {
8338 if (hi)
0a2aaacc 8339 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
14691f8d 8340 if (lo)
0a2aaacc 8341 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8342 }
8343 else
8344 {
da80c6b8 8345 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta);
0a2aaacc 8346 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8347 }
8348
8349 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8350 if (vcall_offset)
8351 {
8352 rtx tmp, tmp2;
8353
8354 tmp = gen_rtx_REG (Pmode, 0);
0a2aaacc 8355 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8356
8357 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8358 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8359 if (hi + lo == vcall_offset)
8360 {
8361 if (hi)
8362 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8363 }
8364 else
8365 {
8366 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
da80c6b8 8367 vcall_offset);
8368 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8369 lo = 0;
8370 }
8371 if (lo)
8372 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8373 else
8374 tmp2 = tmp;
8375 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8376
0a2aaacc 8377 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8378 }
8379
8380 /* Generate a tail call to the target function. */
8381 if (! TREE_USED (function))
8382 {
8383 assemble_external (function);
8384 TREE_USED (function) = 1;
8385 }
8386 funexp = XEXP (DECL_RTL (function), 0);
8387 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8388 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8389 SIBLING_CALL_P (insn) = 1;
8390
8391 /* Run just enough of rest_of_compilation to get the insns emitted.
8392 There's not really enough bulk here to make other passes such as
8393 instruction scheduling worthwhile. Note that use_thunk calls
8394 assemble_start_function and assemble_end_function. */
8395 insn = get_insns ();
8396 shorten_branches (insn);
8397 final_start_function (insn, file, 1);
c9d691e9 8398 final (insn, file, 1);
8399 final_end_function ();
8400}
c590b625 8401#endif /* TARGET_ABI_OSF */
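/* Standalone sketch (assuming 64-bit long) of the ldah+lda
   decomposition used twice above: split a displacement into a
   sign-extended low 16 bits and a sign-extended high 32-bit part.
   When HI + LO == DELTA, the two-instruction ldah+lda sequence
   suffices; otherwise the full constant must be materialized.  */
static int
split_ldah_lda (long delta, long *hi, long *lo)
{
  *lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  *hi = (((delta - *lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  return *hi + *lo == delta;
}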
8402\f
8403/* Debugging support. */
8404
8405#include "gstab.h"
8406
8407/* Name of the file containing the current function. */
8408
df45c7ea 8409static const char *current_function_file = "";
8410
8411/* Offsets to alpha virtual arg/local debugging pointers. */
8412
8413long alpha_arg_offset;
8414long alpha_auto_offset;
8415\f
8416/* Emit a new filename to a stream. */
8417
8418void
a5c24926 8419alpha_output_filename (FILE *stream, const char *name)
8420{
8421 static int first_time = TRUE;
8422
8423 if (first_time)
8424 {
8425 first_time = FALSE;
8426 ++num_source_filenames;
8427 current_function_file = name;
8428 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8429 output_quoted_string (stream, name);
8430 fprintf (stream, "\n");
8431 }
8432
48f6bfac 8433 else if (name != current_function_file
5665caa2 8434 && strcmp (name, current_function_file) != 0)
48f6bfac 8435 {
8436 ++num_source_filenames;
8437 current_function_file = name;
8438 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8439
8440 output_quoted_string (stream, name);
8441 fprintf (stream, "\n");
8442 }
8443}
8444\f
8445/* Structure to show the current status of registers and memory. */
8446
8447struct shadow_summary
8448{
8449 struct {
8450 unsigned int i : 31; /* Mask of int regs */
8451 unsigned int fp : 31; /* Mask of fp regs */
8452 unsigned int mem : 1; /* mem == imem | fpmem */
8453 } used, defd;
8454};
8455
8456/* Summarize the effects of expression X on the machine. Update SUM, a pointer
8457 to the summary structure. SET is nonzero if the insn is setting the
8458 object, otherwise zero. */
8459
8460static void
a5c24926 8461summarize_insn (rtx x, struct shadow_summary *sum, int set)
6245e3df 8462{
6f7d635c 8463 const char *format_ptr;
8464 int i, j;
8465
8466 if (x == 0)
8467 return;
8468
8469 switch (GET_CODE (x))
8470 {
8471 /* ??? Note that this case would be incorrect if the Alpha had a
8472 ZERO_EXTRACT in SET_DEST. */
8473 case SET:
8474 summarize_insn (SET_SRC (x), sum, 0);
8475 summarize_insn (SET_DEST (x), sum, 1);
8476 break;
8477
8478 case CLOBBER:
8479 summarize_insn (XEXP (x, 0), sum, 1);
8480 break;
8481
8482 case USE:
8483 summarize_insn (XEXP (x, 0), sum, 0);
8484 break;
8485
8486 case ASM_OPERANDS:
8487 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8488 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8489 break;
8490
6245e3df 8491 case PARALLEL:
8fed04e5 8492 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8493 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8494 break;
8495
f4e31cf5 8496 case SUBREG:
8497 summarize_insn (SUBREG_REG (x), sum, 0);
8498 break;
f4e31cf5 8499
8500 case REG:
8501 {
8502 int regno = REGNO (x);
948068e2 8503 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8504
8505 if (regno == 31 || regno == 63)
8506 break;
8507
8508 if (set)
8509 {
8510 if (regno < 32)
8511 sum->defd.i |= mask;
8512 else
8513 sum->defd.fp |= mask;
8514 }
8515 else
8516 {
8517 if (regno < 32)
8518 sum->used.i |= mask;
8519 else
8520 sum->used.fp |= mask;
8521 }
8522 }
8523 break;
8524
8525 case MEM:
8526 if (set)
8527 sum->defd.mem = 1;
8528 else
8529 sum->used.mem = 1;
8530
8531 /* Find the regs used in memory address computation: */
8532 summarize_insn (XEXP (x, 0), sum, 0);
8533 break;
8534
f06ed650
UB
8535 case CONST_INT: case CONST_WIDE_INT: case CONST_DOUBLE:
8536 case SYMBOL_REF: case LABEL_REF: case CONST:
368a1647 8537 case SCRATCH: case ASM_INPUT:
8ba46994
RK
8538 break;
8539
6245e3df
RK
8540 /* Handle common unary and binary ops for efficiency. */
8541 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8542 case MOD: case UDIV: case UMOD: case AND: case IOR:
8543 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8544 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8545 case NE: case EQ: case GE: case GT: case LE:
8546 case LT: case GEU: case GTU: case LEU: case LTU:
8547 summarize_insn (XEXP (x, 0), sum, 0);
8548 summarize_insn (XEXP (x, 1), sum, 0);
8549 break;
8550
8551 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8552 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8553 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
f676971a 8554 case SQRT: case FFS:
6245e3df
RK
8555 summarize_insn (XEXP (x, 0), sum, 0);
8556 break;
8557
8558 default:
8559 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8fed04e5 8560 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
bed95fa1 8561 switch (format_ptr[i])
6245e3df
RK
8562 {
8563 case 'e':
8564 summarize_insn (XEXP (x, i), sum, 0);
8565 break;
8566
8567 case 'E':
8fed04e5 8568 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6245e3df
RK
8569 summarize_insn (XVECEXP (x, i, j), sum, 0);
8570 break;
8571
2b01d264
RH
8572 case 'i':
8573 break;
8574
6245e3df 8575 default:
56daab84 8576 gcc_unreachable ();
6245e3df
RK
8577 }
8578 }
8579}
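For illustration (not part of the original file): a standalone sketch of the register-mask bookkeeping summarize_insn performs in its REG case, using a hypothetical write to integer register $3. Registers 31 and 63 are the always-reads-as-zero registers and are skipped, as above.

#include <stdio.h>

struct masks { unsigned int i, fp; };

int main (void)
{
  struct masks defd = { 0, 0 };
  int regno = 3;                        /* hypothetical: insn writes $3 */

  if (regno != 31 && regno != 63)       /* $31/$f31 always read as zero */
    {
      if (regno < 32)
        defd.i |= 1u << (regno % 32);   /* integer register file */
      else
        defd.fp |= 1u << (regno % 32);  /* FP register file */
    }
  printf ("defd.i = %#x\n", defd.i);    /* prints defd.i = 0x8 */
  return 0;
}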
6245e3df 8580
9c0e94a5
RH
8581/* Ensure a sufficient number of `trapb' insns are in the code when
8582 the user requests code with a trap precision of functions or
8583 instructions.
8584
8585 In naive mode, when the user requests a trap-precision of
8586 "instruction", a trapb is needed after every instruction that may
8587 generate a trap. This ensures that the code is resumption safe but
8588 it is also slow.
8589
8590 When optimizations are turned on, we delay issuing a trapb as long
8591 as possible. In this context, a trap shadow is the sequence of
8592 instructions that starts with a (potentially) trap generating
8593 instruction and extends to the next trapb or call_pal instruction
8594 (but GCC never generates call_pal by itself). We can delay (and
8595 therefore sometimes omit) a trapb subject to the following
8596 conditions:
8597
8598 (a) On entry to the trap shadow, if any Alpha register or memory
8599 location contains a value that is used as an operand value by some
8600 instruction in the trap shadow (live on entry), then no instruction
8601 in the trap shadow may modify the register or memory location.
8602
8603 (b) Within the trap shadow, the computation of the base register
8604 for a memory load or store instruction may not involve using the
8605 result of an instruction that might generate an UNPREDICTABLE
8606 result.
8607
8608 (c) Within the trap shadow, no register may be used more than once
8609 as a destination register. (This is to make life easier for the
8610 trap-handler.)
6245e3df 8611
2ea844d3 8612 (d) The trap shadow may not include any branch instructions. */
6245e3df 8613
2ea844d3 8614static void
a5c24926 8615alpha_handle_trap_shadows (void)
6245e3df 8616{
2ea844d3
RH
8617 struct shadow_summary shadow;
8618 int trap_pending, exception_nesting;
b32d5189 8619 rtx_insn *i, *n;
6245e3df 8620
2ea844d3
RH
8621 trap_pending = 0;
8622 exception_nesting = 0;
8623 shadow.used.i = 0;
8624 shadow.used.fp = 0;
8625 shadow.used.mem = 0;
8626 shadow.defd = shadow.used;
f676971a 8627
18dbd950 8628 for (i = get_insns (); i ; i = NEXT_INSN (i))
2ea844d3 8629 {
7d83f4f5 8630 if (NOTE_P (i))
2ea844d3 8631 {
a38e7aa5 8632 switch (NOTE_KIND (i))
2ea844d3
RH
8633 {
8634 case NOTE_INSN_EH_REGION_BEG:
8635 exception_nesting++;
8636 if (trap_pending)
8637 goto close_shadow;
8638 break;
8639
8640 case NOTE_INSN_EH_REGION_END:
8641 exception_nesting--;
8642 if (trap_pending)
8643 goto close_shadow;
8644 break;
8645
8646 case NOTE_INSN_EPILOGUE_BEG:
8647 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8648 goto close_shadow;
8649 break;
8650 }
8651 }
8652 else if (trap_pending)
8653 {
8654 if (alpha_tp == ALPHA_TP_FUNC)
8655 {
7d83f4f5 8656 if (JUMP_P (i)
2ea844d3
RH
8657 && GET_CODE (PATTERN (i)) == RETURN)
8658 goto close_shadow;
8659 }
8660 else if (alpha_tp == ALPHA_TP_INSN)
8661 {
8662 if (optimize > 0)
8663 {
8664 struct shadow_summary sum;
8665
8666 sum.used.i = 0;
8667 sum.used.fp = 0;
8668 sum.used.mem = 0;
f4e31cf5 8669 sum.defd = sum.used;
2ea844d3
RH
8670
8671 switch (GET_CODE (i))
8672 {
8673 case INSN:
56daab84 8674 /* Annoyingly, get_attr_trap will die on these. */
bb02e7ea
RH
8675 if (GET_CODE (PATTERN (i)) == USE
8676 || GET_CODE (PATTERN (i)) == CLOBBER)
2ea844d3
RH
8677 break;
8678
8679 summarize_insn (PATTERN (i), &sum, 0);
8680
8681 if ((sum.defd.i & shadow.defd.i)
8682 || (sum.defd.fp & shadow.defd.fp))
8683 {
8684 /* (c) would be violated */
8685 goto close_shadow;
8686 }
8687
8688 /* Combine shadow with summary of current insn: */
8689 shadow.used.i |= sum.used.i;
8690 shadow.used.fp |= sum.used.fp;
8691 shadow.used.mem |= sum.used.mem;
8692 shadow.defd.i |= sum.defd.i;
8693 shadow.defd.fp |= sum.defd.fp;
8694 shadow.defd.mem |= sum.defd.mem;
8695
8696 if ((sum.defd.i & shadow.used.i)
8697 || (sum.defd.fp & shadow.used.fp)
8698 || (sum.defd.mem & shadow.used.mem))
8699 {
8700 /* (a) would be violated (also takes care of (b)) */
56daab84
NS
8701 gcc_assert (get_attr_trap (i) != TRAP_YES
8702 || (!(sum.defd.i & sum.used.i)
8703 && !(sum.defd.fp & sum.used.fp)));
2ea844d3
RH
8704
8705 goto close_shadow;
8706 }
8707 break;
8708
dd5e7837
UB
8709 case BARRIER:
8710 /* __builtin_unreachable can expand to no code at all,
8711 leaving (barrier) RTXes in the instruction stream. */
8712 goto close_shadow_notrapb;
8713
2ea844d3
RH
8714 case JUMP_INSN:
8715 case CALL_INSN:
8716 case CODE_LABEL:
8717 goto close_shadow;
8718
8719 default:
56daab84 8720 gcc_unreachable ();
2ea844d3
RH
8721 }
8722 }
8723 else
8724 {
8725 close_shadow:
68aed21b
RH
8726 n = emit_insn_before (gen_trapb (), i);
8727 PUT_MODE (n, TImode);
8728 PUT_MODE (i, TImode);
dd5e7837 8729 close_shadow_notrapb:
2ea844d3
RH
8730 trap_pending = 0;
8731 shadow.used.i = 0;
8732 shadow.used.fp = 0;
8733 shadow.used.mem = 0;
8734 shadow.defd = shadow.used;
8735 }
8736 }
8737 }
6245e3df 8738
4f3f5e9f 8739 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
7d83f4f5 8740 && NONJUMP_INSN_P (i)
4f3f5e9f
RH
8741 && GET_CODE (PATTERN (i)) != USE
8742 && GET_CODE (PATTERN (i)) != CLOBBER
8743 && get_attr_trap (i) == TRAP_YES)
8744 {
8745 if (optimize && !trap_pending)
8746 summarize_insn (PATTERN (i), &shadow, 0);
8747 trap_pending = 1;
8748 }
6245e3df
RK
8749 }
8750}
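For illustration (not part of the original file): a standalone sketch of the two close-shadow tests applied in the loop above, on hypothetical masks. A second write to an already-written register violates condition (c); a write to a location the shadow has already read violates condition (a), which also covers (b).

#include <stdbool.h>
#include <stdio.h>

struct masks { unsigned int i, fp, mem; };

static bool
must_close_shadow (struct masks sum_defd, struct masks shadow_defd,
                   struct masks shadow_used)
{
  if ((sum_defd.i & shadow_defd.i) || (sum_defd.fp & shadow_defd.fp))
    return true;                             /* condition (c) violated */
  if ((sum_defd.i & shadow_used.i)
      || (sum_defd.fp & shadow_used.fp)
      || (sum_defd.mem & shadow_used.mem))
    return true;                             /* condition (a) violated */
  return false;
}

int main (void)
{
  struct masks none = { 0, 0, 0 };
  struct masks reg5 = { 1u << 5, 0, 0 };     /* hypothetical: touches $5 */

  /* New insn writes $5 while the shadow already wrote $5: close it. */
  printf ("%d\n", must_close_shadow (reg5, reg5, none));   /* prints 1 */
  /* New insn writes $5, shadow neither wrote nor read it: keep open. */
  printf ("%d\n", must_close_shadow (reg5, none, none));   /* prints 0 */
  return 0;
}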
68aed21b 8751\f
68aed21b 8752/* Alpha can only issue instruction groups simultaneously if they are
093354e0 8753 suitably aligned. This is very processor-specific. */
4ead2a39
RH
8754/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8755 that are marked "fake". These instructions do not exist on that target,
8756 but it is possible to see these insns with deranged combinations of
8757 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8758 choose a result at random. */
68aed21b 8759
3873d24b
RH
8760enum alphaev4_pipe {
8761 EV4_STOP = 0,
8762 EV4_IB0 = 1,
8763 EV4_IB1 = 2,
8764 EV4_IBX = 4
8765};
8766
68aed21b
RH
8767enum alphaev5_pipe {
8768 EV5_STOP = 0,
8769 EV5_NONE = 1,
8770 EV5_E01 = 2,
8771 EV5_E0 = 4,
8772 EV5_E1 = 8,
8773 EV5_FAM = 16,
8774 EV5_FA = 32,
8775 EV5_FM = 64
8776};
8777
3873d24b 8778static enum alphaev4_pipe
cad003ba 8779alphaev4_insn_pipe (rtx_insn *insn)
3873d24b
RH
8780{
8781 if (recog_memoized (insn) < 0)
8782 return EV4_STOP;
8783 if (get_attr_length (insn) != 4)
8784 return EV4_STOP;
8785
8786 switch (get_attr_type (insn))
8787 {
8788 case TYPE_ILD:
0b196b18 8789 case TYPE_LDSYM:
3873d24b 8790 case TYPE_FLD:
0b196b18 8791 case TYPE_LD_L:
3873d24b
RH
8792 return EV4_IBX;
8793
3873d24b
RH
8794 case TYPE_IADD:
8795 case TYPE_ILOG:
8796 case TYPE_ICMOV:
8797 case TYPE_ICMP:
3873d24b
RH
8798 case TYPE_FST:
8799 case TYPE_SHIFT:
8800 case TYPE_IMUL:
8801 case TYPE_FBR:
4ead2a39 8802 case TYPE_MVI: /* fake */
3873d24b
RH
8803 return EV4_IB0;
8804
0b196b18 8805 case TYPE_IST:
3873d24b
RH
8806 case TYPE_MISC:
8807 case TYPE_IBR:
8808 case TYPE_JSR:
d5909a79 8809 case TYPE_CALLPAL:
3873d24b
RH
8810 case TYPE_FCPYS:
8811 case TYPE_FCMOV:
8812 case TYPE_FADD:
8813 case TYPE_FDIV:
8814 case TYPE_FMUL:
0b196b18
RH
8815 case TYPE_ST_C:
8816 case TYPE_MB:
4ead2a39
RH
8817 case TYPE_FSQRT: /* fake */
8818 case TYPE_FTOI: /* fake */
8819 case TYPE_ITOF: /* fake */
3873d24b
RH
8820 return EV4_IB1;
8821
8822 default:
56daab84 8823 gcc_unreachable ();
3873d24b
RH
8824 }
8825}
8826
68aed21b 8827static enum alphaev5_pipe
cad003ba 8828alphaev5_insn_pipe (rtx_insn *insn)
68aed21b
RH
8829{
8830 if (recog_memoized (insn) < 0)
8831 return EV5_STOP;
8832 if (get_attr_length (insn) != 4)
8833 return EV5_STOP;
8834
8835 switch (get_attr_type (insn))
8836 {
8837 case TYPE_ILD:
8838 case TYPE_FLD:
8839 case TYPE_LDSYM:
8840 case TYPE_IADD:
8841 case TYPE_ILOG:
8842 case TYPE_ICMOV:
8843 case TYPE_ICMP:
8844 return EV5_E01;
8845
8846 case TYPE_IST:
8847 case TYPE_FST:
8848 case TYPE_SHIFT:
8849 case TYPE_IMUL:
8850 case TYPE_MISC:
8851 case TYPE_MVI:
0b196b18
RH
8852 case TYPE_LD_L:
8853 case TYPE_ST_C:
8854 case TYPE_MB:
4ead2a39
RH
8855 case TYPE_FTOI: /* fake */
8856 case TYPE_ITOF: /* fake */
68aed21b
RH
8857 return EV5_E0;
8858
8859 case TYPE_IBR:
8860 case TYPE_JSR:
d5909a79 8861 case TYPE_CALLPAL:
68aed21b
RH
8862 return EV5_E1;
8863
8864 case TYPE_FCPYS:
8865 return EV5_FAM;
8866
8867 case TYPE_FBR:
8868 case TYPE_FCMOV:
8869 case TYPE_FADD:
8870 case TYPE_FDIV:
4ead2a39 8871 case TYPE_FSQRT: /* fake */
68aed21b
RH
8872 return EV5_FA;
8873
8874 case TYPE_FMUL:
8875 return EV5_FM;
2c01018f
RH
8876
8877 default:
56daab84 8878 gcc_unreachable ();
68aed21b 8879 }
68aed21b
RH
8880}
8881
f676971a 8882/* IN_USE is a mask of the slots currently filled within the insn group.
3873d24b 8883 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
f676971a 8884 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
3873d24b
RH
8885
8886 LEN is, of course, the length of the group in bytes. */
8887
cad003ba
DM
8888static rtx_insn *
8889alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
3873d24b
RH
8890{
8891 int len, in_use;
8892
8893 len = in_use = 0;
8894
2c3c49de 8895 if (! INSN_P (insn)
3873d24b
RH
8896 || GET_CODE (PATTERN (insn)) == CLOBBER
8897 || GET_CODE (PATTERN (insn)) == USE)
8898 goto next_and_done;
8899
8900 while (1)
8901 {
8902 enum alphaev4_pipe pipe;
8903
8904 pipe = alphaev4_insn_pipe (insn);
8905 switch (pipe)
8906 {
8907 case EV4_STOP:
8908 /* Force complex instructions to start new groups. */
8909 if (in_use)
8910 goto done;
8911
f3b569ca 8912 /* If this is a completely unrecognized insn, it's an asm.
3873d24b
RH
8913 We don't know how long it is, so record length as -1 to
8914 signal a needed realignment. */
8915 if (recog_memoized (insn) < 0)
8916 len = -1;
8917 else
8918 len = get_attr_length (insn);
8919 goto next_and_done;
8920
8921 case EV4_IBX:
8922 if (in_use & EV4_IB0)
8923 {
8924 if (in_use & EV4_IB1)
8925 goto done;
8926 in_use |= EV4_IB1;
8927 }
8928 else
8929 in_use |= EV4_IB0 | EV4_IBX;
8930 break;
8931
8932 case EV4_IB0:
8933 if (in_use & EV4_IB0)
8934 {
8935 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8936 goto done;
8937 in_use |= EV4_IB1;
8938 }
8939 in_use |= EV4_IB0;
8940 break;
8941
8942 case EV4_IB1:
8943 if (in_use & EV4_IB1)
8944 goto done;
8945 in_use |= EV4_IB1;
8946 break;
8947
8948 default:
56daab84 8949 gcc_unreachable ();
3873d24b
RH
8950 }
8951 len += 4;
f676971a 8952
3873d24b 8953 /* Haifa doesn't do well scheduling branches. */
7d83f4f5 8954 if (JUMP_P (insn))
3873d24b
RH
8955 goto next_and_done;
8956
8957 next:
8958 insn = next_nonnote_insn (insn);
8959
2c3c49de 8960 if (!insn || ! INSN_P (insn))
3873d24b
RH
8961 goto done;
8962
8963 /* Let Haifa tell us where it thinks insn group boundaries are. */
8964 if (GET_MODE (insn) == TImode)
8965 goto done;
8966
8967 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8968 goto next;
8969 }
8970
8971 next_and_done:
8972 insn = next_nonnote_insn (insn);
8973
8974 done:
8975 *plen = len;
8976 *pin_use = in_use;
8977 return insn;
8978}
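For illustration (not part of the original file): a standalone walk of the EV4 slot masks above, for a hypothetical two-insn group. An IBX insn claims IB0 but stays marked as swappable, so a later IB0-only insn can push it into IB1.

#include <stdio.h>

enum { EV4_IB0 = 1, EV4_IB1 = 2, EV4_IBX = 4 };

int main (void)
{
  int in_use = 0;

  /* First insn classifies as EV4_IBX: take IB0, mark it swappable. */
  in_use |= EV4_IB0 | EV4_IBX;

  /* Second insn classifies as EV4_IB0: IB0 is taken, but the IBX
     occupant can be swapped by the hardware into the free IB1 slot. */
  if ((in_use & EV4_IB0) && (in_use & EV4_IBX) && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;               /* IBX insn moves over */
      in_use |= EV4_IB0;               /* new insn keeps IB0 */
    }
  printf ("in_use = %d\n", in_use);    /* prints in_use = 7 */
  return 0;
}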
8979
f676971a 8980/* IN_USE is a mask of the slots currently filled within the insn group.
3873d24b 8981 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
f676971a 8982 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
68aed21b
RH
8983
8984 LEN is, of course, the length of the group in bytes. */
8985
cad003ba
DM
8986static rtx_insn *
8987alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
68aed21b
RH
8988{
8989 int len, in_use;
8990
8991 len = in_use = 0;
8992
2c3c49de 8993 if (! INSN_P (insn)
2c01018f
RH
8994 || GET_CODE (PATTERN (insn)) == CLOBBER
8995 || GET_CODE (PATTERN (insn)) == USE)
8996 goto next_and_done;
68aed21b 8997
2c01018f 8998 while (1)
68aed21b
RH
8999 {
9000 enum alphaev5_pipe pipe;
68aed21b
RH
9001
9002 pipe = alphaev5_insn_pipe (insn);
9003 switch (pipe)
9004 {
9005 case EV5_STOP:
9006 /* Force complex instructions to start new groups. */
9007 if (in_use)
9008 goto done;
9009
f3b569ca 9010 /* If this is a completely unrecognized insn, it's an asm.
68aed21b
RH
9011 We don't know how long it is, so record length as -1 to
9012 signal a needed realignment. */
9013 if (recog_memoized (insn) < 0)
9014 len = -1;
9015 else
9016 len = get_attr_length (insn);
2c01018f 9017 goto next_and_done;
68aed21b 9018
56daab84
NS
9019 /* ??? Most of the places below, we would like to assert never
9020 happen, as it would indicate an error either in Haifa, or
9021 in the scheduling description. Unfortunately, Haifa never
9022 schedules the last instruction of the BB, so we don't have
9023 an accurate TI bit to go off. */
68aed21b
RH
9024 case EV5_E01:
9025 if (in_use & EV5_E0)
9026 {
9027 if (in_use & EV5_E1)
9028 goto done;
9029 in_use |= EV5_E1;
9030 }
9031 else
9032 in_use |= EV5_E0 | EV5_E01;
9033 break;
9034
9035 case EV5_E0:
9036 if (in_use & EV5_E0)
9037 {
3873d24b 9038 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
68aed21b
RH
9039 goto done;
9040 in_use |= EV5_E1;
9041 }
9042 in_use |= EV5_E0;
9043 break;
9044
9045 case EV5_E1:
9046 if (in_use & EV5_E1)
9047 goto done;
9048 in_use |= EV5_E1;
9049 break;
9050
9051 case EV5_FAM:
9052 if (in_use & EV5_FA)
9053 {
9054 if (in_use & EV5_FM)
9055 goto done;
9056 in_use |= EV5_FM;
9057 }
9058 else
9059 in_use |= EV5_FA | EV5_FAM;
9060 break;
9061
9062 case EV5_FA:
9063 if (in_use & EV5_FA)
9064 goto done;
9065 in_use |= EV5_FA;
9066 break;
9067
9068 case EV5_FM:
9069 if (in_use & EV5_FM)
9070 goto done;
9071 in_use |= EV5_FM;
9072 break;
9073
9074 case EV5_NONE:
9075 break;
9076
9077 default:
56daab84 9078 gcc_unreachable ();
68aed21b
RH
9079 }
9080 len += 4;
f676971a 9081
68aed21b
RH
9082 /* Haifa doesn't do well scheduling branches. */
9083 /* ??? If this is predicted not-taken, slotting continues, except
9084 that no more IBR, FBR, or JSR insns may be slotted. */
7d83f4f5 9085 if (JUMP_P (insn))
2c01018f 9086 goto next_and_done;
68aed21b 9087
2c01018f 9088 next:
68aed21b
RH
9089 insn = next_nonnote_insn (insn);
9090
2c3c49de 9091 if (!insn || ! INSN_P (insn))
68aed21b 9092 goto done;
a874dd18 9093
68aed21b
RH
9094 /* Let Haifa tell us where it thinks insn group boundaries are. */
9095 if (GET_MODE (insn) == TImode)
9096 goto done;
9097
2c01018f
RH
9098 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9099 goto next;
68aed21b 9100 }
2c01018f
RH
9101
9102 next_and_done:
9103 insn = next_nonnote_insn (insn);
68aed21b
RH
9104
9105 done:
9106 *plen = len;
9107 *pin_use = in_use;
9108 return insn;
68aed21b
RH
9109}
9110
3873d24b 9111static rtx
a5c24926 9112alphaev4_next_nop (int *pin_use)
3873d24b
RH
9113{
9114 int in_use = *pin_use;
9115 rtx nop;
9116
9117 if (!(in_use & EV4_IB0))
9118 {
9119 in_use |= EV4_IB0;
9120 nop = gen_nop ();
9121 }
9122 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9123 {
9124 in_use |= EV4_IB1;
9125 nop = gen_nop ();
9126 }
9127 else if (TARGET_FP && !(in_use & EV4_IB1))
9128 {
9129 in_use |= EV4_IB1;
9130 nop = gen_fnop ();
9131 }
9132 else
9133 nop = gen_unop ();
9134
9135 *pin_use = in_use;
9136 return nop;
9137}
9138
9139static rtx
a5c24926 9140alphaev5_next_nop (int *pin_use)
3873d24b
RH
9141{
9142 int in_use = *pin_use;
9143 rtx nop;
9144
9145 if (!(in_use & EV5_E1))
9146 {
9147 in_use |= EV5_E1;
9148 nop = gen_nop ();
9149 }
9150 else if (TARGET_FP && !(in_use & EV5_FA))
9151 {
9152 in_use |= EV5_FA;
9153 nop = gen_fnop ();
9154 }
9155 else if (TARGET_FP && !(in_use & EV5_FM))
9156 {
9157 in_use |= EV5_FM;
9158 nop = gen_fnop ();
9159 }
9160 else
9161 nop = gen_unop ();
9162
9163 *pin_use = in_use;
9164 return nop;
9165}
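For illustration (not part of the original file): the EV5 nop preference order encoded above, as a standalone function over a hypothetical slot mask. An integer nop fills E1 first, fnop fills a free FP slot when FP registers are available, and unop is the fallback that can issue anywhere. The enum values mirror the alphaev5_pipe definitions earlier in this file.

#include <stdio.h>

enum { EV5_E1 = 8, EV5_FA = 32, EV5_FM = 64 };

static const char *
ev5_nop_choice (int *pin_use, int have_fp)
{
  if (!(*pin_use & EV5_E1))
    { *pin_use |= EV5_E1; return "nop"; }       /* integer pipe E1 */
  if (have_fp && !(*pin_use & EV5_FA))
    { *pin_use |= EV5_FA; return "fnop"; }      /* FP adder */
  if (have_fp && !(*pin_use & EV5_FM))
    { *pin_use |= EV5_FM; return "fnop"; }      /* FP multiplier */
  return "unop";                                /* fallback */
}

int main (void)
{
  int in_use = EV5_E1;                          /* hypothetical: E1 taken */
  printf ("%s\n", ev5_nop_choice (&in_use, 1)); /* prints fnop */
  return 0;
}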
9166
9167/* The instruction group alignment main loop. */
9168
68aed21b 9169static void
4bdf6418 9170alpha_align_insns_1 (unsigned int max_align,
cad003ba 9171 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
4bdf6418 9172 rtx (*next_nop) (int *))
68aed21b
RH
9173{
9174 /* ALIGN is the known alignment for the insn group. */
b81f53a1 9175 unsigned int align;
68aed21b
RH
9176 /* OFS is the offset of the current insn in the insn group. */
9177 int ofs;
0f1341c7 9178 int prev_in_use, in_use, len, ldgp;
cad003ba 9179 rtx_insn *i, *next;
68aed21b
RH
9180
9181 /* Let shorten_branches take care of assigning alignments to code labels. */
18dbd950 9182 shorten_branches (get_insns ());
68aed21b 9183
30864e14
RH
9184 if (align_functions < 4)
9185 align = 4;
21cb9e60 9186 else if ((unsigned int) align_functions < max_align)
30864e14
RH
9187 align = align_functions;
9188 else
9189 align = max_align;
80db34d8 9190
68aed21b 9191 ofs = prev_in_use = 0;
18dbd950 9192 i = get_insns ();
7d83f4f5 9193 if (NOTE_P (i))
68aed21b
RH
9194 i = next_nonnote_insn (i);
9195
0f1341c7
RH
9196 ldgp = alpha_function_needs_gp ? 8 : 0;
9197
68aed21b
RH
9198 while (i)
9199 {
b81f53a1 9200 next = (*next_group) (i, &in_use, &len);
68aed21b
RH
9201
9202 /* When we see a label, resync alignment etc. */
7d83f4f5 9203 if (LABEL_P (i))
68aed21b 9204 {
b81f53a1
RK
9205 unsigned int new_align = 1 << label_to_alignment (i);
9206
68aed21b
RH
9207 if (new_align >= align)
9208 {
3873d24b 9209 align = new_align < max_align ? new_align : max_align;
68aed21b
RH
9210 ofs = 0;
9211 }
b81f53a1 9212
68aed21b
RH
9213 else if (ofs & (new_align-1))
9214 ofs = (ofs | (new_align-1)) + 1;
56daab84 9215 gcc_assert (!len);
68aed21b
RH
9216 }
9217
9218 /* Handle complex instructions specially. */
9219 else if (in_use == 0)
9220 {
9221 /* Asms will have length < 0. This is a signal that we have
9222 lost alignment knowledge. Assume, however, that the asm
9223 will not mis-align instructions. */
9224 if (len < 0)
9225 {
9226 ofs = 0;
9227 align = 4;
9228 len = 0;
9229 }
9230 }
9231
9232 /* If the known alignment is smaller than the recognized insn group,
9233 realign the output. */
1eb356b9 9234 else if ((int) align < len)
68aed21b 9235 {
b81f53a1 9236 unsigned int new_log_align = len > 8 ? 4 : 3;
cad003ba 9237 rtx_insn *prev, *where;
68aed21b 9238
11cb1475 9239 where = prev = prev_nonnote_insn (i);
7d83f4f5 9240 if (!where || !LABEL_P (where))
68aed21b
RH
9241 where = i;
9242
11cb1475
RH
9243 /* Can't realign between a call and its gp reload. */
9244 if (! (TARGET_EXPLICIT_RELOCS
7d83f4f5 9245 && prev && CALL_P (prev)))
11cb1475
RH
9246 {
9247 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9248 align = 1 << new_log_align;
9249 ofs = 0;
9250 }
68aed21b
RH
9251 }
9252
0f1341c7
RH
9253 /* We may not insert padding inside the initial ldgp sequence. */
9254 else if (ldgp > 0)
9255 ldgp -= len;
9256
68aed21b
RH
9257 /* If the group won't fit in the same INT16 as the previous,
9258 we need to add padding to keep the group together. Rather
9259 than simply leaving the insn filling to the assembler, we
9260 can make use of the knowledge of what sorts of instructions
9261 were issued in the previous group to make sure that all of
9262 the added nops are really free. */
1eb356b9 9263 else if (ofs + len > (int) align)
68aed21b
RH
9264 {
9265 int nop_count = (align - ofs) / 4;
cad003ba 9266 rtx_insn *where;
68aed21b 9267
839a4992 9268 /* Insert nops before labels, branches, and calls to truly merge
11cb1475 9269 the execution of the nops with the previous instruction group. */
68aed21b 9270 where = prev_nonnote_insn (i);
3873d24b 9271 if (where)
68aed21b 9272 {
7d83f4f5 9273 if (LABEL_P (where))
68aed21b 9274 {
cad003ba 9275 rtx_insn *where2 = prev_nonnote_insn (where);
7d83f4f5 9276 if (where2 && JUMP_P (where2))
3873d24b 9277 where = where2;
68aed21b 9278 }
7d83f4f5 9279 else if (NONJUMP_INSN_P (where))
3873d24b 9280 where = i;
68aed21b 9281 }
3873d24b
RH
9282 else
9283 where = i;
9284
f676971a 9285 do
3873d24b 9286 emit_insn_before ((*next_nop)(&prev_in_use), where);
68aed21b
RH
9287 while (--nop_count);
9288 ofs = 0;
9289 }
9290
9291 ofs = (ofs + len) & (align - 1);
9292 prev_in_use = in_use;
9293 i = next;
9294 }
9295}
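For illustration (not part of the original file): the padding arithmetic from the final branch above, with hypothetical numbers. A group that would straddle the alignment window has the remaining slots filled, one nop per 4-byte slot, before the group starts a fresh window.

#include <stdio.h>

int main (void)
{
  int align = 16, ofs = 8, len = 12;    /* hypothetical group placement */

  if (ofs + len > align)                /* group would straddle window */
    {
      int nop_count = (align - ofs) / 4;
      printf ("%d nops\n", nop_count);  /* prints 2 nops */
      ofs = 0;                          /* group starts a fresh window */
    }
  printf ("next ofs = %d\n", (ofs + len) & (align - 1));  /* prints 12 */
  return 0;
}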
76a4a1bd 9296
4bdf6418
UB
9297static void
9298alpha_align_insns (void)
9299{
9300 if (alpha_tune == PROCESSOR_EV4)
9301 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9302 else if (alpha_tune == PROCESSOR_EV5)
9303 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9304 else
9305 gcc_unreachable ();
9306}
9307
3eda5123 9308/* Insert an unop between sibcall or noreturn function call and GP load. */
76a4a1bd
UB
9309
9310static void
3eda5123 9311alpha_pad_function_end (void)
76a4a1bd 9312{
cad003ba 9313 rtx_insn *insn, *next;
76a4a1bd
UB
9314
9315 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9316 {
e1df0477
RH
9317 if (!CALL_P (insn)
9318 || !(SIBLING_CALL_P (insn)
9319 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
76a4a1bd
UB
9320 continue;
9321
9e43ad68
UB
9322 /* Make sure we do not split a call and its corresponding
9323 CALL_ARG_LOCATION note. */
e1df0477
RH
9324 next = NEXT_INSN (insn);
9325 if (next == NULL)
9326 continue;
e1df0477
RH
9327 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9328 insn = next;
9e43ad68 9329
76a4a1bd 9330 next = next_active_insn (insn);
76a4a1bd
UB
9331 if (next)
9332 {
9333 rtx pat = PATTERN (next);
9334
9335 if (GET_CODE (pat) == SET
9336 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9337 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9338 emit_insn_after (gen_unop (), insn);
9339 }
9340 }
9341}
68aed21b 9342\f
f5143c46 9343/* Machine dependent reorg pass. */
2ea844d3 9344
18dbd950 9345static void
a5c24926 9346alpha_reorg (void)
2ea844d3 9347{
3eda5123
UB
9348 /* Workaround for a linker error that triggers when an exception
9349 handler immediately follows a sibcall or a noreturn function.
9350
9351In the sibcall case:
9352
9353 The instruction stream from an object file:
9354
9355 1d8: 00 00 fb 6b jmp (t12)
9356 1dc: 00 00 ba 27 ldah gp,0(ra)
9357 1e0: 00 00 bd 23 lda gp,0(gp)
9358 1e4: 00 00 7d a7 ldq t12,0(gp)
9359 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9360
9361 was converted in the final link pass to:
9362
9363 12003aa88: 67 fa ff c3 br 120039428 <...>
9364 12003aa8c: 00 00 fe 2f unop
9365 12003aa90: 00 00 fe 2f unop
9366 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9367 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9368
9369And in the noreturn case:
76a4a1bd
UB
9370
9371 The instruction stream from an object file:
9372
9373 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9374 58: 00 00 ba 27 ldah gp,0(ra)
9375 5c: 00 00 bd 23 lda gp,0(gp)
9376 60: 00 00 7d a7 ldq t12,0(gp)
9377 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9378
9379 was converted in the final link pass to:
9380
9381 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9382 fdb28: 00 00 fe 2f unop
9383 fdb2c: 00 00 fe 2f unop
9384 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9385 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9386
9387 GP load instructions were wrongly cleared by the linker relaxation
9388 pass. This workaround prevents removal of GP loads by inserting
3eda5123 9389 an unop instruction between a sibcall or noreturn function call and
76a4a1bd
UB
9390 exception handler prologue. */
9391
9392 if (current_function_has_exception_handlers ())
3eda5123 9393 alpha_pad_function_end ();
2ea844d3 9394}
2ea844d3 9395\f
1bc7c5b6
ZW
9396static void
9397alpha_file_start (void)
9398{
9399 default_file_start ();
1bc7c5b6
ZW
9400
9401 fputs ("\t.set noreorder\n", asm_out_file);
9402 fputs ("\t.set volatile\n", asm_out_file);
42d085c1 9403 if (TARGET_ABI_OSF)
1bc7c5b6
ZW
9404 fputs ("\t.set noat\n", asm_out_file);
9405 if (TARGET_EXPLICIT_RELOCS)
9406 fputs ("\t.set nomacro\n", asm_out_file);
9407 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
8bea7f7c
RH
9408 {
9409 const char *arch;
9410
9411 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9412 arch = "ev6";
9413 else if (TARGET_MAX)
9414 arch = "pca56";
9415 else if (TARGET_BWX)
9416 arch = "ev56";
9417 else if (alpha_cpu == PROCESSOR_EV5)
9418 arch = "ev5";
9419 else
9420 arch = "ev4";
9421
9422 fprintf (asm_out_file, "\t.arch %s\n", arch);
9423 }
1bc7c5b6 9424}
1bc7c5b6 9425
9b580a0b
RH
9426/* Since we don't have a .dynbss section, we should not allow global
9427 relocations in the .rodata section. */
9428
9429static int
9430alpha_elf_reloc_rw_mask (void)
9431{
9432 return flag_pic ? 3 : 2;
9433}
b64a1b53 9434
d6b5193b
RS
9435/* Return a section for X. The only special thing we do here is to
9436 honor small data. */
b64a1b53 9437
d6b5193b 9438static section *
ef4bddc2 9439alpha_elf_select_rtx_section (machine_mode mode, rtx x,
a5c24926 9440 unsigned HOST_WIDE_INT align)
b64a1b53
RH
9441{
9442 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
093354e0 9443 /* ??? Consider using mergeable sdata sections. */
d6b5193b 9444 return sdata_section;
b64a1b53 9445 else
d6b5193b 9446 return default_elf_select_rtx_section (mode, x, align);
b64a1b53
RH
9447}
9448
ae069803
RH
9449static unsigned int
9450alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9451{
9452 unsigned int flags = 0;
9453
9454 if (strcmp (name, ".sdata") == 0
9455 || strncmp (name, ".sdata.", 7) == 0
9456 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9457 || strcmp (name, ".sbss") == 0
9458 || strncmp (name, ".sbss.", 6) == 0
9459 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9460 flags = SECTION_SMALL;
9461
9462 flags |= default_section_type_flags (decl, name, reloc);
9463 return flags;
9464}
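For illustration (not part of the original file): a standalone reduction of the name tests above, checking which hypothetical section names receive the small-data flag.

#include <stdio.h>
#include <string.h>

static int
is_small_section (const char *name)
{
  return strcmp (name, ".sdata") == 0
         || strncmp (name, ".sdata.", 7) == 0
         || strncmp (name, ".gnu.linkonce.s.", 16) == 0
         || strcmp (name, ".sbss") == 0
         || strncmp (name, ".sbss.", 6) == 0
         || strncmp (name, ".gnu.linkonce.sb.", 17) == 0;
}

int main (void)
{
  printf ("%d\n", is_small_section (".sdata.foo"));   /* prints 1 */
  printf ("%d\n", is_small_section (".data"));        /* prints 0 */
  return 0;
}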
b64a1b53 9465\f
f030826a
RH
9466/* Structure to collect function names for final output in link section. */
9467/* Note that items marked with GTY can't be ifdef'ed out. */
17211ab5 9468
735f469b
TG
9469enum reloc_kind
9470{
9471 KIND_LINKAGE,
9472 KIND_CODEADDR
9473};
17211ab5 9474
d1b38208 9475struct GTY(()) alpha_links
17211ab5 9476{
735f469b 9477 rtx func;
17211ab5 9478 rtx linkage;
f030826a
RH
9479 enum reloc_kind rkind;
9480};
9481
be7b80f4 9482#if TARGET_ABI_OPEN_VMS
89cfc2c6 9483
e9a25f70 9484/* Return the VMS argument type corresponding to MODE. */
89cfc2c6 9485
e9a25f70 9486enum avms_arg_type
ef4bddc2 9487alpha_arg_type (machine_mode mode)
e9a25f70
JL
9488{
9489 switch (mode)
89cfc2c6 9490 {
e9a25f70
JL
9491 case SFmode:
9492 return TARGET_FLOAT_VAX ? FF : FS;
9493 case DFmode:
9494 return TARGET_FLOAT_VAX ? FD : FT;
9495 default:
9496 return I64;
89cfc2c6 9497 }
e9a25f70 9498}
89cfc2c6 9499
e9a25f70
JL
9500/* Return an rtx for an integer representing the VMS Argument Information
9501 register value. */
89cfc2c6 9502
aa388f29 9503rtx
a5c24926 9504alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
e9a25f70
JL
9505{
9506 unsigned HOST_WIDE_INT regval = cum.num_args;
9507 int i;
89cfc2c6 9508
e9a25f70
JL
9509 for (i = 0; i < 6; i++)
9510 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
89cfc2c6 9511
e9a25f70
JL
9512 return GEN_INT (regval);
9513}
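For illustration (not part of the original file): a standalone sketch of the VMS Argument Information encoding built above, for a hypothetical two-argument call. The low bits hold the argument count and each argument contributes a 3-bit type code starting at bit 8; the type-code values used here are made up for the demonstration.

#include <stdio.h>

int main (void)
{
  unsigned long regval = 2;              /* hypothetical: two arguments */
  int atypes[6] = { 1, 3, 0, 0, 0, 0 };  /* made-up 3-bit type codes */

  for (int i = 0; i < 6; i++)
    regval |= (unsigned long) atypes[i] << (i * 3 + 8);

  printf ("AI = %#lx\n", regval);        /* prints AI = 0x1902 */
  return 0;
}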
9514\f
89cfc2c6 9515
b714133e
EB
9516/* Return a SYMBOL_REF representing the reference to the .linkage entry
9517 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9518 this is the reference to the linkage pointer value, 0 if this is the
9519 reference to the function entry value. RFLAG is 1 if this is a reduced
9520 reference (code address only), 0 if this is a full reference. */
9521
1330f7d5 9522rtx
735f469b 9523alpha_use_linkage (rtx func, bool lflag, bool rflag)
1330f7d5 9524{
735f469b 9525 struct alpha_links *al = NULL;
b714133e 9526 const char *name = XSTR (func, 0);
1330f7d5 9527
735f469b 9528 if (cfun->machine->links)
1330f7d5 9529 {
1330f7d5 9530 /* Is this name already defined? */
a6330e85 9531 alpha_links **slot = cfun->machine->links->get (name);
de144fb2
TS
9532 if (slot)
9533 al = *slot;
1330f7d5
DR
9534 }
9535 else
de144fb2 9536 cfun->machine->links
fb5c464a 9537 = hash_map<nofree_string_hash, alpha_links *>::create_ggc (64);
1330f7d5 9538
735f469b 9539 if (al == NULL)
1330f7d5 9540 {
735f469b 9541 size_t buf_len;
1330f7d5 9542 char *linksym;
39420b1a 9543 tree id;
1330f7d5
DR
9544
9545 if (name[0] == '*')
9546 name++;
9547
39420b1a
TG
9548 /* Follow transparent alias, as this is used for CRTL translations. */
9549 id = maybe_get_identifier (name);
9550 if (id)
9551 {
9552 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9553 id = TREE_CHAIN (id);
9554 name = IDENTIFIER_POINTER (id);
9555 }
9556
735f469b
TG
9557 buf_len = strlen (name) + 8 + 9;
9558 linksym = (char *) alloca (buf_len);
9559 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
1330f7d5 9560
766090c2 9561 al = ggc_alloc<alpha_links> ();
735f469b
TG
9562 al->func = func;
9563 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
1330f7d5 9564
de144fb2 9565 cfun->machine->links->put (ggc_strdup (name), al);
1330f7d5
DR
9566 }
9567
735f469b 9568 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
f676971a 9569
1330f7d5 9570 if (lflag)
0a81f074 9571 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
1330f7d5
DR
9572 else
9573 return al->linkage;
9574}
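For illustration (not part of the original file): the linkage-symbol naming scheme used above, with a hypothetical function number and function name.

#include <stdio.h>

int main (void)
{
  const char *name = "foo";             /* hypothetical function name */
  int funcdef_no = 42;                  /* hypothetical function number */
  char linksym[64];

  snprintf (linksym, sizeof linksym, "$%d..%s..lk", funcdef_no, name);
  puts (linksym);                       /* prints $42..foo..lk */
  return 0;
}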
9575
a82c7f05 9576static int
a6330e85 9577alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
a82c7f05 9578{
735f469b 9579 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
1330f7d5 9580 if (link->rkind == KIND_CODEADDR)
a82c7f05 9581 {
735f469b 9582 /* External and used, request code address. */
39420b1a 9583 fprintf (stream, "\t.code_address ");
a82c7f05
RH
9584 }
9585 else
9586 {
735f469b
TG
9587 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9588 && SYMBOL_REF_LOCAL_P (link->func))
1330f7d5 9589 {
735f469b 9590 /* Locally defined, build linkage pair. */
1330f7d5 9591 fprintf (stream, "\t.quad %s..en\n", name);
39420b1a 9592 fprintf (stream, "\t.quad ");
1330f7d5
DR
9593 }
9594 else
9595 {
735f469b 9596 /* External, request linkage pair. */
39420b1a 9597 fprintf (stream, "\t.linkage ");
1330f7d5 9598 }
a82c7f05 9599 }
39420b1a
TG
9600 assemble_name (stream, name);
9601 fputs ("\n", stream);
a82c7f05
RH
9602
9603 return 0;
9604}
89cfc2c6 9605
1330f7d5 9606static void
735f469b 9607alpha_write_linkage (FILE *stream, const char *funname)
89cfc2c6 9608{
d6b5193b 9609 fprintf (stream, "\t.link\n");
1330f7d5 9610 fprintf (stream, "\t.align 3\n");
d6b5193b
RS
9611 in_section = NULL;
9612
735f469b 9613#ifdef TARGET_VMS_CRASH_DEBUG
1330f7d5
DR
9614 fputs ("\t.name ", stream);
9615 assemble_name (stream, funname);
9616 fputs ("..na\n", stream);
735f469b
TG
9617#endif
9618
1330f7d5
DR
9619 ASM_OUTPUT_LABEL (stream, funname);
9620 fprintf (stream, "\t.pdesc ");
9621 assemble_name (stream, funname);
9622 fprintf (stream, "..en,%s\n",
9623 alpha_procedure_type == PT_STACK ? "stack"
9624 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9625
735f469b 9626 if (cfun->machine->links)
c1bd46a8 9627 {
fb5c464a 9628 hash_map<nofree_string_hash, alpha_links *>::iterator iter
de144fb2
TS
9629 = cfun->machine->links->begin ();
9630 for (; iter != cfun->machine->links->end (); ++iter)
9631 alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
c1bd46a8 9632 }
89cfc2c6
RK
9633}
9634
7c262518
RH
9635/* Switch to an arbitrary section NAME with attributes as specified
9636 by FLAGS. ALIGN specifies any known alignment requirements for
9637 the section; 0 if the default should be used. */
9638
9639static void
c18a5b6c
MM
9640vms_asm_named_section (const char *name, unsigned int flags,
9641 tree decl ATTRIBUTE_UNUSED)
7c262518 9642{
c1bd46a8
DR
9643 fputc ('\n', asm_out_file);
9644 fprintf (asm_out_file, ".section\t%s", name);
7c262518 9645
c1bd46a8
DR
9646 if (flags & SECTION_DEBUG)
9647 fprintf (asm_out_file, ",NOWRT");
9648
9649 fputc ('\n', asm_out_file);
7c262518
RH
9650}
9651
2cc07db4
RH
9652/* Record an element in the table of global constructors. SYMBOL is
9653 a SYMBOL_REF of the function to be called; PRIORITY is a number
f676971a 9654 between 0 and MAX_INIT_PRIORITY.
2cc07db4
RH
9655
9656 Differs from default_ctors_section_asm_out_constructor in that the
9657 width of the .ctors entry is always 64 bits, rather than the 32 bits
9658 used by a normal pointer. */
9659
9660static void
a5c24926 9661vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
2cc07db4 9662{
d6b5193b 9663 switch_to_section (ctors_section);
c8af3574
RH
9664 assemble_align (BITS_PER_WORD);
9665 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
2cc07db4
RH
9666}
9667
9668static void
a5c24926 9669vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
2cc07db4 9670{
d6b5193b 9671 switch_to_section (dtors_section);
c8af3574
RH
9672 assemble_align (BITS_PER_WORD);
9673 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
2cc07db4 9674}
89cfc2c6 9675#else
1330f7d5 9676rtx
b714133e 9677alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
735f469b
TG
9678 bool lflag ATTRIBUTE_UNUSED,
9679 bool rflag ATTRIBUTE_UNUSED)
1330f7d5
DR
9680{
9681 return NULL_RTX;
9682}
9683
be7b80f4 9684#endif /* TARGET_ABI_OPEN_VMS */
30102605 9685\f
c15c90bb
ZW
9686static void
9687alpha_init_libfuncs (void)
9688{
75db85d8 9689 if (TARGET_ABI_OPEN_VMS)
c15c90bb
ZW
9690 {
9691 /* Use the VMS runtime library functions for division and
9692 remainder. */
9693 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9694 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9695 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9696 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9697 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9698 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9699 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9700 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
5e3fef6c
DR
9701 abort_libfunc = init_one_libfunc ("decc$abort");
9702 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9703#ifdef MEM_LIBFUNCS_INIT
9704 MEM_LIBFUNCS_INIT;
9705#endif
c15c90bb
ZW
9706 }
9707}
9708
5efd84c5
NF
9709/* On the Alpha, we use this to disable the floating-point registers
9710 when they don't exist. */
9711
9712static void
9713alpha_conditional_register_usage (void)
9714{
9715 int i;
9716 if (! TARGET_FPREGS)
9717 for (i = 32; i < 63; i++)
9718 fixed_regs[i] = call_used_regs[i] = 1;
9719}
c354951b
AK
9720
9721/* Canonicalize a comparison from one we don't have to one we do have. */
9722
9723static void
9724alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9725 bool op0_preserve_value)
9726{
9727 if (!op0_preserve_value
9728 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9729 && (REG_P (*op1) || *op1 == const0_rtx))
9730 {
9731 rtx tem = *op0;
9732 *op0 = *op1;
9733 *op1 = tem;
9734 *code = (int)swap_condition ((enum rtx_code)*code);
9735 }
9736
9737 if ((*code == LT || *code == LTU)
9738 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9739 {
9740 *code = *code == LT ? LE : LEU;
9741 *op1 = GEN_INT (255);
9742 }
9743}
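For illustration (not part of the original file): a standalone check that the second rewrite above preserves semantics at the boundary. x < 256 and x <= 255 agree for every value, and 255 (unlike 256) fits in Alpha's 8-bit operate-instruction literal field, which is what makes the rewritten comparison cheaper.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint64_t probes[] = { 0, 254, 255, 256, 257, UINT64_MAX };

  for (int k = 0; k < 6; k++)
    assert ((probes[k] < 256) == (probes[k] <= 255));
  puts ("LT 256 == LE 255 for all probes");
  return 0;
}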
286934b4
UB
9744
9745/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
9746
9747static void
9748alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
9749{
9750 const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);
9751
9752 tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
9753 tree new_fenv_var, reload_fenv, restore_fnenv;
9754 tree update_call, atomic_feraiseexcept, hold_fnclex;
9755
9756 /* Assume OSF/1 compatible interfaces. */
9757 if (!TARGET_ABI_OSF)
9758 return;
9759
9760 /* Generate the equivalent of :
9761 unsigned long fenv_var;
9762 fenv_var = __ieee_get_fp_control ();
9763
9764 unsigned long masked_fenv;
9765 masked_fenv = fenv_var & mask;
9766
9767 __ieee_set_fp_control (masked_fenv); */
9768
9b489f31 9769 fenv_var = create_tmp_var (long_unsigned_type_node);
286934b4
UB
9770 get_fpscr
9771 = build_fn_decl ("__ieee_get_fp_control",
9772 build_function_type_list (long_unsigned_type_node, NULL));
9773 set_fpscr
9774 = build_fn_decl ("__ieee_set_fp_control",
9775 build_function_type_list (void_type_node, NULL));
9776 mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
9777 ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
9778 fenv_var, build_call_expr (get_fpscr, 0));
9779 masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
9780 hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
9781 *hold = build2 (COMPOUND_EXPR, void_type_node,
9782 build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
9783 hold_fnclex);
9784
9785 /* Store the value of masked_fenv to clear the exceptions:
9786 __ieee_set_fp_control (masked_fenv); */
9787
9788 *clear = build_call_expr (set_fpscr, 1, masked_fenv);
9789
9790 /* Generate the equivalent of :
9791 unsigned long new_fenv_var;
9792 new_fenv_var = __ieee_get_fp_control ();
9793
9794 __ieee_set_fp_control (fenv_var);
9795
9796 __atomic_feraiseexcept (new_fenv_var); */
9797
9b489f31 9798 new_fenv_var = create_tmp_var (long_unsigned_type_node);
286934b4
UB
9799 reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
9800 build_call_expr (get_fpscr, 0));
9801 restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
9802 atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
9803 update_call
9804 = build_call_expr (atomic_feraiseexcept, 1,
9805 fold_convert (integer_type_node, new_fenv_var));
9806 *update = build2 (COMPOUND_EXPR, void_type_node,
9807 build2 (COMPOUND_EXPR, void_type_node,
9808 reload_fenv, restore_fnenv), update_call);
9809}
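For illustration (not part of the original file): the mask arithmetic from the hold sequence above, applied to a hypothetical control-word value. The six accrued-status bits at positions 17-22 are cleared and every other bit survives.

#include <stdio.h>

int main (void)
{
  const unsigned long SWCR_STATUS_MASK = 0x3fUL << 17;   /* bits 17-22 */
  unsigned long fenv_var = 0x08ff0000UL;   /* hypothetical control word */
  unsigned long masked_fenv = fenv_var & ~SWCR_STATUS_MASK;

  printf ("masked = %#lx\n", masked_fenv); /* prints masked = 0x8810000 */
  return 0;
}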
a5c24926
RH
9810\f
9811/* Initialize the GCC target structure. */
9812#if TARGET_ABI_OPEN_VMS
9813# undef TARGET_ATTRIBUTE_TABLE
9814# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
7b5cbb57
AS
9815# undef TARGET_CAN_ELIMINATE
9816# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
a5c24926
RH
9817#endif
9818
9819#undef TARGET_IN_SMALL_DATA_P
9820#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9821
a5c24926
RH
9822#undef TARGET_ASM_ALIGNED_HI_OP
9823#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9824#undef TARGET_ASM_ALIGNED_DI_OP
9825#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9826
9827/* Default unaligned ops are provided for ELF systems. To get unaligned
9828 data for non-ELF systems, we have to turn off auto alignment. */
46e1a769 9829#if TARGET_ABI_OPEN_VMS
a5c24926
RH
9830#undef TARGET_ASM_UNALIGNED_HI_OP
9831#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9832#undef TARGET_ASM_UNALIGNED_SI_OP
9833#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9834#undef TARGET_ASM_UNALIGNED_DI_OP
9835#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9836#endif
9837
9b580a0b
RH
9838#undef TARGET_ASM_RELOC_RW_MASK
9839#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
a5c24926
RH
9840#undef TARGET_ASM_SELECT_RTX_SECTION
9841#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
ae069803
RH
9842#undef TARGET_SECTION_TYPE_FLAGS
9843#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
a5c24926
RH
9844
9845#undef TARGET_ASM_FUNCTION_END_PROLOGUE
9846#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9847
c15c90bb
ZW
9848#undef TARGET_INIT_LIBFUNCS
9849#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9850
506d7b68
PB
9851#undef TARGET_LEGITIMIZE_ADDRESS
9852#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
b0f6b612
NF
9853#undef TARGET_MODE_DEPENDENT_ADDRESS_P
9854#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
506d7b68 9855
1bc7c5b6
ZW
9856#undef TARGET_ASM_FILE_START
9857#define TARGET_ASM_FILE_START alpha_file_start
1bc7c5b6 9858
a5c24926
RH
9859#undef TARGET_SCHED_ADJUST_COST
9860#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9861#undef TARGET_SCHED_ISSUE_RATE
9862#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
a5c24926
RH
9863#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9864#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9865 alpha_multipass_dfa_lookahead
9866
9867#undef TARGET_HAVE_TLS
9868#define TARGET_HAVE_TLS HAVE_AS_TLS
9869
fd930388
RH
9870#undef TARGET_BUILTIN_DECL
9871#define TARGET_BUILTIN_DECL alpha_builtin_decl
a5c24926
RH
9872#undef TARGET_INIT_BUILTINS
9873#define TARGET_INIT_BUILTINS alpha_init_builtins
9874#undef TARGET_EXPAND_BUILTIN
9875#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
36013987
RH
9876#undef TARGET_FOLD_BUILTIN
9877#define TARGET_FOLD_BUILTIN alpha_fold_builtin
b6db8af6
UB
9878#undef TARGET_GIMPLE_FOLD_BUILTIN
9879#define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin
a5c24926
RH
9880
9881#undef TARGET_FUNCTION_OK_FOR_SIBCALL
9882#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9883#undef TARGET_CANNOT_COPY_INSN_P
9884#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
1a627b35
RS
9885#undef TARGET_LEGITIMATE_CONSTANT_P
9886#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
04886dc0
RH
9887#undef TARGET_CANNOT_FORCE_CONST_MEM
9888#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
a5c24926
RH
9889
9890#if TARGET_ABI_OSF
9891#undef TARGET_ASM_OUTPUT_MI_THUNK
9892#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9893#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3101faab 9894#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
3f620b5f
RH
9895#undef TARGET_STDARG_OPTIMIZE_HOOK
9896#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
a5c24926
RH
9897#endif
9898
f7a57cdc
TG
9899/* Use 16-bit anchors. */
9900#undef TARGET_MIN_ANCHOR_OFFSET
9901#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
9902#undef TARGET_MAX_ANCHOR_OFFSET
9903#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
9904#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
9905#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
9906
a5c24926
RH
9907#undef TARGET_RTX_COSTS
9908#define TARGET_RTX_COSTS alpha_rtx_costs
9909#undef TARGET_ADDRESS_COST
b413068c 9910#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
a5c24926
RH
9911
9912#undef TARGET_MACHINE_DEPENDENT_REORG
9913#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9914
cde0f3fd
PB
9915#undef TARGET_PROMOTE_FUNCTION_MODE
9916#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
f93c2180 9917#undef TARGET_PROMOTE_PROTOTYPES
586de218 9918#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
f93c2180
RH
9919#undef TARGET_RETURN_IN_MEMORY
9920#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
8cd5a4e0
RH
9921#undef TARGET_PASS_BY_REFERENCE
9922#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
f93c2180
RH
9923#undef TARGET_SETUP_INCOMING_VARARGS
9924#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9925#undef TARGET_STRICT_ARGUMENT_NAMING
9926#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9927#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9928#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
42ba5130
RH
9929#undef TARGET_SPLIT_COMPLEX_ARG
9930#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
28245018
RH
9931#undef TARGET_GIMPLIFY_VA_ARG_EXPR
9932#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
78a52f11
RH
9933#undef TARGET_ARG_PARTIAL_BYTES
9934#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
0c3a9758
NF
9935#undef TARGET_FUNCTION_ARG
9936#define TARGET_FUNCTION_ARG alpha_function_arg
9937#undef TARGET_FUNCTION_ARG_ADVANCE
9938#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
2d7b663a
RH
9939#undef TARGET_TRAMPOLINE_INIT
9940#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
6dd53648 9941
1e46eb2a
UB
9942#undef TARGET_INSTANTIATE_DECLS
9943#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
9944
48f46219
RH
9945#undef TARGET_SECONDARY_RELOAD
9946#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
9947
6dd53648
RH
9948#undef TARGET_SCALAR_MODE_SUPPORTED_P
9949#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
f676971a
EC
9950#undef TARGET_VECTOR_MODE_SUPPORTED_P
9951#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
f93c2180 9952
c35d187f
RH
9953#undef TARGET_BUILD_BUILTIN_VA_LIST
9954#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
9955
d7bd8aeb
JJ
9956#undef TARGET_EXPAND_BUILTIN_VA_START
9957#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
9958
c5387660
JM
9959#undef TARGET_OPTION_OVERRIDE
9960#define TARGET_OPTION_OVERRIDE alpha_option_override
9961
ad2c39af
UB
9962#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
9963#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
9964 alpha_override_options_after_change
9965
7269aee7 9966#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
608063c3
JB
9967#undef TARGET_MANGLE_TYPE
9968#define TARGET_MANGLE_TYPE alpha_mangle_type
7269aee7
AH
9969#endif
9970
c6c3dba9
PB
9971#undef TARGET_LEGITIMATE_ADDRESS_P
9972#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
9973
5efd84c5
NF
9974#undef TARGET_CONDITIONAL_REGISTER_USAGE
9975#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
9976
c354951b
AK
9977#undef TARGET_CANONICALIZE_COMPARISON
9978#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
9979
286934b4
UB
9980#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
9981#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv
9982
a5c24926
RH
9983struct gcc_target targetm = TARGET_INITIALIZER;
9984
9985\f
e2500fed 9986#include "gt-alpha.h"