]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/alpha/alpha.c
include/:
[thirdparty/gcc.git] / gcc / config / alpha / alpha.c
CommitLineData
bf2a98b3 1/* Subroutines used for code generation on the DEC Alpha.
b657e73a 2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
292add68 3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
0e0a0e7a 5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
bf2a98b3 6
187b36cf 7This file is part of GCC.
bf2a98b3 8
187b36cf 9GCC is free software; you can redistribute it and/or modify
bf2a98b3 10it under the terms of the GNU General Public License as published by
038d1e19 11the Free Software Foundation; either version 3, or (at your option)
bf2a98b3 12any later version.
13
187b36cf 14GCC is distributed in the hope that it will be useful,
bf2a98b3 15but WITHOUT ANY WARRANTY; without even the implied warranty of
16MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17GNU General Public License for more details.
18
19You should have received a copy of the GNU General Public License
038d1e19 20along with GCC; see the file COPYING3. If not see
21<http://www.gnu.org/licenses/>. */
bf2a98b3 22
23
bf2a98b3 24#include "config.h"
769ea120 25#include "system.h"
805e22b2 26#include "coretypes.h"
27#include "tm.h"
bf2a98b3 28#include "rtl.h"
d8fc4d0b 29#include "tree.h"
bf2a98b3 30#include "regs.h"
31#include "hard-reg-set.h"
32#include "real.h"
33#include "insn-config.h"
34#include "conditions.h"
bf2a98b3 35#include "output.h"
36#include "insn-attr.h"
37#include "flags.h"
38#include "recog.h"
bf2a98b3 39#include "expr.h"
d8fc4d0b 40#include "optabs.h"
41#include "reload.h"
bf2a98b3 42#include "obstack.h"
0c0464e6 43#include "except.h"
44#include "function.h"
769ea120 45#include "toplev.h"
11016d99 46#include "ggc.h"
0f37b7a2 47#include "integrate.h"
0d50f0b7 48#include "tm_p.h"
a767736d 49#include "target.h"
50#include "target-def.h"
961d6ddd 51#include "debug.h"
a1f71e15 52#include "langhooks.h"
573aba85 53#include <splay-tree.h>
356907d3 54#include "cfglayout.h"
75a70cf9 55#include "gimple.h"
a6c787e5 56#include "tree-flow.h"
57#include "tree-stdarg.h"
1dffd068 58#include "tm-constrs.h"
3072d30e 59#include "df.h"
0c0464e6 60
65abff06 61/* Specify which cpu to schedule for. */
fb64edde 62enum processor_type alpha_tune;
0c0464e6 63
fb64edde 64/* Which cpu we're generating code for. */
f141a8b4 65enum processor_type alpha_cpu;
fb64edde 66
9e7454d0 67static const char * const alpha_cpu_name[] =
07c1a295 68{
69 "ev4", "ev5", "ev6"
70};
0c5845b3 71
c4622276 72/* Specify how accurate floating-point traps need to be. */
73
74enum alpha_trap_precision alpha_tp;
75
76/* Specify the floating-point rounding mode. */
77
78enum alpha_fp_rounding_mode alpha_fprm;
79
80/* Specify which things cause traps. */
81
82enum alpha_fp_trap_mode alpha_fptm;
83
e3e08e7f 84/* Nonzero if inside of a function, because the Alpha asm can't
449b7f2d 85 handle .files inside of functions. */
86
87static int inside_function = FALSE;
88
07c1a295 89/* The number of cycles of latency we should assume on memory reads. */
90
91int alpha_memory_latency = 3;
92
b9a5aa8e 93/* Whether the function needs the GP. */
94
95static int alpha_function_needs_gp;
96
849674a3 97/* The alias set for prologue/epilogue register save/restore. */
98
32c2fdea 99static GTY(()) alias_set_type alpha_sr_alias_set;
849674a3 100
a314eb5e 101/* The assembler name of the current function. */
102
103static const char *alpha_fnname;
104
1f0ce6a6 105/* The next explicit relocation sequence number. */
9de382d9 106extern GTY(()) int alpha_next_sequence_number;
1f0ce6a6 107int alpha_next_sequence_number = 1;
108
109/* The literal and gpdisp sequence numbers for this insn, as printed
110 by %# and %* respectively. */
9de382d9 111extern GTY(()) int alpha_this_literal_sequence_number;
112extern GTY(()) int alpha_this_gpdisp_sequence_number;
1f0ce6a6 113int alpha_this_literal_sequence_number;
114int alpha_this_gpdisp_sequence_number;
115
fab7adbf 116/* Costs of various operations on the different architectures. */
117
118struct alpha_rtx_cost_data
119{
120 unsigned char fp_add;
121 unsigned char fp_mult;
122 unsigned char fp_div_sf;
123 unsigned char fp_div_df;
124 unsigned char int_mult_si;
125 unsigned char int_mult_di;
126 unsigned char int_shift;
127 unsigned char int_cmov;
d7cf2331 128 unsigned short int_div;
fab7adbf 129};
130
131static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
132{
133 { /* EV4 */
134 COSTS_N_INSNS (6), /* fp_add */
135 COSTS_N_INSNS (6), /* fp_mult */
136 COSTS_N_INSNS (34), /* fp_div_sf */
137 COSTS_N_INSNS (63), /* fp_div_df */
138 COSTS_N_INSNS (23), /* int_mult_si */
139 COSTS_N_INSNS (23), /* int_mult_di */
140 COSTS_N_INSNS (2), /* int_shift */
141 COSTS_N_INSNS (2), /* int_cmov */
f6777b0a 142 COSTS_N_INSNS (97), /* int_div */
fab7adbf 143 },
144 { /* EV5 */
145 COSTS_N_INSNS (4), /* fp_add */
146 COSTS_N_INSNS (4), /* fp_mult */
147 COSTS_N_INSNS (15), /* fp_div_sf */
148 COSTS_N_INSNS (22), /* fp_div_df */
149 COSTS_N_INSNS (8), /* int_mult_si */
150 COSTS_N_INSNS (12), /* int_mult_di */
151 COSTS_N_INSNS (1) + 1, /* int_shift */
152 COSTS_N_INSNS (1), /* int_cmov */
f6777b0a 153 COSTS_N_INSNS (83), /* int_div */
fab7adbf 154 },
155 { /* EV6 */
156 COSTS_N_INSNS (4), /* fp_add */
157 COSTS_N_INSNS (4), /* fp_mult */
158 COSTS_N_INSNS (12), /* fp_div_sf */
159 COSTS_N_INSNS (15), /* fp_div_df */
160 COSTS_N_INSNS (7), /* int_mult_si */
161 COSTS_N_INSNS (7), /* int_mult_di */
162 COSTS_N_INSNS (1), /* int_shift */
163 COSTS_N_INSNS (2), /* int_cmov */
f6777b0a 164 COSTS_N_INSNS (86), /* int_div */
fab7adbf 165 },
166};
167
d7cf2331 168/* Similar but tuned for code size instead of execution latency. The
169 extra +N is fractional cost tuning based on latency. It's used to
170 encourage use of cheaper insns like shift, but only if there's just
171 one of them. */
172
173static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
174{
175 COSTS_N_INSNS (1), /* fp_add */
176 COSTS_N_INSNS (1), /* fp_mult */
177 COSTS_N_INSNS (1), /* fp_div_sf */
178 COSTS_N_INSNS (1) + 1, /* fp_div_df */
179 COSTS_N_INSNS (1) + 1, /* int_mult_si */
180 COSTS_N_INSNS (1) + 2, /* int_mult_di */
181 COSTS_N_INSNS (1), /* int_shift */
182 COSTS_N_INSNS (1), /* int_cmov */
183 COSTS_N_INSNS (6), /* int_div */
184};
185
0dbd1c74 186/* Get the number of args of a function in one of two ways. */
9caef960 187#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
abe32cce 188#define NUM_ARGS crtl->args.info.num_args
0dbd1c74 189#else
abe32cce 190#define NUM_ARGS crtl->args.info
0dbd1c74 191#endif
d2832bd8 192
d2832bd8 193#define REG_PV 27
194#define REG_RA 26
f2cc13dc 195
92643d95 196/* Declarations of static functions. */
197static struct machine_function *alpha_init_machine_status (void);
8c3428a6 198static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
805e22b2 199
92643d95 200#if TARGET_ABI_OPEN_VMS
201static void alpha_write_linkage (FILE *, const char *, tree);
6988553d 202#endif
203
92643d95 204static void unicosmk_output_deferred_case_vectors (FILE *);
205static void unicosmk_gen_dsib (unsigned long *);
206static void unicosmk_output_ssib (FILE *, const char *);
207static int unicosmk_need_dex (rtx);
a767736d 208\f
fb64edde 209/* Implement TARGET_HANDLE_OPTION. */
210
211static bool
212alpha_handle_option (size_t code, const char *arg, int value)
213{
214 switch (code)
215 {
216 case OPT_mfp_regs:
217 if (value == 0)
218 target_flags |= MASK_SOFT_FP;
219 break;
220
221 case OPT_mieee:
222 case OPT_mieee_with_inexact:
223 target_flags |= MASK_IEEE_CONFORMANT;
224 break;
225
fb64edde 226 case OPT_mtls_size_:
0fe44c73 227 if (value != 16 && value != 32 && value != 64)
fb64edde 228 error ("bad value %qs for -mtls-size switch", arg);
229 break;
230 }
231
232 return true;
233}
234
4257b08a 235#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
eddcdde1 236/* Implement TARGET_MANGLE_TYPE. */
4257b08a 237
238static const char *
a9f1838b 239alpha_mangle_type (const_tree type)
4257b08a 240{
241 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
242 && TARGET_LONG_DOUBLE_128)
243 return "g";
244
245 /* For all other types, use normal C++ mangling. */
246 return NULL;
247}
248#endif
249
65abff06 250/* Parse target option strings. */
c4622276 251
252void
92643d95 253override_options (void)
c4622276 254{
e99c3a1d 255 static const struct cpu_table {
256 const char *const name;
257 const enum processor_type processor;
258 const int flags;
27de1488 259 } cpu_table[] = {
27de1488 260 { "ev4", PROCESSOR_EV4, 0 },
261 { "ev45", PROCESSOR_EV4, 0 },
262 { "21064", PROCESSOR_EV4, 0 },
fb64edde 263 { "ev5", PROCESSOR_EV5, 0 },
264 { "21164", PROCESSOR_EV5, 0 },
265 { "ev56", PROCESSOR_EV5, MASK_BWX },
266 { "21164a", PROCESSOR_EV5, MASK_BWX },
267 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
268 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
269 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
270 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
271 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
272 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
afba4ec8 273 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
27de1488 274 };
9e7454d0 275
afba4ec8 276 int const ct_size = ARRAY_SIZE (cpu_table);
fb64edde 277 int i;
278
9caef960 279 /* Unicos/Mk doesn't have shared libraries. */
280 if (TARGET_ABI_UNICOSMK && flag_pic)
281 {
c3ceba8e 282 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
9caef960 283 (flag_pic > 1) ? "PIC" : "pic");
284 flag_pic = 0;
285 }
286
9e7454d0 287 /* On Unicos/Mk, the native compiler consistently generates /d suffices for
9caef960 288 floating-point instructions. Make that the default for this target. */
289 if (TARGET_ABI_UNICOSMK)
290 alpha_fprm = ALPHA_FPRM_DYN;
291 else
292 alpha_fprm = ALPHA_FPRM_NORM;
293
c4622276 294 alpha_tp = ALPHA_TP_PROG;
c4622276 295 alpha_fptm = ALPHA_FPTM_N;
296
9e7454d0 297 /* We cannot use su and sui qualifiers for conversion instructions on
9caef960 298 Unicos/Mk. I'm not sure if this is due to assembler or hardware
299 limitations. Right now, we issue a warning if -mieee is specified
300 and then ignore it; eventually, we should either get it right or
301 disable the option altogether. */
302
c4622276 303 if (TARGET_IEEE)
304 {
9caef960 305 if (TARGET_ABI_UNICOSMK)
c3ceba8e 306 warning (0, "-mieee not supported on Unicos/Mk");
9caef960 307 else
308 {
309 alpha_tp = ALPHA_TP_INSN;
310 alpha_fptm = ALPHA_FPTM_SU;
311 }
c4622276 312 }
313
314 if (TARGET_IEEE_WITH_INEXACT)
315 {
9caef960 316 if (TARGET_ABI_UNICOSMK)
c3ceba8e 317 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
9caef960 318 else
319 {
320 alpha_tp = ALPHA_TP_INSN;
321 alpha_fptm = ALPHA_FPTM_SUI;
322 }
c4622276 323 }
324
325 if (alpha_tp_string)
264f7d8c 326 {
327 if (! strcmp (alpha_tp_string, "p"))
c4622276 328 alpha_tp = ALPHA_TP_PROG;
264f7d8c 329 else if (! strcmp (alpha_tp_string, "f"))
c4622276 330 alpha_tp = ALPHA_TP_FUNC;
264f7d8c 331 else if (! strcmp (alpha_tp_string, "i"))
c4622276 332 alpha_tp = ALPHA_TP_INSN;
264f7d8c 333 else
1e5fcbe2 334 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
264f7d8c 335 }
c4622276 336
337 if (alpha_fprm_string)
264f7d8c 338 {
339 if (! strcmp (alpha_fprm_string, "n"))
c4622276 340 alpha_fprm = ALPHA_FPRM_NORM;
264f7d8c 341 else if (! strcmp (alpha_fprm_string, "m"))
c4622276 342 alpha_fprm = ALPHA_FPRM_MINF;
264f7d8c 343 else if (! strcmp (alpha_fprm_string, "c"))
c4622276 344 alpha_fprm = ALPHA_FPRM_CHOP;
264f7d8c 345 else if (! strcmp (alpha_fprm_string,"d"))
c4622276 346 alpha_fprm = ALPHA_FPRM_DYN;
264f7d8c 347 else
1e5fcbe2 348 error ("bad value %qs for -mfp-rounding-mode switch",
c4622276 349 alpha_fprm_string);
264f7d8c 350 }
c4622276 351
352 if (alpha_fptm_string)
264f7d8c 353 {
354 if (strcmp (alpha_fptm_string, "n") == 0)
355 alpha_fptm = ALPHA_FPTM_N;
356 else if (strcmp (alpha_fptm_string, "u") == 0)
357 alpha_fptm = ALPHA_FPTM_U;
358 else if (strcmp (alpha_fptm_string, "su") == 0)
359 alpha_fptm = ALPHA_FPTM_SU;
360 else if (strcmp (alpha_fptm_string, "sui") == 0)
361 alpha_fptm = ALPHA_FPTM_SUI;
362 else
1e5fcbe2 363 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
264f7d8c 364 }
c4622276 365
cbd8ec27 366 if (alpha_cpu_string)
367 {
afba4ec8 368 for (i = 0; i < ct_size; i++)
27de1488 369 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
370 {
fb64edde 371 alpha_tune = alpha_cpu = cpu_table [i].processor;
372 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
27de1488 373 target_flags |= cpu_table [i].flags;
374 break;
375 }
afba4ec8 376 if (i == ct_size)
1e5fcbe2 377 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
cbd8ec27 378 }
379
27de1488 380 if (alpha_tune_string)
381 {
afba4ec8 382 for (i = 0; i < ct_size; i++)
27de1488 383 if (! strcmp (alpha_tune_string, cpu_table [i].name))
384 {
fb64edde 385 alpha_tune = cpu_table [i].processor;
27de1488 386 break;
387 }
afba4ec8 388 if (i == ct_size)
1e5fcbe2 389 error ("bad value %qs for -mcpu switch", alpha_tune_string);
27de1488 390 }
391
65abff06 392 /* Do some sanity checks on the above options. */
c4622276 393
9caef960 394 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
395 {
c3ceba8e 396 warning (0, "trap mode not supported on Unicos/Mk");
9caef960 397 alpha_fptm = ALPHA_FPTM_N;
398 }
399
264f7d8c 400 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
fb64edde 401 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
c4622276 402 {
c3ceba8e 403 warning (0, "fp software completion requires -mtrap-precision=i");
c4622276 404 alpha_tp = ALPHA_TP_INSN;
405 }
8df4a58b 406
fb64edde 407 if (alpha_cpu == PROCESSOR_EV6)
bc16f0c1 408 {
409 /* Except for EV6 pass 1 (not released), we always have precise
410 arithmetic traps. Which means we can do software completion
411 without minding trap shadows. */
412 alpha_tp = ALPHA_TP_PROG;
413 }
414
8df4a58b 415 if (TARGET_FLOAT_VAX)
416 {
417 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
418 {
c3ceba8e 419 warning (0, "rounding mode not supported for VAX floats");
8df4a58b 420 alpha_fprm = ALPHA_FPRM_NORM;
421 }
422 if (alpha_fptm == ALPHA_FPTM_SUI)
423 {
c3ceba8e 424 warning (0, "trap mode not supported for VAX floats");
8df4a58b 425 alpha_fptm = ALPHA_FPTM_SU;
426 }
ef76af46 427 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
c3ceba8e 428 warning (0, "128-bit long double not supported for VAX floats");
ef76af46 429 target_flags &= ~MASK_LONG_DOUBLE_128;
8df4a58b 430 }
07c1a295 431
432 {
433 char *end;
434 int lat;
435
436 if (!alpha_mlat_string)
437 alpha_mlat_string = "L1";
438
14184418 439 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
07c1a295 440 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
441 ;
442 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
14184418 443 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
07c1a295 444 && alpha_mlat_string[2] == '\0')
445 {
9e7454d0 446 static int const cache_latency[][4] =
07c1a295 447 {
448 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
449 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
65abff06 450 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
07c1a295 451 };
452
453 lat = alpha_mlat_string[1] - '0';
fb64edde 454 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
07c1a295 455 {
c3ceba8e 456 warning (0, "L%d cache latency unknown for %s",
fb64edde 457 lat, alpha_cpu_name[alpha_tune]);
07c1a295 458 lat = 3;
459 }
460 else
fb64edde 461 lat = cache_latency[alpha_tune][lat-1];
07c1a295 462 }
463 else if (! strcmp (alpha_mlat_string, "main"))
464 {
465 /* Most current memories have about 370ns latency. This is
466 a reasonable guess for a fast cpu. */
467 lat = 150;
468 }
469 else
470 {
c3ceba8e 471 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
07c1a295 472 lat = 3;
473 }
474
475 alpha_memory_latency = lat;
476 }
a9fa9190 477
478 /* Default the definition of "small data" to 8 bytes. */
479 if (!g_switch_set)
480 g_switch_value = 8;
849674a3 481
5dcb037d 482 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
483 if (flag_pic == 1)
484 target_flags |= MASK_SMALL_DATA;
485 else if (flag_pic == 2)
486 target_flags &= ~MASK_SMALL_DATA;
487
0ea5169b 488 /* Align labels and loops for optimal branching. */
489 /* ??? Kludge these by not doing anything if we don't optimize and also if
65abff06 490 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
0ea5169b 491 if (optimize > 0 && write_symbols != SDB_DEBUG)
492 {
493 if (align_loops <= 0)
494 align_loops = 16;
495 if (align_jumps <= 0)
496 align_jumps = 16;
497 }
498 if (align_functions <= 0)
499 align_functions = 16;
500
849674a3 501 /* Acquire a unique set number for our register saves and restores. */
502 alpha_sr_alias_set = new_alias_set ();
9caef960 503
504 /* Register variables and functions with the garbage collector. */
505
9caef960 506 /* Set up function hooks. */
507 init_machine_status = alpha_init_machine_status;
1268285a 508
509 /* Tell the compiler when we're using VAX floating point. */
510 if (TARGET_FLOAT_VAX)
511 {
0021bea9 512 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
513 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
514 REAL_MODE_FORMAT (TFmode) = NULL;
1268285a 515 }
2dde0cc6 516
517#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
518 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
519 target_flags |= MASK_LONG_DOUBLE_128;
520#endif
8a58ed0a 521
522 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
523 can be optimized to ap = __builtin_next_arg (0). */
524 if (TARGET_ABI_UNICOSMK)
525 targetm.expand_builtin_va_start = NULL;
c4622276 526}
527\f
bf2a98b3 528/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
529
530int
92643d95 531zap_mask (HOST_WIDE_INT value)
bf2a98b3 532{
533 int i;
534
535 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
536 i++, value >>= 8)
537 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
538 return 0;
539
540 return 1;
541}
542
9e7454d0 543/* Return true if OP is valid for a particular TLS relocation.
41421c64 544 We are already guaranteed that OP is a CONST. */
bf2a98b3 545
546int
41421c64 547tls_symbolic_operand_1 (rtx op, int size, int unspec)
bf2a98b3 548{
5f7b9df8 549 op = XEXP (op, 0);
550
551 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
552 return 0;
553 op = XVECEXP (op, 0, 0);
554
555 if (GET_CODE (op) != SYMBOL_REF)
556 return 0;
5f7b9df8 557
09a1f342 558 switch (SYMBOL_REF_TLS_MODEL (op))
9bdcc1e5 559 {
09a1f342 560 case TLS_MODEL_LOCAL_DYNAMIC:
ea284d73 561 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
09a1f342 562 case TLS_MODEL_INITIAL_EXEC:
9bdcc1e5 563 return unspec == UNSPEC_TPREL && size == 64;
09a1f342 564 case TLS_MODEL_LOCAL_EXEC:
ea284d73 565 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
9bdcc1e5 566 default:
4d10b463 567 gcc_unreachable ();
9bdcc1e5 568 }
5f7b9df8 569}
570
41421c64 571/* Used by aligned_memory_operand and unaligned_memory_operand to
572 resolve what reload is going to do with OP if it's a register. */
bbf31a61 573
41421c64 574rtx
575resolve_reload_operand (rtx op)
bf2a98b3 576{
cc215844 577 if (reload_in_progress)
bf2a98b3 578 {
cc215844 579 rtx tmp = op;
580 if (GET_CODE (tmp) == SUBREG)
581 tmp = SUBREG_REG (tmp);
c933fb42 582 if (REG_P (tmp)
cc215844 583 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
584 {
585 op = reg_equiv_memory_loc[REGNO (tmp)];
586 if (op == 0)
587 return 0;
588 }
bf2a98b3 589 }
41421c64 590 return op;
14f7bc98 591}
592
b2aef146 593/* The scalar modes supported differs from the default check-what-c-supports
594 version in that sometimes TFmode is available even when long double
595 indicates only DFmode. On unicosmk, we have the situation that HImode
596 doesn't map to any C type, but of course we still support that. */
597
598static bool
599alpha_scalar_mode_supported_p (enum machine_mode mode)
600{
601 switch (mode)
602 {
603 case QImode:
604 case HImode:
605 case SImode:
606 case DImode:
607 case TImode: /* via optabs.c */
608 return true;
609
610 case SFmode:
611 case DFmode:
612 return true;
613
614 case TFmode:
615 return TARGET_HAS_XFLOATING_LIBS;
616
617 default:
618 return false;
619 }
620}
621
622/* Alpha implements a couple of integer vector mode operations when
b739144f 623 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
624 which allows the vectorizer to operate on e.g. move instructions,
625 or when expand_vector_operations can do something useful. */
b2aef146 626
9e7454d0 627static bool
628alpha_vector_mode_supported_p (enum machine_mode mode)
629{
b739144f 630 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
9e7454d0 631}
632
550e415f 633/* Return 1 if this function can directly return via $26. */
634
635int
92643d95 636direct_return (void)
550e415f 637{
9caef960 638 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
1467e953 639 && reload_completed
640 && alpha_sa_size () == 0
550e415f 641 && get_frame_size () == 0
abe32cce 642 && crtl->outgoing_args_size == 0
643 && crtl->args.pretend_args_size == 0);
550e415f 644}
ecb98d40 645
646/* Return the ADDR_VEC associated with a tablejump insn. */
647
648rtx
92643d95 649alpha_tablejump_addr_vec (rtx insn)
ecb98d40 650{
651 rtx tmp;
652
653 tmp = JUMP_LABEL (insn);
654 if (!tmp)
655 return NULL_RTX;
656 tmp = NEXT_INSN (tmp);
657 if (!tmp)
658 return NULL_RTX;
c933fb42 659 if (JUMP_P (tmp)
ecb98d40 660 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
661 return PATTERN (tmp);
662 return NULL_RTX;
663}
664
665/* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
666
667rtx
92643d95 668alpha_tablejump_best_label (rtx insn)
ecb98d40 669{
670 rtx jump_table = alpha_tablejump_addr_vec (insn);
671 rtx best_label = NULL_RTX;
672
673 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
674 there for edge frequency counts from profile data. */
675
676 if (jump_table)
677 {
678 int n_labels = XVECLEN (jump_table, 1);
679 int best_count = -1;
680 int i, j;
681
682 for (i = 0; i < n_labels; i++)
683 {
684 int count = 1;
685
686 for (j = i + 1; j < n_labels; j++)
687 if (XEXP (XVECEXP (jump_table, 1, i), 0)
688 == XEXP (XVECEXP (jump_table, 1, j), 0))
689 count++;
690
691 if (count > best_count)
692 best_count = count, best_label = XVECEXP (jump_table, 1, i);
693 }
694 }
695
696 return best_label ? best_label : const0_rtx;
697}
5f7b9df8 698
699/* Return the TLS model to use for SYMBOL. */
700
701static enum tls_model
92643d95 702tls_symbolic_operand_type (rtx symbol)
5f7b9df8 703{
09a1f342 704 enum tls_model model;
5f7b9df8 705
706 if (GET_CODE (symbol) != SYMBOL_REF)
afba4ec8 707 return TLS_MODEL_NONE;
09a1f342 708 model = SYMBOL_REF_TLS_MODEL (symbol);
5f7b9df8 709
09a1f342 710 /* Local-exec with a 64-bit size is the same code as initial-exec. */
711 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
712 model = TLS_MODEL_INITIAL_EXEC;
5f7b9df8 713
09a1f342 714 return model;
5f7b9df8 715}
14f7bc98 716\f
9bdcc1e5 717/* Return true if the function DECL will share the same GP as any
718 function in the current unit of translation. */
719
720static bool
a9f1838b 721decl_has_samegp (const_tree decl)
9bdcc1e5 722{
723 /* Functions that are not local can be overridden, and thus may
724 not share the same gp. */
725 if (!(*targetm.binds_local_p) (decl))
726 return false;
727
728 /* If -msmall-data is in effect, assume that there is only one GP
729 for the module, and so any local symbol has this property. We
730 need explicit relocations to be able to enforce this for symbols
731 not defined in this unit of translation, however. */
732 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
733 return true;
734
735 /* Functions that are not external are defined in this UoT. */
cf1d67e3 736 /* ??? Irritatingly, static functions not yet emitted are still
737 marked "external". Apply this to non-static functions only. */
738 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
9bdcc1e5 739}
740
52470889 741/* Return true if EXP should be placed in the small data section. */
742
743static bool
a9f1838b 744alpha_in_small_data_p (const_tree exp)
52470889 745{
0aad4cd2 746 /* We want to merge strings, so we never consider them small data. */
747 if (TREE_CODE (exp) == STRING_CST)
748 return false;
749
6ac09a46 750 /* Functions are never in the small data area. Duh. */
751 if (TREE_CODE (exp) == FUNCTION_DECL)
752 return false;
753
52470889 754 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
755 {
756 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
757 if (strcmp (section, ".sdata") == 0
758 || strcmp (section, ".sbss") == 0)
759 return true;
760 }
761 else
762 {
763 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
764
765 /* If this is an incomplete type with size 0, then we can't put it
766 in sdata because it might be too big when completed. */
3be2f219 767 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
52470889 768 return true;
769 }
770
771 return false;
772}
773
cf73d31f 774#if TARGET_ABI_OPEN_VMS
775static bool
92643d95 776alpha_linkage_symbol_p (const char *symname)
cf73d31f 777{
778 int symlen = strlen (symname);
779
780 if (symlen > 4)
781 return strcmp (&symname [symlen - 4], "..lk") == 0;
782
783 return false;
784}
785
786#define LINKAGE_SYMBOL_REF_P(X) \
787 ((GET_CODE (X) == SYMBOL_REF \
788 && alpha_linkage_symbol_p (XSTR (X, 0))) \
789 || (GET_CODE (X) == CONST \
790 && GET_CODE (XEXP (X, 0)) == PLUS \
791 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
793#endif
794
24b3c0ed 795/* legitimate_address_p recognizes an RTL expression that is a valid
796 memory address for an instruction. The MODE argument is the
797 machine mode for the MEM expression that wants to use this address.
798
799 For Alpha, we have either a constant address or the sum of a
800 register and a constant address, or just a register. For DImode,
801 any of those forms can be surrounded with an AND that clear the
802 low-order three bits; this is an "unaligned" access. */
803
fd50b071 804static bool
805alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
24b3c0ed 806{
807 /* If this is an ldq_u type address, discard the outer AND. */
808 if (mode == DImode
809 && GET_CODE (x) == AND
c933fb42 810 && CONST_INT_P (XEXP (x, 1))
24b3c0ed 811 && INTVAL (XEXP (x, 1)) == -8)
812 x = XEXP (x, 0);
813
814 /* Discard non-paradoxical subregs. */
815 if (GET_CODE (x) == SUBREG
816 && (GET_MODE_SIZE (GET_MODE (x))
817 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
818 x = SUBREG_REG (x);
819
820 /* Unadorned general registers are valid. */
821 if (REG_P (x)
822 && (strict
823 ? STRICT_REG_OK_FOR_BASE_P (x)
824 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
825 return true;
826
827 /* Constant addresses (i.e. +/- 32k) are valid. */
828 if (CONSTANT_ADDRESS_P (x))
829 return true;
830
cf73d31f 831#if TARGET_ABI_OPEN_VMS
832 if (LINKAGE_SYMBOL_REF_P (x))
833 return true;
834#endif
835
24b3c0ed 836 /* Register plus a small constant offset is valid. */
837 if (GET_CODE (x) == PLUS)
838 {
839 rtx ofs = XEXP (x, 1);
840 x = XEXP (x, 0);
841
842 /* Discard non-paradoxical subregs. */
843 if (GET_CODE (x) == SUBREG
844 && (GET_MODE_SIZE (GET_MODE (x))
845 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
846 x = SUBREG_REG (x);
847
848 if (REG_P (x))
849 {
850 if (! strict
851 && NONSTRICT_REG_OK_FP_BASE_P (x)
c933fb42 852 && CONST_INT_P (ofs))
24b3c0ed 853 return true;
854 if ((strict
855 ? STRICT_REG_OK_FOR_BASE_P (x)
856 : NONSTRICT_REG_OK_FOR_BASE_P (x))
857 && CONSTANT_ADDRESS_P (ofs))
858 return true;
859 }
24b3c0ed 860 }
861
d6c48eee 862 /* If we're managing explicit relocations, LO_SUM is valid, as are small
863 data symbols. Avoid explicit relocations of modes larger than word
864 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
865 else if (TARGET_EXPLICIT_RELOCS
866 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
1f0ce6a6 867 {
f5a60074 868 if (small_symbolic_operand (x, Pmode))
1f0ce6a6 869 return true;
f5a60074 870
871 if (GET_CODE (x) == LO_SUM)
872 {
873 rtx ofs = XEXP (x, 1);
874 x = XEXP (x, 0);
875
876 /* Discard non-paradoxical subregs. */
877 if (GET_CODE (x) == SUBREG
878 && (GET_MODE_SIZE (GET_MODE (x))
879 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
880 x = SUBREG_REG (x);
881
882 /* Must have a valid base register. */
883 if (! (REG_P (x)
884 && (strict
885 ? STRICT_REG_OK_FOR_BASE_P (x)
886 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
887 return false;
888
889 /* The symbol must be local. */
5f7b9df8 890 if (local_symbolic_operand (ofs, Pmode)
891 || dtp32_symbolic_operand (ofs, Pmode)
892 || tp32_symbolic_operand (ofs, Pmode))
f5a60074 893 return true;
894 }
1f0ce6a6 895 }
896
24b3c0ed 897 return false;
898}
899
09a1f342 900/* Build the SYMBOL_REF for __tls_get_addr. */
901
902static GTY(()) rtx tls_get_addr_libfunc;
903
904static rtx
92643d95 905get_tls_get_addr (void)
09a1f342 906{
907 if (!tls_get_addr_libfunc)
908 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
909 return tls_get_addr_libfunc;
910}
911
0d50f0b7 912/* Try machine-dependent ways of modifying an illegitimate address
913 to be legitimate. If we find one, return the new, valid address. */
914
41e3a0c7 915static rtx
916alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
0d50f0b7 917{
918 HOST_WIDE_INT addend;
919
920 /* If the address is (plus reg const_int) and the CONST_INT is not a
921 valid offset, compute the high part of the constant and add it to
922 the register. Then our address is (plus temp low-part-const). */
923 if (GET_CODE (x) == PLUS
c933fb42 924 && REG_P (XEXP (x, 0))
925 && CONST_INT_P (XEXP (x, 1))
0d50f0b7 926 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
927 {
928 addend = INTVAL (XEXP (x, 1));
929 x = XEXP (x, 0);
930 goto split_addend;
931 }
932
933 /* If the address is (const (plus FOO const_int)), find the low-order
934 part of the CONST_INT. Then load FOO plus any high-order part of the
935 CONST_INT into a register. Our address is (plus reg low-part-const).
936 This is done to reduce the number of GOT entries. */
e1ba4a27 937 if (can_create_pseudo_p ()
f5a60074 938 && GET_CODE (x) == CONST
0d50f0b7 939 && GET_CODE (XEXP (x, 0)) == PLUS
c933fb42 940 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
0d50f0b7 941 {
942 addend = INTVAL (XEXP (XEXP (x, 0), 1));
943 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
944 goto split_addend;
945 }
946
947 /* If we have a (plus reg const), emit the load as in (2), then add
948 the two registers, and finally generate (plus reg low-part-const) as
949 our address. */
e1ba4a27 950 if (can_create_pseudo_p ()
f5a60074 951 && GET_CODE (x) == PLUS
c933fb42 952 && REG_P (XEXP (x, 0))
0d50f0b7 953 && GET_CODE (XEXP (x, 1)) == CONST
954 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
c933fb42 955 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
0d50f0b7 956 {
957 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
958 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
959 XEXP (XEXP (XEXP (x, 1), 0), 0),
960 NULL_RTX, 1, OPTAB_LIB_WIDEN);
961 goto split_addend;
962 }
963
d6c48eee 964 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
965 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
966 around +/- 32k offset. */
967 if (TARGET_EXPLICIT_RELOCS
968 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
969 && symbolic_operand (x, Pmode))
1f0ce6a6 970 {
5f7b9df8 971 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
972
973 switch (tls_symbolic_operand_type (x))
974 {
dda53cd5 975 case TLS_MODEL_NONE:
976 break;
977
5f7b9df8 978 case TLS_MODEL_GLOBAL_DYNAMIC:
979 start_sequence ();
980
981 r0 = gen_rtx_REG (Pmode, 0);
982 r16 = gen_rtx_REG (Pmode, 16);
09a1f342 983 tga = get_tls_get_addr ();
5f7b9df8 984 dest = gen_reg_rtx (Pmode);
985 seq = GEN_INT (alpha_next_sequence_number++);
9e7454d0 986
5f7b9df8 987 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
988 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
989 insn = emit_call_insn (insn);
9c2a0c05 990 RTL_CONST_CALL_P (insn) = 1;
5f7b9df8 991 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
992
993 insn = get_insns ();
994 end_sequence ();
995
996 emit_libcall_block (insn, dest, r0, x);
997 return dest;
998
999 case TLS_MODEL_LOCAL_DYNAMIC:
1000 start_sequence ();
1001
1002 r0 = gen_rtx_REG (Pmode, 0);
1003 r16 = gen_rtx_REG (Pmode, 16);
09a1f342 1004 tga = get_tls_get_addr ();
5f7b9df8 1005 scratch = gen_reg_rtx (Pmode);
1006 seq = GEN_INT (alpha_next_sequence_number++);
1007
1008 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1009 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1010 insn = emit_call_insn (insn);
9c2a0c05 1011 RTL_CONST_CALL_P (insn) = 1;
5f7b9df8 1012 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1013
1014 insn = get_insns ();
1015 end_sequence ();
1016
1017 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1018 UNSPEC_TLSLDM_CALL);
1019 emit_libcall_block (insn, scratch, r0, eqv);
1020
1021 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1022 eqv = gen_rtx_CONST (Pmode, eqv);
1023
1024 if (alpha_tls_size == 64)
1025 {
1026 dest = gen_reg_rtx (Pmode);
1027 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1028 emit_insn (gen_adddi3 (dest, dest, scratch));
1029 return dest;
1030 }
1031 if (alpha_tls_size == 32)
1032 {
1033 insn = gen_rtx_HIGH (Pmode, eqv);
1034 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1035 scratch = gen_reg_rtx (Pmode);
1036 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1037 }
1038 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1039
1040 case TLS_MODEL_INITIAL_EXEC:
1041 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1042 eqv = gen_rtx_CONST (Pmode, eqv);
1043 tp = gen_reg_rtx (Pmode);
1044 scratch = gen_reg_rtx (Pmode);
1045 dest = gen_reg_rtx (Pmode);
1046
1047 emit_insn (gen_load_tp (tp));
1048 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1049 emit_insn (gen_adddi3 (dest, tp, scratch));
1050 return dest;
1051
1052 case TLS_MODEL_LOCAL_EXEC:
1053 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1054 eqv = gen_rtx_CONST (Pmode, eqv);
1055 tp = gen_reg_rtx (Pmode);
1056
1057 emit_insn (gen_load_tp (tp));
1058 if (alpha_tls_size == 32)
1059 {
1060 insn = gen_rtx_HIGH (Pmode, eqv);
1061 insn = gen_rtx_PLUS (Pmode, tp, insn);
1062 tp = gen_reg_rtx (Pmode);
1063 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1064 }
1065 return gen_rtx_LO_SUM (Pmode, tp, eqv);
dda53cd5 1066
1067 default:
1068 gcc_unreachable ();
5f7b9df8 1069 }
1070
8afb6db4 1071 if (local_symbolic_operand (x, Pmode))
1072 {
1073 if (small_symbolic_operand (x, Pmode))
f5a60074 1074 return x;
8afb6db4 1075 else
1076 {
e1ba4a27 1077 if (can_create_pseudo_p ())
f5a60074 1078 scratch = gen_reg_rtx (Pmode);
1079 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1080 gen_rtx_HIGH (Pmode, x)));
1081 return gen_rtx_LO_SUM (Pmode, scratch, x);
8afb6db4 1082 }
5dcb037d 1083 }
1f0ce6a6 1084 }
1085
0d50f0b7 1086 return NULL;
1087
1088 split_addend:
1089 {
f5a60074 1090 HOST_WIDE_INT low, high;
1091
1092 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1093 addend -= low;
1094 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1095 addend -= high;
1096
1097 if (addend)
1098 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
e1ba4a27 1099 (!can_create_pseudo_p () ? scratch : NULL_RTX),
f5a60074 1100 1, OPTAB_LIB_WIDEN);
1101 if (high)
1102 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
e1ba4a27 1103 (!can_create_pseudo_p () ? scratch : NULL_RTX),
f5a60074 1104 1, OPTAB_LIB_WIDEN);
1105
1106 return plus_constant (x, low);
0d50f0b7 1107 }
1108}
1109
41e3a0c7 1110
1111/* Try machine-dependent ways of modifying an illegitimate address
1112 to be legitimate. Return X or the new, valid address. */
1113
1114static rtx
1115alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1116 enum machine_mode mode)
1117{
1118 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1119 return new_x ? new_x : x;
1120}
1121
c0da4391 1122/* Primarily this is required for TLS symbols, but given that our move
1123 patterns *ought* to be able to handle any symbol at any time, we
1124 should never be spilling symbolic operands to the constant pool, ever. */
1125
1126static bool
1127alpha_cannot_force_const_mem (rtx x)
1128{
1129 enum rtx_code code = GET_CODE (x);
1130 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1131}
1132
805e22b2 1133/* We do not allow indirect calls to be optimized into sibling calls, nor
9bdcc1e5 1134 can we allow a call to a function with a different GP to be optimized
1135 into a sibcall. */
1136
805e22b2 1137static bool
92643d95 1138alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
805e22b2 1139{
9bdcc1e5 1140 /* Can't do indirect tail calls, since we don't know if the target
1141 uses the same GP. */
1142 if (!decl)
1143 return false;
1144
1145 /* Otherwise, we can make a tail call if the target function shares
1146 the same GP. */
1147 return decl_has_samegp (decl);
805e22b2 1148}
1149
b71b0310 1150int
1151some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
367e2ab3 1152{
1153 rtx x = *px;
f5a60074 1154
792433e3 1155 /* Don't re-split. */
1156 if (GET_CODE (x) == LO_SUM)
1157 return -1;
367e2ab3 1158
792433e3 1159 return small_symbolic_operand (x, Pmode) != 0;
f5a60074 1160}
1161
367e2ab3 1162static int
92643d95 1163split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
367e2ab3 1164{
1165 rtx x = *px;
443bb1a6 1166
792433e3 1167 /* Don't re-split. */
1168 if (GET_CODE (x) == LO_SUM)
1169 return -1;
f5a60074 1170
367e2ab3 1171 if (small_symbolic_operand (x, Pmode))
1172 {
1173 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1174 *px = x;
792433e3 1175 return -1;
367e2ab3 1176 }
1177
792433e3 1178 return 0;
f5a60074 1179}
1180
92643d95 1181rtx
1182split_small_symbolic_operand (rtx x)
1183{
1184 x = copy_insn (x);
1185 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1186 return x;
1187}
1188
2f58af60 1189/* Indicate that INSN cannot be duplicated. This is true for any insn
1190 that we've marked with gpdisp relocs, since those have to stay in
1191 1-1 correspondence with one another.
1192
5910bb95 1193 Technically we could copy them if we could set up a mapping from one
2f58af60 1194 sequence number to another, across the set of insns to be duplicated.
1195 This seems overly complicated and error-prone since interblock motion
b55f2ed8 1196 from sched-ebb could move one of the pair of insns to a different block.
1197
1198 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1199 then they'll be in a different block from their ldgp. Which could lead
1200 the bb reorder code to think that it would be ok to copy just the block
1201 containing the call and branch to the block containing the ldgp. */
2f58af60 1202
1203static bool
92643d95 1204alpha_cannot_copy_insn_p (rtx insn)
2f58af60 1205{
2f58af60 1206 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1207 return false;
b55f2ed8 1208 if (recog_memoized (insn) >= 0)
1209 return get_attr_cannot_copy (insn);
1210 else
2f58af60 1211 return false;
2f58af60 1212}
1213
9e7454d0 1214
0d50f0b7 1215/* Try a machine-dependent way of reloading an illegitimate address
1216 operand. If we find one, push the reload and return the new rtx. */
9e7454d0 1217
0d50f0b7 1218rtx
92643d95 1219alpha_legitimize_reload_address (rtx x,
1220 enum machine_mode mode ATTRIBUTE_UNUSED,
1221 int opnum, int type,
1222 int ind_levels ATTRIBUTE_UNUSED)
0d50f0b7 1223{
1224 /* We must recognize output that we have already generated ourselves. */
1225 if (GET_CODE (x) == PLUS
1226 && GET_CODE (XEXP (x, 0)) == PLUS
c933fb42 1227 && REG_P (XEXP (XEXP (x, 0), 0))
1228 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1229 && CONST_INT_P (XEXP (x, 1)))
0d50f0b7 1230 {
1231 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1232 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
0a48089c 1233 opnum, (enum reload_type) type);
0d50f0b7 1234 return x;
1235 }
1236
1237 /* We wish to handle large displacements off a base register by
1238 splitting the addend across an ldah and the mem insn. This
1239 cuts number of extra insns needed from 3 to 1. */
1240 if (GET_CODE (x) == PLUS
c933fb42 1241 && REG_P (XEXP (x, 0))
0d50f0b7 1242 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1243 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1244 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1245 {
1246 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1247 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1248 HOST_WIDE_INT high
1249 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1250
1251 /* Check for 32-bit overflow. */
1252 if (high + low != val)
1253 return NULL_RTX;
1254
1255 /* Reload the high part into a base reg; leave the low part
1256 in the mem directly. */
1257 x = gen_rtx_PLUS (GET_MODE (x),
1258 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1259 GEN_INT (high)),
1260 GEN_INT (low));
1261
1262 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1263 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
0a48089c 1264 opnum, (enum reload_type) type);
0d50f0b7 1265 return x;
1266 }
1267
1268 return NULL_RTX;
1269}
1270\f
fab7adbf 1271/* Compute a (partial) cost for rtx X. Return true if the complete
1272 cost has been computed, and false if subexpressions should be
1273 scanned. In either case, *TOTAL contains the cost result. */
1274
1275static bool
f529eb25 1276alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1277 bool speed)
fab7adbf 1278{
1279 enum machine_mode mode = GET_MODE (x);
1280 bool float_mode_p = FLOAT_MODE_P (mode);
d7cf2331 1281 const struct alpha_rtx_cost_data *cost_data;
1282
f529eb25 1283 if (!speed)
d7cf2331 1284 cost_data = &alpha_rtx_cost_size;
1285 else
fb64edde 1286 cost_data = &alpha_rtx_cost_data[alpha_tune];
fab7adbf 1287
1288 switch (code)
1289 {
d7cf2331 1290 case CONST_INT:
fab7adbf 1291 /* If this is an 8-bit constant, return zero since it can be used
1292 nearly anywhere with no cost. If it is a valid operand for an
1293 ADD or AND, likewise return 0 if we know it will be used in that
1294 context. Otherwise, return 2 since it might be used there later.
1295 All other constants take at least two insns. */
fab7adbf 1296 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1297 {
1298 *total = 0;
1299 return true;
1300 }
8e262b5e 1301 /* FALLTHRU */
fab7adbf 1302
1303 case CONST_DOUBLE:
1304 if (x == CONST0_RTX (mode))
1305 *total = 0;
1306 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1307 || (outer_code == AND && and_operand (x, VOIDmode)))
1308 *total = 0;
1309 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1310 *total = 2;
1311 else
1312 *total = COSTS_N_INSNS (2);
1313 return true;
9e7454d0 1314
fab7adbf 1315 case CONST:
1316 case SYMBOL_REF:
1317 case LABEL_REF:
1318 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1319 *total = COSTS_N_INSNS (outer_code != MEM);
1320 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1321 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1322 else if (tls_symbolic_operand_type (x))
1323 /* Estimate of cost for call_pal rduniq. */
d7cf2331 1324 /* ??? How many insns do we emit here? More than one... */
fab7adbf 1325 *total = COSTS_N_INSNS (15);
1326 else
1327 /* Otherwise we do a load from the GOT. */
f529eb25 1328 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
fab7adbf 1329 return true;
9e7454d0 1330
91bc47b0 1331 case HIGH:
1332 /* This is effectively an add_operand. */
1333 *total = 2;
1334 return true;
1335
fab7adbf 1336 case PLUS:
1337 case MINUS:
1338 if (float_mode_p)
d7cf2331 1339 *total = cost_data->fp_add;
fab7adbf 1340 else if (GET_CODE (XEXP (x, 0)) == MULT
1341 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1342 {
0a48089c 1343 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1344 (enum rtx_code) outer_code, speed)
1345 + rtx_cost (XEXP (x, 1),
1346 (enum rtx_code) outer_code, speed)
1347 + COSTS_N_INSNS (1));
fab7adbf 1348 return true;
1349 }
1350 return false;
1351
1352 case MULT:
1353 if (float_mode_p)
d7cf2331 1354 *total = cost_data->fp_mult;
fab7adbf 1355 else if (mode == DImode)
d7cf2331 1356 *total = cost_data->int_mult_di;
fab7adbf 1357 else
d7cf2331 1358 *total = cost_data->int_mult_si;
fab7adbf 1359 return false;
1360
1361 case ASHIFT:
c933fb42 1362 if (CONST_INT_P (XEXP (x, 1))
fab7adbf 1363 && INTVAL (XEXP (x, 1)) <= 3)
1364 {
1365 *total = COSTS_N_INSNS (1);
1366 return false;
1367 }
8e262b5e 1368 /* FALLTHRU */
fab7adbf 1369
1370 case ASHIFTRT:
1371 case LSHIFTRT:
d7cf2331 1372 *total = cost_data->int_shift;
fab7adbf 1373 return false;
1374
1375 case IF_THEN_ELSE:
1376 if (float_mode_p)
d7cf2331 1377 *total = cost_data->fp_add;
fab7adbf 1378 else
d7cf2331 1379 *total = cost_data->int_cmov;
fab7adbf 1380 return false;
1381
1382 case DIV:
1383 case UDIV:
1384 case MOD:
1385 case UMOD:
1386 if (!float_mode_p)
d7cf2331 1387 *total = cost_data->int_div;
fab7adbf 1388 else if (mode == SFmode)
d7cf2331 1389 *total = cost_data->fp_div_sf;
fab7adbf 1390 else
d7cf2331 1391 *total = cost_data->fp_div_df;
fab7adbf 1392 return false;
1393
1394 case MEM:
f529eb25 1395 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
fab7adbf 1396 return true;
1397
1398 case NEG:
1399 if (! float_mode_p)
1400 {
1401 *total = COSTS_N_INSNS (1);
1402 return false;
1403 }
8e262b5e 1404 /* FALLTHRU */
fab7adbf 1405
1406 case ABS:
1407 if (! float_mode_p)
1408 {
d7cf2331 1409 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
fab7adbf 1410 return false;
1411 }
8e262b5e 1412 /* FALLTHRU */
fab7adbf 1413
1414 case FLOAT:
1415 case UNSIGNED_FLOAT:
1416 case FIX:
1417 case UNSIGNED_FIX:
fab7adbf 1418 case FLOAT_TRUNCATE:
d7cf2331 1419 *total = cost_data->fp_add;
fab7adbf 1420 return false;
1421
5fc4edde 1422 case FLOAT_EXTEND:
c933fb42 1423 if (MEM_P (XEXP (x, 0)))
5fc4edde 1424 *total = 0;
1425 else
1426 *total = cost_data->fp_add;
1427 return false;
1428
fab7adbf 1429 default:
1430 return false;
1431 }
1432}
1433\f
bf2a98b3 1434/* REF is an alignable memory location. Place an aligned SImode
1435 reference into *PALIGNED_MEM and the number of bits to shift into
a99a652b 1436 *PBITNUM. SCRATCH is a free register for use in reloading out
1437 of range stack slots. */
bf2a98b3 1438
1439void
92643d95 1440get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
bf2a98b3 1441{
1442 rtx base;
a3fed3d2 1443 HOST_WIDE_INT disp, offset;
bf2a98b3 1444
c933fb42 1445 gcc_assert (MEM_P (ref));
bf2a98b3 1446
cc215844 1447 if (reload_in_progress
1448 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
a99a652b 1449 {
cc215844 1450 base = find_replacement (&XEXP (ref, 0));
4d10b463 1451 gcc_assert (memory_address_p (GET_MODE (ref), base));
a99a652b 1452 }
bf2a98b3 1453 else
4d10b463 1454 base = XEXP (ref, 0);
bf2a98b3 1455
1456 if (GET_CODE (base) == PLUS)
a3fed3d2 1457 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1458 else
1459 disp = 0;
1460
1461 /* Find the byte offset within an aligned word. If the memory itself is
1462 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1463 will have examined the base register and determined it is aligned, and
1464 thus displacements from it are naturally alignable. */
1465 if (MEM_ALIGN (ref) >= 32)
1466 offset = 0;
1467 else
1468 offset = disp & 3;
bf2a98b3 1469
a3fed3d2 1470 /* Access the entire aligned word. */
1471 *paligned_mem = widen_memory_access (ref, SImode, -offset);
bf2a98b3 1472
a3fed3d2 1473 /* Convert the byte offset within the word to a bit offset. */
9caef960 1474 if (WORDS_BIG_ENDIAN)
a3fed3d2 1475 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
9caef960 1476 else
a3fed3d2 1477 offset *= 8;
1478 *pbitnum = GEN_INT (offset);
bf2a98b3 1479}
1480
9e7454d0 1481/* Similar, but just get the address. Handle the two reload cases.
b044f41c 1482 Add EXTRA_OFFSET to the address we return. */
bf2a98b3 1483
1484rtx
0934d969 1485get_unaligned_address (rtx ref)
bf2a98b3 1486{
1487 rtx base;
1488 HOST_WIDE_INT offset = 0;
1489
c933fb42 1490 gcc_assert (MEM_P (ref));
bf2a98b3 1491
cc215844 1492 if (reload_in_progress
1493 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
a99a652b 1494 {
a99a652b 1495 base = find_replacement (&XEXP (ref, 0));
cc215844 1496
4d10b463 1497 gcc_assert (memory_address_p (GET_MODE (ref), base));
a99a652b 1498 }
bf2a98b3 1499 else
4d10b463 1500 base = XEXP (ref, 0);
bf2a98b3 1501
1502 if (GET_CODE (base) == PLUS)
1503 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1504
0934d969 1505 return plus_constant (base, offset);
1506}
1507
1508/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1509 X is always returned in a register. */
1510
1511rtx
1512get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1513{
1514 if (GET_CODE (addr) == PLUS)
1515 {
1516 ofs += INTVAL (XEXP (addr, 1));
1517 addr = XEXP (addr, 0);
1518 }
1519
1520 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
bf2a98b3 1522}
14f7bc98 1523
f5a60074 1524/* On the Alpha, all (non-symbolic) constants except zero go into
9e7454d0 1525 a floating-point register via memory. Note that we cannot
8deb3959 1526 return anything that is not a subset of RCLASS, and that some
f5a60074 1527 symbolic constants cannot be dropped to memory. */
1528
1529enum reg_class
8deb3959 1530alpha_preferred_reload_class(rtx x, enum reg_class rclass)
f5a60074 1531{
1532 /* Zero is present in any register class. */
1533 if (x == CONST0_RTX (GET_MODE (x)))
8deb3959 1534 return rclass;
f5a60074 1535
1536 /* These sorts of constants we can easily drop to memory. */
c933fb42 1537 if (CONST_INT_P (x)
91bc47b0 1538 || GET_CODE (x) == CONST_DOUBLE
1539 || GET_CODE (x) == CONST_VECTOR)
f5a60074 1540 {
8deb3959 1541 if (rclass == FLOAT_REGS)
f5a60074 1542 return NO_REGS;
8deb3959 1543 if (rclass == ALL_REGS)
f5a60074 1544 return GENERAL_REGS;
8deb3959 1545 return rclass;
f5a60074 1546 }
1547
1548 /* All other kinds of constants should not (and in the case of HIGH
1549 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1550 secondary reload. */
1551 if (CONSTANT_P (x))
8deb3959 1552 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
f5a60074 1553
8deb3959 1554 return rclass;
f5a60074 1555}
1556
0d96cd2b 1557/* Inform reload about cases where moving X with a mode MODE to a register in
8deb3959 1558 RCLASS requires an extra scratch or immediate register. Return the class
0d96cd2b 1559 needed for the immediate register. */
14f7bc98 1560
0d96cd2b 1561static enum reg_class
8deb3959 1562alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
0d96cd2b 1563 enum machine_mode mode, secondary_reload_info *sri)
14f7bc98 1564{
0d96cd2b 1565 /* Loading and storing HImode or QImode values to and from memory
1566 usually requires a scratch register. */
1567 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
d2494d49 1568 {
0d96cd2b 1569 if (any_memory_operand (x, mode))
d2494d49 1570 {
0d96cd2b 1571 if (in_p)
1572 {
1573 if (!aligned_memory_operand (x, mode))
1574 sri->icode = reload_in_optab[mode];
1575 }
1576 else
1577 sri->icode = reload_out_optab[mode];
1578 return NO_REGS;
d2494d49 1579 }
1580 }
14f7bc98 1581
0d96cd2b 1582 /* We also cannot do integral arithmetic into FP regs, as might result
1583 from register elimination into a DImode fp register. */
8deb3959 1584 if (rclass == FLOAT_REGS)
14f7bc98 1585 {
0d96cd2b 1586 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
14f7bc98 1587 return GENERAL_REGS;
0d96cd2b 1588 if (in_p && INTEGRAL_MODE_P (mode)
1589 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
14f7bc98 1590 return GENERAL_REGS;
1591 }
1592
1593 return NO_REGS;
1594}
bf2a98b3 1595\f
1596/* Subfunction of the following function. Update the flags of any MEM
1597 found in part of X. */
1598
b04fab2a 1599static int
1600alpha_set_memflags_1 (rtx *xp, void *data)
bf2a98b3 1601{
b04fab2a 1602 rtx x = *xp, orig = (rtx) data;
bf2a98b3 1603
c933fb42 1604 if (!MEM_P (x))
b04fab2a 1605 return 0;
bf2a98b3 1606
b04fab2a 1607 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1608 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1609 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1610 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1611 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
bf2a98b3 1612
b04fab2a 1613 /* Sadly, we cannot use alias sets because the extra aliasing
1614 produced by the AND interferes. Given that two-byte quantities
1615 are the only thing we would be able to differentiate anyway,
1616 there does not seem to be any point in convoluting the early
1617 out of the alias check. */
99c14947 1618
b04fab2a 1619 return -1;
bf2a98b3 1620}
1621
7ca8bc48 1622/* Given SEQ, which is an INSN list, look for any MEMs in either
31d3e01c 1623 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1624 volatile flags from REF into each of the MEMs found. If REF is not
1625 a MEM, don't do anything. */
bf2a98b3 1626
1627void
7ca8bc48 1628alpha_set_memflags (rtx seq, rtx ref)
bf2a98b3 1629{
7ca8bc48 1630 rtx insn;
849674a3 1631
7ca8bc48 1632 if (!MEM_P (ref))
bf2a98b3 1633 return;
1634
9e7454d0 1635 /* This is only called from alpha.md, after having had something
849674a3 1636 generated from one of the insn patterns. So if everything is
1637 zero, the pattern is already up-to-date. */
b04fab2a 1638 if (!MEM_VOLATILE_P (ref)
1639 && !MEM_IN_STRUCT_P (ref)
1640 && !MEM_SCALAR_P (ref)
1641 && !MEM_NOTRAP_P (ref)
1642 && !MEM_READONLY_P (ref))
849674a3 1643 return;
1644
7ca8bc48 1645 for (insn = seq; insn; insn = NEXT_INSN (insn))
1646 if (INSN_P (insn))
1647 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1648 else
1649 gcc_unreachable ();
bf2a98b3 1650}
1651\f
91bc47b0 1652static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1653 int, bool);
1654
1655/* Internal routine for alpha_emit_set_const to check for N or below insns.
1656 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1657 and return pc_rtx if successful. */
6f86cb15 1658
1659static rtx
92643d95 1660alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
91bc47b0 1661 HOST_WIDE_INT c, int n, bool no_output)
bf2a98b3 1662{
8deb3959 1663 HOST_WIDE_INT new_const;
bf2a98b3 1664 int i, bits;
ea5db00c 1665 /* Use a pseudo if highly optimizing and still generating RTL. */
1666 rtx subtarget
e1ba4a27 1667 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
301416af 1668 rtx temp, insn;
bf2a98b3 1669
bf2a98b3 1670 /* If this is a sign-extended 32-bit constant, we can do this in at most
1671 three insns, so do it if we have enough insns left. We always have
65abff06 1672 a sign-extended 32-bit constant when compiling on a narrow machine. */
bf2a98b3 1673
3bc2043a 1674 if (HOST_BITS_PER_WIDE_INT != 64
1675 || c >> 31 == -1 || c >> 31 == 0)
bf2a98b3 1676 {
bdb19034 1677 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
bf2a98b3 1678 HOST_WIDE_INT tmp1 = c - low;
bdb19034 1679 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
bf2a98b3 1680 HOST_WIDE_INT extra = 0;
1681
81d03ebd 1682 /* If HIGH will be interpreted as negative but the constant is
1683 positive, we must adjust it to do two ldha insns. */
1684
1685 if ((high & 0x8000) != 0 && c >= 0)
bf2a98b3 1686 {
1687 extra = 0x4000;
1688 tmp1 -= 0x40000000;
1689 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1690 }
1691
1692 if (c == low || (low == 0 && extra == 0))
3bc2043a 1693 {
1694 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1695 but that meant that we can't handle INT_MIN on 32-bit machines
9e7454d0 1696 (like NT/Alpha), because we recurse indefinitely through
3bc2043a 1697 emit_move_insn to gen_movdi. So instead, since we know exactly
1698 what we want, create it explicitly. */
1699
91bc47b0 1700 if (no_output)
1701 return pc_rtx;
3bc2043a 1702 if (target == NULL)
1703 target = gen_reg_rtx (mode);
941522d6 1704 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
3bc2043a 1705 return target;
1706 }
6f86cb15 1707 else if (n >= 2 + (extra != 0))
bf2a98b3 1708 {
91bc47b0 1709 if (no_output)
1710 return pc_rtx;
e1ba4a27 1711 if (!can_create_pseudo_p ())
5b952578 1712 {
1713 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1714 temp = target;
1715 }
1716 else
1717 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1718 subtarget, mode);
ea5db00c 1719
301416af 1720 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1721 This means that if we go through expand_binop, we'll try to
1722 generate extensions, etc, which will require new pseudos, which
1723 will fail during some split phases. The SImode add patterns
1724 still exist, but are not named. So build the insns by hand. */
1725
bf2a98b3 1726 if (extra != 0)
301416af 1727 {
1728 if (! subtarget)
1729 subtarget = gen_reg_rtx (mode);
1730 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1731 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1732 emit_insn (insn);
b8585446 1733 temp = subtarget;
301416af 1734 }
bf2a98b3 1735
301416af 1736 if (target == NULL)
1737 target = gen_reg_rtx (mode);
1738 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1739 insn = gen_rtx_SET (VOIDmode, target, insn);
1740 emit_insn (insn);
1741 return target;
bf2a98b3 1742 }
1743 }
1744
dacd345b 1745 /* If we couldn't do it that way, try some other methods. But if we have
07014ed9 1746 no instructions left, don't bother. Likewise, if this is SImode and
1747 we can't make pseudos, we can't do anything since the expand_binop
1748 and expand_unop calls will widen and try to make pseudos. */
bf2a98b3 1749
e1ba4a27 1750 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
bf2a98b3 1751 return 0;
1752
dacd345b 1753 /* Next, see if we can load a related constant and then shift and possibly
bf2a98b3 1754 negate it to get the constant we want. Try this once each increasing
1755 numbers of insns. */
1756
1757 for (i = 1; i < n; i++)
1758 {
bdb19034 1759 /* First, see if minus some low bits, we've an easy load of
1760 high bits. */
1761
8deb3959 1762 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1763 if (new_const != 0)
91bc47b0 1764 {
8deb3959 1765 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
91bc47b0 1766 if (temp)
1767 {
1768 if (no_output)
1769 return temp;
8deb3959 1770 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
91bc47b0 1771 target, 0, OPTAB_WIDEN);
1772 }
1773 }
bdb19034 1774
1775 /* Next try complementing. */
91bc47b0 1776 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1777 if (temp)
1778 {
1779 if (no_output)
1780 return temp;
1781 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1782 }
bf2a98b3 1783
ea5db00c 1784 /* Next try to form a constant and do a left shift. We can do this
bf2a98b3 1785 if some low-order bits are zero; the exact_log2 call below tells
1786 us that information. The bits we are shifting out could be any
1787 value, but here we'll just try the 0- and sign-extended forms of
1788 the constant. To try to increase the chance of having the same
1789 constant in more than one insn, start at the highest number of
1790 bits to shift, but try all possibilities in case a ZAPNOT will
1791 be useful. */
1792
91bc47b0 1793 bits = exact_log2 (c & -c);
1794 if (bits > 0)
bf2a98b3 1795 for (; bits > 0; bits--)
91bc47b0 1796 {
8deb3959 1797 new_const = c >> bits;
1798 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
91bc47b0 1799 if (!temp && c < 0)
1800 {
8deb3959 1801 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1802 temp = alpha_emit_set_const (subtarget, mode, new_const,
91bc47b0 1803 i, no_output);
1804 }
1805 if (temp)
1806 {
1807 if (no_output)
1808 return temp;
1809 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1810 target, 0, OPTAB_WIDEN);
1811 }
1812 }
bf2a98b3 1813
1814 /* Now try high-order zero bits. Here we try the shifted-in bits as
066efb8d 1815 all zero and all ones. Be careful to avoid shifting outside the
1816 mode and to avoid shifting outside the host wide int size. */
3bc2043a 1817 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1818 confuse the recursive call and set all of the high 32 bits. */
bf2a98b3 1819
91bc47b0 1820 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1821 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1822 if (bits > 0)
bf2a98b3 1823 for (; bits > 0; bits--)
91bc47b0 1824 {
8deb3959 1825 new_const = c << bits;
1826 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
91bc47b0 1827 if (!temp)
1828 {
8deb3959 1829 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1830 temp = alpha_emit_set_const (subtarget, mode, new_const,
91bc47b0 1831 i, no_output);
1832 }
1833 if (temp)
1834 {
1835 if (no_output)
1836 return temp;
1837 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1838 target, 1, OPTAB_WIDEN);
1839 }
1840 }
bf2a98b3 1841
1842 /* Now try high-order 1 bits. We get that with a sign-extension.
066efb8d 1843 But one bit isn't enough here. Be careful to avoid shifting outside
65abff06 1844 the mode and to avoid shifting outside the host wide int size. */
9caef960 1845
91bc47b0 1846 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1847 - floor_log2 (~ c) - 2);
1848 if (bits > 0)
bf2a98b3 1849 for (; bits > 0; bits--)
91bc47b0 1850 {
8deb3959 1851 new_const = c << bits;
1852 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
91bc47b0 1853 if (!temp)
1854 {
8deb3959 1855 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1856 temp = alpha_emit_set_const (subtarget, mode, new_const,
91bc47b0 1857 i, no_output);
1858 }
1859 if (temp)
1860 {
1861 if (no_output)
1862 return temp;
1863 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1864 target, 0, OPTAB_WIDEN);
1865 }
1866 }
bf2a98b3 1867 }
1868
bdb19034 1869#if HOST_BITS_PER_WIDE_INT == 64
1870 /* Finally, see if can load a value into the target that is the same as the
1871 constant except that all bytes that are 0 are changed to be 0xff. If we
1872 can, then we can do a ZAPNOT to obtain the desired constant. */
1873
8deb3959 1874 new_const = c;
bdb19034 1875 for (i = 0; i < 64; i += 8)
8deb3959 1876 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1877 new_const |= (HOST_WIDE_INT) 0xff << i;
e52799e9 1878
bdb19034 1879 /* We are only called for SImode and DImode. If this is SImode, ensure that
1880 we are sign extended to a full word. */
1881
1882 if (mode == SImode)
8deb3959 1883 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
bdb19034 1884
8deb3959 1885 if (new_const != c)
91bc47b0 1886 {
8deb3959 1887 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
91bc47b0 1888 if (temp)
1889 {
1890 if (no_output)
1891 return temp;
8deb3959 1892 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
91bc47b0 1893 target, 0, OPTAB_WIDEN);
1894 }
1895 }
bdb19034 1896#endif
e52799e9 1897
bf2a98b3 1898 return 0;
1899}
996a379d 1900
92643d95 1901/* Try to output insns to set TARGET equal to the constant C if it can be
1902 done in less than N insns. Do all computations in MODE. Returns the place
1903 where the output has been placed if it can be done and the insns have been
1904 emitted. If it would take more than N insns, zero is returned and no
1905 insns and emitted. */
1906
91bc47b0 1907static rtx
92643d95 1908alpha_emit_set_const (rtx target, enum machine_mode mode,
91bc47b0 1909 HOST_WIDE_INT c, int n, bool no_output)
92643d95 1910{
91bc47b0 1911 enum machine_mode orig_mode = mode;
92643d95 1912 rtx orig_target = target;
91bc47b0 1913 rtx result = 0;
92643d95 1914 int i;
1915
1916 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1917 can't load this constant in one insn, do this in DImode. */
e1ba4a27 1918 if (!can_create_pseudo_p () && mode == SImode
c933fb42 1919 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
92643d95 1920 {
91bc47b0 1921 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1922 if (result)
1923 return result;
1924
1925 target = no_output ? NULL : gen_lowpart (DImode, target);
1926 mode = DImode;
1927 }
1928 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1929 {
1930 target = no_output ? NULL : gen_lowpart (DImode, target);
92643d95 1931 mode = DImode;
1932 }
1933
1934 /* Try 1 insn, then 2, then up to N. */
1935 for (i = 1; i <= n; i++)
1936 {
91bc47b0 1937 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
92643d95 1938 if (result)
1939 {
91bc47b0 1940 rtx insn, set;
1941
1942 if (no_output)
1943 return result;
1944
1945 insn = get_last_insn ();
1946 set = single_set (insn);
92643d95 1947 if (! CONSTANT_P (SET_SRC (set)))
1948 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1949 break;
1950 }
1951 }
1952
1953 /* Allow for the case where we changed the mode of TARGET. */
91bc47b0 1954 if (result)
1955 {
1956 if (result == target)
1957 result = orig_target;
1958 else if (mode != orig_mode)
1959 result = gen_lowpart (orig_mode, result);
1960 }
92643d95 1961
1962 return result;
1963}
1964
2612f626 1965/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1966 fall back to a straight forward decomposition. We do this to avoid
1967 exponential run times encountered when looking for longer sequences
1968 with alpha_emit_set_const. */
1969
91bc47b0 1970static rtx
92643d95 1971alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2612f626 1972{
2612f626 1973 HOST_WIDE_INT d1, d2, d3, d4;
2612f626 1974
1975 /* Decompose the entire word */
af792316 1976#if HOST_BITS_PER_WIDE_INT >= 64
4d10b463 1977 gcc_assert (c2 == -(c1 < 0));
af792316 1978 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1979 c1 -= d1;
1980 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1981 c1 = (c1 - d2) >> 32;
1982 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1983 c1 -= d3;
1984 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
4d10b463 1985 gcc_assert (c1 == d4);
af792316 1986#else
1987 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1988 c1 -= d1;
1989 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
4d10b463 1990 gcc_assert (c1 == d2);
af792316 1991 c2 += (d2 < 0);
1992 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1993 c2 -= d3;
1994 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
4d10b463 1995 gcc_assert (c2 == d4);
af792316 1996#endif
2612f626 1997
1998 /* Construct the high word */
af792316 1999 if (d4)
2000 {
2001 emit_move_insn (target, GEN_INT (d4));
2002 if (d3)
2003 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2004 }
2612f626 2005 else
af792316 2006 emit_move_insn (target, GEN_INT (d3));
2612f626 2007
2008 /* Shift it into place */
af792316 2009 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2612f626 2010
af792316 2011 /* Add in the low bits. */
2012 if (d2)
2013 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2014 if (d1)
2015 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2612f626 2016
af792316 2017 return target;
2612f626 2018}
2612f626 2019
91bc47b0 2020/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2021 the low 64 bits. */
2022
2023static void
2024alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2025{
2026 HOST_WIDE_INT i0, i1;
2027
2028 if (GET_CODE (x) == CONST_VECTOR)
2029 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2030
2031
c933fb42 2032 if (CONST_INT_P (x))
91bc47b0 2033 {
2034 i0 = INTVAL (x);
2035 i1 = -(i0 < 0);
2036 }
2037 else if (HOST_BITS_PER_WIDE_INT >= 64)
2038 {
2039 i0 = CONST_DOUBLE_LOW (x);
2040 i1 = -(i0 < 0);
2041 }
2042 else
2043 {
2044 i0 = CONST_DOUBLE_LOW (x);
2045 i1 = CONST_DOUBLE_HIGH (x);
2046 }
2047
2048 *p0 = i0;
2049 *p1 = i1;
2050}
2051
2052/* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2053 are willing to load the value into a register via a move pattern.
2054 Normally this is all symbolic constants, integral constants that
2055 take three or fewer instructions, and floating-point zero. */
2056
2057bool
2058alpha_legitimate_constant_p (rtx x)
2059{
2060 enum machine_mode mode = GET_MODE (x);
2061 HOST_WIDE_INT i0, i1;
2062
2063 switch (GET_CODE (x))
2064 {
2065 case CONST:
2066 case LABEL_REF:
91bc47b0 2067 case HIGH:
2068 return true;
2069
b5c0ec3d 2070 case SYMBOL_REF:
2071 /* TLS symbols are never valid. */
2072 return SYMBOL_REF_TLS_MODEL (x) == 0;
2073
91bc47b0 2074 case CONST_DOUBLE:
2075 if (x == CONST0_RTX (mode))
2076 return true;
2077 if (FLOAT_MODE_P (mode))
2078 return false;
2079 goto do_integer;
2080
2081 case CONST_VECTOR:
2082 if (x == CONST0_RTX (mode))
2083 return true;
2084 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2085 return false;
2086 if (GET_MODE_SIZE (mode) != 8)
2087 return false;
2088 goto do_integer;
2089
2090 case CONST_INT:
2091 do_integer:
2092 if (TARGET_BUILD_CONSTANTS)
2093 return true;
2094 alpha_extract_integer (x, &i0, &i1);
2095 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2096 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2097 return false;
2098
2099 default:
2100 return false;
2101 }
2102}
2103
2104/* Operand 1 is known to be a constant, and should require more than one
2105 instruction to load. Emit that multi-part load. */
2106
2107bool
2108alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2109{
2110 HOST_WIDE_INT i0, i1;
2111 rtx temp = NULL_RTX;
2112
2113 alpha_extract_integer (operands[1], &i0, &i1);
2114
2115 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2116 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2117
2118 if (!temp && TARGET_BUILD_CONSTANTS)
2119 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2120
2121 if (temp)
2122 {
2123 if (!rtx_equal_p (operands[0], temp))
2124 emit_move_insn (operands[0], temp);
2125 return true;
2126 }
2127
2128 return false;
2129}
2130
cb6e3ae1 2131/* Expand a move instruction; return true if all work is done.
2132 We don't handle non-bwx subword loads here. */
2133
2134bool
92643d95 2135alpha_expand_mov (enum machine_mode mode, rtx *operands)
cb6e3ae1 2136{
f8fff44e 2137 rtx tmp;
2138
cb6e3ae1 2139 /* If the output is not a register, the input must be. */
c933fb42 2140 if (MEM_P (operands[0])
cb6e3ae1 2141 && ! reg_or_0_operand (operands[1], mode))
2142 operands[1] = force_reg (mode, operands[1]);
2143
f5a60074 2144 /* Allow legitimize_address to perform some simplifications. */
62e050c6 2145 if (mode == Pmode && symbolic_operand (operands[1], mode))
1f0ce6a6 2146 {
41e3a0c7 2147 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
f5a60074 2148 if (tmp)
5dcb037d 2149 {
5f7b9df8 2150 if (tmp == operands[0])
2151 return true;
f5a60074 2152 operands[1] = tmp;
8afb6db4 2153 return false;
2154 }
1f0ce6a6 2155 }
2156
cb6e3ae1 2157 /* Early out for non-constants and valid constants. */
2158 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2159 return false;
2160
2161 /* Split large integers. */
c933fb42 2162 if (CONST_INT_P (operands[1])
91bc47b0 2163 || GET_CODE (operands[1]) == CONST_DOUBLE
2164 || GET_CODE (operands[1]) == CONST_VECTOR)
cb6e3ae1 2165 {
91bc47b0 2166 if (alpha_split_const_mov (mode, operands))
2167 return true;
cb6e3ae1 2168 }
2169
2170 /* Otherwise we've nothing left but to drop the thing to memory. */
f8fff44e 2171 tmp = force_const_mem (mode, operands[1]);
2172
2173 if (tmp == NULL_RTX)
2174 return false;
2175
cb6e3ae1 2176 if (reload_in_progress)
2177 {
f8fff44e 2178 emit_move_insn (operands[0], XEXP (tmp, 0));
2179 operands[1] = replace_equiv_address (tmp, operands[0]);
cb6e3ae1 2180 }
2181 else
f8fff44e 2182 operands[1] = validize_mem (tmp);
cb6e3ae1 2183 return false;
2184}
2185
2186/* Expand a non-bwx QImode or HImode move instruction;
2187 return true if all work is done. */
2188
2189bool
92643d95 2190alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
cb6e3ae1 2191{
0d96cd2b 2192 rtx seq;
2193
cb6e3ae1 2194 /* If the output is not a register, the input must be. */
0d96cd2b 2195 if (MEM_P (operands[0]))
cb6e3ae1 2196 operands[1] = force_reg (mode, operands[1]);
2197
2198 /* Handle four memory cases, unaligned and aligned for either the input
2199 or the output. The only case where we can be called during reload is
2200 for aligned loads; all other cases require temporaries. */
2201
0d96cd2b 2202 if (any_memory_operand (operands[1], mode))
cb6e3ae1 2203 {
2204 if (aligned_memory_operand (operands[1], mode))
2205 {
2206 if (reload_in_progress)
2207 {
0d96cd2b 2208 if (mode == QImode)
2209 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2210 else
2211 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2212 emit_insn (seq);
cb6e3ae1 2213 }
2214 else
2215 {
2216 rtx aligned_mem, bitnum;
2217 rtx scratch = gen_reg_rtx (SImode);
d67e1866 2218 rtx subtarget;
2219 bool copyout;
cb6e3ae1 2220
2221 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2222
d67e1866 2223 subtarget = operands[0];
c933fb42 2224 if (REG_P (subtarget))
d67e1866 2225 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2226 else
2227 subtarget = gen_reg_rtx (DImode), copyout = true;
2228
0d96cd2b 2229 if (mode == QImode)
2230 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2231 bitnum, scratch);
2232 else
2233 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2234 bitnum, scratch);
2235 emit_insn (seq);
d67e1866 2236
2237 if (copyout)
2238 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
cb6e3ae1 2239 }
2240 }
2241 else
2242 {
2243 /* Don't pass these as parameters since that makes the generated
2244 code depend on parameter evaluation order which will cause
2245 bootstrap failures. */
2246
0d96cd2b 2247 rtx temp1, temp2, subtarget, ua;
d67e1866 2248 bool copyout;
2249
2250 temp1 = gen_reg_rtx (DImode);
2251 temp2 = gen_reg_rtx (DImode);
cb6e3ae1 2252
d67e1866 2253 subtarget = operands[0];
c933fb42 2254 if (REG_P (subtarget))
d67e1866 2255 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2256 else
2257 subtarget = gen_reg_rtx (DImode), copyout = true;
2258
0d96cd2b 2259 ua = get_unaligned_address (operands[1]);
2260 if (mode == QImode)
2261 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2262 else
2263 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2264
cb6e3ae1 2265 alpha_set_memflags (seq, operands[1]);
2266 emit_insn (seq);
d67e1866 2267
2268 if (copyout)
2269 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
cb6e3ae1 2270 }
2271 return true;
2272 }
2273
0d96cd2b 2274 if (any_memory_operand (operands[0], mode))
cb6e3ae1 2275 {
2276 if (aligned_memory_operand (operands[0], mode))
2277 {
2278 rtx aligned_mem, bitnum;
2279 rtx temp1 = gen_reg_rtx (SImode);
2280 rtx temp2 = gen_reg_rtx (SImode);
2281
2282 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2283
2284 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2285 temp1, temp2));
2286 }
2287 else
2288 {
2289 rtx temp1 = gen_reg_rtx (DImode);
2290 rtx temp2 = gen_reg_rtx (DImode);
2291 rtx temp3 = gen_reg_rtx (DImode);
0d96cd2b 2292 rtx ua = get_unaligned_address (operands[0]);
2293
2294 if (mode == QImode)
2295 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2296 else
2297 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
cb6e3ae1 2298
2299 alpha_set_memflags (seq, operands[0]);
2300 emit_insn (seq);
2301 }
2302 return true;
2303 }
2304
2305 return false;
2306}
2307
a31688d7 2308/* Implement the movmisalign patterns. One of the operands is a memory
84846cff 2309 that is not naturally aligned. Emit instructions to load it. */
a31688d7 2310
2311void
2312alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2313{
2314 /* Honor misaligned loads, for those we promised to do so. */
2315 if (MEM_P (operands[1]))
2316 {
2317 rtx tmp;
2318
2319 if (register_operand (operands[0], mode))
2320 tmp = operands[0];
2321 else
2322 tmp = gen_reg_rtx (mode);
2323
2324 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2325 if (tmp != operands[0])
2326 emit_move_insn (operands[0], tmp);
2327 }
2328 else if (MEM_P (operands[0]))
2329 {
2330 if (!reg_or_0_operand (operands[1], mode))
2331 operands[1] = force_reg (mode, operands[1]);
2332 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2333 }
2334 else
2335 gcc_unreachable ();
2336}
2337
2a42ba09 2338/* Generate an unsigned DImode to FP conversion. This is the same code
2339 optabs would emit if we didn't have TFmode patterns.
2340
2341 For SFmode, this is the only construction I've found that can pass
2342 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2343 intermediates will work, because you'll get intermediate rounding
2344 that ruins the end result. Some of this could be fixed by turning
2345 on round-to-positive-infinity, but that requires diddling the fpsr,
2346 which kills performance. I tried turning this around and converting
2347 to a negative number, so that I could turn on /m, but either I did
2348 it wrong or there's something else cause I wound up with the exact
2349 same single-bit error. There is a branch-less form of this same code:
2350
2351 srl $16,1,$1
2352 and $16,1,$2
2353 cmplt $16,0,$3
2354 or $1,$2,$2
2355 cmovge $16,$16,$2
2356 itoft $3,$f10
2357 itoft $2,$f11
2358 cvtqs $f11,$f11
2359 adds $f11,$f11,$f0
2360 fcmoveq $f10,$f11,$f0
2361
2362 I'm not using it because it's the same number of instructions as
2363 this branch-full form, and it has more serialized long latency
2364 instructions on the critical path.
2365
2366 For DFmode, we can avoid rounding errors by breaking up the word
2367 into two pieces, converting them separately, and adding them back:
2368
2369 LC0: .long 0,0x5f800000
2370
2371 itoft $16,$f11
2372 lda $2,LC0
093c0196 2373 cmplt $16,0,$1
2a42ba09 2374 cpyse $f11,$f31,$f10
2375 cpyse $f31,$f11,$f11
2376 s4addq $1,$2,$1
2377 lds $f12,0($1)
2378 cvtqt $f10,$f10
2379 cvtqt $f11,$f11
2380 addt $f12,$f10,$f0
2381 addt $f0,$f11,$f0
2382
2383 This doesn't seem to be a clear-cut win over the optabs form.
2384 It probably all depends on the distribution of numbers being
2385 converted -- in the optabs form, all but high-bit-set has a
2386 much lower minimum execution time. */
2387
2388void
92643d95 2389alpha_emit_floatuns (rtx operands[2])
2a42ba09 2390{
2391 rtx neglab, donelab, i0, i1, f0, in, out;
2392 enum machine_mode mode;
2393
2394 out = operands[0];
8e2025b4 2395 in = force_reg (DImode, operands[1]);
2a42ba09 2396 mode = GET_MODE (out);
2397 neglab = gen_label_rtx ();
2398 donelab = gen_label_rtx ();
2399 i0 = gen_reg_rtx (DImode);
2400 i1 = gen_reg_rtx (DImode);
2401 f0 = gen_reg_rtx (mode);
2402
7e69f45b 2403 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2a42ba09 2404
2405 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2406 emit_jump_insn (gen_jump (donelab));
093c0196 2407 emit_barrier ();
2a42ba09 2408
2409 emit_label (neglab);
2410
2411 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2412 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2413 emit_insn (gen_iordi3 (i0, i0, i1));
2414 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2415 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2416
2417 emit_label (donelab);
2418}
2419
3a2a3a7f 2420/* Generate the comparison for a conditional branch. */
2421
74f4459c 2422void
2423alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
3a2a3a7f 2424{
2425 enum rtx_code cmp_code, branch_code;
74f4459c 2426 enum machine_mode branch_mode = VOIDmode;
2427 enum rtx_code code = GET_CODE (operands[0]);
2428 rtx op0 = operands[1], op1 = operands[2];
3a2a3a7f 2429 rtx tem;
2430
74f4459c 2431 if (cmp_mode == TFmode)
915c336f 2432 {
8c3428a6 2433 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
915c336f 2434 op1 = const0_rtx;
74f4459c 2435 cmp_mode = DImode;
915c336f 2436 }
2437
3a2a3a7f 2438 /* The general case: fold the comparison code to the types of compares
2439 that we have, choosing the branch as necessary. */
2440 switch (code)
2441 {
2442 case EQ: case LE: case LT: case LEU: case LTU:
a4110d9a 2443 case UNORDERED:
3a2a3a7f 2444 /* We have these compares: */
2445 cmp_code = code, branch_code = NE;
2446 break;
2447
2448 case NE:
a4110d9a 2449 case ORDERED:
65abff06 2450 /* These must be reversed. */
a4110d9a 2451 cmp_code = reverse_condition (code), branch_code = EQ;
3a2a3a7f 2452 break;
2453
2454 case GE: case GT: case GEU: case GTU:
2455 /* For FP, we swap them, for INT, we reverse them. */
74f4459c 2456 if (cmp_mode == DFmode)
3a2a3a7f 2457 {
2458 cmp_code = swap_condition (code);
2459 branch_code = NE;
2460 tem = op0, op0 = op1, op1 = tem;
2461 }
2462 else
2463 {
2464 cmp_code = reverse_condition (code);
2465 branch_code = EQ;
2466 }
2467 break;
2468
2469 default:
4d10b463 2470 gcc_unreachable ();
3a2a3a7f 2471 }
2472
74f4459c 2473 if (cmp_mode == DFmode)
3a2a3a7f 2474 {
70ce4162 2475 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
3a2a3a7f 2476 {
2477 /* When we are not as concerned about non-finite values, and we
2478 are comparing against zero, we can branch directly. */
2479 if (op1 == CONST0_RTX (DFmode))
21f1e711 2480 cmp_code = UNKNOWN, branch_code = code;
3a2a3a7f 2481 else if (op0 == CONST0_RTX (DFmode))
2482 {
2483 /* Undo the swap we probably did just above. */
2484 tem = op0, op0 = op1, op1 = tem;
4899654e 2485 branch_code = swap_condition (cmp_code);
21f1e711 2486 cmp_code = UNKNOWN;
3a2a3a7f 2487 }
2488 }
2489 else
2490 {
d30e015b 2491 /* ??? We mark the branch mode to be CCmode to prevent the
9e7454d0 2492 compare and branch from being combined, since the compare
3a2a3a7f 2493 insn follows IEEE rules that the branch does not. */
2494 branch_mode = CCmode;
2495 }
2496 }
2497 else
2498 {
3a2a3a7f 2499 /* The following optimizations are only for signed compares. */
2500 if (code != LEU && code != LTU && code != GEU && code != GTU)
2501 {
2502 /* Whee. Compare and branch against 0 directly. */
2503 if (op1 == const0_rtx)
21f1e711 2504 cmp_code = UNKNOWN, branch_code = code;
3a2a3a7f 2505
3a2f3420 2506 /* If the constants doesn't fit into an immediate, but can
2507 be generated by lda/ldah, we adjust the argument and
2508 compare against zero, so we can use beq/bne directly. */
62350d6c 2509 /* ??? Don't do this when comparing against symbols, otherwise
2510 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2511 be declared false out of hand (at least for non-weak). */
c933fb42 2512 else if (CONST_INT_P (op1)
62350d6c 2513 && (code == EQ || code == NE)
2514 && !(symbolic_operand (op0, VOIDmode)
c933fb42 2515 || (REG_P (op0) && REG_POINTER (op0))))
3a2a3a7f 2516 {
1dffd068 2517 rtx n_op1 = GEN_INT (-INTVAL (op1));
2518
2519 if (! satisfies_constraint_I (op1)
2520 && (satisfies_constraint_K (n_op1)
2521 || satisfies_constraint_L (n_op1)))
2522 cmp_code = PLUS, branch_code = code, op1 = n_op1;
3a2a3a7f 2523 }
2524 }
3a2a3a7f 2525
d74ce6fa 2526 if (!reg_or_0_operand (op0, DImode))
2527 op0 = force_reg (DImode, op0);
2528 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2529 op1 = force_reg (DImode, op1);
2530 }
3a2a3a7f 2531
2532 /* Emit an initial compare instruction, if necessary. */
2533 tem = op0;
21f1e711 2534 if (cmp_code != UNKNOWN)
3a2a3a7f 2535 {
2536 tem = gen_reg_rtx (cmp_mode);
2537 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2538 }
2539
74f4459c 2540 /* Emit the branch instruction. */
2541 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2542 gen_rtx_IF_THEN_ELSE (VOIDmode,
2543 gen_rtx_fmt_ee (branch_code,
2544 branch_mode, tem,
2545 CONST0_RTX (cmp_mode)),
2546 gen_rtx_LABEL_REF (VOIDmode,
2547 operands[3]),
2548 pc_rtx));
2549 emit_jump_insn (tem);
3a2a3a7f 2550}
2551
d74ce6fa 2552/* Certain simplifications can be done to make invalid setcc operations
2553 valid. Return the final comparison, or NULL if we can't work. */
2554
74f4459c 2555bool
2556alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
d74ce6fa 2557{
2558 enum rtx_code cmp_code;
74f4459c 2559 enum rtx_code code = GET_CODE (operands[1]);
2560 rtx op0 = operands[2], op1 = operands[3];
d74ce6fa 2561 rtx tmp;
2562
74f4459c 2563 if (cmp_mode == TFmode)
d74ce6fa 2564 {
8c3428a6 2565 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
d74ce6fa 2566 op1 = const0_rtx;
74f4459c 2567 cmp_mode = DImode;
d74ce6fa 2568 }
2569
74f4459c 2570 if (cmp_mode == DFmode && !TARGET_FIX)
2571 return 0;
d74ce6fa 2572
2573 /* The general case: fold the comparison code to the types of compares
2574 that we have, choosing the branch as necessary. */
2575
21f1e711 2576 cmp_code = UNKNOWN;
d74ce6fa 2577 switch (code)
2578 {
2579 case EQ: case LE: case LT: case LEU: case LTU:
2580 case UNORDERED:
2581 /* We have these compares. */
74f4459c 2582 if (cmp_mode == DFmode)
d74ce6fa 2583 cmp_code = code, code = NE;
2584 break;
2585
2586 case NE:
74f4459c 2587 if (cmp_mode == DImode && op1 == const0_rtx)
d74ce6fa 2588 break;
8e262b5e 2589 /* FALLTHRU */
d74ce6fa 2590
2591 case ORDERED:
2592 cmp_code = reverse_condition (code);
2593 code = EQ;
2594 break;
2595
2596 case GE: case GT: case GEU: case GTU:
75b3314a 2597 /* These normally need swapping, but for integer zero we have
bc882521 2598 special patterns that recognize swapped operands. */
74f4459c 2599 if (cmp_mode == DImode && op1 == const0_rtx)
bc882521 2600 break;
d74ce6fa 2601 code = swap_condition (code);
74f4459c 2602 if (cmp_mode == DFmode)
d74ce6fa 2603 cmp_code = code, code = NE;
2604 tmp = op0, op0 = op1, op1 = tmp;
2605 break;
2606
2607 default:
4d10b463 2608 gcc_unreachable ();
d74ce6fa 2609 }
2610
74f4459c 2611 if (cmp_mode == DImode)
d74ce6fa 2612 {
bc882521 2613 if (!register_operand (op0, DImode))
d74ce6fa 2614 op0 = force_reg (DImode, op0);
2615 if (!reg_or_8bit_operand (op1, DImode))
2616 op1 = force_reg (DImode, op1);
2617 }
2618
2619 /* Emit an initial compare instruction, if necessary. */
21f1e711 2620 if (cmp_code != UNKNOWN)
d74ce6fa 2621 {
74f4459c 2622 tmp = gen_reg_rtx (cmp_mode);
d74ce6fa 2623 emit_insn (gen_rtx_SET (VOIDmode, tmp,
74f4459c 2624 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
d74ce6fa 2625
74f4459c 2626 op0 = cmp_mode == DImode ? gen_lowpart (DImode, tmp) : tmp;
d74ce6fa 2627 op1 = const0_rtx;
2628 }
2629
74f4459c 2630 /* Emit the setcc instruction. */
2631 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2632 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2633 return true;
d74ce6fa 2634}
2635
3a2a3a7f 2636
996a379d 2637/* Rewrite a comparison against zero CMP of the form
2638 (CODE (cc0) (const_int 0)) so it can be written validly in
2639 a conditional move (if_then_else CMP ...).
e3e08e7f 2640 If both of the operands that set cc0 are nonzero we must emit
996a379d 2641 an insn to perform the compare (it can't be done within
65abff06 2642 the conditional move). */
92643d95 2643
996a379d 2644rtx
92643d95 2645alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
996a379d 2646{
23be97c5 2647 enum rtx_code code = GET_CODE (cmp);
c60bc286 2648 enum rtx_code cmov_code = NE;
74f4459c 2649 rtx op0 = XEXP (cmp, 0);
2650 rtx op1 = XEXP (cmp, 1);
23be97c5 2651 enum machine_mode cmp_mode
2652 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
3a2a3a7f 2653 enum machine_mode cmov_mode = VOIDmode;
7f3be425 2654 int local_fast_math = flag_unsafe_math_optimizations;
23be97c5 2655 rtx tem;
996a379d 2656
74f4459c 2657 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
b18b881f 2658
74f4459c 2659 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
d6cc9868 2660 {
2661 enum rtx_code cmp_code;
2662
2663 if (! TARGET_FIX)
2664 return 0;
2665
2666 /* If we have fp<->int register move instructions, do a cmov by
2667 performing the comparison in fp registers, and move the
e3e08e7f 2668 zero/nonzero value to integer registers, where we can then
d6cc9868 2669 use a normal cmov, or vice-versa. */
2670
2671 switch (code)
2672 {
2673 case EQ: case LE: case LT: case LEU: case LTU:
2674 /* We have these compares. */
2675 cmp_code = code, code = NE;
2676 break;
2677
2678 case NE:
2679 /* This must be reversed. */
2680 cmp_code = EQ, code = EQ;
2681 break;
2682
2683 case GE: case GT: case GEU: case GTU:
75b3314a 2684 /* These normally need swapping, but for integer zero we have
2685 special patterns that recognize swapped operands. */
74f4459c 2686 if (cmp_mode == DImode && op1 == const0_rtx)
88f8f2a2 2687 cmp_code = code, code = NE;
2688 else
2689 {
2690 cmp_code = swap_condition (code);
2691 code = NE;
2692 tem = op0, op0 = op1, op1 = tem;
2693 }
d6cc9868 2694 break;
2695
2696 default:
4d10b463 2697 gcc_unreachable ();
d6cc9868 2698 }
2699
74f4459c 2700 tem = gen_reg_rtx (cmp_mode);
d6cc9868 2701 emit_insn (gen_rtx_SET (VOIDmode, tem,
74f4459c 2702 gen_rtx_fmt_ee (cmp_code, cmp_mode,
d6cc9868 2703 op0, op1)));
2704
74f4459c 2705 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2706 op0 = gen_lowpart (cmp_mode, tem);
2707 op1 = CONST0_RTX (cmp_mode);
d6cc9868 2708 local_fast_math = 1;
2709 }
996a379d 2710
2711 /* We may be able to use a conditional move directly.
65abff06 2712 This avoids emitting spurious compares. */
2a42ba09 2713 if (signed_comparison_operator (cmp, VOIDmode)
74f4459c 2714 && (cmp_mode == DImode || local_fast_math)
23be97c5 2715 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
941522d6 2716 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
996a379d 2717
3029ee00 2718 /* We can't put the comparison inside the conditional move;
996a379d 2719 emit a compare instruction and put that inside the
23be97c5 2720 conditional move. Make sure we emit only comparisons we have;
2721 swap or reverse as necessary. */
996a379d 2722
e1ba4a27 2723 if (!can_create_pseudo_p ())
3029ee00 2724 return NULL_RTX;
2725
996a379d 2726 switch (code)
2727 {
23be97c5 2728 case EQ: case LE: case LT: case LEU: case LTU:
2729 /* We have these compares: */
996a379d 2730 break;
23be97c5 2731
996a379d 2732 case NE:
65abff06 2733 /* This must be reversed. */
23be97c5 2734 code = reverse_condition (code);
c60bc286 2735 cmov_code = EQ;
996a379d 2736 break;
23be97c5 2737
2738 case GE: case GT: case GEU: case GTU:
d74ce6fa 2739 /* These must be swapped. */
88f8f2a2 2740 if (op1 != CONST0_RTX (cmp_mode))
2741 {
2742 code = swap_condition (code);
2743 tem = op0, op0 = op1, op1 = tem;
2744 }
996a379d 2745 break;
23be97c5 2746
996a379d 2747 default:
4d10b463 2748 gcc_unreachable ();
996a379d 2749 }
2750
74f4459c 2751 if (cmp_mode == DImode)
d74ce6fa 2752 {
2753 if (!reg_or_0_operand (op0, DImode))
2754 op0 = force_reg (DImode, op0);
2755 if (!reg_or_8bit_operand (op1, DImode))
2756 op1 = force_reg (DImode, op1);
2757 }
2758
b9b4428b 2759 /* ??? We mark the branch mode to be CCmode to prevent the compare
3a2a3a7f 2760 and cmov from being combined, since the compare insn follows IEEE
2761 rules that the cmov does not. */
74f4459c 2762 if (cmp_mode == DFmode && !local_fast_math)
3a2a3a7f 2763 cmov_mode = CCmode;
2764
74f4459c 2765 tem = gen_reg_rtx (cmp_mode);
2766 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2767 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
996a379d 2768}
bbf31a61 2769
2770/* Simplify a conditional move of two constants into a setcc with
2771 arithmetic. This is done with a splitter since combine would
2772 just undo the work if done during code generation. It also catches
2773 cases we wouldn't have before cse. */
2774
2775int
92643d95 2776alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2777 rtx t_rtx, rtx f_rtx)
bbf31a61 2778{
2779 HOST_WIDE_INT t, f, diff;
2780 enum machine_mode mode;
2781 rtx target, subtarget, tmp;
2782
2783 mode = GET_MODE (dest);
2784 t = INTVAL (t_rtx);
2785 f = INTVAL (f_rtx);
2786 diff = t - f;
2787
2788 if (((code == NE || code == EQ) && diff < 0)
2789 || (code == GE || code == GT))
2790 {
2791 code = reverse_condition (code);
2792 diff = t, t = f, f = diff;
2793 diff = t - f;
2794 }
2795
2796 subtarget = target = dest;
2797 if (mode != DImode)
2798 {
2799 target = gen_lowpart (DImode, dest);
e1ba4a27 2800 if (can_create_pseudo_p ())
bbf31a61 2801 subtarget = gen_reg_rtx (DImode);
2802 else
2803 subtarget = target;
2804 }
64656695 2805 /* Below, we must be careful to use copy_rtx on target and subtarget
2806 in intermediate insns, as they may be a subreg rtx, which may not
2807 be shared. */
bbf31a61 2808
2809 if (f == 0 && exact_log2 (diff) > 0
8d232dc7 2810 /* On EV6, we've got enough shifters to make non-arithmetic shifts
bbf31a61 2811 viable over a longer latency cmove. On EV5, the E0 slot is a
65abff06 2812 scarce resource, and on EV4 shift has the same latency as a cmove. */
fb64edde 2813 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
bbf31a61 2814 {
2815 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
64656695 2816 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
bbf31a61 2817
64656695 2818 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2819 GEN_INT (exact_log2 (t)));
bbf31a61 2820 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2821 }
2822 else if (f == 0 && t == -1)
2823 {
2824 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
64656695 2825 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
bbf31a61 2826
64656695 2827 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
bbf31a61 2828 }
2829 else if (diff == 1 || diff == 4 || diff == 8)
2830 {
2831 rtx add_op;
2832
2833 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
64656695 2834 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
bbf31a61 2835
2836 if (diff == 1)
64656695 2837 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
bbf31a61 2838 else
2839 {
2840 add_op = GEN_INT (f);
2841 if (sext_add_operand (add_op, mode))
2842 {
64656695 2843 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2844 GEN_INT (diff));
bbf31a61 2845 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2846 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2847 }
2848 else
2849 return 0;
2850 }
2851 }
2852 else
2853 return 0;
2854
2855 return 1;
2856}
34377880 2857\f
915c336f 2858/* Look up the function X_floating library function name for the
2859 given operation. */
2860
fb1e4f4a 2861struct GTY(()) xfloating_op
2d7c492e 2862{
2863 const enum rtx_code code;
7035b2ab 2864 const char *const GTY((skip)) osf_func;
2865 const char *const GTY((skip)) vms_func;
2d7c492e 2866 rtx libcall;
2867};
2868
9e7454d0 2869static GTY(()) struct xfloating_op xfloating_ops[] =
2d7c492e 2870{
2871 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2872 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2873 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2874 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2875 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2876 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2877 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2878 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2879 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2880 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2881 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2882 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2883 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2884 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2885 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2886};
2887
2888static GTY(()) struct xfloating_op vax_cvt_ops[] =
2889{
2890 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2891 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2892};
2893
2894static rtx
92643d95 2895alpha_lookup_xfloating_lib_func (enum rtx_code code)
915c336f 2896{
2d7c492e 2897 struct xfloating_op *ops = xfloating_ops;
2898 long n = ARRAY_SIZE (xfloating_ops);
915c336f 2899 long i;
2900
8c3428a6 2901 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2902
2d7c492e 2903 /* How irritating. Nothing to key off for the main table. */
2904 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
915c336f 2905 {
2d7c492e 2906 ops = vax_cvt_ops;
2907 n = ARRAY_SIZE (vax_cvt_ops);
915c336f 2908 }
2909
2d7c492e 2910 for (i = 0; i < n; ++i, ++ops)
2911 if (ops->code == code)
2912 {
2913 rtx func = ops->libcall;
2914 if (!func)
2915 {
2916 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2917 ? ops->vms_func : ops->osf_func);
2918 ops->libcall = func;
2919 }
2920 return func;
2921 }
915c336f 2922
4d10b463 2923 gcc_unreachable ();
915c336f 2924}
2925
2926/* Most X_floating operations take the rounding mode as an argument.
2927 Compute that here. */
2928
2929static int
92643d95 2930alpha_compute_xfloating_mode_arg (enum rtx_code code,
2931 enum alpha_fp_rounding_mode round)
915c336f 2932{
2933 int mode;
2934
2935 switch (round)
2936 {
2937 case ALPHA_FPRM_NORM:
2938 mode = 2;
2939 break;
2940 case ALPHA_FPRM_MINF:
2941 mode = 1;
2942 break;
2943 case ALPHA_FPRM_CHOP:
2944 mode = 0;
2945 break;
2946 case ALPHA_FPRM_DYN:
2947 mode = 4;
2948 break;
2949 default:
4d10b463 2950 gcc_unreachable ();
915c336f 2951
2952 /* XXX For reference, round to +inf is mode = 3. */
2953 }
2954
2955 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2956 mode |= 0x10000;
2957
2958 return mode;
2959}
2960
2961/* Emit an X_floating library function call.
2962
2963 Note that these functions do not follow normal calling conventions:
2964 TFmode arguments are passed in two integer registers (as opposed to
9e7454d0 2965 indirect); TFmode return values appear in R16+R17.
915c336f 2966
2d7c492e 2967 FUNC is the function to call.
915c336f 2968 TARGET is where the output belongs.
2969 OPERANDS are the inputs.
2970 NOPERANDS is the count of inputs.
2971 EQUIV is the expression equivalent for the function.
2972*/
2973
2974static void
2d7c492e 2975alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
92643d95 2976 int noperands, rtx equiv)
915c336f 2977{
2978 rtx usage = NULL_RTX, tmp, reg;
2979 int regno = 16, i;
2980
2981 start_sequence ();
2982
2983 for (i = 0; i < noperands; ++i)
2984 {
2985 switch (GET_MODE (operands[i]))
2986 {
2987 case TFmode:
2988 reg = gen_rtx_REG (TFmode, regno);
2989 regno += 2;
2990 break;
2991
2992 case DFmode:
2993 reg = gen_rtx_REG (DFmode, regno + 32);
2994 regno += 1;
2995 break;
2996
2997 case VOIDmode:
c933fb42 2998 gcc_assert (CONST_INT_P (operands[i]));
8e262b5e 2999 /* FALLTHRU */
915c336f 3000 case DImode:
3001 reg = gen_rtx_REG (DImode, regno);
3002 regno += 1;
3003 break;
3004
3005 default:
4d10b463 3006 gcc_unreachable ();
915c336f 3007 }
3008
3009 emit_move_insn (reg, operands[i]);
3010 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3011 }
3012
3013 switch (GET_MODE (target))
3014 {
3015 case TFmode:
3016 reg = gen_rtx_REG (TFmode, 16);
3017 break;
3018 case DFmode:
3019 reg = gen_rtx_REG (DFmode, 32);
3020 break;
3021 case DImode:
3022 reg = gen_rtx_REG (DImode, 0);
3023 break;
3024 default:
4d10b463 3025 gcc_unreachable ();
915c336f 3026 }
3027
2d7c492e 3028 tmp = gen_rtx_MEM (QImode, func);
2c6f8e4d 3029 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
915c336f 3030 const0_rtx, const0_rtx));
3031 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
9c2a0c05 3032 RTL_CONST_CALL_P (tmp) = 1;
915c336f 3033
3034 tmp = get_insns ();
3035 end_sequence ();
3036
3037 emit_libcall_block (tmp, target, reg, equiv);
3038}
3039
3040/* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3041
3042void
92643d95 3043alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
915c336f 3044{
2d7c492e 3045 rtx func;
915c336f 3046 int mode;
b90b6519 3047 rtx out_operands[3];
915c336f 3048
3049 func = alpha_lookup_xfloating_lib_func (code);
3050 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3051
b90b6519 3052 out_operands[0] = operands[1];
3053 out_operands[1] = operands[2];
3054 out_operands[2] = GEN_INT (mode);
9e7454d0 3055 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
915c336f 3056 gen_rtx_fmt_ee (code, TFmode, operands[1],
3057 operands[2]));
3058}
3059
3060/* Emit an X_floating library function call for a comparison. */
3061
3062static rtx
8c3428a6 3063alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
915c336f 3064{
8c3428a6 3065 enum rtx_code cmp_code, res_code;
e8e27646 3066 rtx func, out, operands[2], note;
915c336f 3067
8c3428a6 3068 /* X_floating library comparison functions return
3069 -1 unordered
3070 0 false
3071 1 true
3072 Convert the compare against the raw return value. */
3073
3074 cmp_code = *pcode;
3075 switch (cmp_code)
3076 {
3077 case UNORDERED:
3078 cmp_code = EQ;
3079 res_code = LT;
3080 break;
3081 case ORDERED:
3082 cmp_code = EQ;
3083 res_code = GE;
3084 break;
3085 case NE:
3086 res_code = NE;
3087 break;
3088 case EQ:
3089 case LT:
3090 case GT:
3091 case LE:
3092 case GE:
3093 res_code = GT;
3094 break;
3095 default:
3096 gcc_unreachable ();
3097 }
3098 *pcode = res_code;
3099
3100 func = alpha_lookup_xfloating_lib_func (cmp_code);
915c336f 3101
3102 operands[0] = op0;
3103 operands[1] = op1;
3104 out = gen_reg_rtx (DImode);
3105
e8e27646 3106 /* What's actually returned is -1,0,1, not a proper boolean value,
3107 so use an EXPR_LIST as with a generic libcall instead of a
3108 comparison type expression. */
3109 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3110 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3111 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3112 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
915c336f 3113
3114 return out;
3115}
3116
3117/* Emit an X_floating library function call for a conversion. */
3118
3119void
caf6f044 3120alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
915c336f 3121{
3122 int noperands = 1, mode;
b90b6519 3123 rtx out_operands[2];
2d7c492e 3124 rtx func;
caf6f044 3125 enum rtx_code code = orig_code;
3126
3127 if (code == UNSIGNED_FIX)
3128 code = FIX;
915c336f 3129
3130 func = alpha_lookup_xfloating_lib_func (code);
3131
b90b6519 3132 out_operands[0] = operands[1];
3133
915c336f 3134 switch (code)
3135 {
3136 case FIX:
3137 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
b90b6519 3138 out_operands[1] = GEN_INT (mode);
8581412d 3139 noperands = 2;
915c336f 3140 break;
3141 case FLOAT_TRUNCATE:
3142 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
b90b6519 3143 out_operands[1] = GEN_INT (mode);
8581412d 3144 noperands = 2;
915c336f 3145 break;
3146 default:
3147 break;
3148 }
3149
b90b6519 3150 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
caf6f044 3151 gen_rtx_fmt_e (orig_code,
3152 GET_MODE (operands[0]),
915c336f 3153 operands[1]));
3154}
3420680b 3155
10c77d2b 3156/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3157 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3158 guarantee that the sequence
3159 set (OP[0] OP[2])
3160 set (OP[1] OP[3])
3161 is valid. Naturally, output operand ordering is little-endian.
3162 This is used by *movtf_internal and *movti_internal. */
3163
3420680b 3164void
10c77d2b 3165alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3166 bool fixup_overlap)
3420680b 3167{
4d10b463 3168 switch (GET_CODE (operands[1]))
3420680b 3169 {
4d10b463 3170 case REG:
3420680b 3171 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3172 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
4d10b463 3173 break;
3174
3175 case MEM:
e513d163 3176 operands[3] = adjust_address (operands[1], DImode, 8);
3177 operands[2] = adjust_address (operands[1], DImode, 0);
4d10b463 3178 break;
3179
10c77d2b 3180 case CONST_INT:
e4a93d80 3181 case CONST_DOUBLE:
10c77d2b 3182 gcc_assert (operands[1] == CONST0_RTX (mode));
4d10b463 3183 operands[2] = operands[3] = const0_rtx;
3184 break;
3185
3186 default:
3187 gcc_unreachable ();
3420680b 3188 }
3420680b 3189
4d10b463 3190 switch (GET_CODE (operands[0]))
3420680b 3191 {
4d10b463 3192 case REG:
3420680b 3193 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3194 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
4d10b463 3195 break;
3196
3197 case MEM:
e513d163 3198 operands[1] = adjust_address (operands[0], DImode, 8);
3199 operands[0] = adjust_address (operands[0], DImode, 0);
4d10b463 3200 break;
3201
3202 default:
3203 gcc_unreachable ();
3420680b 3204 }
10c77d2b 3205
3206 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3207 {
3208 rtx tmp;
3209 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3210 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3211 }
3420680b 3212}
2267ca84 3213
9e7454d0 3214/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3215 op2 is a register containing the sign bit, operation is the
2267ca84 3216 logical operation to be performed. */
3217
3218void
92643d95 3219alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
2267ca84 3220{
3221 rtx high_bit = operands[2];
3222 rtx scratch;
3223 int move;
3224
10c77d2b 3225 alpha_split_tmode_pair (operands, TFmode, false);
2267ca84 3226
e3e08e7f 3227 /* Detect three flavors of operand overlap. */
2267ca84 3228 move = 1;
3229 if (rtx_equal_p (operands[0], operands[2]))
3230 move = 0;
3231 else if (rtx_equal_p (operands[1], operands[2]))
3232 {
3233 if (rtx_equal_p (operands[0], high_bit))
3234 move = 2;
3235 else
3236 move = -1;
3237 }
3238
3239 if (move < 0)
3240 emit_move_insn (operands[0], operands[2]);
3241
3242 /* ??? If the destination overlaps both source tf and high_bit, then
3243 assume source tf is dead in its entirety and use the other half
3244 for a scratch register. Otherwise "scratch" is just the proper
3245 destination register. */
3246 scratch = operands[move < 2 ? 1 : 3];
3247
3248 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3249
3250 if (move > 0)
3251 {
3252 emit_move_insn (operands[0], operands[2]);
3253 if (move > 1)
3254 emit_move_insn (operands[1], scratch);
3255 }
3256}
915c336f 3257\f
34377880 3258/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3259 unaligned data:
3260
3261 unsigned: signed:
3262 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3263 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3264 lda r3,X(r11) lda r3,X+2(r11)
3265 extwl r1,r3,r1 extql r1,r3,r1
3266 extwh r2,r3,r2 extqh r2,r3,r2
3267 or r1.r2.r1 or r1,r2,r1
3268 sra r1,48,r1
3269
3270 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3271 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3272 lda r3,X(r11) lda r3,X(r11)
3273 extll r1,r3,r1 extll r1,r3,r1
3274 extlh r2,r3,r2 extlh r2,r3,r2
3275 or r1.r2.r1 addl r1,r2,r1
3276
3277 quad: ldq_u r1,X(r11)
3278 ldq_u r2,X+7(r11)
3279 lda r3,X(r11)
3280 extql r1,r3,r1
3281 extqh r2,r3,r2
3282 or r1.r2.r1
3283*/
3284
3285void
92643d95 3286alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3287 HOST_WIDE_INT ofs, int sign)
34377880 3288{
1f0ce6a6 3289 rtx meml, memh, addr, extl, exth, tmp, mema;
2cc46ade 3290 enum machine_mode mode;
34377880 3291
9467fa25 3292 if (TARGET_BWX && size == 2)
3293 {
b83bea6c 3294 meml = adjust_address (mem, QImode, ofs);
3295 memh = adjust_address (mem, QImode, ofs+1);
9467fa25 3296 if (BYTES_BIG_ENDIAN)
3297 tmp = meml, meml = memh, memh = tmp;
3298 extl = gen_reg_rtx (DImode);
3299 exth = gen_reg_rtx (DImode);
3300 emit_insn (gen_zero_extendqidi2 (extl, meml));
3301 emit_insn (gen_zero_extendqidi2 (exth, memh));
3302 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3303 NULL, 1, OPTAB_LIB_WIDEN);
3304 addr = expand_simple_binop (DImode, IOR, extl, exth,
3305 NULL, 1, OPTAB_LIB_WIDEN);
3306
3307 if (sign && GET_MODE (tgt) != HImode)
3308 {
3309 addr = gen_lowpart (HImode, addr);
3310 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3311 }
3312 else
3313 {
3314 if (GET_MODE (tgt) != DImode)
3315 addr = gen_lowpart (GET_MODE (tgt), addr);
3316 emit_move_insn (tgt, addr);
3317 }
3318 return;
3319 }
3320
34377880 3321 meml = gen_reg_rtx (DImode);
3322 memh = gen_reg_rtx (DImode);
3323 addr = gen_reg_rtx (DImode);
3324 extl = gen_reg_rtx (DImode);
3325 exth = gen_reg_rtx (DImode);
3326
1f0ce6a6 3327 mema = XEXP (mem, 0);
3328 if (GET_CODE (mema) == LO_SUM)
3329 mema = force_reg (Pmode, mema);
3330
3024e9f8 3331 /* AND addresses cannot be in any alias set, since they may implicitly
9e7454d0 3332 alias surrounding code. Ideally we'd have some alias set that
3024e9f8 3333 covered all types except those with alignment 8 or higher. */
3334
3335 tmp = change_address (mem, DImode,
9e7454d0 3336 gen_rtx_AND (DImode,
1f0ce6a6 3337 plus_constant (mema, ofs),
3024e9f8 3338 GEN_INT (-8)));
ab6ab77e 3339 set_mem_alias_set (tmp, 0);
3024e9f8 3340 emit_move_insn (meml, tmp);
3341
3342 tmp = change_address (mem, DImode,
9e7454d0 3343 gen_rtx_AND (DImode,
1f0ce6a6 3344 plus_constant (mema, ofs + size - 1),
3024e9f8 3345 GEN_INT (-8)));
ab6ab77e 3346 set_mem_alias_set (tmp, 0);
3024e9f8 3347 emit_move_insn (memh, tmp);
34377880 3348
9caef960 3349 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3350 {
3351 emit_move_insn (addr, plus_constant (mema, -1));
3352
3353 emit_insn (gen_extqh_be (extl, meml, addr));
3354 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3355
3356 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3357 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3358 addr, 1, OPTAB_WIDEN);
3359 }
3360 else if (sign && size == 2)
34377880 3361 {
1f0ce6a6 3362 emit_move_insn (addr, plus_constant (mema, ofs+2));
34377880 3363
9caef960 3364 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3365 emit_insn (gen_extqh_le (exth, memh, addr));
34377880 3366
ba4a7733 3367 /* We must use tgt here for the target. Alpha-vms port fails if we use
3368 addr for the target, because addr is marked as a pointer and combine
85c36fd1 3369 knows that pointers are always sign-extended 32-bit values. */
ba4a7733 3370 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
9e7454d0 3371 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
2cc46ade 3372 addr, 1, OPTAB_WIDEN);
34377880 3373 }
2cc46ade 3374 else
34377880 3375 {
9caef960 3376 if (WORDS_BIG_ENDIAN)
2cc46ade 3377 {
9caef960 3378 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3379 switch ((int) size)
3380 {
3381 case 2:
3382 emit_insn (gen_extwh_be (extl, meml, addr));
3383 mode = HImode;
3384 break;
34377880 3385
9caef960 3386 case 4:
3387 emit_insn (gen_extlh_be (extl, meml, addr));
3388 mode = SImode;
3389 break;
34377880 3390
9caef960 3391 case 8:
3392 emit_insn (gen_extqh_be (extl, meml, addr));
3393 mode = DImode;
3394 break;
915c336f 3395
9caef960 3396 default:
4d10b463 3397 gcc_unreachable ();
9caef960 3398 }
3399 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3400 }
3401 else
3402 {
3403 emit_move_insn (addr, plus_constant (mema, ofs));
3404 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3405 switch ((int) size)
3406 {
3407 case 2:
3408 emit_insn (gen_extwh_le (exth, memh, addr));
3409 mode = HImode;
3410 break;
3411
3412 case 4:
3413 emit_insn (gen_extlh_le (exth, memh, addr));
3414 mode = SImode;
3415 break;
3416
3417 case 8:
3418 emit_insn (gen_extqh_le (exth, memh, addr));
3419 mode = DImode;
3420 break;
3421
3422 default:
4d10b463 3423 gcc_unreachable ();
9caef960 3424 }
2cc46ade 3425 }
3426
3427 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3428 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3429 sign, OPTAB_WIDEN);
34377880 3430 }
3431
2cc46ade 3432 if (addr != tgt)
9467fa25 3433 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
34377880 3434}
3435
3436/* Similarly, use ins and msk instructions to perform unaligned stores. */
3437
3438void
92643d95 3439alpha_expand_unaligned_store (rtx dst, rtx src,
3440 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
34377880 3441{
1f0ce6a6 3442 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
9e7454d0 3443
9467fa25 3444 if (TARGET_BWX && size == 2)
3445 {
3446 if (src != const0_rtx)
3447 {
3448 dstl = gen_lowpart (QImode, src);
3449 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3450 NULL, 1, OPTAB_LIB_WIDEN);
3451 dsth = gen_lowpart (QImode, dsth);
3452 }
3453 else
3454 dstl = dsth = const0_rtx;
3455
b83bea6c 3456 meml = adjust_address (dst, QImode, ofs);
3457 memh = adjust_address (dst, QImode, ofs+1);
9467fa25 3458 if (BYTES_BIG_ENDIAN)
3459 addr = meml, meml = memh, memh = addr;
3460
3461 emit_move_insn (meml, dstl);
3462 emit_move_insn (memh, dsth);
3463 return;
3464 }
3465
34377880 3466 dstl = gen_reg_rtx (DImode);
3467 dsth = gen_reg_rtx (DImode);
3468 insl = gen_reg_rtx (DImode);
3469 insh = gen_reg_rtx (DImode);
3470
1f0ce6a6 3471 dsta = XEXP (dst, 0);
3472 if (GET_CODE (dsta) == LO_SUM)
3473 dsta = force_reg (Pmode, dsta);
3474
3024e9f8 3475 /* AND addresses cannot be in any alias set, since they may implicitly
9e7454d0 3476 alias surrounding code. Ideally we'd have some alias set that
3024e9f8 3477 covered all types except those with alignment 8 or higher. */
3478
34377880 3479 meml = change_address (dst, DImode,
9e7454d0 3480 gen_rtx_AND (DImode,
1f0ce6a6 3481 plus_constant (dsta, ofs),
941522d6 3482 GEN_INT (-8)));
ab6ab77e 3483 set_mem_alias_set (meml, 0);
3024e9f8 3484
34377880 3485 memh = change_address (dst, DImode,
9e7454d0 3486 gen_rtx_AND (DImode,
1f0ce6a6 3487 plus_constant (dsta, ofs + size - 1),
941522d6 3488 GEN_INT (-8)));
ab6ab77e 3489 set_mem_alias_set (memh, 0);
34377880 3490
3491 emit_move_insn (dsth, memh);
3492 emit_move_insn (dstl, meml);
9caef960 3493 if (WORDS_BIG_ENDIAN)
34377880 3494 {
9caef960 3495 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3496
3497 if (src != const0_rtx)
3498 {
3499 switch ((int) size)
3500 {
3501 case 2:
3502 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3503 break;
3504 case 4:
3505 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3506 break;
3507 case 8:
3508 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3509 break;
3510 }
3511 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3512 GEN_INT (size*8), addr));
3513 }
34377880 3514
29768226 3515 switch ((int) size)
34377880 3516 {
3517 case 2:
9caef960 3518 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
34377880 3519 break;
3520 case 4:
9caef960 3521 {
ae4cd3a5 3522 rtx msk = immed_double_const (0xffffffff, 0, DImode);
9caef960 3523 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
ae4cd3a5 3524 break;
9caef960 3525 }
ae4cd3a5 3526 case 8:
3527 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
34377880 3528 break;
3529 }
9caef960 3530
3531 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
34377880 3532 }
9caef960 3533 else
3534 {
3535 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
34377880 3536
b739144f 3537 if (src != CONST0_RTX (GET_MODE (src)))
9caef960 3538 {
3539 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3540 GEN_INT (size*8), addr));
34377880 3541
9caef960 3542 switch ((int) size)
3543 {
3544 case 2:
3545 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3546 break;
3547 case 4:
3548 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3549 break;
3550 case 8:
3551 emit_insn (gen_insql_le (insl, src, addr));
3552 break;
3553 }
3554 }
3555
3556 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3557
3558 switch ((int) size)
3559 {
3560 case 2:
3561 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3562 break;
3563 case 4:
9caef960 3564 {
ae4cd3a5 3565 rtx msk = immed_double_const (0xffffffff, 0, DImode);
9caef960 3566 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
ae4cd3a5 3567 break;
9caef960 3568 }
ae4cd3a5 3569 case 8:
3570 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
9caef960 3571 break;
3572 }
34377880 3573 }
3574
b739144f 3575 if (src != CONST0_RTX (GET_MODE (src)))
34377880 3576 {
2cc46ade 3577 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3578 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
34377880 3579 }
9e7454d0 3580
9caef960 3581 if (WORDS_BIG_ENDIAN)
3582 {
3583 emit_move_insn (meml, dstl);
3584 emit_move_insn (memh, dsth);
3585 }
3586 else
3587 {
3588 /* Must store high before low for degenerate case of aligned. */
3589 emit_move_insn (memh, dsth);
3590 emit_move_insn (meml, dstl);
3591 }
34377880 3592}
3593
2cc46ade 3594/* The block move code tries to maximize speed by separating loads and
3595 stores at the expense of register pressure: we load all of the data
3596 before we store it back out. There are two secondary effects worth
3597 mentioning, that this speeds copying to/from aligned and unaligned
3598 buffers, and that it makes the code significantly easier to write. */
34377880 3599
2cc46ade 3600#define MAX_MOVE_WORDS 8
3601
3602/* Load an integral number of consecutive unaligned quadwords. */
34377880 3603
3604static void
92643d95 3605alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3606 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
34377880 3607{
3608 rtx const im8 = GEN_INT (-8);
3609 rtx const i64 = GEN_INT (64);
2cc46ade 3610 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1f0ce6a6 3611 rtx sreg, areg, tmp, smema;
34377880 3612 HOST_WIDE_INT i;
3613
1f0ce6a6 3614 smema = XEXP (smem, 0);
3615 if (GET_CODE (smema) == LO_SUM)
3616 smema = force_reg (Pmode, smema);
3617
34377880 3618 /* Generate all the tmp registers we need. */
3619 for (i = 0; i < words; ++i)
2cc46ade 3620 {
3621 data_regs[i] = out_regs[i];
3622 ext_tmps[i] = gen_reg_rtx (DImode);
3623 }
3624 data_regs[words] = gen_reg_rtx (DImode);
3625
3626 if (ofs != 0)
e513d163 3627 smem = adjust_address (smem, GET_MODE (smem), ofs);
9e7454d0 3628
34377880 3629 /* Load up all of the source data. */
3630 for (i = 0; i < words; ++i)
3631 {
3024e9f8 3632 tmp = change_address (smem, DImode,
3633 gen_rtx_AND (DImode,
1f0ce6a6 3634 plus_constant (smema, 8*i),
3024e9f8 3635 im8));
ab6ab77e 3636 set_mem_alias_set (tmp, 0);
3024e9f8 3637 emit_move_insn (data_regs[i], tmp);
34377880 3638 }
3024e9f8 3639
3640 tmp = change_address (smem, DImode,
3641 gen_rtx_AND (DImode,
1f0ce6a6 3642 plus_constant (smema, 8*words - 1),
3024e9f8 3643 im8));
ab6ab77e 3644 set_mem_alias_set (tmp, 0);
3024e9f8 3645 emit_move_insn (data_regs[words], tmp);
34377880 3646
3647 /* Extract the half-word fragments. Unfortunately DEC decided to make
9e7454d0 3648 extxh with offset zero a noop instead of zeroing the register, so
34377880 3649 we must take care of that edge condition ourselves with cmov. */
3650
1f0ce6a6 3651 sreg = copy_addr_to_reg (smema);
9e7454d0 3652 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
2cc46ade 3653 1, OPTAB_WIDEN);
9caef960 3654 if (WORDS_BIG_ENDIAN)
3655 emit_move_insn (sreg, plus_constant (sreg, 7));
34377880 3656 for (i = 0; i < words; ++i)
3657 {
9caef960 3658 if (WORDS_BIG_ENDIAN)
3659 {
3660 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3661 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3662 }
3663 else
3664 {
3665 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3666 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3667 }
941522d6 3668 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3669 gen_rtx_IF_THEN_ELSE (DImode,
2cc46ade 3670 gen_rtx_EQ (DImode, areg,
3671 const0_rtx),
941522d6 3672 const0_rtx, ext_tmps[i])));
34377880 3673 }
3674
3675 /* Merge the half-words into whole words. */
3676 for (i = 0; i < words; ++i)
3677 {
2cc46ade 3678 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3679 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
34377880 3680 }
3681}
3682
3683/* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3684 may be NULL to store zeros. */
3685
3686static void
92643d95 3687alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3688 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
34377880 3689{
3690 rtx const im8 = GEN_INT (-8);
3691 rtx const i64 = GEN_INT (64);
34377880 3692 rtx ins_tmps[MAX_MOVE_WORDS];
2cc46ade 3693 rtx st_tmp_1, st_tmp_2, dreg;
1f0ce6a6 3694 rtx st_addr_1, st_addr_2, dmema;
34377880 3695 HOST_WIDE_INT i;
3696
1f0ce6a6 3697 dmema = XEXP (dmem, 0);
3698 if (GET_CODE (dmema) == LO_SUM)
3699 dmema = force_reg (Pmode, dmema);
3700
34377880 3701 /* Generate all the tmp registers we need. */
3702 if (data_regs != NULL)
3703 for (i = 0; i < words; ++i)
3704 ins_tmps[i] = gen_reg_rtx(DImode);
3705 st_tmp_1 = gen_reg_rtx(DImode);
3706 st_tmp_2 = gen_reg_rtx(DImode);
9e7454d0 3707
2cc46ade 3708 if (ofs != 0)
e513d163 3709 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
2cc46ade 3710
3711 st_addr_2 = change_address (dmem, DImode,
941522d6 3712 gen_rtx_AND (DImode,
1f0ce6a6 3713 plus_constant (dmema, words*8 - 1),
34377880 3714 im8));
ab6ab77e 3715 set_mem_alias_set (st_addr_2, 0);
3024e9f8 3716
2cc46ade 3717 st_addr_1 = change_address (dmem, DImode,
1f0ce6a6 3718 gen_rtx_AND (DImode, dmema, im8));
ab6ab77e 3719 set_mem_alias_set (st_addr_1, 0);
34377880 3720
3721 /* Load up the destination end bits. */
3722 emit_move_insn (st_tmp_2, st_addr_2);
3723 emit_move_insn (st_tmp_1, st_addr_1);
3724
3725 /* Shift the input data into place. */
1f0ce6a6 3726 dreg = copy_addr_to_reg (dmema);
9caef960 3727 if (WORDS_BIG_ENDIAN)
3728 emit_move_insn (dreg, plus_constant (dreg, 7));
34377880 3729 if (data_regs != NULL)
3730 {
3731 for (i = words-1; i >= 0; --i)
3732 {
9caef960 3733 if (WORDS_BIG_ENDIAN)
3734 {
3735 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3736 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3737 }
3738 else
3739 {
3740 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3741 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3742 }
34377880 3743 }
34377880 3744 for (i = words-1; i > 0; --i)
3745 {
2cc46ade 3746 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3747 ins_tmps[i-1], ins_tmps[i-1], 1,
3748 OPTAB_WIDEN);
34377880 3749 }
3750 }
3751
3752 /* Split and merge the ends with the destination data. */
9caef960 3753 if (WORDS_BIG_ENDIAN)
3754 {
ae4cd3a5 3755 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
9caef960 3756 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3757 }
3758 else
3759 {
3760 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
ae4cd3a5 3761 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
9caef960 3762 }
34377880 3763
3764 if (data_regs != NULL)
3765 {
2cc46ade 3766 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3767 st_tmp_2, 1, OPTAB_WIDEN);
3768 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3769 st_tmp_1, 1, OPTAB_WIDEN);
34377880 3770 }
3771
3772 /* Store it all. */
9caef960 3773 if (WORDS_BIG_ENDIAN)
3774 emit_move_insn (st_addr_1, st_tmp_1);
3775 else
3776 emit_move_insn (st_addr_2, st_tmp_2);
34377880 3777 for (i = words-1; i > 0; --i)
3778 {
3024e9f8 3779 rtx tmp = change_address (dmem, DImode,
3780 gen_rtx_AND (DImode,
9caef960 3781 plus_constant(dmema,
3782 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3024e9f8 3783 im8));
ab6ab77e 3784 set_mem_alias_set (tmp, 0);
3024e9f8 3785 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
34377880 3786 }
9caef960 3787 if (WORDS_BIG_ENDIAN)
3788 emit_move_insn (st_addr_2, st_tmp_2);
3789 else
3790 emit_move_insn (st_addr_1, st_tmp_1);
34377880 3791}
3792
3793
3794/* Expand string/block move operations.
3795
3796 operands[0] is the pointer to the destination.
3797 operands[1] is the pointer to the source.
3798 operands[2] is the number of bytes to move.
3799 operands[3] is the alignment. */
3800
3801int
92643d95 3802alpha_expand_block_move (rtx operands[])
34377880 3803{
3804 rtx bytes_rtx = operands[2];
3805 rtx align_rtx = operands[3];
d94b545b 3806 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
a9aaae37 3807 HOST_WIDE_INT bytes = orig_bytes;
3808 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3809 HOST_WIDE_INT dst_align = src_align;
80909c64 3810 rtx orig_src = operands[1];
3811 rtx orig_dst = operands[0];
3812 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
2cc46ade 3813 rtx tmp;
1f0ce6a6 3814 unsigned int i, words, ofs, nregs = 0;
9e7454d0 3815
80909c64 3816 if (orig_bytes <= 0)
34377880 3817 return 1;
a9aaae37 3818 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
34377880 3819 return 0;
3820
2cc46ade 3821 /* Look for additional alignment information from recorded register info. */
3822
3823 tmp = XEXP (orig_src, 0);
c933fb42 3824 if (REG_P (tmp))
80909c64 3825 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
2cc46ade 3826 else if (GET_CODE (tmp) == PLUS
c933fb42 3827 && REG_P (XEXP (tmp, 0))
3828 && CONST_INT_P (XEXP (tmp, 1)))
2cc46ade 3829 {
80909c64 3830 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3831 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2cc46ade 3832
3833 if (a > src_align)
3834 {
80909c64 3835 if (a >= 64 && c % 8 == 0)
3836 src_align = 64;
3837 else if (a >= 32 && c % 4 == 0)
3838 src_align = 32;
3839 else if (a >= 16 && c % 2 == 0)
3840 src_align = 16;
2cc46ade 3841 }
3842 }
9e7454d0 3843
2cc46ade 3844 tmp = XEXP (orig_dst, 0);
c933fb42 3845 if (REG_P (tmp))
80909c64 3846 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
2cc46ade 3847 else if (GET_CODE (tmp) == PLUS
c933fb42 3848 && REG_P (XEXP (tmp, 0))
3849 && CONST_INT_P (XEXP (tmp, 1)))
2cc46ade 3850 {
80909c64 3851 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3852 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2cc46ade 3853
3854 if (a > dst_align)
3855 {
80909c64 3856 if (a >= 64 && c % 8 == 0)
3857 dst_align = 64;
3858 else if (a >= 32 && c % 4 == 0)
3859 dst_align = 32;
3860 else if (a >= 16 && c % 2 == 0)
3861 dst_align = 16;
2cc46ade 3862 }
3863 }
3864
2cc46ade 3865 ofs = 0;
80909c64 3866 if (src_align >= 64 && bytes >= 8)
34377880 3867 {
3868 words = bytes / 8;
3869
34377880 3870 for (i = 0; i < words; ++i)
27d0c333 3871 data_regs[nregs + i] = gen_reg_rtx (DImode);
34377880 3872
34377880 3873 for (i = 0; i < words; ++i)
80909c64 3874 emit_move_insn (data_regs[nregs + i],
e513d163 3875 adjust_address (orig_src, DImode, ofs + i * 8));
34377880 3876
2cc46ade 3877 nregs += words;
34377880 3878 bytes -= words * 8;
7597afe9 3879 ofs += words * 8;
34377880 3880 }
80909c64 3881
3882 if (src_align >= 32 && bytes >= 4)
34377880 3883 {
3884 words = bytes / 4;
3885
34377880 3886 for (i = 0; i < words; ++i)
27d0c333 3887 data_regs[nregs + i] = gen_reg_rtx (SImode);
34377880 3888
34377880 3889 for (i = 0; i < words; ++i)
80909c64 3890 emit_move_insn (data_regs[nregs + i],
537ffcfc 3891 adjust_address (orig_src, SImode, ofs + i * 4));
34377880 3892
2cc46ade 3893 nregs += words;
34377880 3894 bytes -= words * 4;
7597afe9 3895 ofs += words * 4;
34377880 3896 }
80909c64 3897
a9aaae37 3898 if (bytes >= 8)
34377880 3899 {
3900 words = bytes / 8;
3901
34377880 3902 for (i = 0; i < words+1; ++i)
27d0c333 3903 data_regs[nregs + i] = gen_reg_rtx (DImode);
34377880 3904
b47268cf 3905 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3906 words, ofs);
34377880 3907
2cc46ade 3908 nregs += words;
34377880 3909 bytes -= words * 8;
7597afe9 3910 ofs += words * 8;
34377880 3911 }
80909c64 3912
80909c64 3913 if (! TARGET_BWX && bytes >= 4)
34377880 3914 {
2cc46ade 3915 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
34377880 3916 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
34377880 3917 bytes -= 4;
3918 ofs += 4;
3919 }
80909c64 3920
34377880 3921 if (bytes >= 2)
3922 {
80909c64 3923 if (src_align >= 16)
34377880 3924 {
3925 do {
2cc46ade 3926 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
e513d163 3927 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
34377880 3928 bytes -= 2;
3929 ofs += 2;
3930 } while (bytes >= 2);
3931 }
80909c64 3932 else if (! TARGET_BWX)
34377880 3933 {
2cc46ade 3934 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
34377880 3935 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
34377880 3936 bytes -= 2;
3937 ofs += 2;
3938 }
3939 }
80909c64 3940
34377880 3941 while (bytes > 0)
3942 {
2cc46ade 3943 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
e513d163 3944 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
34377880 3945 bytes -= 1;
3946 ofs += 1;
3947 }
80909c64 3948
4d10b463 3949 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
2cc46ade 3950
80909c64 3951 /* Now save it back out again. */
2cc46ade 3952
3953 i = 0, ofs = 0;
3954
2cc46ade 3955 /* Write out the data in whatever chunks reading the source allowed. */
80909c64 3956 if (dst_align >= 64)
2cc46ade 3957 {
3958 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3959 {
e513d163 3960 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
2cc46ade 3961 data_regs[i]);
3962 ofs += 8;
3963 i++;
3964 }
3965 }
80909c64 3966
3967 if (dst_align >= 32)
2cc46ade 3968 {
3969 /* If the source has remaining DImode regs, write them out in
3970 two pieces. */
3971 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3972 {
3973 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3974 NULL_RTX, 1, OPTAB_WIDEN);
3975
e513d163 3976 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
2cc46ade 3977 gen_lowpart (SImode, data_regs[i]));
e513d163 3978 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
2cc46ade 3979 gen_lowpart (SImode, tmp));
3980 ofs += 8;
3981 i++;
3982 }
3983
3984 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3985 {
e513d163 3986 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
2cc46ade 3987 data_regs[i]);
3988 ofs += 4;
3989 i++;
3990 }
3991 }
80909c64 3992
2cc46ade 3993 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3994 {
3995 /* Write out a remaining block of words using unaligned methods. */
3996
80909c64 3997 for (words = 1; i + words < nregs; words++)
3998 if (GET_MODE (data_regs[i + words]) != DImode)
2cc46ade 3999 break;
4000
4001 if (words == 1)
4002 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4003 else
80909c64 4004 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4005 words, ofs);
9e7454d0 4006
2cc46ade 4007 i += words;
4008 ofs += words * 8;
4009 }
4010
4011 /* Due to the above, this won't be aligned. */
4012 /* ??? If we have more than one of these, consider constructing full
4013 words in registers and using alpha_expand_unaligned_store_words. */
4014 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4015 {
4016 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4017 ofs += 4;
4018 i++;
4019 }
4020
80909c64 4021 if (dst_align >= 16)
2cc46ade 4022 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4023 {
e513d163 4024 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
2cc46ade 4025 i++;
4026 ofs += 2;
4027 }
4028 else
4029 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4030 {
4031 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4032 i++;
4033 ofs += 2;
4034 }
80909c64 4035
4d10b463 4036 /* The remainder must be byte copies. */
4037 while (i < nregs)
2cc46ade 4038 {
4d10b463 4039 gcc_assert (GET_MODE (data_regs[i]) == QImode);
e513d163 4040 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
2cc46ade 4041 i++;
4042 ofs += 1;
4043 }
80909c64 4044
34377880 4045 return 1;
4046}
4047
4048int
92643d95 4049alpha_expand_block_clear (rtx operands[])
34377880 4050{
4051 rtx bytes_rtx = operands[1];
7a3e5564 4052 rtx align_rtx = operands[3];
80909c64 4053 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
a9aaae37 4054 HOST_WIDE_INT bytes = orig_bytes;
4055 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4056 HOST_WIDE_INT alignofs = 0;
80909c64 4057 rtx orig_dst = operands[0];
2cc46ade 4058 rtx tmp;
a9aaae37 4059 int i, words, ofs = 0;
9e7454d0 4060
80909c64 4061 if (orig_bytes <= 0)
34377880 4062 return 1;
a9aaae37 4063 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
34377880 4064 return 0;
4065
2cc46ade 4066 /* Look for stricter alignment. */
2cc46ade 4067 tmp = XEXP (orig_dst, 0);
c933fb42 4068 if (REG_P (tmp))
80909c64 4069 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
2cc46ade 4070 else if (GET_CODE (tmp) == PLUS
c933fb42 4071 && REG_P (XEXP (tmp, 0))
4072 && CONST_INT_P (XEXP (tmp, 1)))
2cc46ade 4073 {
a9aaae37 4074 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4075 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2cc46ade 4076
4077 if (a > align)
4078 {
a9aaae37 4079 if (a >= 64)
4080 align = a, alignofs = 8 - c % 8;
4081 else if (a >= 32)
4082 align = a, alignofs = 4 - c % 4;
4083 else if (a >= 16)
4084 align = a, alignofs = 2 - c % 2;
2cc46ade 4085 }
4086 }
4087
a9aaae37 4088 /* Handle an unaligned prefix first. */
4089
4090 if (alignofs > 0)
4091 {
4092#if HOST_BITS_PER_WIDE_INT >= 64
4093 /* Given that alignofs is bounded by align, the only time BWX could
4094 generate three stores is for a 7 byte fill. Prefer two individual
4095 stores over a load/mask/store sequence. */
4096 if ((!TARGET_BWX || alignofs == 7)
4097 && align >= 32
4098 && !(alignofs == 4 && bytes >= 4))
4099 {
4100 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4101 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4102 rtx mem, tmp;
4103 HOST_WIDE_INT mask;
4104
e513d163 4105 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
ab6ab77e 4106 set_mem_alias_set (mem, 0);
a9aaae37 4107
4108 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4109 if (bytes < alignofs)
4110 {
4111 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4112 ofs += bytes;
4113 bytes = 0;
4114 }
4115 else
4116 {
4117 bytes -= alignofs;
4118 ofs += alignofs;
4119 }
4120 alignofs = 0;
4121
4122 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4123 NULL_RTX, 1, OPTAB_WIDEN);
4124
4125 emit_move_insn (mem, tmp);
4126 }
4127#endif
4128
4129 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4130 {
e513d163 4131 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
a9aaae37 4132 bytes -= 1;
4133 ofs += 1;
4134 alignofs -= 1;
4135 }
4136 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4137 {
e513d163 4138 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
a9aaae37 4139 bytes -= 2;
4140 ofs += 2;
4141 alignofs -= 2;
4142 }
4143 if (alignofs == 4 && bytes >= 4)
4144 {
e513d163 4145 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
a9aaae37 4146 bytes -= 4;
4147 ofs += 4;
4148 alignofs = 0;
4149 }
4150
4151 /* If we've not used the extra lead alignment information by now,
4152 we won't be able to. Downgrade align to match what's left over. */
4153 if (alignofs > 0)
4154 {
4155 alignofs = alignofs & -alignofs;
4156 align = MIN (align, alignofs * BITS_PER_UNIT);
4157 }
4158 }
4159
4160 /* Handle a block of contiguous long-words. */
34377880 4161
80909c64 4162 if (align >= 64 && bytes >= 8)
34377880 4163 {
4164 words = bytes / 8;
4165
4166 for (i = 0; i < words; ++i)
1f0ce6a6 4167 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
e513d163 4168 const0_rtx);
34377880 4169
4170 bytes -= words * 8;
7597afe9 4171 ofs += words * 8;
34377880 4172 }
80909c64 4173
a9aaae37 4174 /* If the block is large and appropriately aligned, emit a single
4175 store followed by a sequence of stq_u insns. */
4176
4177 if (align >= 32 && bytes > 16)
4178 {
1f0ce6a6 4179 rtx orig_dsta;
4180
e513d163 4181 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
a9aaae37 4182 bytes -= 4;
4183 ofs += 4;
4184
1f0ce6a6 4185 orig_dsta = XEXP (orig_dst, 0);
4186 if (GET_CODE (orig_dsta) == LO_SUM)
4187 orig_dsta = force_reg (Pmode, orig_dsta);
4188
a9aaae37 4189 words = bytes / 8;
4190 for (i = 0; i < words; ++i)
4191 {
ab6ab77e 4192 rtx mem
4193 = change_address (orig_dst, DImode,
4194 gen_rtx_AND (DImode,
1f0ce6a6 4195 plus_constant (orig_dsta, ofs + i*8),
ab6ab77e 4196 GEN_INT (-8)));
4197 set_mem_alias_set (mem, 0);
a9aaae37 4198 emit_move_insn (mem, const0_rtx);
4199 }
4200
4201 /* Depending on the alignment, the first stq_u may have overlapped
4202 with the initial stl, which means that the last stq_u didn't
4203 write as much as it would appear. Leave those questionable bytes
4204 unaccounted for. */
4205 bytes -= words * 8 - 4;
4206 ofs += words * 8 - 4;
4207 }
4208
4209 /* Handle a smaller block of aligned words. */
4210
4211 if ((align >= 64 && bytes == 4)
4212 || (align == 32 && bytes >= 4))
34377880 4213 {
4214 words = bytes / 4;
4215
4216 for (i = 0; i < words; ++i)
e513d163 4217 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
80909c64 4218 const0_rtx);
34377880 4219
4220 bytes -= words * 4;
7597afe9 4221 ofs += words * 4;
34377880 4222 }
80909c64 4223
a9aaae37 4224 /* An unaligned block uses stq_u stores for as many as possible. */
4225
4226 if (bytes >= 8)
34377880 4227 {
4228 words = bytes / 8;
4229
7597afe9 4230 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
34377880 4231
4232 bytes -= words * 8;
7597afe9 4233 ofs += words * 8;
34377880 4234 }
4235
a9aaae37 4236 /* Next clean up any trailing pieces. */
34377880 4237
a9aaae37 4238#if HOST_BITS_PER_WIDE_INT >= 64
4239 /* Count the number of bits in BYTES for which aligned stores could
4240 be emitted. */
4241 words = 0;
4242 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4243 if (bytes & i)
4244 words += 1;
4245
4246 /* If we have appropriate alignment (and it wouldn't take too many
4247 instructions otherwise), mask out the bytes we need. */
4248 if (TARGET_BWX ? words > 2 : bytes > 0)
4249 {
4250 if (align >= 64)
4251 {
4252 rtx mem, tmp;
4253 HOST_WIDE_INT mask;
4254
e513d163 4255 mem = adjust_address (orig_dst, DImode, ofs);
ab6ab77e 4256 set_mem_alias_set (mem, 0);
a9aaae37 4257
4258 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4259
4260 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4261 NULL_RTX, 1, OPTAB_WIDEN);
4262
4263 emit_move_insn (mem, tmp);
4264 return 1;
4265 }
4266 else if (align >= 32 && bytes < 4)
4267 {
4268 rtx mem, tmp;
4269 HOST_WIDE_INT mask;
4270
e513d163 4271 mem = adjust_address (orig_dst, SImode, ofs);
ab6ab77e 4272 set_mem_alias_set (mem, 0);
a9aaae37 4273
4274 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4275
4276 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4277 NULL_RTX, 1, OPTAB_WIDEN);
4278
4279 emit_move_insn (mem, tmp);
4280 return 1;
4281 }
34377880 4282 }
a9aaae37 4283#endif
80909c64 4284
34377880 4285 if (!TARGET_BWX && bytes >= 4)
4286 {
4287 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4288 bytes -= 4;
4289 ofs += 4;
4290 }
80909c64 4291
34377880 4292 if (bytes >= 2)
4293 {
80909c64 4294 if (align >= 16)
34377880 4295 {
4296 do {
e513d163 4297 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
34377880 4298 const0_rtx);
4299 bytes -= 2;
4300 ofs += 2;
4301 } while (bytes >= 2);
4302 }
80909c64 4303 else if (! TARGET_BWX)
34377880 4304 {
4305 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4306 bytes -= 2;
4307 ofs += 2;
4308 }
4309 }
80909c64 4310
34377880 4311 while (bytes > 0)
4312 {
e513d163 4313 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
34377880 4314 bytes -= 1;
4315 ofs += 1;
4316 }
4317
4318 return 1;
4319}
f2cc13dc 4320
4321/* Returns a mask so that zap(x, value) == x & mask. */
4322
4323rtx
92643d95 4324alpha_expand_zap_mask (HOST_WIDE_INT value)
f2cc13dc 4325{
4326 rtx result;
4327 int i;
4328
4329 if (HOST_BITS_PER_WIDE_INT >= 64)
4330 {
4331 HOST_WIDE_INT mask = 0;
4332
4333 for (i = 7; i >= 0; --i)
4334 {
4335 mask <<= 8;
4336 if (!((value >> i) & 1))
4337 mask |= 0xff;
4338 }
4339
4340 result = gen_int_mode (mask, DImode);
4341 }
4d10b463 4342 else
f2cc13dc 4343 {
4344 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4345
4d10b463 4346 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4347
f2cc13dc 4348 for (i = 7; i >= 4; --i)
4349 {
4350 mask_hi <<= 8;
4351 if (!((value >> i) & 1))
4352 mask_hi |= 0xff;
4353 }
4354
4355 for (i = 3; i >= 0; --i)
4356 {
4357 mask_lo <<= 8;
4358 if (!((value >> i) & 1))
4359 mask_lo |= 0xff;
4360 }
4361
4362 result = immed_double_const (mask_lo, mask_hi, DImode);
4363 }
f2cc13dc 4364
4365 return result;
4366}
4367
4368void
92643d95 4369alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4370 enum machine_mode mode,
4371 rtx op0, rtx op1, rtx op2)
f2cc13dc 4372{
4373 op0 = gen_lowpart (mode, op0);
4374
4375 if (op1 == const0_rtx)
4376 op1 = CONST0_RTX (mode);
4377 else
4378 op1 = gen_lowpart (mode, op1);
ae4cd3a5 4379
4380 if (op2 == const0_rtx)
f2cc13dc 4381 op2 = CONST0_RTX (mode);
4382 else
4383 op2 = gen_lowpart (mode, op2);
4384
4385 emit_insn ((*gen) (op0, op1, op2));
4386}
f155876e 4387
032caa7b 4388/* A subroutine of the atomic operation splitters. Jump to LABEL if
4389 COND is true. Mark the jump as unlikely to be taken. */
4390
4391static void
4392emit_unlikely_jump (rtx cond, rtx label)
4393{
4394 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4395 rtx x;
4396
4397 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4398 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
0a48089c 4399 add_reg_note (x, REG_BR_PROB, very_unlikely);
032caa7b 4400}
4401
4402/* A subroutine of the atomic operation splitters. Emit a load-locked
4403 instruction in MODE. */
4404
4405static void
4406emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4407{
4408 rtx (*fn) (rtx, rtx) = NULL;
4409 if (mode == SImode)
4410 fn = gen_load_locked_si;
4411 else if (mode == DImode)
4412 fn = gen_load_locked_di;
4413 emit_insn (fn (reg, mem));
4414}
4415
4416/* A subroutine of the atomic operation splitters. Emit a store-conditional
4417 instruction in MODE. */
4418
4419static void
4420emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4421{
4422 rtx (*fn) (rtx, rtx, rtx) = NULL;
4423 if (mode == SImode)
4424 fn = gen_store_conditional_si;
4425 else if (mode == DImode)
4426 fn = gen_store_conditional_di;
4427 emit_insn (fn (res, mem, val));
4428}
4429
596d3184 4430/* A subroutine of the atomic operation splitters. Emit an insxl
4431 instruction in MODE. */
4432
4433static rtx
4434emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4435{
4436 rtx ret = gen_reg_rtx (DImode);
4437 rtx (*fn) (rtx, rtx, rtx);
4438
4439 if (WORDS_BIG_ENDIAN)
4440 {
4441 if (mode == QImode)
4442 fn = gen_insbl_be;
4443 else
4444 fn = gen_inswl_be;
4445 }
4446 else
4447 {
4448 if (mode == QImode)
4449 fn = gen_insbl_le;
4450 else
4451 fn = gen_inswl_le;
4452 }
9a6f4ddd 4453 /* The insbl and inswl patterns require a register operand. */
4454 op1 = force_reg (mode, op1);
596d3184 4455 emit_insn (fn (ret, op1, op2));
4456
4457 return ret;
4458}
4459
85694bac 4460/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
f155876e 4461 to perform. MEM is the memory on which to operate. VAL is the second
4462 operand of the binary operator. BEFORE and AFTER are optional locations to
4463 return the value of MEM either before of after the operation. SCRATCH is
4464 a scratch register. */
4465
4466void
4467alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4468 rtx before, rtx after, rtx scratch)
4469{
4470 enum machine_mode mode = GET_MODE (mem);
032caa7b 4471 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
f155876e 4472
4473 emit_insn (gen_memory_barrier ());
4474
4475 label = gen_label_rtx ();
4476 emit_label (label);
4477 label = gen_rtx_LABEL_REF (DImode, label);
4478
4479 if (before == NULL)
4480 before = scratch;
032caa7b 4481 emit_load_locked (mode, before, mem);
f155876e 4482
4483 if (code == NOT)
6f7e6aa3 4484 {
4485 x = gen_rtx_AND (mode, before, val);
4486 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4487
4488 x = gen_rtx_NOT (mode, val);
4489 }
f155876e 4490 else
4491 x = gen_rtx_fmt_ee (code, mode, before, val);
f155876e 4492 if (after)
4493 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
87121034 4494 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
f155876e 4495
032caa7b 4496 emit_store_conditional (mode, cond, mem, scratch);
4497
4498 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4499 emit_unlikely_jump (x, label);
4500
4501 emit_insn (gen_memory_barrier ());
4502}
4503
4504/* Expand a compare and swap operation. */
4505
4506void
4507alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4508 rtx scratch)
4509{
4510 enum machine_mode mode = GET_MODE (mem);
4511 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4512
4513 emit_insn (gen_memory_barrier ());
4514
4515 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4516 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4517 emit_label (XEXP (label1, 0));
4518
4519 emit_load_locked (mode, retval, mem);
4520
4521 x = gen_lowpart (DImode, retval);
4522 if (oldval == const0_rtx)
4523 x = gen_rtx_NE (DImode, x, const0_rtx);
f155876e 4524 else
032caa7b 4525 {
4526 x = gen_rtx_EQ (DImode, x, oldval);
4527 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4528 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4529 }
4530 emit_unlikely_jump (x, label2);
4531
4532 emit_move_insn (scratch, newval);
4533 emit_store_conditional (mode, cond, mem, scratch);
f155876e 4534
4535 x = gen_rtx_EQ (DImode, cond, const0_rtx);
032caa7b 4536 emit_unlikely_jump (x, label1);
4537
4538 emit_insn (gen_memory_barrier ());
4539 emit_label (XEXP (label2, 0));
4540}
4541
596d3184 4542void
4543alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4544{
4545 enum machine_mode mode = GET_MODE (mem);
4546 rtx addr, align, wdst;
4547 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4548
4549 addr = force_reg (DImode, XEXP (mem, 0));
4550 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4551 NULL_RTX, 1, OPTAB_DIRECT);
4552
4553 oldval = convert_modes (DImode, mode, oldval, 1);
4554 newval = emit_insxl (mode, newval, addr);
4555
4556 wdst = gen_reg_rtx (DImode);
4557 if (mode == QImode)
4558 fn5 = gen_sync_compare_and_swapqi_1;
4559 else
4560 fn5 = gen_sync_compare_and_swaphi_1;
4561 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4562
4563 emit_move_insn (dst, gen_lowpart (mode, wdst));
4564}
4565
4566void
4567alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4568 rtx oldval, rtx newval, rtx align,
4569 rtx scratch, rtx cond)
4570{
4571 rtx label1, label2, mem, width, mask, x;
4572
4573 mem = gen_rtx_MEM (DImode, align);
4574 MEM_VOLATILE_P (mem) = 1;
4575
4576 emit_insn (gen_memory_barrier ());
4577 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4578 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4579 emit_label (XEXP (label1, 0));
4580
4581 emit_load_locked (DImode, scratch, mem);
4582
4583 width = GEN_INT (GET_MODE_BITSIZE (mode));
4584 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4585 if (WORDS_BIG_ENDIAN)
4586 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4587 else
4588 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4589
4590 if (oldval == const0_rtx)
4591 x = gen_rtx_NE (DImode, dest, const0_rtx);
4592 else
4593 {
4594 x = gen_rtx_EQ (DImode, dest, oldval);
4595 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4596 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4597 }
4598 emit_unlikely_jump (x, label2);
4599
4600 if (WORDS_BIG_ENDIAN)
4601 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4602 else
4603 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4604 emit_insn (gen_iordi3 (scratch, scratch, newval));
4605
4606 emit_store_conditional (DImode, scratch, mem, scratch);
4607
4608 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4609 emit_unlikely_jump (x, label1);
4610
4611 emit_insn (gen_memory_barrier ());
4612 emit_label (XEXP (label2, 0));
4613}
4614
032caa7b 4615/* Expand an atomic exchange operation. */
4616
4617void
4618alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4619{
4620 enum machine_mode mode = GET_MODE (mem);
4621 rtx label, x, cond = gen_lowpart (DImode, scratch);
f155876e 4622
032caa7b 4623 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4624 emit_label (XEXP (label, 0));
4625
4626 emit_load_locked (mode, retval, mem);
4627 emit_move_insn (scratch, val);
4628 emit_store_conditional (mode, cond, mem, scratch);
4629
4630 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4631 emit_unlikely_jump (x, label);
a15b4a3c 4632
4633 emit_insn (gen_memory_barrier ());
f155876e 4634}
596d3184 4635
4636void
4637alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4638{
4639 enum machine_mode mode = GET_MODE (mem);
4640 rtx addr, align, wdst;
4641 rtx (*fn4) (rtx, rtx, rtx, rtx);
4642
4643 /* Force the address into a register. */
4644 addr = force_reg (DImode, XEXP (mem, 0));
4645
4646 /* Align it to a multiple of 8. */
4647 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4648 NULL_RTX, 1, OPTAB_DIRECT);
4649
4650 /* Insert val into the correct byte location within the word. */
4651 val = emit_insxl (mode, val, addr);
4652
4653 wdst = gen_reg_rtx (DImode);
4654 if (mode == QImode)
4655 fn4 = gen_sync_lock_test_and_setqi_1;
4656 else
4657 fn4 = gen_sync_lock_test_and_sethi_1;
4658 emit_insn (fn4 (wdst, addr, val, align));
4659
4660 emit_move_insn (dst, gen_lowpart (mode, wdst));
4661}
4662
4663void
4664alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4665 rtx val, rtx align, rtx scratch)
4666{
4667 rtx label, mem, width, mask, x;
4668
4669 mem = gen_rtx_MEM (DImode, align);
4670 MEM_VOLATILE_P (mem) = 1;
4671
596d3184 4672 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4673 emit_label (XEXP (label, 0));
4674
4675 emit_load_locked (DImode, scratch, mem);
4676
4677 width = GEN_INT (GET_MODE_BITSIZE (mode));
4678 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4679 if (WORDS_BIG_ENDIAN)
4680 {
4681 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4682 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4683 }
4684 else
4685 {
4686 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4687 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4688 }
4689 emit_insn (gen_iordi3 (scratch, scratch, val));
4690
4691 emit_store_conditional (DImode, scratch, mem, scratch);
4692
4693 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4694 emit_unlikely_jump (x, label);
a15b4a3c 4695
4696 emit_insn (gen_memory_barrier ());
596d3184 4697}
bf2a98b3 4698\f
4699/* Adjust the cost of a scheduling dependency. Return the new cost of
4700 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4701
747af5e7 4702static int
92643d95 4703alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
bf2a98b3 4704{
d2832bd8 4705 enum attr_type insn_type, dep_insn_type;
bf2a98b3 4706
4707 /* If the dependence is an anti-dependence, there is no cost. For an
4708 output dependence, there is sometimes a cost, but it doesn't seem
4709 worth handling those few cases. */
bf2a98b3 4710 if (REG_NOTE_KIND (link) != 0)
7eb0c947 4711 return cost;
bf2a98b3 4712
d2832bd8 4713 /* If we can't recognize the insns, we can't really do anything. */
4714 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4715 return cost;
4716
4717 insn_type = get_attr_type (insn);
4718 dep_insn_type = get_attr_type (dep_insn);
4719
07c1a295 4720 /* Bring in the user-defined memory latency. */
e7a69d05 4721 if (dep_insn_type == TYPE_ILD
4722 || dep_insn_type == TYPE_FLD
4723 || dep_insn_type == TYPE_LDSYM)
07c1a295 4724 cost += alpha_memory_latency-1;
4725
7eb0c947 4726 /* Everything else handled in DFA bypasses now. */
3680ac41 4727
bf2a98b3 4728 return cost;
4729}
747af5e7 4730
7eb0c947 4731/* The number of instructions that can be issued per cycle. */
4732
747af5e7 4733static int
92643d95 4734alpha_issue_rate (void)
747af5e7 4735{
fb64edde 4736 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
747af5e7 4737}
4738
7eb0c947 4739/* How many alternative schedules to try. This should be as wide as the
4740 scheduling freedom in the DFA, but no wider. Making this value too
4741 large results extra work for the scheduler.
4742
4743 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4744 alternative schedules. For EV5, we can choose between E0/E1 and
8d232dc7 4745 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
7eb0c947 4746
4747static int
92643d95 4748alpha_multipass_dfa_lookahead (void)
7eb0c947 4749{
fb64edde 4750 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
7eb0c947 4751}
0c0464e6 4752\f
5f7b9df8 4753/* Machine-specific function data. */
4754
fb1e4f4a 4755struct GTY(()) machine_function
5f7b9df8 4756{
674a8f0b 4757 /* For unicosmk. */
5f7b9df8 4758 /* List of call information words for calls from this function. */
4759 struct rtx_def *first_ciw;
4760 struct rtx_def *last_ciw;
4761 int ciw_count;
4762
4763 /* List of deferred case vectors. */
4764 struct rtx_def *addr_list;
1f3233d1 4765
674a8f0b 4766 /* For OSF. */
5f7b9df8 4767 const char *some_ld_name;
a221313c 4768
4769 /* For TARGET_LD_BUGGY_LDGP. */
4770 struct rtx_def *gp_save_rtx;
5f7b9df8 4771};
4772
1f3233d1 4773/* How to allocate a 'struct machine_function'. */
9caef960 4774
1f3233d1 4775static struct machine_function *
92643d95 4776alpha_init_machine_status (void)
9caef960 4777{
9e7454d0 4778 return ((struct machine_function *)
1f3233d1 4779 ggc_alloc_cleared (sizeof (struct machine_function)));
9caef960 4780}
9caef960 4781
0c0464e6 4782/* Functions to save and restore alpha_return_addr_rtx. */
4783
0c0464e6 4784/* Start the ball rolling with RETURN_ADDR_RTX. */
4785
4786rtx
92643d95 4787alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
0c0464e6 4788{
0c0464e6 4789 if (count != 0)
4790 return const0_rtx;
4791
0f37b7a2 4792 return get_hard_reg_initial_val (Pmode, REG_RA);
0c0464e6 4793}
4794
a221313c 4795/* Return or create a memory slot containing the gp value for the current
66561750 4796 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4797
4798rtx
92643d95 4799alpha_gp_save_rtx (void)
66561750 4800{
a221313c 4801 rtx seq, m = cfun->machine->gp_save_rtx;
4802
4803 if (m == NULL)
4804 {
4805 start_sequence ();
4806
4807 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4808 m = validize_mem (m);
4809 emit_move_insn (m, pic_offset_table_rtx);
4810
4811 seq = get_insns ();
4812 end_sequence ();
8a1586ba 4813
4814 /* We used to simply emit the sequence after entry_of_function.
4815 However this breaks the CFG if the first instruction in the
4816 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4817 label. Emit the sequence properly on the edge. We are only
4818 invoked from dw2_build_landing_pads and finish_eh_generation
4819 will call commit_edge_insertions thanks to a kludge. */
4820 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
a221313c 4821
4822 cfun->machine->gp_save_rtx = m;
4823 }
4824
4825 return m;
66561750 4826}
4827
0c0464e6 4828static int
92643d95 4829alpha_ra_ever_killed (void)
0c0464e6 4830{
5a965225 4831 rtx top;
4832
0f37b7a2 4833 if (!has_hard_reg_initial_val (Pmode, REG_RA))
3072d30e 4834 return (int)df_regs_ever_live_p (REG_RA);
0c0464e6 4835
5a965225 4836 push_topmost_sequence ();
4837 top = get_insns ();
4838 pop_topmost_sequence ();
4839
4840 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
0c0464e6 4841}
4842
bf2a98b3 4843\f
6fec94c5 4844/* Return the trap mode suffix applicable to the current
65abff06 4845 instruction, or NULL. */
bf2a98b3 4846
6fec94c5 4847static const char *
92643d95 4848get_trap_mode_suffix (void)
bf2a98b3 4849{
6fec94c5 4850 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
bf2a98b3 4851
6fec94c5 4852 switch (s)
bf2a98b3 4853 {
6fec94c5 4854 case TRAP_SUFFIX_NONE:
4855 return NULL;
c4622276 4856
6fec94c5 4857 case TRAP_SUFFIX_SU:
bc16f0c1 4858 if (alpha_fptm >= ALPHA_FPTM_SU)
6fec94c5 4859 return "su";
4860 return NULL;
c4622276 4861
6fec94c5 4862 case TRAP_SUFFIX_SUI:
4863 if (alpha_fptm >= ALPHA_FPTM_SUI)
4864 return "sui";
4865 return NULL;
4866
4867 case TRAP_SUFFIX_V_SV:
39344852 4868 switch (alpha_fptm)
4869 {
4870 case ALPHA_FPTM_N:
6fec94c5 4871 return NULL;
39344852 4872 case ALPHA_FPTM_U:
6fec94c5 4873 return "v";
39344852 4874 case ALPHA_FPTM_SU:
4875 case ALPHA_FPTM_SUI:
6fec94c5 4876 return "sv";
4d10b463 4877 default:
4878 gcc_unreachable ();
39344852 4879 }
39344852 4880
6fec94c5 4881 case TRAP_SUFFIX_V_SV_SVI:
b5ea3193 4882 switch (alpha_fptm)
4883 {
4884 case ALPHA_FPTM_N:
6fec94c5 4885 return NULL;
b5ea3193 4886 case ALPHA_FPTM_U:
6fec94c5 4887 return "v";
b5ea3193 4888 case ALPHA_FPTM_SU:
6fec94c5 4889 return "sv";
b5ea3193 4890 case ALPHA_FPTM_SUI:
6fec94c5 4891 return "svi";
4d10b463 4892 default:
4893 gcc_unreachable ();
b5ea3193 4894 }
4895 break;
4896
6fec94c5 4897 case TRAP_SUFFIX_U_SU_SUI:
c4622276 4898 switch (alpha_fptm)
4899 {
4900 case ALPHA_FPTM_N:
6fec94c5 4901 return NULL;
c4622276 4902 case ALPHA_FPTM_U:
6fec94c5 4903 return "u";
c4622276 4904 case ALPHA_FPTM_SU:
6fec94c5 4905 return "su";
c4622276 4906 case ALPHA_FPTM_SUI:
6fec94c5 4907 return "sui";
4d10b463 4908 default:
4909 gcc_unreachable ();
c4622276 4910 }
4911 break;
4d10b463 4912
4913 default:
4914 gcc_unreachable ();
6fec94c5 4915 }
4d10b463 4916 gcc_unreachable ();
6fec94c5 4917}
c4622276 4918
6fec94c5 4919/* Return the rounding mode suffix applicable to the current
65abff06 4920 instruction, or NULL. */
6fec94c5 4921
4922static const char *
92643d95 4923get_round_mode_suffix (void)
6fec94c5 4924{
4925 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4926
4927 switch (s)
4928 {
4929 case ROUND_SUFFIX_NONE:
4930 return NULL;
4931 case ROUND_SUFFIX_NORMAL:
4932 switch (alpha_fprm)
c4622276 4933 {
6fec94c5 4934 case ALPHA_FPRM_NORM:
4935 return NULL;
9e7454d0 4936 case ALPHA_FPRM_MINF:
6fec94c5 4937 return "m";
4938 case ALPHA_FPRM_CHOP:
4939 return "c";
4940 case ALPHA_FPRM_DYN:
4941 return "d";
4d10b463 4942 default:
4943 gcc_unreachable ();
c4622276 4944 }
4945 break;
4946
6fec94c5 4947 case ROUND_SUFFIX_C:
4948 return "c";
4d10b463 4949
4950 default:
4951 gcc_unreachable ();
6fec94c5 4952 }
4d10b463 4953 gcc_unreachable ();
6fec94c5 4954}
4955
5f7b9df8 4956/* Locate some local-dynamic symbol still in use by this function
4957 so that we can print its name in some movdi_er_tlsldm pattern. */
4958
92643d95 4959static int
4960get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4961{
4962 rtx x = *px;
4963
4964 if (GET_CODE (x) == SYMBOL_REF
4965 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4966 {
4967 cfun->machine->some_ld_name = XSTR (x, 0);
4968 return 1;
4969 }
4970
4971 return 0;
4972}
4973
5f7b9df8 4974static const char *
92643d95 4975get_some_local_dynamic_name (void)
5f7b9df8 4976{
4977 rtx insn;
4978
4979 if (cfun->machine->some_ld_name)
4980 return cfun->machine->some_ld_name;
4981
4982 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4983 if (INSN_P (insn)
4984 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4985 return cfun->machine->some_ld_name;
4986
4d10b463 4987 gcc_unreachable ();
5f7b9df8 4988}
4989
6fec94c5 4990/* Print an operand. Recognize special options, documented below. */
4991
4992void
92643d95 4993print_operand (FILE *file, rtx x, int code)
6fec94c5 4994{
4995 int i;
4996
4997 switch (code)
4998 {
4999 case '~':
5000 /* Print the assembler name of the current function. */
5001 assemble_name (file, alpha_fnname);
5002 break;
5003
5f7b9df8 5004 case '&':
5005 assemble_name (file, get_some_local_dynamic_name ());
5006 break;
5007
6fec94c5 5008 case '/':
5009 {
5010 const char *trap = get_trap_mode_suffix ();
5011 const char *round = get_round_mode_suffix ();
5012
5013 if (trap || round)
9caef960 5014 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5015 (trap ? trap : ""), (round ? round : ""));
6fec94c5 5016 break;
5017 }
5018
8df4a58b 5019 case ',':
5020 /* Generates single precision instruction suffix. */
6fec94c5 5021 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
8df4a58b 5022 break;
5023
5024 case '-':
5025 /* Generates double precision instruction suffix. */
6fec94c5 5026 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
8df4a58b 5027 break;
5028
1f0ce6a6 5029 case '#':
5030 if (alpha_this_literal_sequence_number == 0)
5031 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5032 fprintf (file, "%d", alpha_this_literal_sequence_number);
5033 break;
5034
5035 case '*':
5036 if (alpha_this_gpdisp_sequence_number == 0)
5037 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5038 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5039 break;
5040
5041 case 'H':
5042 if (GET_CODE (x) == HIGH)
5dcb037d 5043 output_addr_const (file, XEXP (x, 0));
1f0ce6a6 5044 else
5045 output_operand_lossage ("invalid %%H value");
5046 break;
5047
ad2ed779 5048 case 'J':
5f7b9df8 5049 {
5050 const char *lituse;
5051
5052 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5053 {
5054 x = XVECEXP (x, 0, 0);
5055 lituse = "lituse_tlsgd";
5056 }
5057 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5058 {
5059 x = XVECEXP (x, 0, 0);
5060 lituse = "lituse_tlsldm";
5061 }
c933fb42 5062 else if (CONST_INT_P (x))
5f7b9df8 5063 lituse = "lituse_jsr";
5064 else
5065 {
5066 output_operand_lossage ("invalid %%J value");
5067 break;
5068 }
5069
5070 if (x != const0_rtx)
5071 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5072 }
ad2ed779 5073 break;
5074
592222c2 5075 case 'j':
5076 {
5077 const char *lituse;
5078
5079#ifdef HAVE_AS_JSRDIRECT_RELOCS
5080 lituse = "lituse_jsrdirect";
5081#else
5082 lituse = "lituse_jsr";
5083#endif
5084
5085 gcc_assert (INTVAL (x) != 0);
5086 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5087 }
5088 break;
bf2a98b3 5089 case 'r':
5090 /* If this operand is the constant zero, write it as "$31". */
c933fb42 5091 if (REG_P (x))
bf2a98b3 5092 fprintf (file, "%s", reg_names[REGNO (x)]);
5093 else if (x == CONST0_RTX (GET_MODE (x)))
5094 fprintf (file, "$31");
5095 else
5096 output_operand_lossage ("invalid %%r value");
bf2a98b3 5097 break;
5098
5099 case 'R':
5100 /* Similar, but for floating-point. */
c933fb42 5101 if (REG_P (x))
bf2a98b3 5102 fprintf (file, "%s", reg_names[REGNO (x)]);
5103 else if (x == CONST0_RTX (GET_MODE (x)))
5104 fprintf (file, "$f31");
5105 else
5106 output_operand_lossage ("invalid %%R value");
bf2a98b3 5107 break;
5108
5109 case 'N':
5110 /* Write the 1's complement of a constant. */
c933fb42 5111 if (!CONST_INT_P (x))
bf2a98b3 5112 output_operand_lossage ("invalid %%N value");
5113
61a63ca5 5114 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
bf2a98b3 5115 break;
5116
5117 case 'P':
5118 /* Write 1 << C, for a constant C. */
c933fb42 5119 if (!CONST_INT_P (x))
bf2a98b3 5120 output_operand_lossage ("invalid %%P value");
5121
61a63ca5 5122 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
bf2a98b3 5123 break;
5124
5125 case 'h':
5126 /* Write the high-order 16 bits of a constant, sign-extended. */
c933fb42 5127 if (!CONST_INT_P (x))
bf2a98b3 5128 output_operand_lossage ("invalid %%h value");
5129
61a63ca5 5130 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
bf2a98b3 5131 break;
5132
5133 case 'L':
5134 /* Write the low-order 16 bits of a constant, sign-extended. */
c933fb42 5135 if (!CONST_INT_P (x))
bf2a98b3 5136 output_operand_lossage ("invalid %%L value");
5137
61a63ca5 5138 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5139 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
bf2a98b3 5140 break;
5141
5142 case 'm':
5143 /* Write mask for ZAP insn. */
5144 if (GET_CODE (x) == CONST_DOUBLE)
5145 {
5146 HOST_WIDE_INT mask = 0;
5147 HOST_WIDE_INT value;
5148
5149 value = CONST_DOUBLE_LOW (x);
5150 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5151 i++, value >>= 8)
5152 if (value & 0xff)
5153 mask |= (1 << i);
5154
5155 value = CONST_DOUBLE_HIGH (x);
5156 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5157 i++, value >>= 8)
5158 if (value & 0xff)
5159 mask |= (1 << (i + sizeof (int)));
5160
61a63ca5 5161 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
bf2a98b3 5162 }
5163
c933fb42 5164 else if (CONST_INT_P (x))
bf2a98b3 5165 {
5166 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5167
5168 for (i = 0; i < 8; i++, value >>= 8)
5169 if (value & 0xff)
5170 mask |= (1 << i);
5171
61a63ca5 5172 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
bf2a98b3 5173 }
5174 else
5175 output_operand_lossage ("invalid %%m value");
5176 break;
5177
5178 case 'M':
34377880 5179 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
c933fb42 5180 if (!CONST_INT_P (x)
34377880 5181 || (INTVAL (x) != 8 && INTVAL (x) != 16
5182 && INTVAL (x) != 32 && INTVAL (x) != 64))
bf2a98b3 5183 output_operand_lossage ("invalid %%M value");
5184
5185 fprintf (file, "%s",
34377880 5186 (INTVAL (x) == 8 ? "b"
5187 : INTVAL (x) == 16 ? "w"
5188 : INTVAL (x) == 32 ? "l"
5189 : "q"));
bf2a98b3 5190 break;
5191
5192 case 'U':
5193 /* Similar, except do it from the mask. */
c933fb42 5194 if (CONST_INT_P (x))
ae4cd3a5 5195 {
5196 HOST_WIDE_INT value = INTVAL (x);
5197
5198 if (value == 0xff)
5199 {
5200 fputc ('b', file);
5201 break;
5202 }
5203 if (value == 0xffff)
5204 {
5205 fputc ('w', file);
5206 break;
5207 }
5208 if (value == 0xffffffff)
5209 {
5210 fputc ('l', file);
5211 break;
5212 }
5213 if (value == -1)
5214 {
5215 fputc ('q', file);
5216 break;
5217 }
5218 }
5219 else if (HOST_BITS_PER_WIDE_INT == 32
5220 && GET_CODE (x) == CONST_DOUBLE
5221 && CONST_DOUBLE_LOW (x) == 0xffffffff
5222 && CONST_DOUBLE_HIGH (x) == 0)
5223 {
5224 fputc ('l', file);
5225 break;
5226 }
5227 output_operand_lossage ("invalid %%U value");
bf2a98b3 5228 break;
5229
5230 case 's':
9caef960 5231 /* Write the constant value divided by 8 for little-endian mode or
5232 (56 - value) / 8 for big-endian mode. */
5233
c933fb42 5234 if (!CONST_INT_P (x)
9caef960 5235 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5236 ? 56
9e7454d0 5237 : 64)
9caef960 5238 || (INTVAL (x) & 7) != 0)
bf2a98b3 5239 output_operand_lossage ("invalid %%s value");
5240
9caef960 5241 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5242 WORDS_BIG_ENDIAN
5243 ? (56 - INTVAL (x)) / 8
5244 : INTVAL (x) / 8);
bf2a98b3 5245 break;
5246
5247 case 'S':
5248 /* Same, except compute (64 - c) / 8 */
5249
c933fb42 5250 if (!CONST_INT_P (x)
bf2a98b3 5251 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5252 && (INTVAL (x) & 7) != 8)
5253 output_operand_lossage ("invalid %%s value");
5254
61a63ca5 5255 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
bf2a98b3 5256 break;
5257
9caef960 5258 case 't':
5259 {
5260 /* On Unicos/Mk systems: use a DEX expression if the symbol
5261 clashes with a register name. */
5262 int dex = unicosmk_need_dex (x);
5263 if (dex)
5264 fprintf (file, "DEX(%d)", dex);
5265 else
5266 output_addr_const (file, x);
5267 }
5268 break;
5269
62dc3582 5270 case 'C': case 'D': case 'c': case 'd':
bf2a98b3 5271 /* Write out comparison name. */
62dc3582 5272 {
5273 enum rtx_code c = GET_CODE (x);
5274
6720e96c 5275 if (!COMPARISON_P (x))
62dc3582 5276 output_operand_lossage ("invalid %%C value");
5277
f3d263a7 5278 else if (code == 'D')
62dc3582 5279 c = reverse_condition (c);
5280 else if (code == 'c')
5281 c = swap_condition (c);
5282 else if (code == 'd')
5283 c = swap_condition (reverse_condition (c));
5284
5285 if (c == LEU)
5286 fprintf (file, "ule");
5287 else if (c == LTU)
5288 fprintf (file, "ult");
a4110d9a 5289 else if (c == UNORDERED)
5290 fprintf (file, "un");
62dc3582 5291 else
5292 fprintf (file, "%s", GET_RTX_NAME (c));
5293 }
8ad50a44 5294 break;
5295
bf2a98b3 5296 case 'E':
5297 /* Write the divide or modulus operator. */
5298 switch (GET_CODE (x))
5299 {
5300 case DIV:
5301 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5302 break;
5303 case UDIV:
5304 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5305 break;
5306 case MOD:
5307 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5308 break;
5309 case UMOD:
5310 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5311 break;
5312 default:
5313 output_operand_lossage ("invalid %%E value");
5314 break;
5315 }
5316 break;
5317
bf2a98b3 5318 case 'A':
5319 /* Write "_u" for unaligned access. */
c933fb42 5320 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
bf2a98b3 5321 fprintf (file, "_u");
5322 break;
5323
5324 case 0:
c933fb42 5325 if (REG_P (x))
bf2a98b3 5326 fprintf (file, "%s", reg_names[REGNO (x)]);
c933fb42 5327 else if (MEM_P (x))
bf2a98b3 5328 output_address (XEXP (x, 0));
5f7b9df8 5329 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5330 {
5331 switch (XINT (XEXP (x, 0), 1))
5332 {
5333 case UNSPEC_DTPREL:
5334 case UNSPEC_TPREL:
5335 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5336 break;
5337 default:
5338 output_operand_lossage ("unknown relocation unspec");
5339 break;
5340 }
5341 }
bf2a98b3 5342 else
5343 output_addr_const (file, x);
5344 break;
5345
5346 default:
5347 output_operand_lossage ("invalid %%xn code");
5348 }
5349}
6e0fe99e 5350
5351void
92643d95 5352print_operand_address (FILE *file, rtx addr)
6e0fe99e 5353{
a3e39a24 5354 int basereg = 31;
6e0fe99e 5355 HOST_WIDE_INT offset = 0;
5356
5357 if (GET_CODE (addr) == AND)
5358 addr = XEXP (addr, 0);
6e0fe99e 5359
a3e39a24 5360 if (GET_CODE (addr) == PLUS
c933fb42 5361 && CONST_INT_P (XEXP (addr, 1)))
6e0fe99e 5362 {
5363 offset = INTVAL (XEXP (addr, 1));
a3e39a24 5364 addr = XEXP (addr, 0);
6e0fe99e 5365 }
1f0ce6a6 5366
5367 if (GET_CODE (addr) == LO_SUM)
5368 {
5f7b9df8 5369 const char *reloc16, *reloclo;
5370 rtx op1 = XEXP (addr, 1);
5371
5372 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5373 {
5374 op1 = XEXP (op1, 0);
5375 switch (XINT (op1, 1))
5376 {
5377 case UNSPEC_DTPREL:
5378 reloc16 = NULL;
5379 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5380 break;
5381 case UNSPEC_TPREL:
5382 reloc16 = NULL;
5383 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5384 break;
5385 default:
5386 output_operand_lossage ("unknown relocation unspec");
5387 return;
5388 }
5389
5390 output_addr_const (file, XVECEXP (op1, 0, 0));
5391 }
5392 else
5393 {
5394 reloc16 = "gprel";
5395 reloclo = "gprellow";
5396 output_addr_const (file, op1);
5397 }
5398
1f0ce6a6 5399 if (offset)
4840a03a 5400 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
9e7454d0 5401
1f0ce6a6 5402 addr = XEXP (addr, 0);
4d10b463 5403 switch (GET_CODE (addr))
5404 {
5405 case REG:
5406 basereg = REGNO (addr);
5407 break;
5408
5409 case SUBREG:
5410 basereg = subreg_regno (addr);
5411 break;
5412
5413 default:
5414 gcc_unreachable ();
5415 }
5dcb037d 5416
5417 fprintf (file, "($%d)\t\t!%s", basereg,
5f7b9df8 5418 (basereg == 29 ? reloc16 : reloclo));
1f0ce6a6 5419 return;
5420 }
5421
4d10b463 5422 switch (GET_CODE (addr))
5423 {
5424 case REG:
5425 basereg = REGNO (addr);
5426 break;
5427
5428 case SUBREG:
5429 basereg = subreg_regno (addr);
5430 break;
5431
5432 case CONST_INT:
5433 offset = INTVAL (addr);
5434 break;
cf73d31f 5435
5436#if TARGET_ABI_OPEN_VMS
4d10b463 5437 case SYMBOL_REF:
cf73d31f 5438 fprintf (file, "%s", XSTR (addr, 0));
5439 return;
4d10b463 5440
5441 case CONST:
5442 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5443 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
6433714e 5444 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
cf73d31f 5445 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5446 INTVAL (XEXP (XEXP (addr, 0), 1)));
5447 return;
4d10b463 5448
cf73d31f 5449#endif
4d10b463 5450 default:
5451 gcc_unreachable ();
5452 }
6e0fe99e 5453
4840a03a 5454 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
6e0fe99e 5455}
bf2a98b3 5456\f
9e042f31 5457/* Emit RTL insns to initialize the variable parts of a trampoline at
5458 TRAMP. FNADDR is an RTX for the address of the function's pure
5459 code. CXT is an RTX for the static chain value for the function.
96297568 5460
5461 The three offset parameters are for the individual template's
9e7454d0 5462 layout. A JMPOFS < 0 indicates that the trampoline does not
96297568 5463 contain instructions at all.
5464
9e042f31 5465 We assume here that a function will be called many more times than
5466 its address is taken (e.g., it might be passed to qsort), so we
5467 take the trouble to initialize the "hint" field in the JMP insn.
5468 Note that the hint field is PC (new) + 4 * bits 13:0. */
5469
5470void
92643d95 5471alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5472 int fnofs, int cxtofs, int jmpofs)
9e042f31 5473{
b71600b1 5474 rtx addr;
17683b9f 5475 /* VMS really uses DImode pointers in memory at this point. */
1467e953 5476 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
9e042f31 5477
17683b9f 5478#ifdef POINTERS_EXTEND_UNSIGNED
5479 fnaddr = convert_memory_address (mode, fnaddr);
5480 cxt = convert_memory_address (mode, cxt);
5481#endif
5482
9e042f31 5483 /* Store function address and CXT. */
46ba8e1c 5484 addr = memory_address (mode, plus_constant (tramp, fnofs));
7014838c 5485 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
46ba8e1c 5486 addr = memory_address (mode, plus_constant (tramp, cxtofs));
7014838c 5487 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
96297568 5488
5577e296 5489#ifdef ENABLE_EXECUTE_STACK
09a1f342 5490 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
92e0f786 5491 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
9e042f31 5492#endif
5493
96297568 5494 if (jmpofs >= 0)
5495 emit_insn (gen_imb ());
9e042f31 5496}
5497\f
915c336f 5498/* Determine where to put an argument to a function.
5499 Value is zero to push the argument on the stack,
5500 or a hard register in which to store the argument.
5501
5502 MODE is the argument's machine mode.
5503 TYPE is the data type of the argument (as a tree).
5504 This is null for libcalls where that information may
5505 not be available.
5506 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5507 the preceding args and about the function being called.
5508 NAMED is nonzero if this argument is a named parameter
5509 (otherwise it is an extra parameter matching an ellipsis).
5510
5511 On Alpha the first 6 words of args are normally in registers
5512 and the rest are pushed. */
5513
5514rtx
92643d95 5515function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5516 int named ATTRIBUTE_UNUSED)
915c336f 5517{
5518 int basereg;
57e47080 5519 int num_args;
915c336f 5520
a685f5d8 5521 /* Don't get confused and pass small structures in FP registers. */
5522 if (type && AGGREGATE_TYPE_P (type))
9caef960 5523 basereg = 16;
a685f5d8 5524 else
5525 {
5526#ifdef ENABLE_CHECKING
92d40bc4 5527 /* With alpha_split_complex_arg, we shouldn't see any raw complex
a685f5d8 5528 values here. */
4d10b463 5529 gcc_assert (!COMPLEX_MODE_P (mode));
a685f5d8 5530#endif
5531
5532 /* Set up defaults for FP operands passed in FP registers, and
5533 integral operands passed in integer registers. */
5534 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5535 basereg = 32 + 16;
5536 else
5537 basereg = 16;
5538 }
9caef960 5539
5540 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5541 the three platforms, so we can't avoid conditional compilation. */
1467e953 5542#if TARGET_ABI_OPEN_VMS
9caef960 5543 {
5544 if (mode == VOIDmode)
5545 return alpha_arg_info_reg_val (cum);
1467e953 5546
9caef960 5547 num_args = cum.num_args;
0336f0f0 5548 if (num_args >= 6
5549 || targetm.calls.must_pass_in_stack (mode, type))
9caef960 5550 return NULL_RTX;
5551 }
a685f5d8 5552#elif TARGET_ABI_UNICOSMK
9caef960 5553 {
5554 int size;
915c336f 5555
9caef960 5556 /* If this is the last argument, generate the call info word (CIW). */
5557 /* ??? We don't include the caller's line number in the CIW because
5558 I don't know how to determine it if debug infos are turned off. */
5559 if (mode == VOIDmode)
5560 {
5561 int i;
5562 HOST_WIDE_INT lo;
5563 HOST_WIDE_INT hi;
5564 rtx ciw;
5565
5566 lo = 0;
5567
5568 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5569 if (cum.reg_args_type[i])
5570 lo |= (1 << (7 - i));
5571
5572 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5573 lo |= 7;
5574 else
5575 lo |= cum.num_reg_words;
5576
5577#if HOST_BITS_PER_WIDE_INT == 32
5578 hi = (cum.num_args << 20) | cum.num_arg_words;
5579#else
e162157f 5580 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5581 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
9caef960 5582 hi = 0;
5583#endif
5584 ciw = immed_double_const (lo, hi, DImode);
5585
5586 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5587 UNSPEC_UMK_LOAD_CIW);
5588 }
5589
5590 size = ALPHA_ARG_SIZE (mode, type, named);
5591 num_args = cum.num_reg_words;
0336f0f0 5592 if (cum.force_stack
5593 || cum.num_reg_words + size > 6
5594 || targetm.calls.must_pass_in_stack (mode, type))
9caef960 5595 return NULL_RTX;
5596 else if (type && TYPE_MODE (type) == BLKmode)
5597 {
5598 rtx reg1, reg2;
5599
5600 reg1 = gen_rtx_REG (DImode, num_args + 16);
5601 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5602
5603 /* The argument fits in two registers. Note that we still need to
5604 reserve a register for empty structures. */
5605 if (size == 0)
5606 return NULL_RTX;
5607 else if (size == 1)
5608 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5609 else
5610 {
5611 reg2 = gen_rtx_REG (DImode, num_args + 17);
5612 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5613 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5614 }
5615 }
5616 }
a685f5d8 5617#elif TARGET_ABI_OSF
9caef960 5618 {
5619 if (cum >= 6)
5620 return NULL_RTX;
5621 num_args = cum;
5622
5623 /* VOID is passed as a special flag for "last argument". */
5624 if (type == void_type_node)
5625 basereg = 16;
0336f0f0 5626 else if (targetm.calls.must_pass_in_stack (mode, type))
9caef960 5627 return NULL_RTX;
9caef960 5628 }
a685f5d8 5629#else
5630#error Unhandled ABI
5631#endif
915c336f 5632
57e47080 5633 return gen_rtx_REG (mode, num_args + basereg);
915c336f 5634}
5635
f054eb3c 5636static int
5637alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5638 enum machine_mode mode ATTRIBUTE_UNUSED,
5639 tree type ATTRIBUTE_UNUSED,
5640 bool named ATTRIBUTE_UNUSED)
5641{
5642 int words = 0;
5643
5644#if TARGET_ABI_OPEN_VMS
5645 if (cum->num_args < 6
5646 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5c5b637a 5647 words = 6 - cum->num_args;
f054eb3c 5648#elif TARGET_ABI_UNICOSMK
5649 /* Never any split arguments. */
5650#elif TARGET_ABI_OSF
5651 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5652 words = 6 - *cum;
5653#else
5654#error Unhandled ABI
5655#endif
5656
5657 return words * UNITS_PER_WORD;
5658}
5659
5660
a685f5d8 5661/* Return true if TYPE must be returned in memory, instead of in registers. */
5662
dd9f3024 5663static bool
fb80456a 5664alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
a685f5d8 5665{
dd9f3024 5666 enum machine_mode mode = VOIDmode;
a685f5d8 5667 int size;
5668
5669 if (type)
5670 {
5671 mode = TYPE_MODE (type);
5672
5673 /* All aggregates are returned in memory. */
5674 if (AGGREGATE_TYPE_P (type))
5675 return true;
5676 }
5677
5678 size = GET_MODE_SIZE (mode);
5679 switch (GET_MODE_CLASS (mode))
5680 {
5681 case MODE_VECTOR_FLOAT:
5682 /* Pass all float vectors in memory, like an aggregate. */
5683 return true;
5684
5685 case MODE_COMPLEX_FLOAT:
5686 /* We judge complex floats on the size of their element,
5687 not the size of the whole type. */
5688 size = GET_MODE_UNIT_SIZE (mode);
5689 break;
5690
5691 case MODE_INT:
5692 case MODE_FLOAT:
5693 case MODE_COMPLEX_INT:
5694 case MODE_VECTOR_INT:
5695 break;
5696
5697 default:
9e7454d0 5698 /* ??? We get called on all sorts of random stuff from
4d10b463 5699 aggregate_value_p. We must return something, but it's not
5700 clear what's safe to return. Pretend it's a struct I
5701 guess. */
a685f5d8 5702 return true;
5703 }
5704
5705 /* Otherwise types must fit in one register. */
5706 return size > UNITS_PER_WORD;
5707}
5708
b981d932 5709/* Return true if TYPE should be passed by invisible reference. */
5710
5711static bool
5712alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5713 enum machine_mode mode,
fb80456a 5714 const_tree type ATTRIBUTE_UNUSED,
b981d932 5715 bool named ATTRIBUTE_UNUSED)
5716{
5717 return mode == TFmode || mode == TCmode;
5718}
5719
a685f5d8 5720/* Define how to find the value returned by a function. VALTYPE is the
5721 data type of the value (as a tree). If the precise function being
5722 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5723 MODE is set instead of VALTYPE for libcalls.
5724
5725 On Alpha the value is found in $0 for integer functions and
5726 $f0 for floating-point functions. */
5727
5728rtx
fb80456a 5729function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
a685f5d8 5730 enum machine_mode mode)
5731{
79db42ad 5732 unsigned int regnum, dummy;
8deb3959 5733 enum mode_class mclass;
a685f5d8 5734
4d10b463 5735 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
a685f5d8 5736
5737 if (valtype)
5738 mode = TYPE_MODE (valtype);
5739
8deb3959 5740 mclass = GET_MODE_CLASS (mode);
5741 switch (mclass)
a685f5d8 5742 {
5743 case MODE_INT:
79db42ad 5744 PROMOTE_MODE (mode, dummy, valtype);
8e262b5e 5745 /* FALLTHRU */
a685f5d8 5746
5747 case MODE_COMPLEX_INT:
5748 case MODE_VECTOR_INT:
5749 regnum = 0;
5750 break;
5751
5752 case MODE_FLOAT:
5753 regnum = 32;
5754 break;
5755
5756 case MODE_COMPLEX_FLOAT:
5757 {
5758 enum machine_mode cmode = GET_MODE_INNER (mode);
5759
5760 return gen_rtx_PARALLEL
5761 (VOIDmode,
5762 gen_rtvec (2,
5763 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
bcd9bd66 5764 const0_rtx),
a685f5d8 5765 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5766 GEN_INT (GET_MODE_SIZE (cmode)))));
5767 }
5768
5769 default:
4d10b463 5770 gcc_unreachable ();
a685f5d8 5771 }
5772
5773 return gen_rtx_REG (mode, regnum);
5774}
5775
9e7454d0 5776/* TCmode complex values are passed by invisible reference. We
92d40bc4 5777 should not split these values. */
5778
5779static bool
a9f1838b 5780alpha_split_complex_arg (const_tree type)
92d40bc4 5781{
5782 return TYPE_MODE (type) != TCmode;
5783}
5784
2e15d750 5785static tree
5786alpha_build_builtin_va_list (void)
bf2a98b3 5787{
7ba21c9f 5788 tree base, ofs, space, record, type_decl;
bf2a98b3 5789
9caef960 5790 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
e7aabeab 5791 return ptr_type_node;
5792
a1f71e15 5793 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
0054fd98 5794 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5795 TREE_CHAIN (record) = type_decl;
5796 TYPE_NAME (record) = type_decl;
5797
e7aabeab 5798 /* C++? SET_IS_AGGR_TYPE (record, 1); */
bf2a98b3 5799
7ba21c9f 5800 /* Dummy field to prevent alignment warnings. */
5801 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5802 DECL_FIELD_CONTEXT (space) = record;
5803 DECL_ARTIFICIAL (space) = 1;
5804 DECL_IGNORED_P (space) = 1;
5805
e7aabeab 5806 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5807 integer_type_node);
5808 DECL_FIELD_CONTEXT (ofs) = record;
7ba21c9f 5809 TREE_CHAIN (ofs) = space;
fc4c89ed 5810
e7aabeab 5811 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5812 ptr_type_node);
5813 DECL_FIELD_CONTEXT (base) = record;
5814 TREE_CHAIN (base) = ofs;
fc4c89ed 5815
e7aabeab 5816 TYPE_FIELDS (record) = base;
5817 layout_type (record);
5818
a6c787e5 5819 va_list_gpr_counter_field = ofs;
e7aabeab 5820 return record;
5821}
5822
7955d282 5823#if TARGET_ABI_OSF
a6c787e5 5824/* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5825 and constant additions. */
5826
94c0325b 5827static gimple
a6c787e5 5828va_list_skip_additions (tree lhs)
5829{
94c0325b 5830 gimple stmt;
a6c787e5 5831
5832 for (;;)
5833 {
94c0325b 5834 enum tree_code code;
5835
a6c787e5 5836 stmt = SSA_NAME_DEF_STMT (lhs);
5837
94c0325b 5838 if (gimple_code (stmt) == GIMPLE_PHI)
a6c787e5 5839 return stmt;
5840
94c0325b 5841 if (!is_gimple_assign (stmt)
5842 || gimple_assign_lhs (stmt) != lhs)
5843 return NULL;
a6c787e5 5844
94c0325b 5845 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5846 return stmt;
5847 code = gimple_assign_rhs_code (stmt);
5848 if (!CONVERT_EXPR_CODE_P (code)
5849 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5850 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5851 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5852 return stmt;
a6c787e5 5853
94c0325b 5854 lhs = gimple_assign_rhs1 (stmt);
a6c787e5 5855 }
5856}
5857
5858/* Check if LHS = RHS statement is
5859 LHS = *(ap.__base + ap.__offset + cst)
5860 or
5861 LHS = *(ap.__base
5862 + ((ap.__offset + cst <= 47)
5863 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5864 If the former, indicate that GPR registers are needed,
5865 if the latter, indicate that FPR registers are needed.
adde8f91 5866
5867 Also look for LHS = (*ptr).field, where ptr is one of the forms
5868 listed above.
5869
a6c787e5 5870 On alpha, cfun->va_list_gpr_size is used as size of the needed
adde8f91 5871 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5872 registers are needed and bit 1 set if FPR registers are needed.
5873 Return true if va_list references should not be scanned for the
5874 current statement. */
a6c787e5 5875
5876static bool
75a70cf9 5877alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
a6c787e5 5878{
94c0325b 5879 tree base, offset, rhs;
a6c787e5 5880 int offset_arg = 1;
94c0325b 5881 gimple base_stmt;
a6c787e5 5882
94c0325b 5883 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5884 != GIMPLE_SINGLE_RHS)
5885 return false;
5886
5887 rhs = gimple_assign_rhs1 (stmt);
adde8f91 5888 while (handled_component_p (rhs))
5889 rhs = TREE_OPERAND (rhs, 0);
a6c787e5 5890 if (TREE_CODE (rhs) != INDIRECT_REF
5891 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5892 return false;
5893
94c0325b 5894 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5895 if (stmt == NULL
5896 || !is_gimple_assign (stmt)
5897 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
a6c787e5 5898 return false;
5899
94c0325b 5900 base = gimple_assign_rhs1 (stmt);
a6c787e5 5901 if (TREE_CODE (base) == SSA_NAME)
94c0325b 5902 {
5903 base_stmt = va_list_skip_additions (base);
5904 if (base_stmt
5905 && is_gimple_assign (base_stmt)
5906 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5907 base = gimple_assign_rhs1 (base_stmt);
5908 }
a6c787e5 5909
5910 if (TREE_CODE (base) != COMPONENT_REF
5911 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5912 {
94c0325b 5913 base = gimple_assign_rhs2 (stmt);
a6c787e5 5914 if (TREE_CODE (base) == SSA_NAME)
94c0325b 5915 {
5916 base_stmt = va_list_skip_additions (base);
5917 if (base_stmt
5918 && is_gimple_assign (base_stmt)
5919 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5920 base = gimple_assign_rhs1 (base_stmt);
5921 }
a6c787e5 5922
5923 if (TREE_CODE (base) != COMPONENT_REF
5924 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5925 return false;
5926
5927 offset_arg = 0;
5928 }
5929
5930 base = get_base_address (base);
5931 if (TREE_CODE (base) != VAR_DECL
dda53cd5 5932 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
a6c787e5 5933 return false;
5934
94c0325b 5935 offset = gimple_op (stmt, 1 + offset_arg);
a6c787e5 5936 if (TREE_CODE (offset) == SSA_NAME)
a6c787e5 5937 {
94c0325b 5938 gimple offset_stmt = va_list_skip_additions (offset);
a6c787e5 5939
94c0325b 5940 if (offset_stmt
5941 && gimple_code (offset_stmt) == GIMPLE_PHI)
a6c787e5 5942 {
94c0325b 5943 HOST_WIDE_INT sub;
5944 gimple arg1_stmt, arg2_stmt;
5945 tree arg1, arg2;
5946 enum tree_code code1, code2;
a6c787e5 5947
94c0325b 5948 if (gimple_phi_num_args (offset_stmt) != 2)
7955d282 5949 goto escapes;
a6c787e5 5950
94c0325b 5951 arg1_stmt
5952 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5953 arg2_stmt
5954 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5955 if (arg1_stmt == NULL
5956 || !is_gimple_assign (arg1_stmt)
5957 || arg2_stmt == NULL
5958 || !is_gimple_assign (arg2_stmt))
5959 goto escapes;
a6c787e5 5960
94c0325b 5961 code1 = gimple_assign_rhs_code (arg1_stmt);
5962 code2 = gimple_assign_rhs_code (arg2_stmt);
5963 if (code1 == COMPONENT_REF
5964 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5965 /* Do nothing. */;
5966 else if (code2 == COMPONENT_REF
5967 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5968 {
5969 gimple tem = arg1_stmt;
5970 code2 = code1;
5971 arg1_stmt = arg2_stmt;
5972 arg2_stmt = tem;
5973 }
5974 else
5975 goto escapes;
7955d282 5976
94c0325b 5977 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5978 goto escapes;
7955d282 5979
94c0325b 5980 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5981 if (code2 == MINUS_EXPR)
5982 sub = -sub;
5983 if (sub < -48 || sub > -32)
5984 goto escapes;
a6c787e5 5985
94c0325b 5986 arg1 = gimple_assign_rhs1 (arg1_stmt);
5987 arg2 = gimple_assign_rhs1 (arg2_stmt);
5988 if (TREE_CODE (arg2) == SSA_NAME)
5989 {
5990 arg2_stmt = va_list_skip_additions (arg2);
5991 if (arg2_stmt == NULL
5992 || !is_gimple_assign (arg2_stmt)
5993 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5994 goto escapes;
5995 arg2 = gimple_assign_rhs1 (arg2_stmt);
5996 }
5997 if (arg1 != arg2)
5998 goto escapes;
5999
6000 if (TREE_CODE (arg1) != COMPONENT_REF
6001 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6002 || get_base_address (arg1) != base)
6003 goto escapes;
6004
6005 /* Need floating point regs. */
6006 cfun->va_list_fpr_size |= 2;
6007 return false;
6008 }
6009 if (offset_stmt
6010 && is_gimple_assign (offset_stmt)
6011 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6012 offset = gimple_assign_rhs1 (offset_stmt);
6013 }
6014 if (TREE_CODE (offset) != COMPONENT_REF
6015 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6016 || get_base_address (offset) != base)
a6c787e5 6017 goto escapes;
6018 else
6019 /* Need general regs. */
6020 cfun->va_list_fpr_size |= 1;
6021 return false;
6022
6023escapes:
6024 si->va_list_escapes = true;
6025 return false;
6026}
7955d282 6027#endif
a6c787e5 6028
4310aa50 6029/* Perform any needed actions needed for a function that is receiving a
dd9f3024 6030 variable number of arguments. */
4310aa50 6031
dd9f3024 6032static void
2dc656b7 6033alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6034 tree type, int *pretend_size, int no_rtl)
dd9f3024 6035{
2dc656b7 6036 CUMULATIVE_ARGS cum = *pcum;
6037
6038 /* Skip the current argument. */
6039 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6040
dd9f3024 6041#if TARGET_ABI_UNICOSMK
6042 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6043 arguments on the stack. Unfortunately, it doesn't always store the first
6044 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6045 with stdargs as we always have at least one named argument there. */
2dc656b7 6046 if (cum.num_reg_words < 6)
dd9f3024 6047 {
6048 if (!no_rtl)
6049 {
2dc656b7 6050 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
dd9f3024 6051 emit_insn (gen_arg_home_umk ());
6052 }
6053 *pretend_size = 0;
6054 }
6055#elif TARGET_ABI_OPEN_VMS
6056 /* For VMS, we allocate space for all 6 arg registers plus a count.
4310aa50 6057
dd9f3024 6058 However, if NO registers need to be saved, don't allocate any space.
6059 This is not only because we won't need the space, but because AP
6060 includes the current_pretend_args_size and we don't want to mess up
6061 any ap-relative addresses already made. */
2dc656b7 6062 if (cum.num_args < 6)
dd9f3024 6063 {
6064 if (!no_rtl)
6065 {
6066 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6067 emit_insn (gen_arg_home ());
6068 }
6069 *pretend_size = 7 * UNITS_PER_WORD;
6070 }
6071#else
6072 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6073 only push those that are remaining. However, if NO registers need to
6074 be saved, don't allocate any space. This is not only because we won't
6075 need the space, but because AP includes the current_pretend_args_size
6076 and we don't want to mess up any ap-relative addresses already made.
6077
6078 If we are not to use the floating-point registers, save the integer
6079 registers where we would put the floating-point registers. This is
6080 not the most efficient way to implement varargs with just one register
6081 class, but it isn't worth doing anything more efficient in this rare
6082 case. */
4310aa50 6083 if (cum >= 6)
6084 return;
6085
6086 if (!no_rtl)
6087 {
32c2fdea 6088 int count;
6089 alias_set_type set = get_varargs_alias_set ();
4310aa50 6090 rtx tmp;
6091
7955d282 6092 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6093 if (count > 6 - cum)
6094 count = 6 - cum;
4310aa50 6095
7955d282 6096 /* Detect whether integer registers or floating-point registers
6097 are needed by the detected va_arg statements. See above for
6098 how these values are computed. Note that the "escape" value
6099 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6100 these bits set. */
6101 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6102
6103 if (cfun->va_list_fpr_size & 1)
6104 {
6105 tmp = gen_rtx_MEM (BLKmode,
6106 plus_constant (virtual_incoming_args_rtx,
6107 (cum + 6) * UNITS_PER_WORD));
ae2dd339 6108 MEM_NOTRAP_P (tmp) = 1;
7955d282 6109 set_mem_alias_set (tmp, set);
6110 move_block_from_reg (16 + cum, tmp, count);
6111 }
6112
6113 if (cfun->va_list_fpr_size & 2)
6114 {
6115 tmp = gen_rtx_MEM (BLKmode,
6116 plus_constant (virtual_incoming_args_rtx,
6117 cum * UNITS_PER_WORD));
ae2dd339 6118 MEM_NOTRAP_P (tmp) = 1;
7955d282 6119 set_mem_alias_set (tmp, set);
6120 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6121 }
6122 }
4310aa50 6123 *pretend_size = 12 * UNITS_PER_WORD;
f6940372 6124#endif
dd9f3024 6125}
4310aa50 6126
8a58ed0a 6127static void
92643d95 6128alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
e7aabeab 6129{
6130 HOST_WIDE_INT offset;
6131 tree t, offset_field, base_field;
fc4c89ed 6132
80909c64 6133 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6134 return;
6135
fc264da3 6136 if (TARGET_ABI_UNICOSMK)
7df226a2 6137 std_expand_builtin_va_start (valist, nextarg);
e7aabeab 6138
6644435d 6139 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
e7aabeab 6140 up by 48, storing fp arg registers in the first 48 bytes, and the
6141 integer arg registers in the next 48 bytes. This is only done,
6142 however, if any integer registers need to be stored.
6143
6144 If no integer registers need be stored, then we must subtract 48
6145 in order to account for the integer arg registers which are counted
4310aa50 6146 in argsize above, but which are not actually stored on the stack.
6147 Must further be careful here about structures straddling the last
9e7454d0 6148 integer argument register; that futzes with pretend_args_size,
4310aa50 6149 which changes the meaning of AP. */
e7aabeab 6150
2dc656b7 6151 if (NUM_ARGS < 6)
fc264da3 6152 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
8df4a58b 6153 else
abe32cce 6154 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
e7aabeab 6155
fc264da3 6156 if (TARGET_ABI_OPEN_VMS)
6157 {
6158 nextarg = plus_constant (nextarg, offset);
6159 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
75a70cf9 6160 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
ed03eadb 6161 make_tree (ptr_type_node, nextarg));
fc264da3 6162 TREE_SIDE_EFFECTS (t) = 1;
e7aabeab 6163
fc264da3 6164 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6165 }
6166 else
6167 {
6168 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6169 offset_field = TREE_CHAIN (base_field);
6170
ed03eadb 6171 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6172 valist, base_field, NULL_TREE);
6173 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6174 valist, offset_field, NULL_TREE);
fc264da3 6175
6176 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
8920b6b9 6177 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6178 size_int (offset));
75a70cf9 6179 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
fc264da3 6180 TREE_SIDE_EFFECTS (t) = 1;
6181 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6182
7016c612 6183 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
75a70cf9 6184 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
fc264da3 6185 TREE_SIDE_EFFECTS (t) = 1;
6186 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6187 }
e7aabeab 6188}
6189
de8f9b94 6190static tree
94c0325b 6191alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
75a70cf9 6192 gimple_seq *pre_p)
de8f9b94 6193{
94c0325b 6194 tree type_size, ptr_type, addend, t, addr;
6195 gimple_seq internal_post;
de8f9b94 6196
de8f9b94 6197 /* If the type could not be passed in registers, skip the block
6198 reserved for the registers. */
0336f0f0 6199 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
de8f9b94 6200 {
7016c612 6201 t = build_int_cst (TREE_TYPE (offset), 6*8);
75a70cf9 6202 gimplify_assign (offset,
6203 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6204 pre_p);
de8f9b94 6205 }
6206
6207 addend = offset;
6208 ptr_type = build_pointer_type (type);
de8f9b94 6209
2cd7bb84 6210 if (TREE_CODE (type) == COMPLEX_TYPE)
de8f9b94 6211 {
6212 tree real_part, imag_part, real_temp;
6213
c7b3f103 6214 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6215 offset, pre_p);
6216
6217 /* Copy the value into a new temporary, lest the formal temporary
de8f9b94 6218 be reused out from under us. */
c7b3f103 6219 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
de8f9b94 6220
c7b3f103 6221 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6222 offset, pre_p);
de8f9b94 6223
ed03eadb 6224 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
de8f9b94 6225 }
6226 else if (TREE_CODE (type) == REAL_TYPE)
6227 {
6228 tree fpaddend, cond, fourtyeight;
6229
7016c612 6230 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
ed03eadb 6231 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6232 addend, fourtyeight);
6233 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6234 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6235 fpaddend, addend);
de8f9b94 6236 }
6237
6238 /* Build the final address and force that value into a temporary. */
8920b6b9 6239 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6240 fold_convert (sizetype, addend));
c7b3f103 6241 internal_post = NULL;
6242 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
94c0325b 6243 gimple_seq_add_seq (pre_p, internal_post);
de8f9b94 6244
6245 /* Update the offset field. */
c7b3f103 6246 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6247 if (type_size == NULL || TREE_OVERFLOW (type_size))
6248 t = size_zero_node;
6249 else
6250 {
6251 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6252 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6253 t = size_binop (MULT_EXPR, t, size_int (8));
6254 }
6255 t = fold_convert (TREE_TYPE (offset), t);
75a70cf9 6256 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6257 pre_p);
de8f9b94 6258
063f5fdd 6259 return build_va_arg_indirect_ref (addr);
de8f9b94 6260}
6261
e0eca1fa 6262static tree
75a70cf9 6263alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6264 gimple_seq *post_p)
de8f9b94 6265{
e0eca1fa 6266 tree offset_field, base_field, offset, base, t, r;
2cd7bb84 6267 bool indirect;
de8f9b94 6268
6269 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
e0eca1fa 6270 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
de8f9b94 6271
6272 base_field = TYPE_FIELDS (va_list_type_node);
6273 offset_field = TREE_CHAIN (base_field);
ed03eadb 6274 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6275 valist, base_field, NULL_TREE);
6276 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6277 valist, offset_field, NULL_TREE);
de8f9b94 6278
c7b3f103 6279 /* Pull the fields of the structure out into temporaries. Since we never
6280 modify the base field, we can use a formal temporary. Sign-extend the
6281 offset field so that it's the proper width for pointer arithmetic. */
6282 base = get_formal_tmp_var (base_field, pre_p);
de8f9b94 6283
c7b3f103 6284 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6285 offset = get_initialized_tmp_var (t, pre_p, NULL);
de8f9b94 6286
2cd7bb84 6287 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6288 if (indirect)
6289 type = build_pointer_type (type);
6290
de8f9b94 6291 /* Find the value. Note that this will be a stable indirection, or
6292 a composite of stable indirections in the case of complex. */
c7b3f103 6293 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
de8f9b94 6294
6295 /* Stuff the offset temporary back into its field. */
94c0325b 6296 gimplify_assign (unshare_expr (offset_field),
75a70cf9 6297 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
e0eca1fa 6298
2cd7bb84 6299 if (indirect)
063f5fdd 6300 r = build_va_arg_indirect_ref (r);
2cd7bb84 6301
e0eca1fa 6302 return r;
de8f9b94 6303}
bf2a98b3 6304\f
f2cc13dc 6305/* Builtins. */
6306
6307enum alpha_builtin
6308{
6309 ALPHA_BUILTIN_CMPBGE,
ae4cd3a5 6310 ALPHA_BUILTIN_EXTBL,
6311 ALPHA_BUILTIN_EXTWL,
6312 ALPHA_BUILTIN_EXTLL,
f2cc13dc 6313 ALPHA_BUILTIN_EXTQL,
ae4cd3a5 6314 ALPHA_BUILTIN_EXTWH,
6315 ALPHA_BUILTIN_EXTLH,
f2cc13dc 6316 ALPHA_BUILTIN_EXTQH,
ae4cd3a5 6317 ALPHA_BUILTIN_INSBL,
6318 ALPHA_BUILTIN_INSWL,
6319 ALPHA_BUILTIN_INSLL,
6320 ALPHA_BUILTIN_INSQL,
6321 ALPHA_BUILTIN_INSWH,
6322 ALPHA_BUILTIN_INSLH,
6323 ALPHA_BUILTIN_INSQH,
6324 ALPHA_BUILTIN_MSKBL,
6325 ALPHA_BUILTIN_MSKWL,
6326 ALPHA_BUILTIN_MSKLL,
6327 ALPHA_BUILTIN_MSKQL,
6328 ALPHA_BUILTIN_MSKWH,
6329 ALPHA_BUILTIN_MSKLH,
6330 ALPHA_BUILTIN_MSKQH,
6331 ALPHA_BUILTIN_UMULH,
f2cc13dc 6332 ALPHA_BUILTIN_ZAP,
6333 ALPHA_BUILTIN_ZAPNOT,
6334 ALPHA_BUILTIN_AMASK,
6335 ALPHA_BUILTIN_IMPLVER,
6336 ALPHA_BUILTIN_RPCC,
938e069b 6337 ALPHA_BUILTIN_THREAD_POINTER,
6338 ALPHA_BUILTIN_SET_THREAD_POINTER,
f2cc13dc 6339
6340 /* TARGET_MAX */
6341 ALPHA_BUILTIN_MINUB8,
6342 ALPHA_BUILTIN_MINSB8,
6343 ALPHA_BUILTIN_MINUW4,
6344 ALPHA_BUILTIN_MINSW4,
6345 ALPHA_BUILTIN_MAXUB8,
6346 ALPHA_BUILTIN_MAXSB8,
6347 ALPHA_BUILTIN_MAXUW4,
6348 ALPHA_BUILTIN_MAXSW4,
6349 ALPHA_BUILTIN_PERR,
6350 ALPHA_BUILTIN_PKLB,
6351 ALPHA_BUILTIN_PKWB,
6352 ALPHA_BUILTIN_UNPKBL,
6353 ALPHA_BUILTIN_UNPKBW,
6354
ae4cd3a5 6355 /* TARGET_CIX */
6356 ALPHA_BUILTIN_CTTZ,
6357 ALPHA_BUILTIN_CTLZ,
6358 ALPHA_BUILTIN_CTPOP,
6359
f2cc13dc 6360 ALPHA_BUILTIN_max
6361};
6362
ff8e23a2 6363static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
ae4cd3a5 6364 CODE_FOR_builtin_cmpbge,
6365 CODE_FOR_builtin_extbl,
6366 CODE_FOR_builtin_extwl,
6367 CODE_FOR_builtin_extll,
6368 CODE_FOR_builtin_extql,
6369 CODE_FOR_builtin_extwh,
6370 CODE_FOR_builtin_extlh,
6371 CODE_FOR_builtin_extqh,
6372 CODE_FOR_builtin_insbl,
6373 CODE_FOR_builtin_inswl,
6374 CODE_FOR_builtin_insll,
6375 CODE_FOR_builtin_insql,
6376 CODE_FOR_builtin_inswh,
6377 CODE_FOR_builtin_inslh,
6378 CODE_FOR_builtin_insqh,
6379 CODE_FOR_builtin_mskbl,
6380 CODE_FOR_builtin_mskwl,
6381 CODE_FOR_builtin_mskll,
6382 CODE_FOR_builtin_mskql,
6383 CODE_FOR_builtin_mskwh,
6384 CODE_FOR_builtin_msklh,
6385 CODE_FOR_builtin_mskqh,
6386 CODE_FOR_umuldi3_highpart,
6387 CODE_FOR_builtin_zap,
6388 CODE_FOR_builtin_zapnot,
6389 CODE_FOR_builtin_amask,
6390 CODE_FOR_builtin_implver,
6391 CODE_FOR_builtin_rpcc,
938e069b 6392 CODE_FOR_load_tp,
6393 CODE_FOR_set_tp,
ae4cd3a5 6394
6395 /* TARGET_MAX */
6396 CODE_FOR_builtin_minub8,
6397 CODE_FOR_builtin_minsb8,
6398 CODE_FOR_builtin_minuw4,
6399 CODE_FOR_builtin_minsw4,
6400 CODE_FOR_builtin_maxub8,
6401 CODE_FOR_builtin_maxsb8,
6402 CODE_FOR_builtin_maxuw4,
6403 CODE_FOR_builtin_maxsw4,
6404 CODE_FOR_builtin_perr,
6405 CODE_FOR_builtin_pklb,
6406 CODE_FOR_builtin_pkwb,
6407 CODE_FOR_builtin_unpkbl,
6408 CODE_FOR_builtin_unpkbw,
6409
6410 /* TARGET_CIX */
849c7bc6 6411 CODE_FOR_ctzdi2,
6412 CODE_FOR_clzdi2,
6413 CODE_FOR_popcountdi2
ae4cd3a5 6414};
6415
f2cc13dc 6416struct alpha_builtin_def
6417{
6418 const char *name;
6419 enum alpha_builtin code;
6420 unsigned int target_mask;
849c7bc6 6421 bool is_const;
f2cc13dc 6422};
6423
6424static struct alpha_builtin_def const zero_arg_builtins[] = {
849c7bc6 6425 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6426 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
f2cc13dc 6427};
6428
6429static struct alpha_builtin_def const one_arg_builtins[] = {
849c7bc6 6430 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6431 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6432 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6433 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6434 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6435 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6436 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6437 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
f2cc13dc 6438};
6439
6440static struct alpha_builtin_def const two_arg_builtins[] = {
849c7bc6 6441 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6442 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6443 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6444 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6445 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6446 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6447 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6448 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6449 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6450 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6451 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6452 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6453 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6454 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6455 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6456 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6457 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6458 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6459 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6460 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6461 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6462 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6463 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6464 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6465 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6466 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6467 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6468 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6469 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6470 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6471 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6472 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6473 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6474 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
f2cc13dc 6475};
6476
849c7bc6 6477static GTY(()) tree alpha_v8qi_u;
6478static GTY(()) tree alpha_v8qi_s;
6479static GTY(()) tree alpha_v4hi_u;
6480static GTY(()) tree alpha_v4hi_s;
6481
b657e73a 6482/* Helper function of alpha_init_builtins. Add the COUNT built-in
6483 functions pointed to by P, with function type FTYPE. */
6484
6485static void
6486alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6487 tree ftype)
6488{
6489 tree decl;
6490 size_t i;
6491
6492 for (i = 0; i < count; ++i, ++p)
6493 if ((target_flags & p->target_mask) == p->target_mask)
6494 {
6495 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6496 NULL, NULL);
6497 if (p->is_const)
6498 TREE_READONLY (decl) = 1;
6499 TREE_NOTHROW (decl) = 1;
6500 }
6501}
6502
6503
f2cc13dc 6504static void
92643d95 6505alpha_init_builtins (void)
f2cc13dc 6506{
e2dc233c 6507 tree dimode_integer_type_node;
b657e73a 6508 tree ftype, decl;
f2cc13dc 6509
e2dc233c 6510 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6511
e2dc233c 6512 ftype = build_function_type (dimode_integer_type_node, void_list_node);
b657e73a 6513 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6514 ftype);
f2cc13dc 6515
e2dc233c 6516 ftype = build_function_type_list (dimode_integer_type_node,
6517 dimode_integer_type_node, NULL_TREE);
b657e73a 6518 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6519 ftype);
f2cc13dc 6520
e2dc233c 6521 ftype = build_function_type_list (dimode_integer_type_node,
6522 dimode_integer_type_node,
6523 dimode_integer_type_node, NULL_TREE);
b657e73a 6524 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6525 ftype);
938e069b 6526
6527 ftype = build_function_type (ptr_type_node, void_list_node);
b657e73a 6528 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6529 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6530 NULL, NULL);
6531 TREE_NOTHROW (decl) = 1;
938e069b 6532
8b55c4ba 6533 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
b657e73a 6534 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6535 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6536 NULL, NULL);
6537 TREE_NOTHROW (decl) = 1;
849c7bc6 6538
6539 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6540 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6541 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6542 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
f2cc13dc 6543}
6544
6545/* Expand an expression EXP that calls a built-in function,
6546 with result going to TARGET if that's convenient
6547 (and in mode MODE if that's convenient).
6548 SUBTARGET may be used as the target for computing one of EXP's operands.
6549 IGNORE is nonzero if the value is to be ignored. */
6550
6551static rtx
92643d95 6552alpha_expand_builtin (tree exp, rtx target,
6553 rtx subtarget ATTRIBUTE_UNUSED,
6554 enum machine_mode mode ATTRIBUTE_UNUSED,
6555 int ignore ATTRIBUTE_UNUSED)
f2cc13dc 6556{
f2cc13dc 6557#define MAX_ARGS 2
6558
c2f47e15 6559 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
f2cc13dc 6560 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
c2f47e15 6561 tree arg;
6562 call_expr_arg_iterator iter;
f2cc13dc 6563 enum insn_code icode;
6564 rtx op[MAX_ARGS], pat;
6565 int arity;
938e069b 6566 bool nonvoid;
f2cc13dc 6567
6568 if (fcode >= ALPHA_BUILTIN_max)
6569 internal_error ("bad builtin fcode");
6570 icode = code_for_builtin[fcode];
6571 if (icode == 0)
6572 internal_error ("bad builtin fcode");
6573
938e069b 6574 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6575
c2f47e15 6576 arity = 0;
6577 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
f2cc13dc 6578 {
6579 const struct insn_operand_data *insn_op;
6580
f2cc13dc 6581 if (arg == error_mark_node)
6582 return NULL_RTX;
6583 if (arity > MAX_ARGS)
6584 return NULL_RTX;
6585
938e069b 6586 insn_op = &insn_data[icode].operand[arity + nonvoid];
6587
0a48089c 6588 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
f2cc13dc 6589
f2cc13dc 6590 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6591 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
c2f47e15 6592 arity++;
f2cc13dc 6593 }
6594
938e069b 6595 if (nonvoid)
6596 {
6597 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6598 if (!target
6599 || GET_MODE (target) != tmode
6600 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6601 target = gen_reg_rtx (tmode);
6602 }
f2cc13dc 6603
6604 switch (arity)
6605 {
6606 case 0:
6607 pat = GEN_FCN (icode) (target);
6608 break;
6609 case 1:
938e069b 6610 if (nonvoid)
6611 pat = GEN_FCN (icode) (target, op[0]);
6612 else
6613 pat = GEN_FCN (icode) (op[0]);
f2cc13dc 6614 break;
6615 case 2:
6616 pat = GEN_FCN (icode) (target, op[0], op[1]);
6617 break;
6618 default:
4d10b463 6619 gcc_unreachable ();
f2cc13dc 6620 }
6621 if (!pat)
6622 return NULL_RTX;
6623 emit_insn (pat);
6624
938e069b 6625 if (nonvoid)
6626 return target;
6627 else
6628 return const0_rtx;
f2cc13dc 6629}
849c7bc6 6630
6631
6632/* Several bits below assume HWI >= 64 bits. This should be enforced
6633 by config.gcc. */
6634#if HOST_BITS_PER_WIDE_INT < 64
6635# error "HOST_WIDE_INT too small"
6636#endif
6637
6638/* Fold the builtin for the CMPBGE instruction. This is a vector comparison
85c36fd1 6639 with an 8-bit output vector. OPINT contains the integer operands; bit N
849c7bc6 6640 of OP_CONST is set if OPINT[N] is valid. */
6641
6642static tree
6643alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6644{
6645 if (op_const == 3)
6646 {
6647 int i, val;
6648 for (i = 0, val = 0; i < 8; ++i)
6649 {
6650 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6651 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6652 if (c0 >= c1)
6653 val |= 1 << i;
6654 }
6655 return build_int_cst (long_integer_type_node, val);
6656 }
3def9653 6657 else if (op_const == 2 && opint[1] == 0)
849c7bc6 6658 return build_int_cst (long_integer_type_node, 0xff);
6659 return NULL;
6660}
6661
6662/* Fold the builtin for the ZAPNOT instruction. This is essentially a
6663 specialized form of an AND operation. Other byte manipulation instructions
6664 are defined in terms of this instruction, so this is also used as a
6665 subroutine for other builtins.
6666
6667 OP contains the tree operands; OPINT contains the extracted integer values.
6668 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6669 OPINT may be considered. */
6670
6671static tree
6672alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6673 long op_const)
6674{
6675 if (op_const & 2)
6676 {
6677 unsigned HOST_WIDE_INT mask = 0;
6678 int i;
6679
6680 for (i = 0; i < 8; ++i)
6681 if ((opint[1] >> i) & 1)
6682 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6683
6684 if (op_const & 1)
6685 return build_int_cst (long_integer_type_node, opint[0] & mask);
6686
6687 if (op)
b3da1868 6688 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6689 build_int_cst (long_integer_type_node, mask));
849c7bc6 6690 }
6691 else if ((op_const & 1) && opint[0] == 0)
6692 return build_int_cst (long_integer_type_node, 0);
6693 return NULL;
6694}
6695
6696/* Fold the builtins for the EXT family of instructions. */
6697
6698static tree
6699alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6700 long op_const, unsigned HOST_WIDE_INT bytemask,
6701 bool is_high)
6702{
6703 long zap_const = 2;
6704 tree *zap_op = NULL;
6705
6706 if (op_const & 2)
6707 {
6708 unsigned HOST_WIDE_INT loc;
6709
6710 loc = opint[1] & 7;
6711 if (BYTES_BIG_ENDIAN)
6712 loc ^= 7;
6713 loc *= 8;
6714
6715 if (loc != 0)
6716 {
6717 if (op_const & 1)
6718 {
6719 unsigned HOST_WIDE_INT temp = opint[0];
6720 if (is_high)
6721 temp <<= loc;
6722 else
6723 temp >>= loc;
6724 opint[0] = temp;
6725 zap_const = 3;
6726 }
6727 }
6728 else
6729 zap_op = op;
6730 }
6731
6732 opint[1] = bytemask;
6733 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6734}
6735
6736/* Fold the builtins for the INS family of instructions. */
6737
6738static tree
6739alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6740 long op_const, unsigned HOST_WIDE_INT bytemask,
6741 bool is_high)
6742{
6743 if ((op_const & 1) && opint[0] == 0)
6744 return build_int_cst (long_integer_type_node, 0);
6745
6746 if (op_const & 2)
6747 {
6748 unsigned HOST_WIDE_INT temp, loc, byteloc;
6749 tree *zap_op = NULL;
6750
6751 loc = opint[1] & 7;
6752 if (BYTES_BIG_ENDIAN)
6753 loc ^= 7;
6754 bytemask <<= loc;
6755
6756 temp = opint[0];
6757 if (is_high)
6758 {
6759 byteloc = (64 - (loc * 8)) & 0x3f;
6760 if (byteloc == 0)
6761 zap_op = op;
6762 else
6763 temp >>= byteloc;
6764 bytemask >>= 8;
6765 }
6766 else
6767 {
6768 byteloc = loc * 8;
6769 if (byteloc == 0)
6770 zap_op = op;
6771 else
6772 temp <<= byteloc;
6773 }
6774
6775 opint[0] = temp;
6776 opint[1] = bytemask;
6777 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6778 }
6779
6780 return NULL;
6781}
6782
6783static tree
6784alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6785 long op_const, unsigned HOST_WIDE_INT bytemask,
6786 bool is_high)
6787{
6788 if (op_const & 2)
6789 {
6790 unsigned HOST_WIDE_INT loc;
6791
6792 loc = opint[1] & 7;
6793 if (BYTES_BIG_ENDIAN)
6794 loc ^= 7;
6795 bytemask <<= loc;
6796
6797 if (is_high)
6798 bytemask >>= 8;
6799
6800 opint[1] = bytemask ^ 0xff;
6801 }
6802
6803 return alpha_fold_builtin_zapnot (op, opint, op_const);
6804}
6805
6806static tree
6807alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6808{
6809 switch (op_const)
6810 {
6811 case 3:
6812 {
6813 unsigned HOST_WIDE_INT l;
6814 HOST_WIDE_INT h;
6815
6816 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6817
6818#if HOST_BITS_PER_WIDE_INT > 64
6819# error fixme
6820#endif
6821
6822 return build_int_cst (long_integer_type_node, h);
6823 }
6824
6825 case 1:
6826 opint[1] = opint[0];
6827 /* FALLTHRU */
6828 case 2:
6829 /* Note that (X*1) >> 64 == 0. */
6830 if (opint[1] == 0 || opint[1] == 1)
6831 return build_int_cst (long_integer_type_node, 0);
6832 break;
6833 }
6834 return NULL;
6835}
6836
6837static tree
6838alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6839{
6840 tree op0 = fold_convert (vtype, op[0]);
6841 tree op1 = fold_convert (vtype, op[1]);
b3da1868 6842 tree val = fold_build2 (code, vtype, op0, op1);
70ce4162 6843 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
849c7bc6 6844}
6845
6846static tree
6847alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6848{
6849 unsigned HOST_WIDE_INT temp = 0;
6850 int i;
6851
6852 if (op_const != 3)
6853 return NULL;
6854
6855 for (i = 0; i < 8; ++i)
6856 {
6857 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6858 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6859 if (a >= b)
6860 temp += a - b;
6861 else
6862 temp += b - a;
6863 }
6864
6865 return build_int_cst (long_integer_type_node, temp);
6866}
6867
6868static tree
6869alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6870{
6871 unsigned HOST_WIDE_INT temp;
6872
6873 if (op_const == 0)
6874 return NULL;
6875
6876 temp = opint[0] & 0xff;
6877 temp |= (opint[0] >> 24) & 0xff00;
6878
6879 return build_int_cst (long_integer_type_node, temp);
6880}
6881
6882static tree
6883alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6884{
6885 unsigned HOST_WIDE_INT temp;
6886
6887 if (op_const == 0)
6888 return NULL;
6889
6890 temp = opint[0] & 0xff;
6891 temp |= (opint[0] >> 8) & 0xff00;
6892 temp |= (opint[0] >> 16) & 0xff0000;
6893 temp |= (opint[0] >> 24) & 0xff000000;
6894
6895 return build_int_cst (long_integer_type_node, temp);
6896}
6897
6898static tree
6899alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6900{
6901 unsigned HOST_WIDE_INT temp;
6902
6903 if (op_const == 0)
6904 return NULL;
6905
6906 temp = opint[0] & 0xff;
6907 temp |= (opint[0] & 0xff00) << 24;
6908
6909 return build_int_cst (long_integer_type_node, temp);
6910}
6911
6912static tree
6913alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6914{
6915 unsigned HOST_WIDE_INT temp;
6916
6917 if (op_const == 0)
6918 return NULL;
6919
6920 temp = opint[0] & 0xff;
6921 temp |= (opint[0] & 0x0000ff00) << 8;
6922 temp |= (opint[0] & 0x00ff0000) << 16;
6923 temp |= (opint[0] & 0xff000000) << 24;
6924
6925 return build_int_cst (long_integer_type_node, temp);
6926}
6927
6928static tree
6929alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6930{
6931 unsigned HOST_WIDE_INT temp;
6932
6933 if (op_const == 0)
6934 return NULL;
6935
6936 if (opint[0] == 0)
6937 temp = 64;
6938 else
6939 temp = exact_log2 (opint[0] & -opint[0]);
6940
6941 return build_int_cst (long_integer_type_node, temp);
6942}
6943
6944static tree
6945alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6946{
6947 unsigned HOST_WIDE_INT temp;
6948
6949 if (op_const == 0)
6950 return NULL;
6951
6952 if (opint[0] == 0)
6953 temp = 64;
6954 else
6955 temp = 64 - floor_log2 (opint[0]) - 1;
6956
6957 return build_int_cst (long_integer_type_node, temp);
6958}
6959
6960static tree
6961alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6962{
6963 unsigned HOST_WIDE_INT temp, op;
6964
6965 if (op_const == 0)
6966 return NULL;
6967
6968 op = opint[0];
6969 temp = 0;
6970 while (op)
6971 temp++, op &= op - 1;
6972
6973 return build_int_cst (long_integer_type_node, temp);
6974}
6975
6976/* Fold one of our builtin functions. */
6977
6978static tree
0ab8af67 6979alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
849c7bc6 6980{
849c7bc6 6981 tree op[MAX_ARGS], t;
6982 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6983 long op_const = 0, arity = 0;
6984
0ab8af67 6985 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
849c7bc6 6986 {
6987 tree arg = TREE_VALUE (t);
6988 if (arg == error_mark_node)
6989 return NULL;
6990 if (arity >= MAX_ARGS)
6991 return NULL;
6992
6993 op[arity] = arg;
6994 opint[arity] = 0;
6995 if (TREE_CODE (arg) == INTEGER_CST)
6996 {
6997 op_const |= 1L << arity;
6998 opint[arity] = int_cst_value (arg);
6999 }
7000 }
7001
7002 switch (DECL_FUNCTION_CODE (fndecl))
7003 {
7004 case ALPHA_BUILTIN_CMPBGE:
7005 return alpha_fold_builtin_cmpbge (opint, op_const);
7006
7007 case ALPHA_BUILTIN_EXTBL:
7008 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7009 case ALPHA_BUILTIN_EXTWL:
7010 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7011 case ALPHA_BUILTIN_EXTLL:
7012 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7013 case ALPHA_BUILTIN_EXTQL:
7014 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7015 case ALPHA_BUILTIN_EXTWH:
7016 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7017 case ALPHA_BUILTIN_EXTLH:
7018 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7019 case ALPHA_BUILTIN_EXTQH:
7020 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7021
7022 case ALPHA_BUILTIN_INSBL:
7023 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7024 case ALPHA_BUILTIN_INSWL:
7025 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7026 case ALPHA_BUILTIN_INSLL:
7027 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7028 case ALPHA_BUILTIN_INSQL:
7029 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7030 case ALPHA_BUILTIN_INSWH:
7031 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7032 case ALPHA_BUILTIN_INSLH:
7033 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7034 case ALPHA_BUILTIN_INSQH:
7035 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7036
7037 case ALPHA_BUILTIN_MSKBL:
7038 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7039 case ALPHA_BUILTIN_MSKWL:
7040 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7041 case ALPHA_BUILTIN_MSKLL:
7042 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7043 case ALPHA_BUILTIN_MSKQL:
7044 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7045 case ALPHA_BUILTIN_MSKWH:
7046 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7047 case ALPHA_BUILTIN_MSKLH:
7048 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7049 case ALPHA_BUILTIN_MSKQH:
7050 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7051
7052 case ALPHA_BUILTIN_UMULH:
7053 return alpha_fold_builtin_umulh (opint, op_const);
7054
7055 case ALPHA_BUILTIN_ZAP:
7056 opint[1] ^= 0xff;
7057 /* FALLTHRU */
7058 case ALPHA_BUILTIN_ZAPNOT:
7059 return alpha_fold_builtin_zapnot (op, opint, op_const);
7060
7061 case ALPHA_BUILTIN_MINUB8:
7062 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7063 case ALPHA_BUILTIN_MINSB8:
7064 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7065 case ALPHA_BUILTIN_MINUW4:
7066 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7067 case ALPHA_BUILTIN_MINSW4:
7068 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7069 case ALPHA_BUILTIN_MAXUB8:
7070 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7071 case ALPHA_BUILTIN_MAXSB8:
7072 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7073 case ALPHA_BUILTIN_MAXUW4:
7074 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7075 case ALPHA_BUILTIN_MAXSW4:
7076 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7077
7078 case ALPHA_BUILTIN_PERR:
7079 return alpha_fold_builtin_perr (opint, op_const);
7080 case ALPHA_BUILTIN_PKLB:
7081 return alpha_fold_builtin_pklb (opint, op_const);
7082 case ALPHA_BUILTIN_PKWB:
7083 return alpha_fold_builtin_pkwb (opint, op_const);
7084 case ALPHA_BUILTIN_UNPKBL:
7085 return alpha_fold_builtin_unpkbl (opint, op_const);
7086 case ALPHA_BUILTIN_UNPKBW:
7087 return alpha_fold_builtin_unpkbw (opint, op_const);
7088
7089 case ALPHA_BUILTIN_CTTZ:
7090 return alpha_fold_builtin_cttz (opint, op_const);
7091 case ALPHA_BUILTIN_CTLZ:
7092 return alpha_fold_builtin_ctlz (opint, op_const);
7093 case ALPHA_BUILTIN_CTPOP:
7094 return alpha_fold_builtin_ctpop (opint, op_const);
7095
7096 case ALPHA_BUILTIN_AMASK:
7097 case ALPHA_BUILTIN_IMPLVER:
7098 case ALPHA_BUILTIN_RPCC:
7099 case ALPHA_BUILTIN_THREAD_POINTER:
7100 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7101 /* None of these are foldable at compile-time. */
7102 default:
7103 return NULL;
7104 }
7105}
f2cc13dc 7106\f
bf2a98b3 7107/* This page contains routines that are used to determine what the function
7108 prologue and epilogue code will do and write them out. */
7109
7110/* Compute the size of the save area in the stack. */
7111
8df4a58b 7112/* These variables are used for communication between the following functions.
7113 They indicate various things about the current function being compiled
7114 that are used to tell what kind of prologue, epilogue and procedure
efee20da 7115 descriptor to generate. */
8df4a58b 7116
7117/* Nonzero if we need a stack procedure. */
b19d7ab1 7118enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7119static enum alpha_procedure_types alpha_procedure_type;
8df4a58b 7120
7121/* Register number (either FP or SP) that is used to unwind the frame. */
b9a5aa8e 7122static int vms_unwind_regno;
8df4a58b 7123
7124/* Register number used to save FP. We need not have one for RA since
7125 we don't modify it for register procedures. This is only defined
7126 for register frame procedures. */
b9a5aa8e 7127static int vms_save_fp_regno;
8df4a58b 7128
7129/* Register number used to reference objects off our PV. */
b9a5aa8e 7130static int vms_base_regno;
8df4a58b 7131
2cf1388a 7132/* Compute register masks for saved registers. */
8df4a58b 7133
7134static void
92643d95 7135alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
8df4a58b 7136{
7137 unsigned long imask = 0;
7138 unsigned long fmask = 0;
1f0ce6a6 7139 unsigned int i;
8df4a58b 7140
eaa112a0 7141 /* When outputting a thunk, we don't have valid register life info,
7142 but assemble_start_function wants to output .frame and .mask
7143 directives. */
9247818a 7144 if (cfun->is_thunk)
2cf1388a 7145 {
961d6ddd 7146 *imaskP = 0;
7147 *fmaskP = 0;
7148 return;
7149 }
8df4a58b 7150
b19d7ab1 7151 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
df7d0d23 7152 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
8df4a58b 7153
961d6ddd 7154 /* One for every register we have to save. */
7155 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7156 if (! fixed_regs[i] && ! call_used_regs[i]
3072d30e 7157 && df_regs_ever_live_p (i) && i != REG_RA
961d6ddd 7158 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7159 {
7160 if (i < 32)
df7d0d23 7161 imask |= (1UL << i);
961d6ddd 7162 else
df7d0d23 7163 fmask |= (1UL << (i - 32));
961d6ddd 7164 }
7165
7166 /* We need to restore these for the handler. */
18d50ae6 7167 if (crtl->calls_eh_return)
c49ad9ef 7168 {
7169 for (i = 0; ; ++i)
7170 {
7171 unsigned regno = EH_RETURN_DATA_REGNO (i);
7172 if (regno == INVALID_REGNUM)
7173 break;
7174 imask |= 1UL << regno;
7175 }
c49ad9ef 7176 }
9e7454d0 7177
961d6ddd 7178 /* If any register spilled, then spill the return address also. */
7179 /* ??? This is required by the Digital stack unwind specification
7180 and isn't needed if we're doing Dwarf2 unwinding. */
7181 if (imask || fmask || alpha_ra_ever_killed ())
df7d0d23 7182 imask |= (1UL << REG_RA);
b9a5aa8e 7183
8df4a58b 7184 *imaskP = imask;
7185 *fmaskP = fmask;
8df4a58b 7186}
7187
7188int
92643d95 7189alpha_sa_size (void)
8df4a58b 7190{
5aae9d06 7191 unsigned long mask[2];
8df4a58b 7192 int sa_size = 0;
5aae9d06 7193 int i, j;
8df4a58b 7194
5aae9d06 7195 alpha_sa_mask (&mask[0], &mask[1]);
7196
7197 if (TARGET_ABI_UNICOSMK)
7198 {
7199 if (mask[0] || mask[1])
7200 sa_size = 14;
7201 }
2cf1388a 7202 else
2cf1388a 7203 {
5aae9d06 7204 for (j = 0; j < 2; ++j)
7205 for (i = 0; i < 32; ++i)
7206 if ((mask[j] >> i) & 1)
7207 sa_size++;
2cf1388a 7208 }
8df4a58b 7209
9caef960 7210 if (TARGET_ABI_UNICOSMK)
7211 {
7212 /* We might not need to generate a frame if we don't make any calls
7213 (including calls to __T3E_MISMATCH if this is a vararg function),
7214 don't have any local variables which require stack slots, don't
7215 use alloca and have not determined that we need a frame for other
7216 reasons. */
7217
b19d7ab1 7218 alpha_procedure_type
7219 = (sa_size || get_frame_size() != 0
abe32cce 7220 || crtl->outgoing_args_size
18d50ae6 7221 || cfun->stdarg || cfun->calls_alloca
b19d7ab1 7222 || frame_pointer_needed)
7223 ? PT_STACK : PT_REGISTER;
9caef960 7224
7225 /* Always reserve space for saving callee-saved registers if we
7226 need a frame as required by the calling convention. */
b19d7ab1 7227 if (alpha_procedure_type == PT_STACK)
9caef960 7228 sa_size = 14;
7229 }
7230 else if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7231 {
7232 /* Start by assuming we can use a register procedure if we don't
7233 make any calls (REG_RA not used) or need to save any
7234 registers and a stack procedure if we do. */
b19d7ab1 7235 if ((mask[0] >> REG_RA) & 1)
7236 alpha_procedure_type = PT_STACK;
7237 else if (get_frame_size() != 0)
7238 alpha_procedure_type = PT_REGISTER;
7239 else
7240 alpha_procedure_type = PT_NULL;
5aae9d06 7241
2ab60bb1 7242 /* Don't reserve space for saving FP & RA yet. Do that later after we've
5aae9d06 7243 made the final decision on stack procedure vs register procedure. */
b19d7ab1 7244 if (alpha_procedure_type == PT_STACK)
2ab60bb1 7245 sa_size -= 2;
b9a5aa8e 7246
7247 /* Decide whether to refer to objects off our PV via FP or PV.
7248 If we need FP for something else or if we receive a nonlocal
7249 goto (which expects PV to contain the value), we must use PV.
7250 Otherwise, start by assuming we can use FP. */
b19d7ab1 7251
7252 vms_base_regno
7253 = (frame_pointer_needed
18d50ae6 7254 || cfun->has_nonlocal_label
b19d7ab1 7255 || alpha_procedure_type == PT_STACK
abe32cce 7256 || crtl->outgoing_args_size)
b19d7ab1 7257 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
b9a5aa8e 7258
7259 /* If we want to copy PV into FP, we need to find some register
7260 in which to save FP. */
7261
7262 vms_save_fp_regno = -1;
7263 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7264 for (i = 0; i < 32; i++)
3072d30e 7265 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
b9a5aa8e 7266 vms_save_fp_regno = i;
7267
b19d7ab1 7268 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7269 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7270 else if (alpha_procedure_type == PT_NULL)
7271 vms_base_regno = REG_PV;
b9a5aa8e 7272
7273 /* Stack unwinding should be done via FP unless we use it for PV. */
7274 vms_unwind_regno = (vms_base_regno == REG_PV
7275 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7276
7277 /* If this is a stack procedure, allow space for saving FP and RA. */
b19d7ab1 7278 if (alpha_procedure_type == PT_STACK)
b9a5aa8e 7279 sa_size += 2;
7280 }
7281 else
7282 {
b9a5aa8e 7283 /* Our size must be even (multiple of 16 bytes). */
7284 if (sa_size & 1)
7285 sa_size++;
7286 }
8df4a58b 7287
7288 return sa_size * 8;
7289}
7290
4310aa50 7291/* Define the offset between two registers, one to be eliminated,
7292 and the other its replacement, at the start of a routine. */
7293
7294HOST_WIDE_INT
92643d95 7295alpha_initial_elimination_offset (unsigned int from,
7296 unsigned int to ATTRIBUTE_UNUSED)
4310aa50 7297{
7298 HOST_WIDE_INT ret;
7299
7300 ret = alpha_sa_size ();
abe32cce 7301 ret += ALPHA_ROUND (crtl->outgoing_args_size);
4310aa50 7302
4d10b463 7303 switch (from)
7304 {
7305 case FRAME_POINTER_REGNUM:
7306 break;
7307
7308 case ARG_POINTER_REGNUM:
7309 ret += (ALPHA_ROUND (get_frame_size ()
abe32cce 7310 + crtl->args.pretend_args_size)
7311 - crtl->args.pretend_args_size);
4d10b463 7312 break;
7313
7314 default:
7315 gcc_unreachable ();
7316 }
4310aa50 7317
7318 return ret;
7319}
7320
8df4a58b 7321int
92643d95 7322alpha_pv_save_size (void)
8df4a58b 7323{
7324 alpha_sa_size ();
b19d7ab1 7325 return alpha_procedure_type == PT_STACK ? 8 : 0;
8df4a58b 7326}
7327
7328int
92643d95 7329alpha_using_fp (void)
8df4a58b 7330{
7331 alpha_sa_size ();
b9a5aa8e 7332 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
8df4a58b 7333}
7334
1467e953 7335#if TARGET_ABI_OPEN_VMS
2d280039 7336
cd819d2f 7337static const struct attribute_spec vms_attribute_table[] =
bf2a98b3 7338{
e3c541f0 7339 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
c64a8830 7340 { "overlaid", 0, 0, true, false, false, NULL },
7341 { "global", 0, 0, true, false, false, NULL },
7342 { "initialize", 0, 0, true, false, false, NULL },
7343 { NULL, 0, 0, false, false, false, NULL }
e3c541f0 7344};
bf2a98b3 7345
2d280039 7346#endif
7347
1f0ce6a6 7348static int
92643d95 7349find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
1f0ce6a6 7350{
a3859c0f 7351 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7352}
7353
7354int
92643d95 7355alpha_find_lo_sum_using_gp (rtx insn)
a3859c0f 7356{
7357 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
1f0ce6a6 7358}
7359
b9a5aa8e 7360static int
92643d95 7361alpha_does_function_need_gp (void)
b9a5aa8e 7362{
7363 rtx insn;
bf2a98b3 7364
9caef960 7365 /* The GP being variable is an OSF abi thing. */
7366 if (! TARGET_ABI_OSF)
b9a5aa8e 7367 return 0;
bf2a98b3 7368
008fdc59 7369 /* We need the gp to load the address of __mcount. */
18d50ae6 7370 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
b9a5aa8e 7371 return 1;
0e0a0e7a 7372
008fdc59 7373 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
9247818a 7374 if (cfun->is_thunk)
2cf1388a 7375 return 1;
2cf1388a 7376
008fdc59 7377 /* The nonlocal receiver pattern assumes that the gp is valid for
7378 the nested function. Reasonable because it's almost always set
7379 correctly already. For the cases where that's wrong, make sure
7380 the nested function loads its gp on entry. */
18d50ae6 7381 if (crtl->has_nonlocal_goto)
008fdc59 7382 return 1;
7383
9e7454d0 7384 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
b9a5aa8e 7385 Even if we are a static function, we still need to do this in case
7386 our address is taken and passed to something like qsort. */
bf2a98b3 7387
b9a5aa8e 7388 push_topmost_sequence ();
7389 insn = get_insns ();
7390 pop_topmost_sequence ();
8df4a58b 7391
b9a5aa8e 7392 for (; insn; insn = NEXT_INSN (insn))
9204e736 7393 if (INSN_P (insn)
449b6e20 7394 && ! JUMP_TABLE_DATA_P (insn)
b9a5aa8e 7395 && GET_CODE (PATTERN (insn)) != USE
a3859c0f 7396 && GET_CODE (PATTERN (insn)) != CLOBBER
7397 && get_attr_usegp (insn))
7398 return 1;
bf2a98b3 7399
b9a5aa8e 7400 return 0;
bf2a98b3 7401}
7402
7d73bc2a 7403\f
5a965225 7404/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7405 sequences. */
7406
7407static rtx
92643d95 7408set_frame_related_p (void)
5a965225 7409{
31d3e01c 7410 rtx seq = get_insns ();
7411 rtx insn;
7412
5a965225 7413 end_sequence ();
7414
31d3e01c 7415 if (!seq)
7416 return NULL_RTX;
7417
7418 if (INSN_P (seq))
5a965225 7419 {
31d3e01c 7420 insn = seq;
7421 while (insn != NULL_RTX)
7422 {
7423 RTX_FRAME_RELATED_P (insn) = 1;
7424 insn = NEXT_INSN (insn);
7425 }
7426 seq = emit_insn (seq);
5a965225 7427 }
7428 else
7429 {
7430 seq = emit_insn (seq);
7431 RTX_FRAME_RELATED_P (seq) = 1;
5a965225 7432 }
31d3e01c 7433 return seq;
5a965225 7434}
7435
7436#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7437
fb0de38e 7438/* Generates a store with the proper unwind info attached. VALUE is
183f1993 7439 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
fb0de38e 7440 contains SP+FRAME_BIAS, and that is the unwind info that should be
7441 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7442 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7443
7444static void
7445emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7446 HOST_WIDE_INT base_ofs, rtx frame_reg)
7447{
7448 rtx addr, mem, insn;
7449
7450 addr = plus_constant (base_reg, base_ofs);
7451 mem = gen_rtx_MEM (DImode, addr);
7452 set_mem_alias_set (mem, alpha_sr_alias_set);
7453
7454 insn = emit_move_insn (mem, value);
7455 RTX_FRAME_RELATED_P (insn) = 1;
7456
7457 if (frame_bias || value != frame_reg)
7458 {
7459 if (frame_bias)
7460 {
7461 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7462 mem = gen_rtx_MEM (DImode, addr);
7463 }
7464
0a48089c 7465 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7466 gen_rtx_SET (VOIDmode, mem, frame_reg));
fb0de38e 7467 }
7468}
7469
7470static void
7471emit_frame_store (unsigned int regno, rtx base_reg,
7472 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7473{
7474 rtx reg = gen_rtx_REG (DImode, regno);
7475 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7476}
7477
bf2a98b3 7478/* Write function prologue. */
7479
8df4a58b 7480/* On vms we have two kinds of functions:
7481
7482 - stack frame (PROC_STACK)
7483 these are 'normal' functions with local vars and which are
7484 calling other functions
7485 - register frame (PROC_REGISTER)
7486 keeps all data in registers, needs no stack
7487
7488 We must pass this to the assembler so it can generate the
7489 proper pdsc (procedure descriptor)
7490 This is done with the '.pdesc' command.
7491
b9a5aa8e 7492 On not-vms, we don't really differentiate between the two, as we can
7493 simply allocate stack without saving registers. */
8df4a58b 7494
7495void
92643d95 7496alpha_expand_prologue (void)
8df4a58b 7497{
b9a5aa8e 7498 /* Registers to save. */
8df4a58b 7499 unsigned long imask = 0;
7500 unsigned long fmask = 0;
7501 /* Stack space needed for pushing registers clobbered by us. */
7502 HOST_WIDE_INT sa_size;
7503 /* Complete stack size needed. */
7504 HOST_WIDE_INT frame_size;
7505 /* Offset from base reg to register save area. */
b9a5aa8e 7506 HOST_WIDE_INT reg_offset;
fb0de38e 7507 rtx sa_reg;
8df4a58b 7508 int i;
7509
7510 sa_size = alpha_sa_size ();
8df4a58b 7511
b9a5aa8e 7512 frame_size = get_frame_size ();
1467e953 7513 if (TARGET_ABI_OPEN_VMS)
9e7454d0 7514 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7515 + (alpha_procedure_type == PT_STACK ? 8 : 0)
b9a5aa8e 7516 + frame_size
abe32cce 7517 + crtl->args.pretend_args_size);
9caef960 7518 else if (TARGET_ABI_UNICOSMK)
7519 /* We have to allocate space for the DSIB if we generate a frame. */
7520 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7521 + (alpha_procedure_type == PT_STACK ? 48 : 0))
9caef960 7522 + ALPHA_ROUND (frame_size
abe32cce 7523 + crtl->outgoing_args_size);
b9a5aa8e 7524 else
abe32cce 7525 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
b9a5aa8e 7526 + sa_size
7527 + ALPHA_ROUND (frame_size
abe32cce 7528 + crtl->args.pretend_args_size));
8df4a58b 7529
1467e953 7530 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7531 reg_offset = 8;
7532 else
abe32cce 7533 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8df4a58b 7534
b9a5aa8e 7535 alpha_sa_mask (&imask, &fmask);
8df4a58b 7536
a314eb5e 7537 /* Emit an insn to reload GP, if needed. */
1467e953 7538 if (TARGET_ABI_OSF)
a314eb5e 7539 {
7540 alpha_function_needs_gp = alpha_does_function_need_gp ();
7541 if (alpha_function_needs_gp)
7542 emit_insn (gen_prologue_ldgp ());
7543 }
7544
30dceb30 7545 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7546 the call to mcount ourselves, rather than having the linker do it
7547 magically in response to -pg. Since _mcount has special linkage,
7548 don't represent the call as a call. */
18d50ae6 7549 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
30dceb30 7550 emit_insn (gen_prologue_mcount ());
9caef960 7551
7552 if (TARGET_ABI_UNICOSMK)
7553 unicosmk_gen_dsib (&imask);
7554
8df4a58b 7555 /* Adjust the stack by the frame size. If the frame size is > 4096
7556 bytes, we need to be sure we probe somewhere in the first and last
7557 4096 bytes (we can probably get away without the latter test) and
7558 every 8192 bytes in between. If the frame size is > 32768, we
7559 do this in a loop. Otherwise, we generate the explicit probe
9e7454d0 7560 instructions.
8df4a58b 7561
7562 Note that we are only allowed to adjust sp once in the prologue. */
7563
b9a5aa8e 7564 if (frame_size <= 32768)
8df4a58b 7565 {
7566 if (frame_size > 4096)
7567 {
baf8b2cc 7568 int probed;
8df4a58b 7569
baf8b2cc 7570 for (probed = 4096; probed < frame_size; probed += 8192)
9caef960 7571 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7572 ? -probed + 64
7573 : -probed)));
8df4a58b 7574
7575 /* We only have to do this probe if we aren't saving registers. */
baf8b2cc 7576 if (sa_size == 0 && frame_size > probed - 4096)
b9a5aa8e 7577 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
8df4a58b 7578 }
7579
7580 if (frame_size != 0)
205b281f 7581 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9caef960 7582 GEN_INT (TARGET_ABI_UNICOSMK
7583 ? -frame_size + 64
7584 : -frame_size))));
8df4a58b 7585 }
7586 else
7587 {
b9a5aa8e 7588 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
8df4a58b 7589 number of 8192 byte blocks to probe. We then probe each block
7590 in the loop and then set SP to the proper location. If the
7591 amount remaining is > 4096, we have to do one more probe if we
7592 are not saving any registers. */
7593
7594 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7595 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
b9a5aa8e 7596 rtx ptr = gen_rtx_REG (DImode, 22);
7597 rtx count = gen_rtx_REG (DImode, 23);
cd28cb76 7598 rtx seq;
8df4a58b 7599
b9a5aa8e 7600 emit_move_insn (count, GEN_INT (blocks));
9caef960 7601 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7602 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
8df4a58b 7603
b9a5aa8e 7604 /* Because of the difficulty in emitting a new basic block this
7605 late in the compilation, generate the loop as a single insn. */
7606 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
8df4a58b 7607
7608 if (leftover > 4096 && sa_size == 0)
b9a5aa8e 7609 {
7610 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7611 MEM_VOLATILE_P (last) = 1;
7612 emit_move_insn (last, const0_rtx);
7613 }
8df4a58b 7614
1467e953 7615 if (TARGET_ABI_WINDOWS_NT)
f88f2646 7616 {
7617 /* For NT stack unwind (done by 'reverse execution'), it's
7618 not OK to take the result of a loop, even though the value
7619 is already in ptr, so we reload it via a single operation
9e7454d0 7620 and subtract it to sp.
cd28cb76 7621
7622 Yes, that's correct -- we have to reload the whole constant
df9e12ce 7623 into a temporary via ldah+lda then subtract from sp. */
f88f2646 7624
7625 HOST_WIDE_INT lo, hi;
05bea6dd 7626 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7627 hi = frame_size - lo;
5a965225 7628
cd28cb76 7629 emit_move_insn (ptr, GEN_INT (hi));
df9e12ce 7630 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
cd28cb76 7631 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7632 ptr));
f88f2646 7633 }
7634 else
7635 {
f88f2646 7636 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7637 GEN_INT (-leftover)));
f88f2646 7638 }
cd28cb76 7639
7640 /* This alternative is special, because the DWARF code cannot
7641 possibly intuit through the loop above. So we invent this
7642 note it looks at instead. */
7643 RTX_FRAME_RELATED_P (seq) = 1;
0a48089c 7644 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7645 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7646 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7647 GEN_INT (TARGET_ABI_UNICOSMK
7648 ? -frame_size + 64
7649 : -frame_size))));
8df4a58b 7650 }
7651
9caef960 7652 if (!TARGET_ABI_UNICOSMK)
8df4a58b 7653 {
fb0de38e 7654 HOST_WIDE_INT sa_bias = 0;
7655
9caef960 7656 /* Cope with very large offsets to the register save area. */
7657 sa_reg = stack_pointer_rtx;
7658 if (reg_offset + sa_size > 0x8000)
7659 {
7660 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
fb0de38e 7661 rtx sa_bias_rtx;
8df4a58b 7662
9caef960 7663 if (low + sa_size <= 0x8000)
fb0de38e 7664 sa_bias = reg_offset - low, reg_offset = low;
9e7454d0 7665 else
fb0de38e 7666 sa_bias = reg_offset, reg_offset = 0;
8df4a58b 7667
9caef960 7668 sa_reg = gen_rtx_REG (DImode, 24);
fb0de38e 7669 sa_bias_rtx = GEN_INT (sa_bias);
7670
7671 if (add_operand (sa_bias_rtx, DImode))
7672 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7673 else
7674 {
7675 emit_move_insn (sa_reg, sa_bias_rtx);
7676 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7677 }
9caef960 7678 }
9e7454d0 7679
9caef960 7680 /* Save regs in stack order. Beginning with VMS PV. */
b19d7ab1 7681 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
fb0de38e 7682 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
8df4a58b 7683
9caef960 7684 /* Save register RA next. */
df7d0d23 7685 if (imask & (1UL << REG_RA))
9caef960 7686 {
fb0de38e 7687 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
df7d0d23 7688 imask &= ~(1UL << REG_RA);
9caef960 7689 reg_offset += 8;
7690 }
8df4a58b 7691
9caef960 7692 /* Now save any other registers required to be saved. */
c49ad9ef 7693 for (i = 0; i < 31; i++)
df7d0d23 7694 if (imask & (1UL << i))
9caef960 7695 {
fb0de38e 7696 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
9caef960 7697 reg_offset += 8;
7698 }
8df4a58b 7699
c49ad9ef 7700 for (i = 0; i < 31; i++)
df7d0d23 7701 if (fmask & (1UL << i))
9caef960 7702 {
fb0de38e 7703 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
9caef960 7704 reg_offset += 8;
7705 }
7706 }
b19d7ab1 7707 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
9caef960 7708 {
7709 /* The standard frame on the T3E includes space for saving registers.
7710 We just have to use it. We don't have to save the return address and
7711 the old frame pointer here - they are saved in the DSIB. */
7712
7713 reg_offset = -56;
7714 for (i = 9; i < 15; i++)
df7d0d23 7715 if (imask & (1UL << i))
9caef960 7716 {
fb0de38e 7717 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
9caef960 7718 reg_offset -= 8;
7719 }
7720 for (i = 2; i < 10; i++)
df7d0d23 7721 if (fmask & (1UL << i))
9caef960 7722 {
fb0de38e 7723 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
9caef960 7724 reg_offset -= 8;
7725 }
7726 }
8df4a58b 7727
1467e953 7728 if (TARGET_ABI_OPEN_VMS)
8df4a58b 7729 {
b19d7ab1 7730 if (alpha_procedure_type == PT_REGISTER)
7731 /* Register frame procedures save the fp.
7732 ?? Ought to have a dwarf2 save for this. */
6d50e356 7733 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7734 hard_frame_pointer_rtx);
8df4a58b 7735
b19d7ab1 7736 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
6d50e356 7737 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7738 gen_rtx_REG (DImode, REG_PV)));
8df4a58b 7739
b19d7ab1 7740 if (alpha_procedure_type != PT_NULL
7741 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
205b281f 7742 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8df4a58b 7743
b9a5aa8e 7744 /* If we have to allocate space for outgoing args, do it now. */
abe32cce 7745 if (crtl->outgoing_args_size != 0)
81a5b286 7746 {
7747 rtx seq
9e7454d0 7748 = emit_move_insn (stack_pointer_rtx,
81a5b286 7749 plus_constant
7750 (hard_frame_pointer_rtx,
7751 - (ALPHA_ROUND
abe32cce 7752 (crtl->outgoing_args_size))));
9e7454d0 7753
81a5b286 7754 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7755 if ! frame_pointer_needed. Setting the bit will change the CFA
7756 computation rule to use sp again, which would be wrong if we had
7757 frame_pointer_needed, as this means sp might move unpredictably
7758 later on.
7759
7760 Also, note that
7761 frame_pointer_needed
7762 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7763 and
abe32cce 7764 crtl->outgoing_args_size != 0
81a5b286 7765 => alpha_procedure_type != PT_NULL,
7766
7767 so when we are not setting the bit here, we are guaranteed to
5910bb95 7768 have emitted an FRP frame pointer update just before. */
81a5b286 7769 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7770 }
b9a5aa8e 7771 }
9caef960 7772 else if (!TARGET_ABI_UNICOSMK)
b9a5aa8e 7773 {
7774 /* If we need a frame pointer, set it from the stack pointer. */
7775 if (frame_pointer_needed)
7776 {
7777 if (TARGET_CAN_FAULT_IN_PROLOGUE)
5a965225 7778 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8df4a58b 7779 else
205b281f 7780 /* This must always be the last instruction in the
7781 prologue, thus we emit a special move + clobber. */
5a965225 7782 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7783 stack_pointer_rtx, sa_reg)));
8df4a58b 7784 }
8df4a58b 7785 }
7786
b9a5aa8e 7787 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7788 the prologue, for exception handling reasons, we cannot do this for
7789 any insn that might fault. We could prevent this for mems with a
7790 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7791 have to prevent all such scheduling with a blockage.
8df4a58b 7792
9e7454d0 7793 Linux, on the other hand, never bothered to implement OSF/1's
b9a5aa8e 7794 exception handling, and so doesn't care about such things. Anyone
7795 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8df4a58b 7796
b9a5aa8e 7797 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7798 emit_insn (gen_blockage ());
1fce2e8a 7799}
7800
e3b8b697 7801/* Count the number of .file directives, so that .loc is up to date. */
d0de818d 7802int num_source_filenames = 0;
e3b8b697 7803
2cf1388a 7804/* Output the textual info surrounding the prologue. */
8df4a58b 7805
b9a5aa8e 7806void
92643d95 7807alpha_start_function (FILE *file, const char *fnname,
7808 tree decl ATTRIBUTE_UNUSED)
0c0464e6 7809{
b9a5aa8e 7810 unsigned long imask = 0;
7811 unsigned long fmask = 0;
7812 /* Stack space needed for pushing registers clobbered by us. */
7813 HOST_WIDE_INT sa_size;
7814 /* Complete stack size needed. */
f9e9d81d 7815 unsigned HOST_WIDE_INT frame_size;
6dbdfeeb 7816 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7817 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7818 ? 524288
7819 : 1UL << 31;
b9a5aa8e 7820 /* Offset from base reg to register save area. */
7821 HOST_WIDE_INT reg_offset;
2cf1388a 7822 char *entry_label = (char *) alloca (strlen (fnname) + 6);
b9a5aa8e 7823 int i;
0c0464e6 7824
9caef960 7825 /* Don't emit an extern directive for functions defined in the same file. */
7826 if (TARGET_ABI_UNICOSMK)
7827 {
7828 tree name_tree;
7829 name_tree = get_identifier (fnname);
7830 TREE_ASM_WRITTEN (name_tree) = 1;
7831 }
7832
a314eb5e 7833 alpha_fnname = fnname;
b9a5aa8e 7834 sa_size = alpha_sa_size ();
0c0464e6 7835
b9a5aa8e 7836 frame_size = get_frame_size ();
1467e953 7837 if (TARGET_ABI_OPEN_VMS)
9e7454d0 7838 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7839 + (alpha_procedure_type == PT_STACK ? 8 : 0)
b9a5aa8e 7840 + frame_size
abe32cce 7841 + crtl->args.pretend_args_size);
9caef960 7842 else if (TARGET_ABI_UNICOSMK)
7843 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7844 + (alpha_procedure_type == PT_STACK ? 48 : 0))
9caef960 7845 + ALPHA_ROUND (frame_size
abe32cce 7846 + crtl->outgoing_args_size);
b9a5aa8e 7847 else
abe32cce 7848 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
b9a5aa8e 7849 + sa_size
7850 + ALPHA_ROUND (frame_size
abe32cce 7851 + crtl->args.pretend_args_size));
0c0464e6 7852
1467e953 7853 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7854 reg_offset = 8;
7855 else
abe32cce 7856 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
0c0464e6 7857
b9a5aa8e 7858 alpha_sa_mask (&imask, &fmask);
bf2a98b3 7859
0e0a0e7a 7860 /* Ecoff can handle multiple .file directives, so put out file and lineno.
449b7f2d 7861 We have to do that before the .ent directive as we cannot switch
7862 files within procedures with native ecoff because line numbers are
7863 linked to procedure descriptors.
7864 Outputting the lineno helps debugging of one line functions as they
7865 would otherwise get no line number at all. Please note that we would
01cc3b75 7866 like to put out last_linenum from final.c, but it is not accessible. */
449b7f2d 7867
7868 if (write_symbols == SDB_DEBUG)
7869 {
9caef960 7870#ifdef ASM_OUTPUT_SOURCE_FILENAME
346064d9 7871 ASM_OUTPUT_SOURCE_FILENAME (file,
7872 DECL_SOURCE_FILE (current_function_decl));
9caef960 7873#endif
e3b8b697 7874#ifdef SDB_OUTPUT_SOURCE_LINE
449b7f2d 7875 if (debug_info_level != DINFO_LEVEL_TERSE)
e3b8b697 7876 SDB_OUTPUT_SOURCE_LINE (file,
7877 DECL_SOURCE_LINE (current_function_decl));
9caef960 7878#endif
449b7f2d 7879 }
7880
b9a5aa8e 7881 /* Issue function start and label. */
9caef960 7882 if (TARGET_ABI_OPEN_VMS
7883 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
f1fe649e 7884 {
b9a5aa8e 7885 fputs ("\t.ent ", file);
2cf1388a 7886 assemble_name (file, fnname);
b9a5aa8e 7887 putc ('\n', file);
a314eb5e 7888
7889 /* If the function needs GP, we'll write the "..ng" label there.
7890 Otherwise, do it here. */
961d6ddd 7891 if (TARGET_ABI_OSF
7892 && ! alpha_function_needs_gp
9247818a 7893 && ! cfun->is_thunk)
a314eb5e 7894 {
7895 putc ('$', file);
7896 assemble_name (file, fnname);
7897 fputs ("..ng:\n", file);
7898 }
f1fe649e 7899 }
449b7f2d 7900
2cf1388a 7901 strcpy (entry_label, fnname);
1467e953 7902 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7903 strcat (entry_label, "..en");
9caef960 7904
7905 /* For public functions, the label must be globalized by appending an
7906 additional colon. */
7907 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7908 strcat (entry_label, ":");
7909
b9a5aa8e 7910 ASM_OUTPUT_LABEL (file, entry_label);
7911 inside_function = TRUE;
449b7f2d 7912
1467e953 7913 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7914 fprintf (file, "\t.base $%d\n", vms_base_regno);
bf2a98b3 7915
9caef960 7916 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
b9a5aa8e 7917 && !flag_inhibit_size_directive)
9c0e5703 7918 {
b9a5aa8e 7919 /* Set flags in procedure descriptor to request IEEE-conformant
7920 math-library routines. The value we set it to is PDSC_EXC_IEEE
65abff06 7921 (/usr/include/pdsc.h). */
b9a5aa8e 7922 fputs ("\t.eflag 48\n", file);
9c0e5703 7923 }
bf2a98b3 7924
b9a5aa8e 7925 /* Set up offsets to alpha virtual arg/local debugging pointer. */
abe32cce 7926 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
b9a5aa8e 7927 alpha_arg_offset = -frame_size + 48;
cb015df5 7928
b9a5aa8e 7929 /* Describe our frame. If the frame size is larger than an integer,
7930 print it as zero to avoid an assembler error. We won't be
7931 properly describing such a frame, but that's the best we can do. */
9caef960 7932 if (TARGET_ABI_UNICOSMK)
7933 ;
7934 else if (TARGET_ABI_OPEN_VMS)
4840a03a 7935 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7936 HOST_WIDE_INT_PRINT_DEC "\n",
7937 vms_unwind_regno,
7938 frame_size >= (1UL << 31) ? 0 : frame_size,
7939 reg_offset);
b9a5aa8e 7940 else if (!flag_inhibit_size_directive)
4840a03a 7941 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7942 (frame_pointer_needed
7943 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
6dbdfeeb 7944 frame_size >= max_frame_size ? 0 : frame_size,
abe32cce 7945 crtl->args.pretend_args_size);
15d5236f 7946
b9a5aa8e 7947 /* Describe which registers were spilled. */
9caef960 7948 if (TARGET_ABI_UNICOSMK)
7949 ;
7950 else if (TARGET_ABI_OPEN_VMS)
15d5236f 7951 {
b9a5aa8e 7952 if (imask)
9caef960 7953 /* ??? Does VMS care if mask contains ra? The old code didn't
b9a5aa8e 7954 set it, so I don't here. */
df7d0d23 7955 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
b9a5aa8e 7956 if (fmask)
769ea120 7957 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
b19d7ab1 7958 if (alpha_procedure_type == PT_REGISTER)
b9a5aa8e 7959 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7960 }
7961 else if (!flag_inhibit_size_directive)
7962 {
7963 if (imask)
15d5236f 7964 {
4840a03a 7965 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
6dbdfeeb 7966 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
b9a5aa8e 7967
7968 for (i = 0; i < 32; ++i)
df7d0d23 7969 if (imask & (1UL << i))
b9a5aa8e 7970 reg_offset += 8;
15d5236f 7971 }
b9a5aa8e 7972
7973 if (fmask)
4840a03a 7974 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
6dbdfeeb 7975 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
bf2a98b3 7976 }
7977
1467e953 7978#if TARGET_ABI_OPEN_VMS
6cde52a2 7979 /* Ifdef'ed cause link_section are only available then. */
2f14b1f9 7980 switch_to_section (readonly_data_section);
b9a5aa8e 7981 fprintf (file, "\t.align 3\n");
2cf1388a 7982 assemble_name (file, fnname); fputs ("..na:\n", file);
b9a5aa8e 7983 fputs ("\t.ascii \"", file);
2cf1388a 7984 assemble_name (file, fnname);
b9a5aa8e 7985 fputs ("\\0\"\n", file);
2cf1388a 7986 alpha_need_linkage (fnname, 1);
2f14b1f9 7987 switch_to_section (text_section);
b9a5aa8e 7988#endif
7989}
bf2a98b3 7990
b9a5aa8e 7991/* Emit the .prologue note at the scheduled end of the prologue. */
16b3392b 7992
85ae73e8 7993static void
92643d95 7994alpha_output_function_end_prologue (FILE *file)
b9a5aa8e 7995{
9caef960 7996 if (TARGET_ABI_UNICOSMK)
7997 ;
7998 else if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7999 fputs ("\t.prologue\n", file);
1467e953 8000 else if (TARGET_ABI_WINDOWS_NT)
b9a5aa8e 8001 fputs ("\t.prologue 0\n", file);
8002 else if (!flag_inhibit_size_directive)
961d6ddd 8003 fprintf (file, "\t.prologue %d\n",
9247818a 8004 alpha_function_needs_gp || cfun->is_thunk);
bf2a98b3 8005}
8006
8007/* Write function epilogue. */
8008
9e7454d0 8009/* ??? At some point we will want to support full unwind, and so will
5a965225 8010 need to mark the epilogue as well. At the moment, we just confuse
8011 dwarf2out. */
8012#undef FRP
8013#define FRP(exp) exp
8014
bf2a98b3 8015void
92643d95 8016alpha_expand_epilogue (void)
bf2a98b3 8017{
b9a5aa8e 8018 /* Registers to save. */
8019 unsigned long imask = 0;
8020 unsigned long fmask = 0;
8021 /* Stack space needed for pushing registers clobbered by us. */
8022 HOST_WIDE_INT sa_size;
8023 /* Complete stack size needed. */
8024 HOST_WIDE_INT frame_size;
8025 /* Offset from base reg to register save area. */
8026 HOST_WIDE_INT reg_offset;
8027 int fp_is_frame_pointer, fp_offset;
8028 rtx sa_reg, sa_reg_exp = NULL;
849674a3 8029 rtx sp_adj1, sp_adj2, mem;
11016d99 8030 rtx eh_ofs;
bf2a98b3 8031 int i;
8032
b9a5aa8e 8033 sa_size = alpha_sa_size ();
bf2a98b3 8034
b9a5aa8e 8035 frame_size = get_frame_size ();
1467e953 8036 if (TARGET_ABI_OPEN_VMS)
9e7454d0 8037 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 8038 + (alpha_procedure_type == PT_STACK ? 8 : 0)
b9a5aa8e 8039 + frame_size
abe32cce 8040 + crtl->args.pretend_args_size);
9caef960 8041 else if (TARGET_ABI_UNICOSMK)
8042 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 8043 + (alpha_procedure_type == PT_STACK ? 48 : 0))
9caef960 8044 + ALPHA_ROUND (frame_size
abe32cce 8045 + crtl->outgoing_args_size);
b9a5aa8e 8046 else
abe32cce 8047 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
b9a5aa8e 8048 + sa_size
8049 + ALPHA_ROUND (frame_size
abe32cce 8050 + crtl->args.pretend_args_size));
bf2a98b3 8051
1467e953 8052 if (TARGET_ABI_OPEN_VMS)
b19d7ab1 8053 {
8054 if (alpha_procedure_type == PT_STACK)
8055 reg_offset = 8;
8056 else
8057 reg_offset = 0;
8058 }
b9a5aa8e 8059 else
abe32cce 8060 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
b9a5aa8e 8061
8062 alpha_sa_mask (&imask, &fmask);
8063
b19d7ab1 8064 fp_is_frame_pointer
8065 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8066 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
29768226 8067 fp_offset = 0;
8068 sa_reg = stack_pointer_rtx;
b9a5aa8e 8069
18d50ae6 8070 if (crtl->calls_eh_return)
c92c328f 8071 eh_ofs = EH_RETURN_STACKADJ_RTX;
8072 else
8073 eh_ofs = NULL_RTX;
8074
9caef960 8075 if (!TARGET_ABI_UNICOSMK && sa_size)
b9a5aa8e 8076 {
8077 /* If we have a frame pointer, restore SP from it. */
1467e953 8078 if ((TARGET_ABI_OPEN_VMS
b9a5aa8e 8079 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
1467e953 8080 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
205b281f 8081 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
15d5236f 8082
b9a5aa8e 8083 /* Cope with very large offsets to the register save area. */
b9a5aa8e 8084 if (reg_offset + sa_size > 0x8000)
bf2a98b3 8085 {
b9a5aa8e 8086 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8087 HOST_WIDE_INT bias;
8088
8089 if (low + sa_size <= 0x8000)
8090 bias = reg_offset - low, reg_offset = low;
9e7454d0 8091 else
b9a5aa8e 8092 bias = reg_offset, reg_offset = 0;
8093
8094 sa_reg = gen_rtx_REG (DImode, 22);
8095 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8096
5a965225 8097 FRP (emit_move_insn (sa_reg, sa_reg_exp));
bf2a98b3 8098 }
9e7454d0 8099
65abff06 8100 /* Restore registers in order, excepting a true frame pointer. */
bf2a98b3 8101
c92c328f 8102 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
11016d99 8103 if (! eh_ofs)
ab6ab77e 8104 set_mem_alias_set (mem, alpha_sr_alias_set);
c92c328f 8105 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8106
b9a5aa8e 8107 reg_offset += 8;
df7d0d23 8108 imask &= ~(1UL << REG_RA);
16b3392b 8109
c49ad9ef 8110 for (i = 0; i < 31; ++i)
df7d0d23 8111 if (imask & (1UL << i))
bf2a98b3 8112 {
b9a5aa8e 8113 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
16b3392b 8114 fp_offset = reg_offset;
8115 else
b9a5aa8e 8116 {
849674a3 8117 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
ab6ab77e 8118 set_mem_alias_set (mem, alpha_sr_alias_set);
849674a3 8119 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
b9a5aa8e 8120 }
bf2a98b3 8121 reg_offset += 8;
8122 }
8123
c49ad9ef 8124 for (i = 0; i < 31; ++i)
df7d0d23 8125 if (fmask & (1UL << i))
bf2a98b3 8126 {
849674a3 8127 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
ab6ab77e 8128 set_mem_alias_set (mem, alpha_sr_alias_set);
849674a3 8129 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
bf2a98b3 8130 reg_offset += 8;
8131 }
b9a5aa8e 8132 }
b19d7ab1 8133 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
9caef960 8134 {
8135 /* Restore callee-saved general-purpose registers. */
8136
8137 reg_offset = -56;
8138
8139 for (i = 9; i < 15; i++)
df7d0d23 8140 if (imask & (1UL << i))
9caef960 8141 {
8142 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8143 reg_offset));
8144 set_mem_alias_set (mem, alpha_sr_alias_set);
8145 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8146 reg_offset -= 8;
8147 }
8148
8149 for (i = 2; i < 10; i++)
df7d0d23 8150 if (fmask & (1UL << i))
9caef960 8151 {
8152 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8153 reg_offset));
8154 set_mem_alias_set (mem, alpha_sr_alias_set);
8155 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8156 reg_offset -= 8;
8157 }
8158
8159 /* Restore the return address from the DSIB. */
8160
8161 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8162 set_mem_alias_set (mem, alpha_sr_alias_set);
8163 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8164 }
bf2a98b3 8165
11016d99 8166 if (frame_size || eh_ofs)
b9a5aa8e 8167 {
ec37ccb4 8168 sp_adj1 = stack_pointer_rtx;
8169
11016d99 8170 if (eh_ofs)
ec37ccb4 8171 {
8172 sp_adj1 = gen_rtx_REG (DImode, 23);
8173 emit_move_insn (sp_adj1,
11016d99 8174 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
ec37ccb4 8175 }
8176
b9a5aa8e 8177 /* If the stack size is large, begin computation into a temporary
8178 register so as not to interfere with a potential fp restore,
8179 which must be consecutive with an SP restore. */
9caef960 8180 if (frame_size < 32768
18d50ae6 8181 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
ec37ccb4 8182 sp_adj2 = GEN_INT (frame_size);
9caef960 8183 else if (TARGET_ABI_UNICOSMK)
8184 {
8185 sp_adj1 = gen_rtx_REG (DImode, 23);
8186 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8187 sp_adj2 = const0_rtx;
8188 }
b9a5aa8e 8189 else if (frame_size < 0x40007fffL)
8190 {
8191 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8192
ec37ccb4 8193 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
b9a5aa8e 8194 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8195 sp_adj1 = sa_reg;
8196 else
8197 {
8198 sp_adj1 = gen_rtx_REG (DImode, 23);
5a965225 8199 FRP (emit_move_insn (sp_adj1, sp_adj2));
b9a5aa8e 8200 }
8201 sp_adj2 = GEN_INT (low);
8202 }
0e0a0e7a 8203 else
b9a5aa8e 8204 {
ec37ccb4 8205 rtx tmp = gen_rtx_REG (DImode, 23);
91bc47b0 8206 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8207 3, false));
ec37ccb4 8208 if (!sp_adj2)
b9a5aa8e 8209 {
8210 /* We can't drop new things to memory this late, afaik,
8211 so build it up by pieces. */
af792316 8212 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8213 -(frame_size < 0)));
4d10b463 8214 gcc_assert (sp_adj2);
b9a5aa8e 8215 }
b9a5aa8e 8216 }
bf2a98b3 8217
b9a5aa8e 8218 /* From now on, things must be in order. So emit blockages. */
8219
8220 /* Restore the frame pointer. */
9caef960 8221 if (TARGET_ABI_UNICOSMK)
8222 {
8223 emit_insn (gen_blockage ());
8224 mem = gen_rtx_MEM (DImode,
8225 plus_constant (hard_frame_pointer_rtx, -16));
8226 set_mem_alias_set (mem, alpha_sr_alias_set);
8227 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8228 }
8229 else if (fp_is_frame_pointer)
b9a5aa8e 8230 {
8231 emit_insn (gen_blockage ());
205b281f 8232 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
ab6ab77e 8233 set_mem_alias_set (mem, alpha_sr_alias_set);
849674a3 8234 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
b9a5aa8e 8235 }
1467e953 8236 else if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 8237 {
8238 emit_insn (gen_blockage ());
5a965225 8239 FRP (emit_move_insn (hard_frame_pointer_rtx,
8240 gen_rtx_REG (DImode, vms_save_fp_regno)));
b9a5aa8e 8241 }
8242
8243 /* Restore the stack pointer. */
8244 emit_insn (gen_blockage ());
9caef960 8245 if (sp_adj2 == const0_rtx)
8246 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8247 else
8248 FRP (emit_move_insn (stack_pointer_rtx,
8249 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
b9a5aa8e 8250 }
9e7454d0 8251 else
b9a5aa8e 8252 {
b19d7ab1 8253 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
b9a5aa8e 8254 {
8255 emit_insn (gen_blockage ());
5a965225 8256 FRP (emit_move_insn (hard_frame_pointer_rtx,
8257 gen_rtx_REG (DImode, vms_save_fp_regno)));
b9a5aa8e 8258 }
b19d7ab1 8259 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
9caef960 8260 {
8261 /* Decrement the frame pointer if the function does not have a
8262 frame. */
8263
8264 emit_insn (gen_blockage ());
8265 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
bcd9bd66 8266 hard_frame_pointer_rtx, constm1_rtx)));
9caef960 8267 }
bf2a98b3 8268 }
b9a5aa8e 8269}
cf73d31f 8270\f
b9a5aa8e 8271/* Output the rest of the textual info surrounding the epilogue. */
8272
8273void
92643d95 8274alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
b9a5aa8e 8275{
32a8f747 8276 rtx insn;
8277
8278 /* We output a nop after noreturn calls at the very end of the function to
8279 ensure that the return address always remains in the caller's code range,
8280 as not doing so might confuse unwinding engines. */
8281 insn = get_last_insn ();
8282 if (!INSN_P (insn))
8283 insn = prev_active_insn (insn);
c933fb42 8284 if (CALL_P (insn))
32a8f747 8285 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8286
292add68 8287#if TARGET_ABI_OSF
9247818a 8288 if (cfun->is_thunk)
292add68 8289 free_after_compilation (cfun);
8290#endif
8291
04b0d94a 8292#if TARGET_ABI_OPEN_VMS
8293 alpha_write_linkage (file, fnname, decl);
8294#endif
8295
bf2a98b3 8296 /* End the function. */
9caef960 8297 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
f1fe649e 8298 {
b9a5aa8e 8299 fputs ("\t.end ", file);
2cf1388a 8300 assemble_name (file, fnname);
b9a5aa8e 8301 putc ('\n', file);
f1fe649e 8302 }
449b7f2d 8303 inside_function = FALSE;
9c0e5703 8304
9caef960 8305 /* Output jump tables and the static subroutine information block. */
8306 if (TARGET_ABI_UNICOSMK)
8307 {
8308 unicosmk_output_ssib (file, fnname);
8309 unicosmk_output_deferred_case_vectors (file);
8310 }
bf2a98b3 8311}
961d6ddd 8312
6988553d 8313#if TARGET_ABI_OSF
8314/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
961d6ddd 8315
8316 In order to avoid the hordes of differences between generated code
8317 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8318 lots of code loading up large constants, generate rtl and emit it
8319 instead of going straight to text.
8320
8321 Not sure why this idea hasn't been explored before... */
8322
6988553d 8323static void
92643d95 8324alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8325 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8326 tree function)
961d6ddd 8327{
8328 HOST_WIDE_INT hi, lo;
8deb3959 8329 rtx this_rtx, insn, funexp;
961d6ddd 8330
9247818a 8331 gcc_assert (cfun->is_thunk);
292add68 8332
961d6ddd 8333 /* We always require a valid GP. */
8334 emit_insn (gen_prologue_ldgp ());
31b97e8f 8335 emit_note (NOTE_INSN_PROLOGUE_END);
961d6ddd 8336
8337 /* Find the "this" pointer. If the function returns a structure,
8338 the structure return pointer is in $16. */
45550790 8339 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8deb3959 8340 this_rtx = gen_rtx_REG (Pmode, 17);
961d6ddd 8341 else
8deb3959 8342 this_rtx = gen_rtx_REG (Pmode, 16);
961d6ddd 8343
8344 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8345 entire constant for the add. */
8346 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8347 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8348 if (hi + lo == delta)
8349 {
8350 if (hi)
8deb3959 8351 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
961d6ddd 8352 if (lo)
8deb3959 8353 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
961d6ddd 8354 }
8355 else
8356 {
8357 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8358 delta, -(delta < 0));
8deb3959 8359 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
961d6ddd 8360 }
8361
a19ec9da 8362 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8363 if (vcall_offset)
8364 {
8365 rtx tmp, tmp2;
8366
8367 tmp = gen_rtx_REG (Pmode, 0);
8deb3959 8368 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
a19ec9da 8369
8370 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8371 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8372 if (hi + lo == vcall_offset)
8373 {
8374 if (hi)
8375 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8376 }
8377 else
8378 {
8379 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8380 vcall_offset, -(vcall_offset < 0));
8381 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8382 lo = 0;
8383 }
8384 if (lo)
8385 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8386 else
8387 tmp2 = tmp;
8388 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8389
8deb3959 8390 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
a19ec9da 8391 }
8392
961d6ddd 8393 /* Generate a tail call to the target function. */
8394 if (! TREE_USED (function))
8395 {
8396 assemble_external (function);
8397 TREE_USED (function) = 1;
8398 }
8399 funexp = XEXP (DECL_RTL (function), 0);
8400 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8401 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8402 SIBLING_CALL_P (insn) = 1;
8403
8404 /* Run just enough of rest_of_compilation to get the insns emitted.
8405 There's not really enough bulk here to make other passes such as
8406 instruction scheduling worth while. Note that use_thunk calls
8407 assemble_start_function and assemble_end_function. */
8408 insn = get_insns ();
375c1c8a 8409 insn_locators_alloc ();
961d6ddd 8410 shorten_branches (insn);
8411 final_start_function (insn, file, 1);
4bf029b0 8412 final (insn, file, 1);
961d6ddd 8413 final_end_function ();
8414}
6988553d 8415#endif /* TARGET_ABI_OSF */
449b7f2d 8416\f
8417/* Debugging support. */
8418
8419#include "gstab.h"
8420
8421/* Count the number of sdb related labels are generated (to find block
8422 start and end boundaries). */
8423
8424int sdb_label_count = 0;
8425
449b7f2d 8426/* Name of the file containing the current function. */
8427
ace75b22 8428static const char *current_function_file = "";
449b7f2d 8429
8430/* Offsets to alpha virtual arg/local debugging pointers. */
8431
8432long alpha_arg_offset;
8433long alpha_auto_offset;
8434\f
8435/* Emit a new filename to a stream. */
8436
8437void
92643d95 8438alpha_output_filename (FILE *stream, const char *name)
449b7f2d 8439{
8440 static int first_time = TRUE;
449b7f2d 8441
8442 if (first_time)
8443 {
8444 first_time = FALSE;
8445 ++num_source_filenames;
8446 current_function_file = name;
8447 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8448 output_quoted_string (stream, name);
8449 fprintf (stream, "\n");
8450 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8451 fprintf (stream, "\t#@stabs\n");
8452 }
8453
8763f243 8454 else if (write_symbols == DBX_DEBUG)
e3b8b697 8455 /* dbxout.c will emit an appropriate .stabs directive. */
8456 return;
449b7f2d 8457
8458 else if (name != current_function_file
be3797c1 8459 && strcmp (name, current_function_file) != 0)
449b7f2d 8460 {
8461 if (inside_function && ! TARGET_GAS)
8462 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8463 else
8464 {
8465 ++num_source_filenames;
8466 current_function_file = name;
8467 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8468 }
8469
8470 output_quoted_string (stream, name);
8471 fprintf (stream, "\n");
8472 }
8473}
c4622276 8474\f
8475/* Structure to show the current status of registers and memory. */
8476
8477struct shadow_summary
8478{
8479 struct {
495c4a78 8480 unsigned int i : 31; /* Mask of int regs */
8481 unsigned int fp : 31; /* Mask of fp regs */
8482 unsigned int mem : 1; /* mem == imem | fpmem */
c4622276 8483 } used, defd;
8484};
8485
8486/* Summary the effects of expression X on the machine. Update SUM, a pointer
8487 to the summary structure. SET is nonzero if the insn is setting the
8488 object, otherwise zero. */
8489
8490static void
92643d95 8491summarize_insn (rtx x, struct shadow_summary *sum, int set)
c4622276 8492{
d2ca078f 8493 const char *format_ptr;
c4622276 8494 int i, j;
8495
8496 if (x == 0)
8497 return;
8498
8499 switch (GET_CODE (x))
8500 {
8501 /* ??? Note that this case would be incorrect if the Alpha had a
8502 ZERO_EXTRACT in SET_DEST. */
8503 case SET:
8504 summarize_insn (SET_SRC (x), sum, 0);
8505 summarize_insn (SET_DEST (x), sum, 1);
8506 break;
8507
8508 case CLOBBER:
8509 summarize_insn (XEXP (x, 0), sum, 1);
8510 break;
8511
8512 case USE:
8513 summarize_insn (XEXP (x, 0), sum, 0);
8514 break;
8515
a886cc41 8516 case ASM_OPERANDS:
8517 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8518 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8519 break;
8520
c4622276 8521 case PARALLEL:
3a5dbb5e 8522 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
c4622276 8523 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8524 break;
8525
a886cc41 8526 case SUBREG:
b9a5aa8e 8527 summarize_insn (SUBREG_REG (x), sum, 0);
8528 break;
a886cc41 8529
c4622276 8530 case REG:
8531 {
8532 int regno = REGNO (x);
f3d263a7 8533 unsigned long mask = ((unsigned long) 1) << (regno % 32);
c4622276 8534
8535 if (regno == 31 || regno == 63)
8536 break;
8537
8538 if (set)
8539 {
8540 if (regno < 32)
8541 sum->defd.i |= mask;
8542 else
8543 sum->defd.fp |= mask;
8544 }
8545 else
8546 {
8547 if (regno < 32)
8548 sum->used.i |= mask;
8549 else
8550 sum->used.fp |= mask;
8551 }
8552 }
8553 break;
8554
8555 case MEM:
8556 if (set)
8557 sum->defd.mem = 1;
8558 else
8559 sum->used.mem = 1;
8560
8561 /* Find the regs used in memory address computation: */
8562 summarize_insn (XEXP (x, 0), sum, 0);
8563 break;
8564
2d710b28 8565 case CONST_INT: case CONST_DOUBLE:
8566 case SYMBOL_REF: case LABEL_REF: case CONST:
5bdbf614 8567 case SCRATCH: case ASM_INPUT:
2d710b28 8568 break;
8569
c4622276 8570 /* Handle common unary and binary ops for efficiency. */
8571 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8572 case MOD: case UDIV: case UMOD: case AND: case IOR:
8573 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8574 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8575 case NE: case EQ: case GE: case GT: case LE:
8576 case LT: case GEU: case GTU: case LEU: case LTU:
8577 summarize_insn (XEXP (x, 0), sum, 0);
8578 summarize_insn (XEXP (x, 1), sum, 0);
8579 break;
8580
8581 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8582 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8583 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
9e7454d0 8584 case SQRT: case FFS:
c4622276 8585 summarize_insn (XEXP (x, 0), sum, 0);
8586 break;
8587
8588 default:
8589 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
3a5dbb5e 8590 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
cada32d3 8591 switch (format_ptr[i])
c4622276 8592 {
8593 case 'e':
8594 summarize_insn (XEXP (x, i), sum, 0);
8595 break;
8596
8597 case 'E':
3a5dbb5e 8598 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
c4622276 8599 summarize_insn (XVECEXP (x, i, j), sum, 0);
8600 break;
8601
1dc5f36f 8602 case 'i':
8603 break;
8604
c4622276 8605 default:
4d10b463 8606 gcc_unreachable ();
c4622276 8607 }
8608 }
8609}
c4622276 8610
b9a5aa8e 8611/* Ensure a sufficient number of `trapb' insns are in the code when
8612 the user requests code with a trap precision of functions or
8613 instructions.
8614
8615 In naive mode, when the user requests a trap-precision of
8616 "instruction", a trapb is needed after every instruction that may
8617 generate a trap. This ensures that the code is resumption safe but
8618 it is also slow.
8619
8620 When optimizations are turned on, we delay issuing a trapb as long
8621 as possible. In this context, a trap shadow is the sequence of
8622 instructions that starts with a (potentially) trap generating
8623 instruction and extends to the next trapb or call_pal instruction
8624 (but GCC never generates call_pal by itself). We can delay (and
8625 therefore sometimes omit) a trapb subject to the following
8626 conditions:
8627
8628 (a) On entry to the trap shadow, if any Alpha register or memory
8629 location contains a value that is used as an operand value by some
8630 instruction in the trap shadow (live on entry), then no instruction
8631 in the trap shadow may modify the register or memory location.
8632
8633 (b) Within the trap shadow, the computation of the base register
8634 for a memory load or store instruction may not involve using the
8635 result of an instruction that might generate an UNPREDICTABLE
8636 result.
8637
8638 (c) Within the trap shadow, no register may be used more than once
8639 as a destination register. (This is to make life easier for the
8640 trap-handler.)
c4622276 8641
18adf4f6 8642 (d) The trap shadow may not include any branch instructions. */
c4622276 8643
18adf4f6 8644static void
92643d95 8645alpha_handle_trap_shadows (void)
c4622276 8646{
18adf4f6 8647 struct shadow_summary shadow;
8648 int trap_pending, exception_nesting;
b9b4428b 8649 rtx i, n;
c4622276 8650
18adf4f6 8651 trap_pending = 0;
8652 exception_nesting = 0;
8653 shadow.used.i = 0;
8654 shadow.used.fp = 0;
8655 shadow.used.mem = 0;
8656 shadow.defd = shadow.used;
9e7454d0 8657
2efea8c0 8658 for (i = get_insns (); i ; i = NEXT_INSN (i))
18adf4f6 8659 {
c933fb42 8660 if (NOTE_P (i))
18adf4f6 8661 {
ad4583d9 8662 switch (NOTE_KIND (i))
18adf4f6 8663 {
8664 case NOTE_INSN_EH_REGION_BEG:
8665 exception_nesting++;
8666 if (trap_pending)
8667 goto close_shadow;
8668 break;
8669
8670 case NOTE_INSN_EH_REGION_END:
8671 exception_nesting--;
8672 if (trap_pending)
8673 goto close_shadow;
8674 break;
8675
8676 case NOTE_INSN_EPILOGUE_BEG:
8677 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8678 goto close_shadow;
8679 break;
8680 }
8681 }
8682 else if (trap_pending)
8683 {
8684 if (alpha_tp == ALPHA_TP_FUNC)
8685 {
c933fb42 8686 if (JUMP_P (i)
18adf4f6 8687 && GET_CODE (PATTERN (i)) == RETURN)
8688 goto close_shadow;
8689 }
8690 else if (alpha_tp == ALPHA_TP_INSN)
8691 {
8692 if (optimize > 0)
8693 {
8694 struct shadow_summary sum;
8695
8696 sum.used.i = 0;
8697 sum.used.fp = 0;
8698 sum.used.mem = 0;
a886cc41 8699 sum.defd = sum.used;
18adf4f6 8700
8701 switch (GET_CODE (i))
8702 {
8703 case INSN:
4d10b463 8704 /* Annoyingly, get_attr_trap will die on these. */
fad0a39b 8705 if (GET_CODE (PATTERN (i)) == USE
8706 || GET_CODE (PATTERN (i)) == CLOBBER)
18adf4f6 8707 break;
8708
8709 summarize_insn (PATTERN (i), &sum, 0);
8710
8711 if ((sum.defd.i & shadow.defd.i)
8712 || (sum.defd.fp & shadow.defd.fp))
8713 {
8714 /* (c) would be violated */
8715 goto close_shadow;
8716 }
8717
8718 /* Combine shadow with summary of current insn: */
8719 shadow.used.i |= sum.used.i;
8720 shadow.used.fp |= sum.used.fp;
8721 shadow.used.mem |= sum.used.mem;
8722 shadow.defd.i |= sum.defd.i;
8723 shadow.defd.fp |= sum.defd.fp;
8724 shadow.defd.mem |= sum.defd.mem;
8725
8726 if ((sum.defd.i & shadow.used.i)
8727 || (sum.defd.fp & shadow.used.fp)
8728 || (sum.defd.mem & shadow.used.mem))
8729 {
8730 /* (a) would be violated (also takes care of (b)) */
4d10b463 8731 gcc_assert (get_attr_trap (i) != TRAP_YES
8732 || (!(sum.defd.i & sum.used.i)
8733 && !(sum.defd.fp & sum.used.fp)));
18adf4f6 8734
8735 goto close_shadow;
8736 }
8737 break;
8738
8739 case JUMP_INSN:
8740 case CALL_INSN:
8741 case CODE_LABEL:
8742 goto close_shadow;
8743
8744 default:
4d10b463 8745 gcc_unreachable ();
18adf4f6 8746 }
8747 }
8748 else
8749 {
8750 close_shadow:
b9b4428b 8751 n = emit_insn_before (gen_trapb (), i);
8752 PUT_MODE (n, TImode);
8753 PUT_MODE (i, TImode);
18adf4f6 8754 trap_pending = 0;
8755 shadow.used.i = 0;
8756 shadow.used.fp = 0;
8757 shadow.used.mem = 0;
8758 shadow.defd = shadow.used;
8759 }
8760 }
8761 }
c4622276 8762
609d4083 8763 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
c933fb42 8764 && NONJUMP_INSN_P (i)
609d4083 8765 && GET_CODE (PATTERN (i)) != USE
8766 && GET_CODE (PATTERN (i)) != CLOBBER
8767 && get_attr_trap (i) == TRAP_YES)
8768 {
8769 if (optimize && !trap_pending)
8770 summarize_insn (PATTERN (i), &shadow, 0);
8771 trap_pending = 1;
8772 }
c4622276 8773 }
8774}
b9b4428b 8775\f
b9b4428b 8776/* Alpha can only issue instruction groups simultaneously if they are
5910bb95 8777 suitably aligned. This is very processor-specific. */
07770f18 8778/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8779 that are marked "fake". These instructions do not exist on that target,
8780 but it is possible to see these insns with deranged combinations of
8781 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8782 choose a result at random. */
b9b4428b 8783
849674a3 8784enum alphaev4_pipe {
8785 EV4_STOP = 0,
8786 EV4_IB0 = 1,
8787 EV4_IB1 = 2,
8788 EV4_IBX = 4
8789};
8790
b9b4428b 8791enum alphaev5_pipe {
8792 EV5_STOP = 0,
8793 EV5_NONE = 1,
8794 EV5_E01 = 2,
8795 EV5_E0 = 4,
8796 EV5_E1 = 8,
8797 EV5_FAM = 16,
8798 EV5_FA = 32,
8799 EV5_FM = 64
8800};
8801
849674a3 8802static enum alphaev4_pipe
92643d95 8803alphaev4_insn_pipe (rtx insn)
849674a3 8804{
8805 if (recog_memoized (insn) < 0)
8806 return EV4_STOP;
8807 if (get_attr_length (insn) != 4)
8808 return EV4_STOP;
8809
8810 switch (get_attr_type (insn))
8811 {
8812 case TYPE_ILD:
f155876e 8813 case TYPE_LDSYM:
849674a3 8814 case TYPE_FLD:
f155876e 8815 case TYPE_LD_L:
849674a3 8816 return EV4_IBX;
8817
849674a3 8818 case TYPE_IADD:
8819 case TYPE_ILOG:
8820 case TYPE_ICMOV:
8821 case TYPE_ICMP:
849674a3 8822 case TYPE_FST:
8823 case TYPE_SHIFT:
8824 case TYPE_IMUL:
8825 case TYPE_FBR:
07770f18 8826 case TYPE_MVI: /* fake */
849674a3 8827 return EV4_IB0;
8828
f155876e 8829 case TYPE_IST:
849674a3 8830 case TYPE_MISC:
8831 case TYPE_IBR:
8832 case TYPE_JSR:
1050b77e 8833 case TYPE_CALLPAL:
849674a3 8834 case TYPE_FCPYS:
8835 case TYPE_FCMOV:
8836 case TYPE_FADD:
8837 case TYPE_FDIV:
8838 case TYPE_FMUL:
f155876e 8839 case TYPE_ST_C:
8840 case TYPE_MB:
07770f18 8841 case TYPE_FSQRT: /* fake */
8842 case TYPE_FTOI: /* fake */
8843 case TYPE_ITOF: /* fake */
849674a3 8844 return EV4_IB1;
8845
8846 default:
4d10b463 8847 gcc_unreachable ();
849674a3 8848 }
8849}
8850
b9b4428b 8851static enum alphaev5_pipe
92643d95 8852alphaev5_insn_pipe (rtx insn)
b9b4428b 8853{
8854 if (recog_memoized (insn) < 0)
8855 return EV5_STOP;
8856 if (get_attr_length (insn) != 4)
8857 return EV5_STOP;
8858
8859 switch (get_attr_type (insn))
8860 {
8861 case TYPE_ILD:
8862 case TYPE_FLD:
8863 case TYPE_LDSYM:
8864 case TYPE_IADD:
8865 case TYPE_ILOG:
8866 case TYPE_ICMOV:
8867 case TYPE_ICMP:
8868 return EV5_E01;
8869
8870 case TYPE_IST:
8871 case TYPE_FST:
8872 case TYPE_SHIFT:
8873 case TYPE_IMUL:
8874 case TYPE_MISC:
8875 case TYPE_MVI:
f155876e 8876 case TYPE_LD_L:
8877 case TYPE_ST_C:
8878 case TYPE_MB:
07770f18 8879 case TYPE_FTOI: /* fake */
8880 case TYPE_ITOF: /* fake */
b9b4428b 8881 return EV5_E0;
8882
8883 case TYPE_IBR:
8884 case TYPE_JSR:
1050b77e 8885 case TYPE_CALLPAL:
b9b4428b 8886 return EV5_E1;
8887
8888 case TYPE_FCPYS:
8889 return EV5_FAM;
8890
8891 case TYPE_FBR:
8892 case TYPE_FCMOV:
8893 case TYPE_FADD:
8894 case TYPE_FDIV:
07770f18 8895 case TYPE_FSQRT: /* fake */
b9b4428b 8896 return EV5_FA;
8897
8898 case TYPE_FMUL:
8899 return EV5_FM;
ddca68f8 8900
8901 default:
4d10b463 8902 gcc_unreachable ();
b9b4428b 8903 }
b9b4428b 8904}
8905
9e7454d0 8906/* IN_USE is a mask of the slots currently filled within the insn group.
849674a3 8907 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9e7454d0 8908 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
849674a3 8909
8910 LEN is, of course, the length of the group in bytes. */
8911
8912static rtx
92643d95 8913alphaev4_next_group (rtx insn, int *pin_use, int *plen)
849674a3 8914{
8915 int len, in_use;
8916
8917 len = in_use = 0;
8918
9204e736 8919 if (! INSN_P (insn)
849674a3 8920 || GET_CODE (PATTERN (insn)) == CLOBBER
8921 || GET_CODE (PATTERN (insn)) == USE)
8922 goto next_and_done;
8923
8924 while (1)
8925 {
8926 enum alphaev4_pipe pipe;
8927
8928 pipe = alphaev4_insn_pipe (insn);
8929 switch (pipe)
8930 {
8931 case EV4_STOP:
8932 /* Force complex instructions to start new groups. */
8933 if (in_use)
8934 goto done;
8935
20833d12 8936 /* If this is a completely unrecognized insn, it's an asm.
849674a3 8937 We don't know how long it is, so record length as -1 to
8938 signal a needed realignment. */
8939 if (recog_memoized (insn) < 0)
8940 len = -1;
8941 else
8942 len = get_attr_length (insn);
8943 goto next_and_done;
8944
8945 case EV4_IBX:
8946 if (in_use & EV4_IB0)
8947 {
8948 if (in_use & EV4_IB1)
8949 goto done;
8950 in_use |= EV4_IB1;
8951 }
8952 else
8953 in_use |= EV4_IB0 | EV4_IBX;
8954 break;
8955
8956 case EV4_IB0:
8957 if (in_use & EV4_IB0)
8958 {
8959 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8960 goto done;
8961 in_use |= EV4_IB1;
8962 }
8963 in_use |= EV4_IB0;
8964 break;
8965
8966 case EV4_IB1:
8967 if (in_use & EV4_IB1)
8968 goto done;
8969 in_use |= EV4_IB1;
8970 break;
8971
8972 default:
4d10b463 8973 gcc_unreachable ();
849674a3 8974 }
8975 len += 4;
9e7454d0 8976
849674a3 8977 /* Haifa doesn't do well scheduling branches. */
c933fb42 8978 if (JUMP_P (insn))
849674a3 8979 goto next_and_done;
8980
8981 next:
8982 insn = next_nonnote_insn (insn);
8983
9204e736 8984 if (!insn || ! INSN_P (insn))
849674a3 8985 goto done;
8986
8987 /* Let Haifa tell us where it thinks insn group boundaries are. */
8988 if (GET_MODE (insn) == TImode)
8989 goto done;
8990
8991 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8992 goto next;
8993 }
8994
8995 next_and_done:
8996 insn = next_nonnote_insn (insn);
8997
8998 done:
8999 *plen = len;
9000 *pin_use = in_use;
9001 return insn;
9002}
9003
9e7454d0 9004/* IN_USE is a mask of the slots currently filled within the insn group.
849674a3 9005 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9e7454d0 9006 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
b9b4428b 9007
9008 LEN is, of course, the length of the group in bytes. */
9009
9010static rtx
92643d95 9011alphaev5_next_group (rtx insn, int *pin_use, int *plen)
b9b4428b 9012{
9013 int len, in_use;
9014
9015 len = in_use = 0;
9016
9204e736 9017 if (! INSN_P (insn)
ddca68f8 9018 || GET_CODE (PATTERN (insn)) == CLOBBER
9019 || GET_CODE (PATTERN (insn)) == USE)
9020 goto next_and_done;
b9b4428b 9021
ddca68f8 9022 while (1)
b9b4428b 9023 {
9024 enum alphaev5_pipe pipe;
b9b4428b 9025
9026 pipe = alphaev5_insn_pipe (insn);
9027 switch (pipe)
9028 {
9029 case EV5_STOP:
9030 /* Force complex instructions to start new groups. */
9031 if (in_use)
9032 goto done;
9033
20833d12 9034 /* If this is a completely unrecognized insn, it's an asm.
b9b4428b 9035 We don't know how long it is, so record length as -1 to
9036 signal a needed realignment. */
9037 if (recog_memoized (insn) < 0)
9038 len = -1;
9039 else
9040 len = get_attr_length (insn);
ddca68f8 9041 goto next_and_done;
b9b4428b 9042
4d10b463 9043 /* ??? Most of the places below, we would like to assert never
9044 happen, as it would indicate an error either in Haifa, or
9045 in the scheduling description. Unfortunately, Haifa never
9046 schedules the last instruction of the BB, so we don't have
9047 an accurate TI bit to go off. */
b9b4428b 9048 case EV5_E01:
9049 if (in_use & EV5_E0)
9050 {
9051 if (in_use & EV5_E1)
9052 goto done;
9053 in_use |= EV5_E1;
9054 }
9055 else
9056 in_use |= EV5_E0 | EV5_E01;
9057 break;
9058
9059 case EV5_E0:
9060 if (in_use & EV5_E0)
9061 {
849674a3 9062 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
b9b4428b 9063 goto done;
9064 in_use |= EV5_E1;
9065 }
9066 in_use |= EV5_E0;
9067 break;
9068
9069 case EV5_E1:
9070 if (in_use & EV5_E1)
9071 goto done;
9072 in_use |= EV5_E1;
9073 break;
9074
9075 case EV5_FAM:
9076 if (in_use & EV5_FA)
9077 {
9078 if (in_use & EV5_FM)
9079 goto done;
9080 in_use |= EV5_FM;
9081 }
9082 else
9083 in_use |= EV5_FA | EV5_FAM;
9084 break;
9085
9086 case EV5_FA:
9087 if (in_use & EV5_FA)
9088 goto done;
9089 in_use |= EV5_FA;
9090 break;
9091
9092 case EV5_FM:
9093 if (in_use & EV5_FM)
9094 goto done;
9095 in_use |= EV5_FM;
9096 break;
9097
9098 case EV5_NONE:
9099 break;
9100
9101 default:
4d10b463 9102 gcc_unreachable ();
b9b4428b 9103 }
9104 len += 4;
9e7454d0 9105
b9b4428b 9106 /* Haifa doesn't do well scheduling branches. */
9107 /* ??? If this is predicted not-taken, slotting continues, except
9108 that no more IBR, FBR, or JSR insns may be slotted. */
c933fb42 9109 if (JUMP_P (insn))
ddca68f8 9110 goto next_and_done;
b9b4428b 9111
ddca68f8 9112 next:
b9b4428b 9113 insn = next_nonnote_insn (insn);
9114
9204e736 9115 if (!insn || ! INSN_P (insn))
b9b4428b 9116 goto done;
f9137da0 9117
b9b4428b 9118 /* Let Haifa tell us where it thinks insn group boundaries are. */
9119 if (GET_MODE (insn) == TImode)
9120 goto done;
9121
ddca68f8 9122 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9123 goto next;
b9b4428b 9124 }
ddca68f8 9125
9126 next_and_done:
9127 insn = next_nonnote_insn (insn);
b9b4428b 9128
9129 done:
9130 *plen = len;
9131 *pin_use = in_use;
9132 return insn;
b9b4428b 9133}
9134
849674a3 9135static rtx
92643d95 9136alphaev4_next_nop (int *pin_use)
849674a3 9137{
9138 int in_use = *pin_use;
9139 rtx nop;
9140
9141 if (!(in_use & EV4_IB0))
9142 {
9143 in_use |= EV4_IB0;
9144 nop = gen_nop ();
9145 }
9146 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9147 {
9148 in_use |= EV4_IB1;
9149 nop = gen_nop ();
9150 }
9151 else if (TARGET_FP && !(in_use & EV4_IB1))
9152 {
9153 in_use |= EV4_IB1;
9154 nop = gen_fnop ();
9155 }
9156 else
9157 nop = gen_unop ();
9158
9159 *pin_use = in_use;
9160 return nop;
9161}
9162
9163static rtx
92643d95 9164alphaev5_next_nop (int *pin_use)
849674a3 9165{
9166 int in_use = *pin_use;
9167 rtx nop;
9168
9169 if (!(in_use & EV5_E1))
9170 {
9171 in_use |= EV5_E1;
9172 nop = gen_nop ();
9173 }
9174 else if (TARGET_FP && !(in_use & EV5_FA))
9175 {
9176 in_use |= EV5_FA;
9177 nop = gen_fnop ();
9178 }
9179 else if (TARGET_FP && !(in_use & EV5_FM))
9180 {
9181 in_use |= EV5_FM;
9182 nop = gen_fnop ();
9183 }
9184 else
9185 nop = gen_unop ();
9186
9187 *pin_use = in_use;
9188 return nop;
9189}
9190
9191/* The instruction group alignment main loop. */
9192
b9b4428b 9193static void
92643d95 9194alpha_align_insns (unsigned int max_align,
9195 rtx (*next_group) (rtx, int *, int *),
9196 rtx (*next_nop) (int *))
b9b4428b 9197{
9198 /* ALIGN is the known alignment for the insn group. */
b53f315c 9199 unsigned int align;
b9b4428b 9200 /* OFS is the offset of the current insn in the insn group. */
9201 int ofs;
fd1ace94 9202 int prev_in_use, in_use, len, ldgp;
b9b4428b 9203 rtx i, next;
9204
9205 /* Let shorten branches care for assigning alignments to code labels. */
2efea8c0 9206 shorten_branches (get_insns ());
b9b4428b 9207
d815ce59 9208 if (align_functions < 4)
9209 align = 4;
eeca3ba1 9210 else if ((unsigned int) align_functions < max_align)
d815ce59 9211 align = align_functions;
9212 else
9213 align = max_align;
e2c8a34a 9214
b9b4428b 9215 ofs = prev_in_use = 0;
2efea8c0 9216 i = get_insns ();
c933fb42 9217 if (NOTE_P (i))
b9b4428b 9218 i = next_nonnote_insn (i);
9219
fd1ace94 9220 ldgp = alpha_function_needs_gp ? 8 : 0;
9221
b9b4428b 9222 while (i)
9223 {
b53f315c 9224 next = (*next_group) (i, &in_use, &len);
b9b4428b 9225
9226 /* When we see a label, resync alignment etc. */
c933fb42 9227 if (LABEL_P (i))
b9b4428b 9228 {
b53f315c 9229 unsigned int new_align = 1 << label_to_alignment (i);
9230
b9b4428b 9231 if (new_align >= align)
9232 {
849674a3 9233 align = new_align < max_align ? new_align : max_align;
b9b4428b 9234 ofs = 0;
9235 }
b53f315c 9236
b9b4428b 9237 else if (ofs & (new_align-1))
9238 ofs = (ofs | (new_align-1)) + 1;
4d10b463 9239 gcc_assert (!len);
b9b4428b 9240 }
9241
9242 /* Handle complex instructions special. */
9243 else if (in_use == 0)
9244 {
9245 /* Asms will have length < 0. This is a signal that we have
9246 lost alignment knowledge. Assume, however, that the asm
9247 will not mis-align instructions. */
9248 if (len < 0)
9249 {
9250 ofs = 0;
9251 align = 4;
9252 len = 0;
9253 }
9254 }
9255
9256 /* If the known alignment is smaller than the recognized insn group,
9257 realign the output. */
1f0ce6a6 9258 else if ((int) align < len)
b9b4428b 9259 {
b53f315c 9260 unsigned int new_log_align = len > 8 ? 4 : 3;
943a1b57 9261 rtx prev, where;
b9b4428b 9262
943a1b57 9263 where = prev = prev_nonnote_insn (i);
c933fb42 9264 if (!where || !LABEL_P (where))
b9b4428b 9265 where = i;
9266
943a1b57 9267 /* Can't realign between a call and its gp reload. */
9268 if (! (TARGET_EXPLICIT_RELOCS
c933fb42 9269 && prev && CALL_P (prev)))
943a1b57 9270 {
9271 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9272 align = 1 << new_log_align;
9273 ofs = 0;
9274 }
b9b4428b 9275 }
9276
fd1ace94 9277 /* We may not insert padding inside the initial ldgp sequence. */
9278 else if (ldgp > 0)
9279 ldgp -= len;
9280
b9b4428b 9281 /* If the group won't fit in the same INT16 as the previous,
9282 we need to add padding to keep the group together. Rather
9283 than simply leaving the insn filling to the assembler, we
9284 can make use of the knowledge of what sorts of instructions
9285 were issued in the previous group to make sure that all of
9286 the added nops are really free. */
1f0ce6a6 9287 else if (ofs + len > (int) align)
b9b4428b 9288 {
9289 int nop_count = (align - ofs) / 4;
9290 rtx where;
9291
efee20da 9292 /* Insert nops before labels, branches, and calls to truly merge
943a1b57 9293 the execution of the nops with the previous instruction group. */
b9b4428b 9294 where = prev_nonnote_insn (i);
849674a3 9295 if (where)
b9b4428b 9296 {
c933fb42 9297 if (LABEL_P (where))
b9b4428b 9298 {
849674a3 9299 rtx where2 = prev_nonnote_insn (where);
c933fb42 9300 if (where2 && JUMP_P (where2))
849674a3 9301 where = where2;
b9b4428b 9302 }
c933fb42 9303 else if (NONJUMP_INSN_P (where))
849674a3 9304 where = i;
b9b4428b 9305 }
849674a3 9306 else
9307 where = i;
9308
9e7454d0 9309 do
849674a3 9310 emit_insn_before ((*next_nop)(&prev_in_use), where);
b9b4428b 9311 while (--nop_count);
9312 ofs = 0;
9313 }
9314
9315 ofs = (ofs + len) & (align - 1);
9316 prev_in_use = in_use;
9317 i = next;
9318 }
9319}
a27deefc 9320
9321/* Insert an unop between a noreturn function call and GP load. */
9322
9323static void
9324alpha_pad_noreturn (void)
9325{
9326 rtx insn, next;
9327
9328 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9329 {
9330 if (!CALL_P (insn)
9331 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9332 continue;
9333
9334 next = next_active_insn (insn);
9335
9336 if (next)
9337 {
9338 rtx pat = PATTERN (next);
9339
9340 if (GET_CODE (pat) == SET
9341 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9342 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9343 emit_insn_after (gen_unop (), insn);
9344 }
9345 }
9346}
b9b4428b 9347\f
35a3065a 9348/* Machine dependent reorg pass. */
18adf4f6 9349
2efea8c0 9350static void
92643d95 9351alpha_reorg (void)
18adf4f6 9352{
a27deefc 9353 /* Workaround for a linker error that triggers when an
9354 exception handler immediatelly follows a noreturn function.
9355
9356 The instruction stream from an object file:
9357
9358 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9359 58: 00 00 ba 27 ldah gp,0(ra)
9360 5c: 00 00 bd 23 lda gp,0(gp)
9361 60: 00 00 7d a7 ldq t12,0(gp)
9362 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9363
9364 was converted in the final link pass to:
9365
9366 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9367 fdb28: 00 00 fe 2f unop
9368 fdb2c: 00 00 fe 2f unop
9369 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9370 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9371
9372 GP load instructions were wrongly cleared by the linker relaxation
9373 pass. This workaround prevents removal of GP loads by inserting
9374 an unop instruction between a noreturn function call and
9375 exception handler prologue. */
9376
9377 if (current_function_has_exception_handlers ())
9378 alpha_pad_noreturn ();
9379
b9b4428b 9380 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
2efea8c0 9381 alpha_handle_trap_shadows ();
b9b4428b 9382
b9b4428b 9383 /* Due to the number of extra trapb insns, don't bother fixing up
9384 alignment when trap precision is instruction. Moreover, we can
b53f315c 9385 only do our job when sched2 is run. */
b9b4428b 9386 if (optimize && !optimize_size
9387 && alpha_tp != ALPHA_TP_INSN
9388 && flag_schedule_insns_after_reload)
9389 {
fb64edde 9390 if (alpha_tune == PROCESSOR_EV4)
2efea8c0 9391 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
fb64edde 9392 else if (alpha_tune == PROCESSOR_EV5)
2efea8c0 9393 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
b9b4428b 9394 }
18adf4f6 9395}
18adf4f6 9396\f
92c473b8 9397#if !TARGET_ABI_UNICOSMK
9398
9399#ifdef HAVE_STAMP_H
9400#include <stamp.h>
9401#endif
9402
9403static void
9404alpha_file_start (void)
9405{
0fdc84d7 9406#ifdef OBJECT_FORMAT_ELF
9407 /* If emitting dwarf2 debug information, we cannot generate a .file
9408 directive to start the file, as it will conflict with dwarf2out
9409 file numbers. So it's only useful when emitting mdebug output. */
9410 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9411#endif
9412
92c473b8 9413 default_file_start ();
9414#ifdef MS_STAMP
bc964653 9415 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
92c473b8 9416#endif
9417
9418 fputs ("\t.set noreorder\n", asm_out_file);
9419 fputs ("\t.set volatile\n", asm_out_file);
9420 if (!TARGET_ABI_OPEN_VMS)
9421 fputs ("\t.set noat\n", asm_out_file);
9422 if (TARGET_EXPLICIT_RELOCS)
9423 fputs ("\t.set nomacro\n", asm_out_file);
9424 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
fb64edde 9425 {
9426 const char *arch;
9427
9428 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9429 arch = "ev6";
9430 else if (TARGET_MAX)
9431 arch = "pca56";
9432 else if (TARGET_BWX)
9433 arch = "ev56";
9434 else if (alpha_cpu == PROCESSOR_EV5)
9435 arch = "ev5";
9436 else
9437 arch = "ev4";
9438
9439 fprintf (asm_out_file, "\t.arch %s\n", arch);
9440 }
92c473b8 9441}
9442#endif
9443
bbfbe351 9444#ifdef OBJECT_FORMAT_ELF
4e151b05 9445/* Since we don't have a .dynbss section, we should not allow global
9446 relocations in the .rodata section. */
9447
9448static int
9449alpha_elf_reloc_rw_mask (void)
9450{
9451 return flag_pic ? 3 : 2;
9452}
bbfbe351 9453
2f14b1f9 9454/* Return a section for X. The only special thing we do here is to
9455 honor small data. */
bbfbe351 9456
2f14b1f9 9457static section *
92643d95 9458alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9459 unsigned HOST_WIDE_INT align)
bbfbe351 9460{
9461 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
5910bb95 9462 /* ??? Consider using mergeable sdata sections. */
2f14b1f9 9463 return sdata_section;
bbfbe351 9464 else
2f14b1f9 9465 return default_elf_select_rtx_section (mode, x, align);
bbfbe351 9466}
9467
cc2af183 9468static unsigned int
9469alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9470{
9471 unsigned int flags = 0;
9472
9473 if (strcmp (name, ".sdata") == 0
9474 || strncmp (name, ".sdata.", 7) == 0
9475 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9476 || strcmp (name, ".sbss") == 0
9477 || strncmp (name, ".sbss.", 6) == 0
9478 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9479 flags = SECTION_SMALL;
9480
9481 flags |= default_section_type_flags (decl, name, reloc);
9482 return flags;
9483}
bbfbe351 9484#endif /* OBJECT_FORMAT_ELF */
9485\f
9de382d9 9486/* Structure to collect function names for final output in link section. */
9487/* Note that items marked with GTY can't be ifdef'ed out. */
573aba85 9488
9489enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9de382d9 9490enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
573aba85 9491
fb1e4f4a 9492struct GTY(()) alpha_links
573aba85 9493{
9de382d9 9494 int num;
573aba85 9495 rtx linkage;
9de382d9 9496 enum links_kind lkind;
9497 enum reloc_kind rkind;
9498};
9499
fb1e4f4a 9500struct GTY(()) alpha_funcs
9de382d9 9501{
9502 int num;
9503 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9504 links;
573aba85 9505};
9506
9507static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9de382d9 9508 splay_tree alpha_links_tree;
9509static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9510 splay_tree alpha_funcs_tree;
9511
9512static GTY(()) int alpha_funcs_num;
573aba85 9513
1467e953 9514#if TARGET_ABI_OPEN_VMS
8df4a58b 9515
0dbd1c74 9516/* Return the VMS argument type corresponding to MODE. */
8df4a58b 9517
0dbd1c74 9518enum avms_arg_type
92643d95 9519alpha_arg_type (enum machine_mode mode)
0dbd1c74 9520{
9521 switch (mode)
8df4a58b 9522 {
0dbd1c74 9523 case SFmode:
9524 return TARGET_FLOAT_VAX ? FF : FS;
9525 case DFmode:
9526 return TARGET_FLOAT_VAX ? FD : FT;
9527 default:
9528 return I64;
8df4a58b 9529 }
0dbd1c74 9530}
8df4a58b 9531
0dbd1c74 9532/* Return an rtx for an integer representing the VMS Argument Information
9533 register value. */
8df4a58b 9534
1dd6c958 9535rtx
92643d95 9536alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
0dbd1c74 9537{
9538 unsigned HOST_WIDE_INT regval = cum.num_args;
9539 int i;
8df4a58b 9540
0dbd1c74 9541 for (i = 0; i < 6; i++)
9542 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
8df4a58b 9543
0dbd1c74 9544 return GEN_INT (regval);
9545}
9546\f
8df4a58b 9547/* Make (or fake) .linkage entry for function call.
9548
57e47080 9549 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
8df4a58b 9550
57e47080 9551 Return an SYMBOL_REF rtx for the linkage. */
9552
9553rtx
92643d95 9554alpha_need_linkage (const char *name, int is_local)
8df4a58b 9555{
57e47080 9556 splay_tree_node node;
9557 struct alpha_links *al;
8df4a58b 9558
9559 if (name[0] == '*')
9560 name++;
9561
cf73d31f 9562 if (is_local)
9563 {
9de382d9 9564 struct alpha_funcs *cfaf;
9565
9566 if (!alpha_funcs_tree)
9567 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9568 splay_tree_compare_pointers);
9e7454d0 9569
9de382d9 9570 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
cf73d31f 9571
9572 cfaf->links = 0;
9573 cfaf->num = ++alpha_funcs_num;
9574
9575 splay_tree_insert (alpha_funcs_tree,
9576 (splay_tree_key) current_function_decl,
9577 (splay_tree_value) cfaf);
cf73d31f 9578 }
9579
9580 if (alpha_links_tree)
57e47080 9581 {
9582 /* Is this name already defined? */
8df4a58b 9583
cf73d31f 9584 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
57e47080 9585 if (node)
9586 {
9587 al = (struct alpha_links *) node->value;
9588 if (is_local)
9589 {
9590 /* Defined here but external assumed. */
cf73d31f 9591 if (al->lkind == KIND_EXTERN)
9592 al->lkind = KIND_LOCAL;
57e47080 9593 }
9594 else
9595 {
9596 /* Used here but unused assumed. */
cf73d31f 9597 if (al->lkind == KIND_UNUSED)
9598 al->lkind = KIND_LOCAL;
57e47080 9599 }
9600 return al->linkage;
9601 }
9602 }
9603 else
8482c296 9604 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8df4a58b 9605
573aba85 9606 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9607 name = ggc_strdup (name);
8df4a58b 9608
9609 /* Assume external if no definition. */
cf73d31f 9610 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
8df4a58b 9611
57e47080 9612 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
d2899e26 9613 get_identifier (name);
9614
57e47080 9615 /* Construct a SYMBOL_REF for us to call. */
9616 {
9617 size_t name_len = strlen (name);
c0b4d131 9618 char *linksym = XALLOCAVEC (char, name_len + 6);
57e47080 9619 linksym[0] = '$';
9620 memcpy (linksym + 1, name, name_len);
9621 memcpy (linksym + 1 + name_len, "..lk", 5);
44acf429 9622 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9623 ggc_alloc_string (linksym, name_len + 5));
57e47080 9624 }
9625
cf73d31f 9626 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
57e47080 9627 (splay_tree_value) al);
8df4a58b 9628
57e47080 9629 return al->linkage;
8df4a58b 9630}
9631
cf73d31f 9632rtx
92643d95 9633alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
cf73d31f 9634{
9635 splay_tree_node cfunnode;
9636 struct alpha_funcs *cfaf;
9637 struct alpha_links *al;
9638 const char *name = XSTR (linkage, 0);
9639
9640 cfaf = (struct alpha_funcs *) 0;
9641 al = (struct alpha_links *) 0;
9642
9643 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9644 cfaf = (struct alpha_funcs *) cfunnode->value;
9645
9646 if (cfaf->links)
9647 {
9648 splay_tree_node lnode;
9649
9650 /* Is this name already defined? */
9651
9652 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9653 if (lnode)
9654 al = (struct alpha_links *) lnode->value;
9655 }
9656 else
9de382d9 9657 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
cf73d31f 9658
9659 if (!al)
9660 {
9661 size_t name_len;
9662 size_t buflen;
9663 char buf [512];
9664 char *linksym;
9665 splay_tree_node node = 0;
9666 struct alpha_links *anl;
9667
9668 if (name[0] == '*')
9669 name++;
9670
9671 name_len = strlen (name);
9672
9de382d9 9673 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
cf73d31f 9674 al->num = cfaf->num;
9675
9676 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9677 if (node)
9678 {
9679 anl = (struct alpha_links *) node->value;
9680 al->lkind = anl->lkind;
9681 }
9682
9683 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9684 buflen = strlen (buf);
c0b4d131 9685 linksym = XALLOCAVEC (char, buflen + 1);
cf73d31f 9686 memcpy (linksym, buf, buflen + 1);
9687
9688 al->linkage = gen_rtx_SYMBOL_REF
9689 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9690
9691 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9692 (splay_tree_value) al);
9693 }
9694
9695 if (rflag)
9696 al->rkind = KIND_CODEADDR;
9697 else
9698 al->rkind = KIND_LINKAGE;
9e7454d0 9699
cf73d31f 9700 if (lflag)
9701 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9702 else
9703 return al->linkage;
9704}
9705
57e47080 9706static int
92643d95 9707alpha_write_one_linkage (splay_tree_node node, void *data)
57e47080 9708{
0d95286f 9709 const char *const name = (const char *) node->key;
cf73d31f 9710 struct alpha_links *link = (struct alpha_links *) node->value;
57e47080 9711 FILE *stream = (FILE *) data;
9712
cf73d31f 9713 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9714 if (link->rkind == KIND_CODEADDR)
57e47080 9715 {
cf73d31f 9716 if (link->lkind == KIND_LOCAL)
9717 {
9718 /* Local and used */
9719 fprintf (stream, "\t.quad %s..en\n", name);
9720 }
9721 else
9722 {
9723 /* External and used, request code address. */
9724 fprintf (stream, "\t.code_address %s\n", name);
9725 }
57e47080 9726 }
9727 else
9728 {
cf73d31f 9729 if (link->lkind == KIND_LOCAL)
9730 {
9731 /* Local and used, build linkage pair. */
9732 fprintf (stream, "\t.quad %s..en\n", name);
9733 fprintf (stream, "\t.quad %s\n", name);
9734 }
9735 else
9736 {
9737 /* External and used, request linkage pair. */
9738 fprintf (stream, "\t.linkage %s\n", name);
9739 }
57e47080 9740 }
9741
9742 return 0;
9743}
8df4a58b 9744
cf73d31f 9745static void
92643d95 9746alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
8df4a58b 9747{
cf73d31f 9748 splay_tree_node node;
9749 struct alpha_funcs *func;
9750
2f14b1f9 9751 fprintf (stream, "\t.link\n");
cf73d31f 9752 fprintf (stream, "\t.align 3\n");
2f14b1f9 9753 in_section = NULL;
9754
cf73d31f 9755 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9756 func = (struct alpha_funcs *) node->value;
9757
9758 fputs ("\t.name ", stream);
9759 assemble_name (stream, funname);
9760 fputs ("..na\n", stream);
9761 ASM_OUTPUT_LABEL (stream, funname);
9762 fprintf (stream, "\t.pdesc ");
9763 assemble_name (stream, funname);
9764 fprintf (stream, "..en,%s\n",
9765 alpha_procedure_type == PT_STACK ? "stack"
9766 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9767
9768 if (func->links)
c64a8830 9769 {
cf73d31f 9770 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9771 /* splay_tree_delete (func->links); */
c64a8830 9772 }
8df4a58b 9773}
9774
2cb4ac60 9775/* Given a decl, a section name, and whether the decl initializer
9776 has relocs, choose attributes for the section. */
9777
9778#define SECTION_VMS_OVERLAY SECTION_FORGET
c64a8830 9779#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9780#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
2cb4ac60 9781
9782static unsigned int
92643d95 9783vms_section_type_flags (tree decl, const char *name, int reloc)
2cb4ac60 9784{
9785 unsigned int flags = default_section_type_flags (decl, name, reloc);
9786
e3c541f0 9787 if (decl && DECL_ATTRIBUTES (decl)
9788 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
2cb4ac60 9789 flags |= SECTION_VMS_OVERLAY;
c64a8830 9790 if (decl && DECL_ATTRIBUTES (decl)
9791 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9792 flags |= SECTION_VMS_GLOBAL;
9793 if (decl && DECL_ATTRIBUTES (decl)
9794 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9795 flags |= SECTION_VMS_INITIALIZE;
2cb4ac60 9796
9797 return flags;
9798}
9799
9800/* Switch to an arbitrary section NAME with attributes as specified
9801 by FLAGS. ALIGN specifies any known alignment requirements for
9802 the section; 0 if the default should be used. */
9803
9804static void
537cd941 9805vms_asm_named_section (const char *name, unsigned int flags,
9806 tree decl ATTRIBUTE_UNUSED)
2cb4ac60 9807{
c64a8830 9808 fputc ('\n', asm_out_file);
9809 fprintf (asm_out_file, ".section\t%s", name);
2cb4ac60 9810
9811 if (flags & SECTION_VMS_OVERLAY)
c64a8830 9812 fprintf (asm_out_file, ",OVR");
9813 if (flags & SECTION_VMS_GLOBAL)
9814 fprintf (asm_out_file, ",GBL");
9815 if (flags & SECTION_VMS_INITIALIZE)
9816 fprintf (asm_out_file, ",NOMOD");
9817 if (flags & SECTION_DEBUG)
9818 fprintf (asm_out_file, ",NOWRT");
9819
9820 fputc ('\n', asm_out_file);
2cb4ac60 9821}
9822
01d15dc5 9823/* Record an element in the table of global constructors. SYMBOL is
9824 a SYMBOL_REF of the function to be called; PRIORITY is a number
9e7454d0 9825 between 0 and MAX_INIT_PRIORITY.
01d15dc5 9826
9827 Differs from default_ctors_section_asm_out_constructor in that the
9828 width of the .ctors entry is always 64 bits, rather than the 32 bits
9829 used by a normal pointer. */
9830
9831static void
92643d95 9832vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
01d15dc5 9833{
2f14b1f9 9834 switch_to_section (ctors_section);
09d688ff 9835 assemble_align (BITS_PER_WORD);
9836 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
01d15dc5 9837}
9838
9839static void
92643d95 9840vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
01d15dc5 9841{
2f14b1f9 9842 switch_to_section (dtors_section);
09d688ff 9843 assemble_align (BITS_PER_WORD);
9844 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
01d15dc5 9845}
8df4a58b 9846#else
9847
57e47080 9848rtx
92643d95 9849alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9850 int is_local ATTRIBUTE_UNUSED)
8df4a58b 9851{
57e47080 9852 return NULL_RTX;
8df4a58b 9853}
9854
cf73d31f 9855rtx
92643d95 9856alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9857 tree cfundecl ATTRIBUTE_UNUSED,
9858 int lflag ATTRIBUTE_UNUSED,
9859 int rflag ATTRIBUTE_UNUSED)
cf73d31f 9860{
9861 return NULL_RTX;
9862}
9863
1467e953 9864#endif /* TARGET_ABI_OPEN_VMS */
9caef960 9865\f
9866#if TARGET_ABI_UNICOSMK
9867
0336f0f0 9868/* This evaluates to true if we do not know how to pass TYPE solely in
9869 registers. This is the case for all arguments that do not fit in two
9870 registers. */
9871
9872static bool
fb80456a 9873unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
0336f0f0 9874{
9875 if (type == NULL)
9876 return false;
9877
9878 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9879 return true;
9880 if (TREE_ADDRESSABLE (type))
9881 return true;
9882
9883 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9884}
9885
9caef960 9886/* Define the offset between two registers, one to be eliminated, and the
9887 other its replacement, at the start of a routine. */
9888
9889int
92643d95 9890unicosmk_initial_elimination_offset (int from, int to)
9caef960 9891{
9892 int fixed_size;
9e7454d0 9893
9caef960 9894 fixed_size = alpha_sa_size();
9895 if (fixed_size != 0)
9896 fixed_size += 48;
9897
9898 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9e7454d0 9899 return -fixed_size;
9caef960 9900 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9901 return 0;
9902 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
abe32cce 9903 return (ALPHA_ROUND (crtl->outgoing_args_size)
9caef960 9904 + ALPHA_ROUND (get_frame_size()));
9905 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9906 return (ALPHA_ROUND (fixed_size)
9e7454d0 9907 + ALPHA_ROUND (get_frame_size()
abe32cce 9908 + crtl->outgoing_args_size));
9caef960 9909 else
4d10b463 9910 gcc_unreachable ();
9caef960 9911}
9912
9913/* Output the module name for .ident and .end directives. We have to strip
9914 directories and add make sure that the module name starts with a letter
9915 or '$'. */
9916
9917static void
92643d95 9918unicosmk_output_module_name (FILE *file)
9caef960 9919{
8789d51c 9920 const char *name = lbasename (main_input_filename);
9921 unsigned len = strlen (name);
9922 char *clean_name = alloca (len + 2);
9923 char *ptr = clean_name;
9e7454d0 9924
9caef960 9925 /* CAM only accepts module names that start with a letter or '$'. We
9926 prefix the module name with a '$' if necessary. */
9927
9928 if (!ISALPHA (*name))
8789d51c 9929 *ptr++ = '$';
9930 memcpy (ptr, name, len + 1);
9931 clean_symbol_name (clean_name);
9932 fputs (clean_name, file);
9caef960 9933}
9934
92643d95 9935/* Output the definition of a common variable. */
9caef960 9936
92643d95 9937void
9938unicosmk_output_common (FILE *file, const char *name, int size, int align)
9caef960 9939{
92643d95 9940 tree name_tree;
9941 printf ("T3E__: common %s\n", name);
9caef960 9942
2f14b1f9 9943 in_section = NULL;
9caef960 9944 fputs("\t.endp\n\n\t.psect ", file);
9945 assemble_name(file, name);
9946 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9947 fprintf(file, "\t.byte\t0:%d\n", size);
9948
9949 /* Mark the symbol as defined in this module. */
9950 name_tree = get_identifier (name);
9951 TREE_ASM_WRITTEN (name_tree) = 1;
9952}
9953
9954#define SECTION_PUBLIC SECTION_MACH_DEP
9955#define SECTION_MAIN (SECTION_PUBLIC << 1)
9956static int current_section_align;
9957
2f14b1f9 9958/* A get_unnamed_section callback for switching to the text section. */
9959
9960static void
9961unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9962{
9963 static int count = 0;
9964 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9965}
9966
9967/* A get_unnamed_section callback for switching to the data section. */
9968
9969static void
9970unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9971{
9972 static int count = 1;
9973 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9974}
9975
9976/* Implement TARGET_ASM_INIT_SECTIONS.
9977
9978 The Cray assembler is really weird with respect to sections. It has only
9979 named sections and you can't reopen a section once it has been closed.
9980 This means that we have to generate unique names whenever we want to
9981 reenter the text or the data section. */
9982
9983static void
9984unicosmk_init_sections (void)
9985{
9986 text_section = get_unnamed_section (SECTION_CODE,
9987 unicosmk_output_text_section_asm_op,
9988 NULL);
9989 data_section = get_unnamed_section (SECTION_WRITE,
9990 unicosmk_output_data_section_asm_op,
9991 NULL);
9992 readonly_data_section = data_section;
9993}
9994
9caef960 9995static unsigned int
92643d95 9996unicosmk_section_type_flags (tree decl, const char *name,
9997 int reloc ATTRIBUTE_UNUSED)
9caef960 9998{
9999 unsigned int flags = default_section_type_flags (decl, name, reloc);
10000
10001 if (!decl)
10002 return flags;
10003
10004 if (TREE_CODE (decl) == FUNCTION_DECL)
10005 {
10006 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10007 if (align_functions_log > current_section_align)
10008 current_section_align = align_functions_log;
10009
10010 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10011 flags |= SECTION_MAIN;
10012 }
10013 else
10014 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10015
10016 if (TREE_PUBLIC (decl))
10017 flags |= SECTION_PUBLIC;
10018
10019 return flags;
10020}
10021
10022/* Generate a section name for decl and associate it with the
10023 declaration. */
10024
52470889 10025static void
92643d95 10026unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9caef960 10027{
10028 const char *name;
10029 int len;
10030
4d10b463 10031 gcc_assert (decl);
9caef960 10032
10033 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
09a1f342 10034 name = default_strip_name_encoding (name);
9caef960 10035 len = strlen (name);
10036
10037 if (TREE_CODE (decl) == FUNCTION_DECL)
10038 {
10039 char *string;
10040
9e7454d0 10041 /* It is essential that we prefix the section name here because
10042 otherwise the section names generated for constructors and
9caef960 10043 destructors confuse collect2. */
10044
10045 string = alloca (len + 6);
10046 sprintf (string, "code@%s", name);
10047 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10048 }
10049 else if (TREE_PUBLIC (decl))
10050 DECL_SECTION_NAME (decl) = build_string (len, name);
10051 else
10052 {
10053 char *string;
10054
10055 string = alloca (len + 6);
10056 sprintf (string, "data@%s", name);
10057 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10058 }
10059}
10060
10061/* Switch to an arbitrary section NAME with attributes as specified
10062 by FLAGS. ALIGN specifies any known alignment requirements for
10063 the section; 0 if the default should be used. */
10064
10065static void
537cd941 10066unicosmk_asm_named_section (const char *name, unsigned int flags,
10067 tree decl ATTRIBUTE_UNUSED)
9caef960 10068{
10069 const char *kind;
10070
10071 /* Close the previous section. */
10072
10073 fputs ("\t.endp\n\n", asm_out_file);
10074
10075 /* Find out what kind of section we are opening. */
10076
10077 if (flags & SECTION_MAIN)
10078 fputs ("\t.start\tmain\n", asm_out_file);
10079
10080 if (flags & SECTION_CODE)
10081 kind = "code";
10082 else if (flags & SECTION_PUBLIC)
10083 kind = "common";
10084 else
10085 kind = "data";
10086
10087 if (current_section_align != 0)
10088 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10089 current_section_align, kind);
10090 else
10091 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10092}
10093
10094static void
92643d95 10095unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9caef960 10096{
10097 if (DECL_P (decl)
10098 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
52470889 10099 unicosmk_unique_section (decl, 0);
9caef960 10100}
10101
10102/* Output an alignment directive. We have to use the macro 'gcc@code@align'
10103 in code sections because .align fill unused space with zeroes. */
9e7454d0 10104
9caef960 10105void
92643d95 10106unicosmk_output_align (FILE *file, int align)
9caef960 10107{
10108 if (inside_function)
10109 fprintf (file, "\tgcc@code@align\t%d\n", align);
10110 else
10111 fprintf (file, "\t.align\t%d\n", align);
10112}
10113
10114/* Add a case vector to the current function's list of deferred case
10115 vectors. Case vectors have to be put into a separate section because CAM
10116 does not allow data definitions in code sections. */
10117
10118void
92643d95 10119unicosmk_defer_case_vector (rtx lab, rtx vec)
9caef960 10120{
10121 struct machine_function *machine = cfun->machine;
9e7454d0 10122
9caef960 10123 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10124 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9e7454d0 10125 machine->addr_list);
9caef960 10126}
10127
10128/* Output a case vector. */
10129
10130static void
92643d95 10131unicosmk_output_addr_vec (FILE *file, rtx vec)
9caef960 10132{
10133 rtx lab = XEXP (vec, 0);
10134 rtx body = XEXP (vec, 1);
10135 int vlen = XVECLEN (body, 0);
10136 int idx;
10137
805e22b2 10138 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9caef960 10139
10140 for (idx = 0; idx < vlen; idx++)
10141 {
10142 ASM_OUTPUT_ADDR_VEC_ELT
10143 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10144 }
10145}
10146
10147/* Output current function's deferred case vectors. */
10148
10149static void
92643d95 10150unicosmk_output_deferred_case_vectors (FILE *file)
9caef960 10151{
10152 struct machine_function *machine = cfun->machine;
10153 rtx t;
10154
10155 if (machine->addr_list == NULL_RTX)
10156 return;
10157
2f14b1f9 10158 switch_to_section (data_section);
9caef960 10159 for (t = machine->addr_list; t; t = XEXP (t, 1))
10160 unicosmk_output_addr_vec (file, XEXP (t, 0));
10161}
10162
92643d95 10163/* Generate the name of the SSIB section for the current function. */
10164
10165#define SSIB_PREFIX "__SSIB_"
10166#define SSIB_PREFIX_LEN 7
10167
10168static const char *
10169unicosmk_ssib_name (void)
10170{
9e7454d0 10171 /* This is ok since CAM won't be able to deal with names longer than that
92643d95 10172 anyway. */
10173
10174 static char name[256];
10175
10176 rtx x;
10177 const char *fnname;
10178 int len;
10179
10180 x = DECL_RTL (cfun->decl);
c933fb42 10181 gcc_assert (MEM_P (x));
92643d95 10182 x = XEXP (x, 0);
4d10b463 10183 gcc_assert (GET_CODE (x) == SYMBOL_REF);
92643d95 10184 fnname = XSTR (x, 0);
10185
10186 len = strlen (fnname);
10187 if (len + SSIB_PREFIX_LEN > 255)
10188 len = 255 - SSIB_PREFIX_LEN;
10189
10190 strcpy (name, SSIB_PREFIX);
10191 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10192 name[len + SSIB_PREFIX_LEN] = 0;
10193
10194 return name;
10195}
10196
9e7454d0 10197/* Set up the dynamic subprogram information block (DSIB) and update the
10198 frame pointer register ($15) for subroutines which have a frame. If the
9caef960 10199 subroutine doesn't have a frame, simply increment $15. */
10200
10201static void
92643d95 10202unicosmk_gen_dsib (unsigned long *imaskP)
9caef960 10203{
b19d7ab1 10204 if (alpha_procedure_type == PT_STACK)
9caef960 10205 {
10206 const char *ssib_name;
10207 rtx mem;
10208
10209 /* Allocate 64 bytes for the DSIB. */
10210
10211 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10212 GEN_INT (-64))));
10213 emit_insn (gen_blockage ());
10214
10215 /* Save the return address. */
10216
10217 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10218 set_mem_alias_set (mem, alpha_sr_alias_set);
10219 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
df7d0d23 10220 (*imaskP) &= ~(1UL << REG_RA);
9caef960 10221
10222 /* Save the old frame pointer. */
10223
10224 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10225 set_mem_alias_set (mem, alpha_sr_alias_set);
10226 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
df7d0d23 10227 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
9caef960 10228
10229 emit_insn (gen_blockage ());
10230
10231 /* Store the SSIB pointer. */
10232
10233 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10234 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10235 set_mem_alias_set (mem, alpha_sr_alias_set);
10236
10237 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10238 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10239 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10240
10241 /* Save the CIW index. */
10242
10243 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10244 set_mem_alias_set (mem, alpha_sr_alias_set);
10245 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10246
10247 emit_insn (gen_blockage ());
10248
10249 /* Set the new frame pointer. */
10250
10251 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10252 stack_pointer_rtx, GEN_INT (64))));
10253
10254 }
10255 else
10256 {
10257 /* Increment the frame pointer register to indicate that we do not
10258 have a frame. */
10259
10260 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
bcd9bd66 10261 hard_frame_pointer_rtx, const1_rtx)));
9caef960 10262 }
10263}
10264
9caef960 10265/* Output the static subroutine information block for the current
10266 function. */
10267
10268static void
92643d95 10269unicosmk_output_ssib (FILE *file, const char *fnname)
9caef960 10270{
10271 int len;
10272 int i;
10273 rtx x;
10274 rtx ciw;
10275 struct machine_function *machine = cfun->machine;
10276
2f14b1f9 10277 in_section = NULL;
9caef960 10278 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10279 unicosmk_ssib_name ());
10280
10281 /* Some required stuff and the function name length. */
10282
10283 len = strlen (fnname);
10284 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10285
10286 /* Saved registers
10287 ??? We don't do that yet. */
10288
10289 fputs ("\t.quad\t0\n", file);
10290
10291 /* Function address. */
10292
10293 fputs ("\t.quad\t", file);
10294 assemble_name (file, fnname);
10295 putc ('\n', file);
10296
10297 fputs ("\t.quad\t0\n", file);
10298 fputs ("\t.quad\t0\n", file);
10299
10300 /* Function name.
10301 ??? We do it the same way Cray CC does it but this could be
10302 simplified. */
10303
10304 for( i = 0; i < len; i++ )
10305 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10306 if( (len % 8) == 0 )
10307 fputs ("\t.quad\t0\n", file);
10308 else
10309 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10310
10311 /* All call information words used in the function. */
10312
10313 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10314 {
10315 ciw = XEXP (x, 0);
9caef960 10316#if HOST_BITS_PER_WIDE_INT == 32
4840a03a 10317 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
9caef960 10318 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10319#else
4840a03a 10320 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
9caef960 10321#endif
9caef960 10322 }
10323}
10324
10325/* Add a call information word (CIW) to the list of the current function's
10326 CIWs and return its index.
10327
10328 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10329
10330rtx
92643d95 10331unicosmk_add_call_info_word (rtx x)
9caef960 10332{
10333 rtx node;
10334 struct machine_function *machine = cfun->machine;
10335
10336 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10337 if (machine->first_ciw == NULL_RTX)
10338 machine->first_ciw = node;
10339 else
10340 XEXP (machine->last_ciw, 1) = node;
10341
10342 machine->last_ciw = node;
10343 ++machine->ciw_count;
10344
10345 return GEN_INT (machine->ciw_count
35901471 10346 + strlen (current_function_name ())/8 + 5);
9caef960 10347}
10348
9caef960 10349/* The Cray assembler doesn't accept extern declarations for symbols which
10350 are defined in the same file. We have to keep track of all global
10351 symbols which are referenced and/or defined in a source file and output
10352 extern declarations for those which are referenced but not defined at
10353 the end of file. */
10354
10355/* List of identifiers for which an extern declaration might have to be
10356 emitted. */
9de382d9 10357/* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9caef960 10358
10359struct unicosmk_extern_list
10360{
10361 struct unicosmk_extern_list *next;
10362 const char *name;
10363};
10364
10365static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10366
10367/* Output extern declarations which are required for every asm file. */
10368
10369static void
92643d95 10370unicosmk_output_default_externs (FILE *file)
9caef960 10371{
0d95286f 10372 static const char *const externs[] =
9caef960 10373 { "__T3E_MISMATCH" };
10374
10375 int i;
10376 int n;
10377
10378 n = ARRAY_SIZE (externs);
10379
10380 for (i = 0; i < n; i++)
10381 fprintf (file, "\t.extern\t%s\n", externs[i]);
10382}
10383
10384/* Output extern declarations for global symbols which are have been
10385 referenced but not defined. */
10386
10387static void
92643d95 10388unicosmk_output_externs (FILE *file)
9caef960 10389{
10390 struct unicosmk_extern_list *p;
10391 const char *real_name;
10392 int len;
10393 tree name_tree;
10394
10395 len = strlen (user_label_prefix);
10396 for (p = unicosmk_extern_head; p != 0; p = p->next)
10397 {
9e7454d0 10398 /* We have to strip the encoding and possibly remove user_label_prefix
9caef960 10399 from the identifier in order to handle -fleading-underscore and
10400 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
09a1f342 10401 real_name = default_strip_name_encoding (p->name);
9caef960 10402 if (len && p->name[0] == '*'
10403 && !memcmp (real_name, user_label_prefix, len))
10404 real_name += len;
9e7454d0 10405
9caef960 10406 name_tree = get_identifier (real_name);
10407 if (! TREE_ASM_WRITTEN (name_tree))
10408 {
10409 TREE_ASM_WRITTEN (name_tree) = 1;
10410 fputs ("\t.extern\t", file);
10411 assemble_name (file, p->name);
10412 putc ('\n', file);
10413 }
10414 }
10415}
9e7454d0 10416
9caef960 10417/* Record an extern. */
10418
10419void
92643d95 10420unicosmk_add_extern (const char *name)
9caef960 10421{
10422 struct unicosmk_extern_list *p;
10423
10424 p = (struct unicosmk_extern_list *)
92192583 10425 xmalloc (sizeof (struct unicosmk_extern_list));
9caef960 10426 p->next = unicosmk_extern_head;
10427 p->name = name;
10428 unicosmk_extern_head = p;
10429}
10430
10431/* The Cray assembler generates incorrect code if identifiers which
10432 conflict with register names are used as instruction operands. We have
10433 to replace such identifiers with DEX expressions. */
10434
10435/* Structure to collect identifiers which have been replaced by DEX
10436 expressions. */
9de382d9 10437/* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9caef960 10438
10439struct unicosmk_dex {
10440 struct unicosmk_dex *next;
10441 const char *name;
10442};
10443
9e7454d0 10444/* List of identifiers which have been replaced by DEX expressions. The DEX
9caef960 10445 number is determined by the position in the list. */
10446
9e7454d0 10447static struct unicosmk_dex *unicosmk_dex_list = NULL;
9caef960 10448
10449/* The number of elements in the DEX list. */
10450
10451static int unicosmk_dex_count = 0;
10452
10453/* Check if NAME must be replaced by a DEX expression. */
10454
10455static int
92643d95 10456unicosmk_special_name (const char *name)
9caef960 10457{
10458 if (name[0] == '*')
10459 ++name;
10460
10461 if (name[0] == '$')
10462 ++name;
10463
10464 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10465 return 0;
10466
10467 switch (name[1])
10468 {
10469 case '1': case '2':
10470 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10471
10472 case '3':
10473 return (name[2] == '\0'
10474 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10475
10476 default:
10477 return (ISDIGIT (name[1]) && name[2] == '\0');
10478 }
10479}
10480
10481/* Return the DEX number if X must be replaced by a DEX expression and 0
10482 otherwise. */
10483
10484static int
92643d95 10485unicosmk_need_dex (rtx x)
9caef960 10486{
10487 struct unicosmk_dex *dex;
10488 const char *name;
10489 int i;
9e7454d0 10490
9caef960 10491 if (GET_CODE (x) != SYMBOL_REF)
10492 return 0;
10493
10494 name = XSTR (x,0);
10495 if (! unicosmk_special_name (name))
10496 return 0;
10497
10498 i = unicosmk_dex_count;
10499 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10500 {
10501 if (! strcmp (name, dex->name))
10502 return i;
10503 --i;
10504 }
9e7454d0 10505
92192583 10506 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9caef960 10507 dex->name = name;
10508 dex->next = unicosmk_dex_list;
10509 unicosmk_dex_list = dex;
10510
10511 ++unicosmk_dex_count;
10512 return unicosmk_dex_count;
10513}
10514
10515/* Output the DEX definitions for this file. */
10516
10517static void
92643d95 10518unicosmk_output_dex (FILE *file)
9caef960 10519{
10520 struct unicosmk_dex *dex;
10521 int i;
10522
10523 if (unicosmk_dex_list == NULL)
10524 return;
10525
10526 fprintf (file, "\t.dexstart\n");
10527
10528 i = unicosmk_dex_count;
10529 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10530 {
10531 fprintf (file, "\tDEX (%d) = ", i);
10532 assemble_name (file, dex->name);
10533 putc ('\n', file);
10534 --i;
10535 }
9e7454d0 10536
9caef960 10537 fprintf (file, "\t.dexend\n");
10538}
10539
92643d95 10540/* Output text that to appear at the beginning of an assembler file. */
10541
9e7454d0 10542static void
92c473b8 10543unicosmk_file_start (void)
92643d95 10544{
10545 int i;
10546
92c473b8 10547 fputs ("\t.ident\t", asm_out_file);
10548 unicosmk_output_module_name (asm_out_file);
10549 fputs ("\n\n", asm_out_file);
92643d95 10550
10551 /* The Unicos/Mk assembler uses different register names. Instead of trying
10552 to support them, we simply use micro definitions. */
10553
10554 /* CAM has different register names: rN for the integer register N and fN
10555 for the floating-point register N. Instead of trying to use these in
10556 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10557 register. */
10558
10559 for (i = 0; i < 32; ++i)
92c473b8 10560 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
92643d95 10561
10562 for (i = 0; i < 32; ++i)
92c473b8 10563 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
92643d95 10564
92c473b8 10565 putc ('\n', asm_out_file);
92643d95 10566
10567 /* The .align directive fill unused space with zeroes which does not work
10568 in code sections. We define the macro 'gcc@code@align' which uses nops
10569 instead. Note that it assumes that code sections always have the
10570 biggest possible alignment since . refers to the current offset from
10571 the beginning of the section. */
10572
92c473b8 10573 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10574 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10575 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10576 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10577 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10578 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10579 fputs ("\t.endr\n", asm_out_file);
10580 fputs ("\t.endif\n", asm_out_file);
10581 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
92643d95 10582
10583 /* Output extern declarations which should always be visible. */
92c473b8 10584 unicosmk_output_default_externs (asm_out_file);
92643d95 10585
10586 /* Open a dummy section. We always need to be inside a section for the
10587 section-switching code to work correctly.
10588 ??? This should be a module id or something like that. I still have to
10589 figure out what the rules for those are. */
92c473b8 10590 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
92643d95 10591}
10592
10593/* Output text to appear at the end of an assembler file. This includes all
10594 pending extern declarations and DEX expressions. */
10595
10596static void
10597unicosmk_file_end (void)
10598{
10599 fputs ("\t.endp\n\n", asm_out_file);
10600
10601 /* Output all pending externs. */
10602
10603 unicosmk_output_externs (asm_out_file);
10604
9e7454d0 10605 /* Output dex definitions used for functions whose names conflict with
92643d95 10606 register names. */
10607
10608 unicosmk_output_dex (asm_out_file);
10609
10610 fputs ("\t.end\t", asm_out_file);
10611 unicosmk_output_module_name (asm_out_file);
10612 putc ('\n', asm_out_file);
10613}
10614
9caef960 10615#else
10616
10617static void
92643d95 10618unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
9caef960 10619{}
10620
10621static void
92643d95 10622unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
9caef960 10623{}
10624
10625static void
92643d95 10626unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10627 const char * fnname ATTRIBUTE_UNUSED)
9caef960 10628{}
10629
10630rtx
92643d95 10631unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
9caef960 10632{
10633 return NULL_RTX;
10634}
10635
10636static int
92643d95 10637unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
9caef960 10638{
10639 return 0;
10640}
10641
10642#endif /* TARGET_ABI_UNICOSMK */
1f3233d1 10643
f2f543a3 10644static void
10645alpha_init_libfuncs (void)
10646{
10647 if (TARGET_ABI_UNICOSMK)
10648 {
10649 /* Prevent gcc from generating calls to __divsi3. */
10650 set_optab_libfunc (sdiv_optab, SImode, 0);
10651 set_optab_libfunc (udiv_optab, SImode, 0);
10652
10653 /* Use the functions provided by the system library
10654 for DImode integer division. */
10655 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10656 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10657 }
10658 else if (TARGET_ABI_OPEN_VMS)
10659 {
10660 /* Use the VMS runtime library functions for division and
10661 remainder. */
10662 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10663 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10664 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10665 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10666 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10667 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10668 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10669 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10670 }
10671}
10672
92643d95 10673\f
10674/* Initialize the GCC target structure. */
10675#if TARGET_ABI_OPEN_VMS
10676# undef TARGET_ATTRIBUTE_TABLE
10677# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10678# undef TARGET_SECTION_TYPE_FLAGS
10679# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10680#endif
10681
10682#undef TARGET_IN_SMALL_DATA_P
10683#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10684
10685#if TARGET_ABI_UNICOSMK
10686# undef TARGET_INSERT_ATTRIBUTES
10687# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10688# undef TARGET_SECTION_TYPE_FLAGS
10689# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10690# undef TARGET_ASM_UNIQUE_SECTION
10691# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
76aec42f 10692#undef TARGET_ASM_FUNCTION_RODATA_SECTION
10693#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
92643d95 10694# undef TARGET_ASM_GLOBALIZE_LABEL
10695# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
0336f0f0 10696# undef TARGET_MUST_PASS_IN_STACK
10697# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
92643d95 10698#endif
10699
10700#undef TARGET_ASM_ALIGNED_HI_OP
10701#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10702#undef TARGET_ASM_ALIGNED_DI_OP
10703#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10704
10705/* Default unaligned ops are provided for ELF systems. To get unaligned
10706 data for non-ELF systems, we have to turn off auto alignment. */
10707#ifndef OBJECT_FORMAT_ELF
10708#undef TARGET_ASM_UNALIGNED_HI_OP
10709#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10710#undef TARGET_ASM_UNALIGNED_SI_OP
10711#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10712#undef TARGET_ASM_UNALIGNED_DI_OP
10713#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10714#endif
10715
10716#ifdef OBJECT_FORMAT_ELF
4e151b05 10717#undef TARGET_ASM_RELOC_RW_MASK
10718#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
92643d95 10719#undef TARGET_ASM_SELECT_RTX_SECTION
10720#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
cc2af183 10721#undef TARGET_SECTION_TYPE_FLAGS
10722#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
92643d95 10723#endif
10724
10725#undef TARGET_ASM_FUNCTION_END_PROLOGUE
10726#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10727
f2f543a3 10728#undef TARGET_INIT_LIBFUNCS
10729#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10730
41e3a0c7 10731#undef TARGET_LEGITIMIZE_ADDRESS
10732#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
10733
92c473b8 10734#if TARGET_ABI_UNICOSMK
10735#undef TARGET_ASM_FILE_START
10736#define TARGET_ASM_FILE_START unicosmk_file_start
10737#undef TARGET_ASM_FILE_END
10738#define TARGET_ASM_FILE_END unicosmk_file_end
10739#else
10740#undef TARGET_ASM_FILE_START
10741#define TARGET_ASM_FILE_START alpha_file_start
10742#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10743#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10744#endif
10745
92643d95 10746#undef TARGET_SCHED_ADJUST_COST
10747#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10748#undef TARGET_SCHED_ISSUE_RATE
10749#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
92643d95 10750#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10751#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10752 alpha_multipass_dfa_lookahead
10753
10754#undef TARGET_HAVE_TLS
10755#define TARGET_HAVE_TLS HAVE_AS_TLS
10756
10757#undef TARGET_INIT_BUILTINS
10758#define TARGET_INIT_BUILTINS alpha_init_builtins
10759#undef TARGET_EXPAND_BUILTIN
10760#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
849c7bc6 10761#undef TARGET_FOLD_BUILTIN
10762#define TARGET_FOLD_BUILTIN alpha_fold_builtin
92643d95 10763
10764#undef TARGET_FUNCTION_OK_FOR_SIBCALL
10765#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10766#undef TARGET_CANNOT_COPY_INSN_P
10767#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
c0da4391 10768#undef TARGET_CANNOT_FORCE_CONST_MEM
10769#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
92643d95 10770
10771#if TARGET_ABI_OSF
10772#undef TARGET_ASM_OUTPUT_MI_THUNK
10773#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10774#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
a9f1838b 10775#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
7955d282 10776#undef TARGET_STDARG_OPTIMIZE_HOOK
10777#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
92643d95 10778#endif
10779
10780#undef TARGET_RTX_COSTS
10781#define TARGET_RTX_COSTS alpha_rtx_costs
10782#undef TARGET_ADDRESS_COST
f529eb25 10783#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
92643d95 10784
10785#undef TARGET_MACHINE_DEPENDENT_REORG
10786#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10787
dd9f3024 10788#undef TARGET_PROMOTE_FUNCTION_ARGS
fb80456a 10789#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
dd9f3024 10790#undef TARGET_PROMOTE_FUNCTION_RETURN
fb80456a 10791#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
dd9f3024 10792#undef TARGET_PROMOTE_PROTOTYPES
fb80456a 10793#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
dd9f3024 10794#undef TARGET_RETURN_IN_MEMORY
10795#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
b981d932 10796#undef TARGET_PASS_BY_REFERENCE
10797#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
dd9f3024 10798#undef TARGET_SETUP_INCOMING_VARARGS
10799#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10800#undef TARGET_STRICT_ARGUMENT_NAMING
10801#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10802#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10803#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
92d40bc4 10804#undef TARGET_SPLIT_COMPLEX_ARG
10805#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
de8f9b94 10806#undef TARGET_GIMPLIFY_VA_ARG_EXPR
10807#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
f054eb3c 10808#undef TARGET_ARG_PARTIAL_BYTES
10809#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
b2aef146 10810
0d96cd2b 10811#undef TARGET_SECONDARY_RELOAD
10812#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10813
b2aef146 10814#undef TARGET_SCALAR_MODE_SUPPORTED_P
10815#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
9e7454d0 10816#undef TARGET_VECTOR_MODE_SUPPORTED_P
10817#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
dd9f3024 10818
2e15d750 10819#undef TARGET_BUILD_BUILTIN_VA_LIST
10820#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10821
8a58ed0a 10822#undef TARGET_EXPAND_BUILTIN_VA_START
10823#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10824
2ca3d426 10825/* The Alpha architecture does not require sequential consistency. See
10826 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10827 for an example of how it can be violated in practice. */
10828#undef TARGET_RELAXED_ORDERING
10829#define TARGET_RELAXED_ORDERING true
10830
fb64edde 10831#undef TARGET_DEFAULT_TARGET_FLAGS
10832#define TARGET_DEFAULT_TARGET_FLAGS \
10833 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10834#undef TARGET_HANDLE_OPTION
10835#define TARGET_HANDLE_OPTION alpha_handle_option
10836
4257b08a 10837#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
eddcdde1 10838#undef TARGET_MANGLE_TYPE
10839#define TARGET_MANGLE_TYPE alpha_mangle_type
4257b08a 10840#endif
10841
fd50b071 10842#undef TARGET_LEGITIMATE_ADDRESS_P
10843#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10844
92643d95 10845struct gcc_target targetm = TARGET_INITIALIZER;
10846
10847\f
1f3233d1 10848#include "gt-alpha.h"