[thirdparty/gcc.git] / gcc / config / alpha / alpha.c
bf2a98b3 1/* Subroutines used for code generation on the DEC Alpha.
b657e73a 2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
0e0a0e7a 4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
bf2a98b3 5
187b36cf 6This file is part of GCC.
bf2a98b3 7
187b36cf 8GCC is free software; you can redistribute it and/or modify
bf2a98b3 9it under the terms of the GNU General Public License as published by
038d1e19 10the Free Software Foundation; either version 3, or (at your option)
bf2a98b3 11any later version.
12
187b36cf 13GCC is distributed in the hope that it will be useful,
bf2a98b3 14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
038d1e19 19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
bf2a98b3 21
22
bf2a98b3 23#include "config.h"
769ea120 24#include "system.h"
805e22b2 25#include "coretypes.h"
26#include "tm.h"
bf2a98b3 27#include "rtl.h"
d8fc4d0b 28#include "tree.h"
bf2a98b3 29#include "regs.h"
30#include "hard-reg-set.h"
31#include "real.h"
32#include "insn-config.h"
33#include "conditions.h"
bf2a98b3 34#include "output.h"
35#include "insn-attr.h"
36#include "flags.h"
37#include "recog.h"
bf2a98b3 38#include "expr.h"
d8fc4d0b 39#include "optabs.h"
40#include "reload.h"
bf2a98b3 41#include "obstack.h"
0c0464e6 42#include "except.h"
43#include "function.h"
769ea120 44#include "toplev.h"
11016d99 45#include "ggc.h"
0f37b7a2 46#include "integrate.h"
0d50f0b7 47#include "tm_p.h"
a767736d 48#include "target.h"
49#include "target-def.h"
961d6ddd 50#include "debug.h"
a1f71e15 51#include "langhooks.h"
573aba85 52#include <splay-tree.h>
356907d3 53#include "cfglayout.h"
de8f9b94 54#include "tree-gimple.h"
a6c787e5 55#include "tree-flow.h"
56#include "tree-stdarg.h"
1dffd068 57#include "tm-constrs.h"
3072d30e 58#include "df.h"
0c0464e6 59
65abff06 60/* Specify which cpu to schedule for. */
fb64edde 61enum processor_type alpha_tune;
0c0464e6 62
fb64edde 63/* Which cpu we're generating code for. */
f141a8b4 64enum processor_type alpha_cpu;
fb64edde 65
9e7454d0 66static const char * const alpha_cpu_name[] =
07c1a295 67{
68 "ev4", "ev5", "ev6"
69};
0c5845b3 70
c4622276 71/* Specify how accurate floating-point traps need to be. */
72
73enum alpha_trap_precision alpha_tp;
74
75/* Specify the floating-point rounding mode. */
76
77enum alpha_fp_rounding_mode alpha_fprm;
78
79/* Specify which things cause traps. */
80
81enum alpha_fp_trap_mode alpha_fptm;
82
bf2a98b3 83/* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
85
b18b881f 86struct alpha_compare alpha_compare;
bf2a98b3 87
e3e08e7f 88/* Nonzero while inside a function, because the Alpha assembler can't
449b7f2d 89 handle .file directives inside functions. */
90
91static int inside_function = FALSE;
92
07c1a295 93/* The number of cycles of latency we should assume on memory reads. */
94
95int alpha_memory_latency = 3;
96
b9a5aa8e 97/* Whether the function needs the GP. */
98
99static int alpha_function_needs_gp;
100
849674a3 101/* The alias set for prologue/epilogue register save/restore. */
102
32c2fdea 103static GTY(()) alias_set_type alpha_sr_alias_set;
849674a3 104
a314eb5e 105/* The assembler name of the current function. */
106
107static const char *alpha_fnname;
108
1f0ce6a6 109/* The next explicit relocation sequence number. */
9de382d9 110extern GTY(()) int alpha_next_sequence_number;
1f0ce6a6 111int alpha_next_sequence_number = 1;
112
113/* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
9de382d9 115extern GTY(()) int alpha_this_literal_sequence_number;
116extern GTY(()) int alpha_this_gpdisp_sequence_number;
1f0ce6a6 117int alpha_this_literal_sequence_number;
118int alpha_this_gpdisp_sequence_number;
119
fab7adbf 120/* Costs of various operations on the different architectures. */
121
122struct alpha_rtx_cost_data
123{
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
d7cf2331 132 unsigned short int_div;
fab7adbf 133};
134
135static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136{
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
f6777b0a 146 COSTS_N_INSNS (97), /* int_div */
fab7adbf 147 },
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
f6777b0a 157 COSTS_N_INSNS (83), /* int_div */
fab7adbf 158 },
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
f6777b0a 168 COSTS_N_INSNS (86), /* int_div */
fab7adbf 169 },
170};
171
d7cf2331 172/* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
176
177static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178{
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
188};
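189
/* Note on the fractional entries above (illustrative, not part of the
   original file): COSTS_N_INSNS (N) expands to N * 4, so "+ 1" and
   "+ 2" bias a cost by a quarter or half of an insn.  That is enough
   to make one multiply lose to a single cheaper insn while still
   beating a two-insn replacement sequence.  */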
189
0dbd1c74 190/* Get the number of args of a function in one of two ways. */
9caef960 191#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
0dbd1c74 192#define NUM_ARGS current_function_args_info.num_args
193#else
194#define NUM_ARGS current_function_args_info
195#endif
d2832bd8 196
d2832bd8 197#define REG_PV 27
198#define REG_RA 26
f2cc13dc 199
92643d95 200/* Declarations of static functions. */
201static struct machine_function *alpha_init_machine_status (void);
8c3428a6 202static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
805e22b2 203
92643d95 204#if TARGET_ABI_OPEN_VMS
205static void alpha_write_linkage (FILE *, const char *, tree);
6988553d 206#endif
207
92643d95 208static void unicosmk_output_deferred_case_vectors (FILE *);
209static void unicosmk_gen_dsib (unsigned long *);
210static void unicosmk_output_ssib (FILE *, const char *);
211static int unicosmk_need_dex (rtx);
a767736d 212\f
fb64edde 213/* Implement TARGET_HANDLE_OPTION. */
214
215static bool
216alpha_handle_option (size_t code, const char *arg, int value)
217{
218 switch (code)
219 {
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
224
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
229
fb64edde 230 case OPT_mtls_size_:
0fe44c73 231 if (value != 16 && value != 32 && value != 64)
fb64edde 232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
234 }
235
236 return true;
237}
238
4257b08a 239#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
eddcdde1 240/* Implement TARGET_MANGLE_TYPE. */
4257b08a 241
242static const char *
eddcdde1 243alpha_mangle_type (tree type)
4257b08a 244{
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
248
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
251}
252#endif
253
65abff06 254/* Parse target option strings. */
c4622276 255
256void
92643d95 257override_options (void)
c4622276 258{
e99c3a1d 259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
27de1488 263 } cpu_table[] = {
27de1488 264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
fb64edde 267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
27de1488 278 { 0, 0, 0 }
279 };
9e7454d0 280
fb64edde 281 int i;
282
9caef960 283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
285 {
c3ceba8e 286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
9caef960 287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
289 }
290
9e7454d0 291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
9caef960 292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
297
c4622276 298 alpha_tp = ALPHA_TP_PROG;
c4622276 299 alpha_fptm = ALPHA_FPTM_N;
300
9e7454d0 301 /* We cannot use su and sui qualifiers for conversion instructions on
9caef960 302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
306
c4622276 307 if (TARGET_IEEE)
308 {
9caef960 309 if (TARGET_ABI_UNICOSMK)
c3ceba8e 310 warning (0, "-mieee not supported on Unicos/Mk");
9caef960 311 else
312 {
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
315 }
c4622276 316 }
317
318 if (TARGET_IEEE_WITH_INEXACT)
319 {
9caef960 320 if (TARGET_ABI_UNICOSMK)
c3ceba8e 321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
9caef960 322 else
323 {
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
326 }
c4622276 327 }
328
329 if (alpha_tp_string)
264f7d8c 330 {
331 if (! strcmp (alpha_tp_string, "p"))
c4622276 332 alpha_tp = ALPHA_TP_PROG;
264f7d8c 333 else if (! strcmp (alpha_tp_string, "f"))
c4622276 334 alpha_tp = ALPHA_TP_FUNC;
264f7d8c 335 else if (! strcmp (alpha_tp_string, "i"))
c4622276 336 alpha_tp = ALPHA_TP_INSN;
264f7d8c 337 else
1e5fcbe2 338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
264f7d8c 339 }
c4622276 340
341 if (alpha_fprm_string)
264f7d8c 342 {
343 if (! strcmp (alpha_fprm_string, "n"))
c4622276 344 alpha_fprm = ALPHA_FPRM_NORM;
264f7d8c 345 else if (! strcmp (alpha_fprm_string, "m"))
c4622276 346 alpha_fprm = ALPHA_FPRM_MINF;
264f7d8c 347 else if (! strcmp (alpha_fprm_string, "c"))
c4622276 348 alpha_fprm = ALPHA_FPRM_CHOP;
264f7d8c 349 else if (! strcmp (alpha_fprm_string,"d"))
c4622276 350 alpha_fprm = ALPHA_FPRM_DYN;
264f7d8c 351 else
1e5fcbe2 352 error ("bad value %qs for -mfp-rounding-mode switch",
c4622276 353 alpha_fprm_string);
264f7d8c 354 }
c4622276 355
356 if (alpha_fptm_string)
264f7d8c 357 {
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
1e5fcbe2 367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
264f7d8c 368 }
c4622276 369
cbd8ec27 370 if (alpha_cpu_string)
371 {
27de1488 372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
374 {
fb64edde 375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
27de1488 377 target_flags |= cpu_table [i].flags;
378 break;
379 }
380 if (! cpu_table [i].name)
1e5fcbe2 381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
cbd8ec27 382 }
383
27de1488 384 if (alpha_tune_string)
385 {
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
388 {
fb64edde 389 alpha_tune = cpu_table [i].processor;
27de1488 390 break;
391 }
392 if (! cpu_table [i].name)
1e5fcbe2 393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
27de1488 394 }
395
65abff06 396 /* Do some sanity checks on the above options. */
c4622276 397
9caef960 398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
399 {
c3ceba8e 400 warning (0, "trap mode not supported on Unicos/Mk");
9caef960 401 alpha_fptm = ALPHA_FPTM_N;
402 }
403
264f7d8c 404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
fb64edde 405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
c4622276 406 {
c3ceba8e 407 warning (0, "fp software completion requires -mtrap-precision=i");
c4622276 408 alpha_tp = ALPHA_TP_INSN;
409 }
8df4a58b 410
fb64edde 411 if (alpha_cpu == PROCESSOR_EV6)
bc16f0c1 412 {
413 /* Except for EV6 pass 1 (not released), we always have precise
 414 arithmetic traps, which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
417 }
418
8df4a58b 419 if (TARGET_FLOAT_VAX)
420 {
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
422 {
c3ceba8e 423 warning (0, "rounding mode not supported for VAX floats");
8df4a58b 424 alpha_fprm = ALPHA_FPRM_NORM;
425 }
426 if (alpha_fptm == ALPHA_FPTM_SUI)
427 {
c3ceba8e 428 warning (0, "trap mode not supported for VAX floats");
8df4a58b 429 alpha_fptm = ALPHA_FPTM_SU;
430 }
ef76af46 431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
c3ceba8e 432 warning (0, "128-bit long double not supported for VAX floats");
ef76af46 433 target_flags &= ~MASK_LONG_DOUBLE_128;
8df4a58b 434 }
07c1a295 435
436 {
437 char *end;
438 int lat;
439
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
442
14184418 443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
07c1a295 444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
445 ;
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
14184418 447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
07c1a295 448 && alpha_mlat_string[2] == '\0')
449 {
9e7454d0 450 static int const cache_latency[][4] =
07c1a295 451 {
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
65abff06 454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
07c1a295 455 };
456
457 lat = alpha_mlat_string[1] - '0';
fb64edde 458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
07c1a295 459 {
c3ceba8e 460 warning (0, "L%d cache latency unknown for %s",
fb64edde 461 lat, alpha_cpu_name[alpha_tune]);
07c1a295 462 lat = 3;
463 }
464 else
fb64edde 465 lat = cache_latency[alpha_tune][lat-1];
07c1a295 466 }
467 else if (! strcmp (alpha_mlat_string, "main"))
468 {
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
472 }
473 else
474 {
c3ceba8e 475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
07c1a295 476 lat = 3;
477 }
478
479 alpha_memory_latency = lat;
480 }
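  /* Worked example of the latency mapping above (illustrative, not
     part of the original file): with -mtune=ev5, -mmemory-latency=L2
     selects cache_latency[PROCESSOR_EV5][2 - 1] == 12 cycles; "main"
     uses the fixed guess of 150 cycles; and a bare number such as
     -mmemory-latency=5 is used directly.  */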
a9fa9190 481
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
849674a3 485
5dcb037d 486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
491
0ea5169b 492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
65abff06 494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
0ea5169b 495 if (optimize > 0 && write_symbols != SDB_DEBUG)
496 {
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
501 }
502 if (align_functions <= 0)
503 align_functions = 16;
504
849674a3 505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
9caef960 507
508 /* Register variables and functions with the garbage collector. */
509
9caef960 510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
1268285a 512
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
515 {
0021bea9 516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
1268285a 519 }
2dde0cc6 520
521#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524#endif
c4622276 525}
526\f
bf2a98b3 527/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
528
529int
92643d95 530zap_mask (HOST_WIDE_INT value)
bf2a98b3 531{
532 int i;
533
534 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
535 i++, value >>= 8)
536 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
537 return 0;
538
539 return 1;
540}
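
/* Worked example (illustrative, not part of the original file):

     zap_mask (0xffff0000ffff00ff)  => 1   every byte is 0x00 or 0xff
     zap_mask (0x0000000000ff0f00)  => 0   0x0f is a partial byte

   The masks accepted here are exactly those the byte-granular ZAP and
   ZAPNOT instructions can describe.  */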
541
9e7454d0 542/* Return true if OP is valid for a particular TLS relocation.
41421c64 543 We are already guaranteed that OP is a CONST. */
bf2a98b3 544
545int
41421c64 546tls_symbolic_operand_1 (rtx op, int size, int unspec)
bf2a98b3 547{
5f7b9df8 548 op = XEXP (op, 0);
549
550 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
551 return 0;
552 op = XVECEXP (op, 0, 0);
553
554 if (GET_CODE (op) != SYMBOL_REF)
555 return 0;
5f7b9df8 556
09a1f342 557 switch (SYMBOL_REF_TLS_MODEL (op))
9bdcc1e5 558 {
09a1f342 559 case TLS_MODEL_LOCAL_DYNAMIC:
ea284d73 560 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
09a1f342 561 case TLS_MODEL_INITIAL_EXEC:
9bdcc1e5 562 return unspec == UNSPEC_TPREL && size == 64;
09a1f342 563 case TLS_MODEL_LOCAL_EXEC:
ea284d73 564 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
9bdcc1e5 565 default:
4d10b463 566 gcc_unreachable ();
9bdcc1e5 567 }
5f7b9df8 568}
569
41421c64 570/* Used by aligned_memory_operand and unaligned_memory_operand to
571 resolve what reload is going to do with OP if it's a register. */
bbf31a61 572
41421c64 573rtx
574resolve_reload_operand (rtx op)
bf2a98b3 575{
cc215844 576 if (reload_in_progress)
bf2a98b3 577 {
cc215844 578 rtx tmp = op;
579 if (GET_CODE (tmp) == SUBREG)
580 tmp = SUBREG_REG (tmp);
581 if (GET_CODE (tmp) == REG
582 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
583 {
584 op = reg_equiv_memory_loc[REGNO (tmp)];
585 if (op == 0)
586 return 0;
587 }
bf2a98b3 588 }
41421c64 589 return op;
14f7bc98 590}
591
b2aef146 592/* The set of scalar modes we support differs from the default
 593 check-what-c-supports version in that TFmode is sometimes available
 594 even when long double indicates only DFmode. On Unicos/Mk, HImode
 595 doesn't map to any C type, but of course we still support it. */
596
597static bool
598alpha_scalar_mode_supported_p (enum machine_mode mode)
599{
600 switch (mode)
601 {
602 case QImode:
603 case HImode:
604 case SImode:
605 case DImode:
606 case TImode: /* via optabs.c */
607 return true;
608
609 case SFmode:
610 case DFmode:
611 return true;
612
613 case TFmode:
614 return TARGET_HAS_XFLOATING_LIBS;
615
616 default:
617 return false;
618 }
619}
620
621/* Alpha implements a couple of integer vector mode operations when
b739144f 622 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
623 which allows the vectorizer to operate on e.g. move instructions,
624 or when expand_vector_operations can do something useful. */
b2aef146 625
9e7454d0 626static bool
627alpha_vector_mode_supported_p (enum machine_mode mode)
628{
b739144f 629 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
9e7454d0 630}
631
550e415f 632/* Return 1 if this function can directly return via $26. */
633
634int
92643d95 635direct_return (void)
550e415f 636{
9caef960 637 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
1467e953 638 && reload_completed
639 && alpha_sa_size () == 0
550e415f 640 && get_frame_size () == 0
641 && current_function_outgoing_args_size == 0
642 && current_function_pretend_args_size == 0);
643}
ecb98d40 644
645/* Return the ADDR_VEC associated with a tablejump insn. */
646
647rtx
92643d95 648alpha_tablejump_addr_vec (rtx insn)
ecb98d40 649{
650 rtx tmp;
651
652 tmp = JUMP_LABEL (insn);
653 if (!tmp)
654 return NULL_RTX;
655 tmp = NEXT_INSN (tmp);
656 if (!tmp)
657 return NULL_RTX;
658 if (GET_CODE (tmp) == JUMP_INSN
659 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
660 return PATTERN (tmp);
661 return NULL_RTX;
662}
663
664/* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
665
666rtx
92643d95 667alpha_tablejump_best_label (rtx insn)
ecb98d40 668{
669 rtx jump_table = alpha_tablejump_addr_vec (insn);
670 rtx best_label = NULL_RTX;
671
672 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
673 there for edge frequency counts from profile data. */
674
675 if (jump_table)
676 {
677 int n_labels = XVECLEN (jump_table, 1);
678 int best_count = -1;
679 int i, j;
680
681 for (i = 0; i < n_labels; i++)
682 {
683 int count = 1;
684
685 for (j = i + 1; j < n_labels; j++)
686 if (XEXP (XVECEXP (jump_table, 1, i), 0)
687 == XEXP (XVECEXP (jump_table, 1, j), 0))
688 count++;
689
690 if (count > best_count)
691 best_count = count, best_label = XVECEXP (jump_table, 1, i);
692 }
693 }
694
695 return best_label ? best_label : const0_rtx;
696}
5f7b9df8 697
698/* Return the TLS model to use for SYMBOL. */
699
700static enum tls_model
92643d95 701tls_symbolic_operand_type (rtx symbol)
5f7b9df8 702{
09a1f342 703 enum tls_model model;
5f7b9df8 704
705 if (GET_CODE (symbol) != SYMBOL_REF)
706 return 0;
09a1f342 707 model = SYMBOL_REF_TLS_MODEL (symbol);
5f7b9df8 708
09a1f342 709 /* Local-exec with a 64-bit size is the same code as initial-exec. */
710 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
711 model = TLS_MODEL_INITIAL_EXEC;
5f7b9df8 712
09a1f342 713 return model;
5f7b9df8 714}
14f7bc98 715\f
9bdcc1e5 716/* Return true if the function DECL will share the same GP as any
717 function in the current unit of translation. */
718
719static bool
92643d95 720decl_has_samegp (tree decl)
9bdcc1e5 721{
722 /* Functions that are not local can be overridden, and thus may
723 not share the same gp. */
724 if (!(*targetm.binds_local_p) (decl))
725 return false;
726
727 /* If -msmall-data is in effect, assume that there is only one GP
728 for the module, and so any local symbol has this property. We
729 need explicit relocations to be able to enforce this for symbols
730 not defined in this unit of translation, however. */
731 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
732 return true;
733
734 /* Functions that are not external are defined in this UoT. */
cf1d67e3 735 /* ??? Irritatingly, static functions not yet emitted are still
736 marked "external". Apply this to non-static functions only. */
737 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
9bdcc1e5 738}
739
52470889 740/* Return true if EXP should be placed in the small data section. */
741
742static bool
92643d95 743alpha_in_small_data_p (tree exp)
52470889 744{
0aad4cd2 745 /* We want to merge strings, so we never consider them small data. */
746 if (TREE_CODE (exp) == STRING_CST)
747 return false;
748
6ac09a46 749 /* Functions are never in the small data area. Duh. */
750 if (TREE_CODE (exp) == FUNCTION_DECL)
751 return false;
752
52470889 753 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
754 {
755 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
756 if (strcmp (section, ".sdata") == 0
757 || strcmp (section, ".sbss") == 0)
758 return true;
759 }
760 else
761 {
762 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
763
764 /* If this is an incomplete type with size 0, then we can't put it
765 in sdata because it might be too big when completed. */
3be2f219 766 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
52470889 767 return true;
768 }
769
770 return false;
771}
772
cf73d31f 773#if TARGET_ABI_OPEN_VMS
774static bool
92643d95 775alpha_linkage_symbol_p (const char *symname)
cf73d31f 776{
777 int symlen = strlen (symname);
778
779 if (symlen > 4)
780 return strcmp (&symname [symlen - 4], "..lk") == 0;
781
782 return false;
783}
784
785#define LINKAGE_SYMBOL_REF_P(X) \
786 ((GET_CODE (X) == SYMBOL_REF \
787 && alpha_linkage_symbol_p (XSTR (X, 0))) \
788 || (GET_CODE (X) == CONST \
789 && GET_CODE (XEXP (X, 0)) == PLUS \
790 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
791 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
792#endif
793
24b3c0ed 794/* legitimate_address_p recognizes an RTL expression that is a valid
795 memory address for an instruction. The MODE argument is the
796 machine mode for the MEM expression that wants to use this address.
797
798 For Alpha, we have either a constant address or the sum of a
799 register and a constant address, or just a register. For DImode,
 800 any of those forms can be surrounded with an AND that clears the
801 low-order three bits; this is an "unaligned" access. */
802
803bool
92643d95 804alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
24b3c0ed 805{
806 /* If this is an ldq_u type address, discard the outer AND. */
807 if (mode == DImode
808 && GET_CODE (x) == AND
809 && GET_CODE (XEXP (x, 1)) == CONST_INT
810 && INTVAL (XEXP (x, 1)) == -8)
811 x = XEXP (x, 0);
812
813 /* Discard non-paradoxical subregs. */
814 if (GET_CODE (x) == SUBREG
815 && (GET_MODE_SIZE (GET_MODE (x))
816 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
817 x = SUBREG_REG (x);
818
819 /* Unadorned general registers are valid. */
820 if (REG_P (x)
821 && (strict
822 ? STRICT_REG_OK_FOR_BASE_P (x)
823 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
824 return true;
825
826 /* Constant addresses (i.e. +/- 32k) are valid. */
827 if (CONSTANT_ADDRESS_P (x))
828 return true;
829
cf73d31f 830#if TARGET_ABI_OPEN_VMS
831 if (LINKAGE_SYMBOL_REF_P (x))
832 return true;
833#endif
834
24b3c0ed 835 /* Register plus a small constant offset is valid. */
836 if (GET_CODE (x) == PLUS)
837 {
838 rtx ofs = XEXP (x, 1);
839 x = XEXP (x, 0);
840
841 /* Discard non-paradoxical subregs. */
842 if (GET_CODE (x) == SUBREG
843 && (GET_MODE_SIZE (GET_MODE (x))
844 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
845 x = SUBREG_REG (x);
846
847 if (REG_P (x))
848 {
849 if (! strict
850 && NONSTRICT_REG_OK_FP_BASE_P (x)
851 && GET_CODE (ofs) == CONST_INT)
852 return true;
853 if ((strict
854 ? STRICT_REG_OK_FOR_BASE_P (x)
855 : NONSTRICT_REG_OK_FOR_BASE_P (x))
856 && CONSTANT_ADDRESS_P (ofs))
857 return true;
858 }
24b3c0ed 859 }
860
f5a60074 861 /* If we're managing explicit relocations, LO_SUM is valid, as
862 are small data symbols. */
863 else if (TARGET_EXPLICIT_RELOCS)
1f0ce6a6 864 {
f5a60074 865 if (small_symbolic_operand (x, Pmode))
1f0ce6a6 866 return true;
f5a60074 867
868 if (GET_CODE (x) == LO_SUM)
869 {
870 rtx ofs = XEXP (x, 1);
871 x = XEXP (x, 0);
872
873 /* Discard non-paradoxical subregs. */
874 if (GET_CODE (x) == SUBREG
875 && (GET_MODE_SIZE (GET_MODE (x))
876 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
877 x = SUBREG_REG (x);
878
879 /* Must have a valid base register. */
880 if (! (REG_P (x)
881 && (strict
882 ? STRICT_REG_OK_FOR_BASE_P (x)
883 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
884 return false;
885
886 /* The symbol must be local. */
5f7b9df8 887 if (local_symbolic_operand (ofs, Pmode)
888 || dtp32_symbolic_operand (ofs, Pmode)
889 || tp32_symbolic_operand (ofs, Pmode))
f5a60074 890 return true;
891 }
1f0ce6a6 892 }
893
24b3c0ed 894 return false;
895}
896
09a1f342 897/* Build the SYMBOL_REF for __tls_get_addr. */
898
899static GTY(()) rtx tls_get_addr_libfunc;
900
901static rtx
92643d95 902get_tls_get_addr (void)
09a1f342 903{
904 if (!tls_get_addr_libfunc)
905 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
906 return tls_get_addr_libfunc;
907}
908
0d50f0b7 909/* Try machine-dependent ways of modifying an illegitimate address
910 to be legitimate. If we find one, return the new, valid address. */
911
912rtx
92643d95 913alpha_legitimize_address (rtx x, rtx scratch,
914 enum machine_mode mode ATTRIBUTE_UNUSED)
0d50f0b7 915{
916 HOST_WIDE_INT addend;
917
918 /* If the address is (plus reg const_int) and the CONST_INT is not a
919 valid offset, compute the high part of the constant and add it to
920 the register. Then our address is (plus temp low-part-const). */
921 if (GET_CODE (x) == PLUS
922 && GET_CODE (XEXP (x, 0)) == REG
923 && GET_CODE (XEXP (x, 1)) == CONST_INT
924 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
925 {
926 addend = INTVAL (XEXP (x, 1));
927 x = XEXP (x, 0);
928 goto split_addend;
929 }
930
931 /* If the address is (const (plus FOO const_int)), find the low-order
932 part of the CONST_INT. Then load FOO plus any high-order part of the
933 CONST_INT into a register. Our address is (plus reg low-part-const).
934 This is done to reduce the number of GOT entries. */
e1ba4a27 935 if (can_create_pseudo_p ()
f5a60074 936 && GET_CODE (x) == CONST
0d50f0b7 937 && GET_CODE (XEXP (x, 0)) == PLUS
938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
939 {
940 addend = INTVAL (XEXP (XEXP (x, 0), 1));
941 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
942 goto split_addend;
943 }
944
945 /* If we have a (plus reg const), emit the load as in (2), then add
946 the two registers, and finally generate (plus reg low-part-const) as
947 our address. */
e1ba4a27 948 if (can_create_pseudo_p ()
f5a60074 949 && GET_CODE (x) == PLUS
0d50f0b7 950 && GET_CODE (XEXP (x, 0)) == REG
951 && GET_CODE (XEXP (x, 1)) == CONST
952 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
953 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
954 {
955 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
956 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
957 XEXP (XEXP (XEXP (x, 1), 0), 0),
958 NULL_RTX, 1, OPTAB_LIB_WIDEN);
959 goto split_addend;
960 }
961
1f0ce6a6 962 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
8afb6db4 963 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1f0ce6a6 964 {
5f7b9df8 965 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
966
967 switch (tls_symbolic_operand_type (x))
968 {
dda53cd5 969 case TLS_MODEL_NONE:
970 break;
971
5f7b9df8 972 case TLS_MODEL_GLOBAL_DYNAMIC:
973 start_sequence ();
974
975 r0 = gen_rtx_REG (Pmode, 0);
976 r16 = gen_rtx_REG (Pmode, 16);
09a1f342 977 tga = get_tls_get_addr ();
5f7b9df8 978 dest = gen_reg_rtx (Pmode);
979 seq = GEN_INT (alpha_next_sequence_number++);
9e7454d0 980
5f7b9df8 981 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
982 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
983 insn = emit_call_insn (insn);
984 CONST_OR_PURE_CALL_P (insn) = 1;
985 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
986
987 insn = get_insns ();
988 end_sequence ();
989
990 emit_libcall_block (insn, dest, r0, x);
991 return dest;
992
993 case TLS_MODEL_LOCAL_DYNAMIC:
994 start_sequence ();
995
996 r0 = gen_rtx_REG (Pmode, 0);
997 r16 = gen_rtx_REG (Pmode, 16);
09a1f342 998 tga = get_tls_get_addr ();
5f7b9df8 999 scratch = gen_reg_rtx (Pmode);
1000 seq = GEN_INT (alpha_next_sequence_number++);
1001
1002 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1003 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1004 insn = emit_call_insn (insn);
1005 CONST_OR_PURE_CALL_P (insn) = 1;
1006 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1007
1008 insn = get_insns ();
1009 end_sequence ();
1010
1011 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1012 UNSPEC_TLSLDM_CALL);
1013 emit_libcall_block (insn, scratch, r0, eqv);
1014
1015 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1016 eqv = gen_rtx_CONST (Pmode, eqv);
1017
1018 if (alpha_tls_size == 64)
1019 {
1020 dest = gen_reg_rtx (Pmode);
1021 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1022 emit_insn (gen_adddi3 (dest, dest, scratch));
1023 return dest;
1024 }
1025 if (alpha_tls_size == 32)
1026 {
1027 insn = gen_rtx_HIGH (Pmode, eqv);
1028 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1029 scratch = gen_reg_rtx (Pmode);
1030 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1031 }
1032 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1033
1034 case TLS_MODEL_INITIAL_EXEC:
1035 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1036 eqv = gen_rtx_CONST (Pmode, eqv);
1037 tp = gen_reg_rtx (Pmode);
1038 scratch = gen_reg_rtx (Pmode);
1039 dest = gen_reg_rtx (Pmode);
1040
1041 emit_insn (gen_load_tp (tp));
1042 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1043 emit_insn (gen_adddi3 (dest, tp, scratch));
1044 return dest;
1045
1046 case TLS_MODEL_LOCAL_EXEC:
1047 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1048 eqv = gen_rtx_CONST (Pmode, eqv);
1049 tp = gen_reg_rtx (Pmode);
1050
1051 emit_insn (gen_load_tp (tp));
1052 if (alpha_tls_size == 32)
1053 {
1054 insn = gen_rtx_HIGH (Pmode, eqv);
1055 insn = gen_rtx_PLUS (Pmode, tp, insn);
1056 tp = gen_reg_rtx (Pmode);
1057 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1058 }
1059 return gen_rtx_LO_SUM (Pmode, tp, eqv);
dda53cd5 1060
1061 default:
1062 gcc_unreachable ();
5f7b9df8 1063 }
1064
8afb6db4 1065 if (local_symbolic_operand (x, Pmode))
1066 {
1067 if (small_symbolic_operand (x, Pmode))
f5a60074 1068 return x;
8afb6db4 1069 else
1070 {
e1ba4a27 1071 if (can_create_pseudo_p ())
f5a60074 1072 scratch = gen_reg_rtx (Pmode);
1073 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1074 gen_rtx_HIGH (Pmode, x)));
1075 return gen_rtx_LO_SUM (Pmode, scratch, x);
8afb6db4 1076 }
5dcb037d 1077 }
1f0ce6a6 1078 }
1079
0d50f0b7 1080 return NULL;
1081
1082 split_addend:
1083 {
f5a60074 1084 HOST_WIDE_INT low, high;
1085
1086 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1087 addend -= low;
1088 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1089 addend -= high;
1090
1091 if (addend)
1092 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
e1ba4a27 1093 (!can_create_pseudo_p () ? scratch : NULL_RTX),
f5a60074 1094 1, OPTAB_LIB_WIDEN);
1095 if (high)
1096 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
e1ba4a27 1097 (!can_create_pseudo_p () ? scratch : NULL_RTX),
f5a60074 1098 1, OPTAB_LIB_WIDEN);
1099
1100 return plus_constant (x, low);
0d50f0b7 1101 }
1102}
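
/* Worked example of the split_addend arithmetic above (illustrative,
   not part of the original file).  For addend == 0x18000:

     low  = ((0x8000 ^ 0x8000) - 0x8000)  == -0x8000
     high = 0x18000 - (-0x8000)           ==  0x20000

   so the address is built with an ldah of 2 (0x20000 >> 16) followed
   by an lda of -32768, and 0x20000 - 0x8000 == 0x18000 as required.  */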
1103
c0da4391 1104/* Primarily this is required for TLS symbols, but given that our move
1105 patterns *ought* to be able to handle any symbol at any time, we
1106 should never be spilling symbolic operands to the constant pool, ever. */
1107
1108static bool
1109alpha_cannot_force_const_mem (rtx x)
1110{
1111 enum rtx_code code = GET_CODE (x);
1112 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1113}
1114
805e22b2 1115/* We do not allow indirect calls to be optimized into sibling calls, nor
9bdcc1e5 1116 can we allow a call to a function with a different GP to be optimized
1117 into a sibcall. */
1118
805e22b2 1119static bool
92643d95 1120alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
805e22b2 1121{
9bdcc1e5 1122 /* Can't do indirect tail calls, since we don't know if the target
1123 uses the same GP. */
1124 if (!decl)
1125 return false;
1126
1127 /* Otherwise, we can make a tail call if the target function shares
1128 the same GP. */
1129 return decl_has_samegp (decl);
805e22b2 1130}
1131
b71b0310 1132int
1133some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
367e2ab3 1134{
1135 rtx x = *px;
f5a60074 1136
792433e3 1137 /* Don't re-split. */
1138 if (GET_CODE (x) == LO_SUM)
1139 return -1;
367e2ab3 1140
792433e3 1141 return small_symbolic_operand (x, Pmode) != 0;
f5a60074 1142}
1143
367e2ab3 1144static int
92643d95 1145split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
367e2ab3 1146{
1147 rtx x = *px;
443bb1a6 1148
792433e3 1149 /* Don't re-split. */
1150 if (GET_CODE (x) == LO_SUM)
1151 return -1;
f5a60074 1152
367e2ab3 1153 if (small_symbolic_operand (x, Pmode))
1154 {
1155 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1156 *px = x;
792433e3 1157 return -1;
367e2ab3 1158 }
1159
792433e3 1160 return 0;
f5a60074 1161}
1162
92643d95 1163rtx
1164split_small_symbolic_operand (rtx x)
1165{
1166 x = copy_insn (x);
1167 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1168 return x;
1169}
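
/* Note on the traversal protocol used above (illustrative, not part of
   the original file): a for_each_rtx callback returns 0 to continue
   into sub-rtxes, -1 to skip the sub-rtxes of the current expression
   (how the functions above avoid re-splitting a LO_SUM they just
   created), or any other value to stop the walk and return it.  */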
1170
2f58af60 1171/* Indicate that INSN cannot be duplicated. This is true for any insn
1172 that we've marked with gpdisp relocs, since those have to stay in
1173 1-1 correspondence with one another.
1174
5910bb95 1175 Technically we could copy them if we could set up a mapping from one
2f58af60 1176 sequence number to another, across the set of insns to be duplicated.
1177 This seems overly complicated and error-prone since interblock motion
b55f2ed8 1178 from sched-ebb could move one of the pair of insns to a different block.
1179
1180 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1181 then they'll be in a different block from their ldgp. Which could lead
1182 the bb reorder code to think that it would be ok to copy just the block
1183 containing the call and branch to the block containing the ldgp. */
2f58af60 1184
1185static bool
92643d95 1186alpha_cannot_copy_insn_p (rtx insn)
2f58af60 1187{
2f58af60 1188 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1189 return false;
b55f2ed8 1190 if (recog_memoized (insn) >= 0)
1191 return get_attr_cannot_copy (insn);
1192 else
2f58af60 1193 return false;
2f58af60 1194}
1195
9e7454d0 1196
0d50f0b7 1197/* Try a machine-dependent way of reloading an illegitimate address
1198 operand. If we find one, push the reload and return the new rtx. */
9e7454d0 1199
0d50f0b7 1200rtx
92643d95 1201alpha_legitimize_reload_address (rtx x,
1202 enum machine_mode mode ATTRIBUTE_UNUSED,
1203 int opnum, int type,
1204 int ind_levels ATTRIBUTE_UNUSED)
0d50f0b7 1205{
1206 /* We must recognize output that we have already generated ourselves. */
1207 if (GET_CODE (x) == PLUS
1208 && GET_CODE (XEXP (x, 0)) == PLUS
1209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1210 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1211 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1212 {
1213 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1214 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1215 opnum, type);
1216 return x;
1217 }
1218
1219 /* We wish to handle large displacements off a base register by
1220 splitting the addend across an ldah and the mem insn. This
 1221 cuts the number of extra insns needed from 3 to 1. */
1222 if (GET_CODE (x) == PLUS
1223 && GET_CODE (XEXP (x, 0)) == REG
1224 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1225 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1226 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1227 {
1228 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1229 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1230 HOST_WIDE_INT high
1231 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1232
1233 /* Check for 32-bit overflow. */
1234 if (high + low != val)
1235 return NULL_RTX;
1236
1237 /* Reload the high part into a base reg; leave the low part
1238 in the mem directly. */
1239 x = gen_rtx_PLUS (GET_MODE (x),
1240 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1241 GEN_INT (high)),
1242 GEN_INT (low));
1243
1244 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1245 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1246 opnum, type);
1247 return x;
1248 }
1249
1250 return NULL_RTX;
1251}
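
/* Worked example of the overflow check above (illustrative, not part
   of the original file).  For val == 0x12348000, low == -0x8000 and
   high == 0x12350000, so high + low == val and the displacement is
   rebuilt as an ldah of 0x1235 plus an lda of -0x8000.  For
   val == 0x7fffffff, low == -1 and high == -0x80000000, so
   high + low != val and NULL_RTX is returned instead.  */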
1252\f
fab7adbf 1253/* Compute a (partial) cost for rtx X. Return true if the complete
1254 cost has been computed, and false if subexpressions should be
1255 scanned. In either case, *TOTAL contains the cost result. */
1256
1257static bool
92643d95 1258alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
fab7adbf 1259{
1260 enum machine_mode mode = GET_MODE (x);
1261 bool float_mode_p = FLOAT_MODE_P (mode);
d7cf2331 1262 const struct alpha_rtx_cost_data *cost_data;
1263
1264 if (optimize_size)
1265 cost_data = &alpha_rtx_cost_size;
1266 else
fb64edde 1267 cost_data = &alpha_rtx_cost_data[alpha_tune];
fab7adbf 1268
1269 switch (code)
1270 {
d7cf2331 1271 case CONST_INT:
fab7adbf 1272 /* If this is an 8-bit constant, return zero since it can be used
1273 nearly anywhere with no cost. If it is a valid operand for an
1274 ADD or AND, likewise return 0 if we know it will be used in that
1275 context. Otherwise, return 2 since it might be used there later.
1276 All other constants take at least two insns. */
fab7adbf 1277 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1278 {
1279 *total = 0;
1280 return true;
1281 }
8e262b5e 1282 /* FALLTHRU */
fab7adbf 1283
1284 case CONST_DOUBLE:
1285 if (x == CONST0_RTX (mode))
1286 *total = 0;
1287 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1288 || (outer_code == AND && and_operand (x, VOIDmode)))
1289 *total = 0;
1290 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1291 *total = 2;
1292 else
1293 *total = COSTS_N_INSNS (2);
1294 return true;
9e7454d0 1295
fab7adbf 1296 case CONST:
1297 case SYMBOL_REF:
1298 case LABEL_REF:
1299 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1300 *total = COSTS_N_INSNS (outer_code != MEM);
1301 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1302 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1303 else if (tls_symbolic_operand_type (x))
1304 /* Estimate of cost for call_pal rduniq. */
d7cf2331 1305 /* ??? How many insns do we emit here? More than one... */
fab7adbf 1306 *total = COSTS_N_INSNS (15);
1307 else
1308 /* Otherwise we do a load from the GOT. */
d7cf2331 1309 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
fab7adbf 1310 return true;
9e7454d0 1311
91bc47b0 1312 case HIGH:
1313 /* This is effectively an add_operand. */
1314 *total = 2;
1315 return true;
1316
fab7adbf 1317 case PLUS:
1318 case MINUS:
1319 if (float_mode_p)
d7cf2331 1320 *total = cost_data->fp_add;
fab7adbf 1321 else if (GET_CODE (XEXP (x, 0)) == MULT
1322 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1323 {
1324 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
be8c6d9c 1325 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
fab7adbf 1326 return true;
1327 }
1328 return false;
1329
1330 case MULT:
1331 if (float_mode_p)
d7cf2331 1332 *total = cost_data->fp_mult;
fab7adbf 1333 else if (mode == DImode)
d7cf2331 1334 *total = cost_data->int_mult_di;
fab7adbf 1335 else
d7cf2331 1336 *total = cost_data->int_mult_si;
fab7adbf 1337 return false;
1338
1339 case ASHIFT:
1340 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1341 && INTVAL (XEXP (x, 1)) <= 3)
1342 {
1343 *total = COSTS_N_INSNS (1);
1344 return false;
1345 }
8e262b5e 1346 /* FALLTHRU */
fab7adbf 1347
1348 case ASHIFTRT:
1349 case LSHIFTRT:
d7cf2331 1350 *total = cost_data->int_shift;
fab7adbf 1351 return false;
1352
1353 case IF_THEN_ELSE:
1354 if (float_mode_p)
d7cf2331 1355 *total = cost_data->fp_add;
fab7adbf 1356 else
d7cf2331 1357 *total = cost_data->int_cmov;
fab7adbf 1358 return false;
1359
1360 case DIV:
1361 case UDIV:
1362 case MOD:
1363 case UMOD:
1364 if (!float_mode_p)
d7cf2331 1365 *total = cost_data->int_div;
fab7adbf 1366 else if (mode == SFmode)
d7cf2331 1367 *total = cost_data->fp_div_sf;
fab7adbf 1368 else
d7cf2331 1369 *total = cost_data->fp_div_df;
fab7adbf 1370 return false;
1371
1372 case MEM:
d7cf2331 1373 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
fab7adbf 1374 return true;
1375
1376 case NEG:
1377 if (! float_mode_p)
1378 {
1379 *total = COSTS_N_INSNS (1);
1380 return false;
1381 }
8e262b5e 1382 /* FALLTHRU */
fab7adbf 1383
1384 case ABS:
1385 if (! float_mode_p)
1386 {
d7cf2331 1387 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
fab7adbf 1388 return false;
1389 }
8e262b5e 1390 /* FALLTHRU */
fab7adbf 1391
1392 case FLOAT:
1393 case UNSIGNED_FLOAT:
1394 case FIX:
1395 case UNSIGNED_FIX:
fab7adbf 1396 case FLOAT_TRUNCATE:
d7cf2331 1397 *total = cost_data->fp_add;
fab7adbf 1398 return false;
1399
5fc4edde 1400 case FLOAT_EXTEND:
1401 if (GET_CODE (XEXP (x, 0)) == MEM)
1402 *total = 0;
1403 else
1404 *total = cost_data->fp_add;
1405 return false;
1406
fab7adbf 1407 default:
1408 return false;
1409 }
1410}
1411\f
bf2a98b3 1412/* REF is an alignable memory location. Place an aligned SImode
1413 reference into *PALIGNED_MEM and the number of bits to shift into
a99a652b 1414 *PBITNUM. */
bf2a98b3 1416
1417void
92643d95 1418get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
bf2a98b3 1419{
1420 rtx base;
a3fed3d2 1421 HOST_WIDE_INT disp, offset;
bf2a98b3 1422
4d10b463 1423 gcc_assert (GET_CODE (ref) == MEM);
bf2a98b3 1424
cc215844 1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
a99a652b 1427 {
cc215844 1428 base = find_replacement (&XEXP (ref, 0));
4d10b463 1429 gcc_assert (memory_address_p (GET_MODE (ref), base));
a99a652b 1430 }
bf2a98b3 1431 else
4d10b463 1432 base = XEXP (ref, 0);
bf2a98b3 1433
1434 if (GET_CODE (base) == PLUS)
a3fed3d2 1435 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 else
1437 disp = 0;
1438
1439 /* Find the byte offset within an aligned word. If the memory itself is
1440 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1441 will have examined the base register and determined it is aligned, and
1442 thus displacements from it are naturally alignable. */
1443 if (MEM_ALIGN (ref) >= 32)
1444 offset = 0;
1445 else
1446 offset = disp & 3;
bf2a98b3 1447
a3fed3d2 1448 /* Access the entire aligned word. */
1449 *paligned_mem = widen_memory_access (ref, SImode, -offset);
bf2a98b3 1450
a3fed3d2 1451 /* Convert the byte offset within the word to a bit offset. */
9caef960 1452 if (WORDS_BIG_ENDIAN)
a3fed3d2 1453 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
9caef960 1454 else
a3fed3d2 1455 offset *= 8;
1456 *pbitnum = GEN_INT (offset);
bf2a98b3 1457}
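
/* Worked example (illustrative, not part of the original file): for an
   HImode ref at base+6 with no alignment info, disp == 6 and
   offset == disp & 3 == 2, so *paligned_mem becomes the SImode word at
   base+4; *pbitnum is then 2 * 8 == 16 on a little-endian target, or
   32 - (16 + 16) == 0 when WORDS_BIG_ENDIAN.  */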
1458
9e7454d0 1459/* Similar, but just get the address. Handle the two reload cases. */
bf2a98b3 1461
1462rtx
0934d969 1463get_unaligned_address (rtx ref)
bf2a98b3 1464{
1465 rtx base;
1466 HOST_WIDE_INT offset = 0;
1467
4d10b463 1468 gcc_assert (GET_CODE (ref) == MEM);
bf2a98b3 1469
cc215844 1470 if (reload_in_progress
1471 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
a99a652b 1472 {
a99a652b 1473 base = find_replacement (&XEXP (ref, 0));
cc215844 1474
4d10b463 1475 gcc_assert (memory_address_p (GET_MODE (ref), base));
a99a652b 1476 }
bf2a98b3 1477 else
4d10b463 1478 base = XEXP (ref, 0);
bf2a98b3 1479
1480 if (GET_CODE (base) == PLUS)
1481 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1482
0934d969 1483 return plus_constant (base, offset);
1484}
1485
1486/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1487 X is always returned in a register. */
1488
1489rtx
1490get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1491{
1492 if (GET_CODE (addr) == PLUS)
1493 {
1494 ofs += INTVAL (XEXP (addr, 1));
1495 addr = XEXP (addr, 0);
1496 }
1497
1498 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1499 NULL_RTX, 1, OPTAB_LIB_WIDEN);
bf2a98b3 1500}
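
/* Worked example (illustrative, not part of the original file): for
   addr == (plus r9 13) and ofs == 6, the accumulated offset is 19 and
   19 & 7 == 3, so the result holds r9 + 3, which is congruent to
   r9 + 13 + 6 modulo 8 as the comment above requires.  */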
14f7bc98 1501
f5a60074 1502/* On the Alpha, all (non-symbolic) constants except zero go into
9e7454d0 1503 a floating-point register via memory. Note that we cannot
f5a60074 1504 return anything that is not a subset of CLASS, and that some
1505 symbolic constants cannot be dropped to memory. */
1506
1507enum reg_class
92643d95 1508alpha_preferred_reload_class (rtx x, enum reg_class class)
f5a60074 1509{
1510 /* Zero is present in any register class. */
1511 if (x == CONST0_RTX (GET_MODE (x)))
1512 return class;
1513
1514 /* These sorts of constants we can easily drop to memory. */
91bc47b0 1515 if (GET_CODE (x) == CONST_INT
1516 || GET_CODE (x) == CONST_DOUBLE
1517 || GET_CODE (x) == CONST_VECTOR)
f5a60074 1518 {
1519 if (class == FLOAT_REGS)
1520 return NO_REGS;
1521 if (class == ALL_REGS)
1522 return GENERAL_REGS;
1523 return class;
1524 }
1525
1526 /* All other kinds of constants should not (and in the case of HIGH
1527 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1528 secondary reload. */
1529 if (CONSTANT_P (x))
1530 return (class == ALL_REGS ? GENERAL_REGS : class);
1531
1532 return class;
1533}
1534
0d96cd2b 1535/* Inform reload about cases where moving X with a mode MODE to a register in
1536 CLASS requires an extra scratch or immediate register. Return the class
1537 needed for the immediate register. */
14f7bc98 1538
0d96cd2b 1539static enum reg_class
1540alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1541 enum machine_mode mode, secondary_reload_info *sri)
14f7bc98 1542{
0d96cd2b 1543 /* Loading and storing HImode or QImode values to and from memory
1544 usually requires a scratch register. */
1545 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
d2494d49 1546 {
0d96cd2b 1547 if (any_memory_operand (x, mode))
d2494d49 1548 {
0d96cd2b 1549 if (in_p)
1550 {
1551 if (!aligned_memory_operand (x, mode))
1552 sri->icode = reload_in_optab[mode];
1553 }
1554 else
1555 sri->icode = reload_out_optab[mode];
1556 return NO_REGS;
d2494d49 1557 }
1558 }
14f7bc98 1559
0d96cd2b 1560 /* We also cannot do integral arithmetic into FP regs, as might result
1561 from register elimination into a DImode fp register. */
14f7bc98 1562 if (class == FLOAT_REGS)
1563 {
0d96cd2b 1564 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
14f7bc98 1565 return GENERAL_REGS;
0d96cd2b 1566 if (in_p && INTEGRAL_MODE_P (mode)
1567 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
14f7bc98 1568 return GENERAL_REGS;
1569 }
1570
1571 return NO_REGS;
1572}
bf2a98b3 1573\f
1574/* Subfunction of the following function. Update the flags of any MEM
1575 found in part of X. */
1576
b04fab2a 1577static int
1578alpha_set_memflags_1 (rtx *xp, void *data)
bf2a98b3 1579{
b04fab2a 1580 rtx x = *xp, orig = (rtx) data;
bf2a98b3 1581
b04fab2a 1582 if (GET_CODE (x) != MEM)
1583 return 0;
bf2a98b3 1584
b04fab2a 1585 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1586 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1587 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1588 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1589 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
bf2a98b3 1590
b04fab2a 1591 /* Sadly, we cannot use alias sets because the extra aliasing
1592 produced by the AND interferes. Given that two-byte quantities
1593 are the only thing we would be able to differentiate anyway,
1594 there does not seem to be any point in convoluting the early
1595 out of the alias check. */
99c14947 1596
b04fab2a 1597 return -1;
bf2a98b3 1598}
1599
31d3e01c 1600/* Given INSN, which is an INSN list or the PATTERN of a single insn
1601 generated to perform a memory operation, look for any MEMs in either
 1602 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
 1603 no-trap, and read-only flags from REF into each of the MEMs found.
 1604 If REF is not a MEM, don't do anything. */
bf2a98b3 1605
1606void
92643d95 1607alpha_set_memflags (rtx insn, rtx ref)
bf2a98b3 1608{
b04fab2a 1609 rtx *base_ptr;
849674a3 1610
1611 if (GET_CODE (ref) != MEM)
bf2a98b3 1612 return;
1613
9e7454d0 1614 /* This is only called from alpha.md, after having had something
849674a3 1615 generated from one of the insn patterns. So if everything is
1616 zero, the pattern is already up-to-date. */
b04fab2a 1617 if (!MEM_VOLATILE_P (ref)
1618 && !MEM_IN_STRUCT_P (ref)
1619 && !MEM_SCALAR_P (ref)
1620 && !MEM_NOTRAP_P (ref)
1621 && !MEM_READONLY_P (ref))
849674a3 1622 return;
1623
b04fab2a 1624 if (INSN_P (insn))
1625 base_ptr = &PATTERN (insn);
1626 else
1627 base_ptr = &insn;
1628 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
bf2a98b3 1629}
1630\f
91bc47b0 1631static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1632 int, bool);
1633
1634/* Internal routine for alpha_emit_set_const to check whether the
 1635 constant can be built in N or fewer insns. If NO_OUTPUT is true, we
 1636 only check whether that is possible and return pc_rtx on success. */
6f86cb15 1637
1638static rtx
92643d95 1639alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
91bc47b0 1640 HOST_WIDE_INT c, int n, bool no_output)
bf2a98b3 1641{
bdb19034 1642 HOST_WIDE_INT new;
bf2a98b3 1643 int i, bits;
ea5db00c 1644 /* Use a pseudo if highly optimizing and still generating RTL. */
1645 rtx subtarget
e1ba4a27 1646 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
301416af 1647 rtx temp, insn;
bf2a98b3 1648
bf2a98b3 1649 /* If this is a sign-extended 32-bit constant, we can do this in at most
1650 three insns, so do it if we have enough insns left. We always have
65abff06 1651 a sign-extended 32-bit constant when compiling on a narrow machine. */
bf2a98b3 1652
3bc2043a 1653 if (HOST_BITS_PER_WIDE_INT != 64
1654 || c >> 31 == -1 || c >> 31 == 0)
bf2a98b3 1655 {
bdb19034 1656 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
bf2a98b3 1657 HOST_WIDE_INT tmp1 = c - low;
bdb19034 1658 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
bf2a98b3 1659 HOST_WIDE_INT extra = 0;
1660
81d03ebd 1661 /* If HIGH will be interpreted as negative but the constant is
1662 positive, we must adjust it to do two ldha insns. */
1663
1664 if ((high & 0x8000) != 0 && c >= 0)
bf2a98b3 1665 {
1666 extra = 0x4000;
1667 tmp1 -= 0x40000000;
1668 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1669 }
1670
1671 if (c == low || (low == 0 && extra == 0))
3bc2043a 1672 {
1673 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
 1674 but that meant that we couldn't handle INT_MIN on 32-bit machines
9e7454d0 1675 (like NT/Alpha), because we recurse indefinitely through
3bc2043a 1676 emit_move_insn to gen_movdi. So instead, since we know exactly
1677 what we want, create it explicitly. */
1678
91bc47b0 1679 if (no_output)
1680 return pc_rtx;
3bc2043a 1681 if (target == NULL)
1682 target = gen_reg_rtx (mode);
941522d6 1683 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
3bc2043a 1684 return target;
1685 }
6f86cb15 1686 else if (n >= 2 + (extra != 0))
bf2a98b3 1687 {
91bc47b0 1688 if (no_output)
1689 return pc_rtx;
e1ba4a27 1690 if (!can_create_pseudo_p ())
5b952578 1691 {
1692 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1693 temp = target;
1694 }
1695 else
1696 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1697 subtarget, mode);
ea5db00c 1698
301416af 1699 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1700 This means that if we go through expand_binop, we'll try to
1701 generate extensions, etc, which will require new pseudos, which
1702 will fail during some split phases. The SImode add patterns
1703 still exist, but are not named. So build the insns by hand. */
1704
bf2a98b3 1705 if (extra != 0)
301416af 1706 {
1707 if (! subtarget)
1708 subtarget = gen_reg_rtx (mode);
1709 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1710 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1711 emit_insn (insn);
b8585446 1712 temp = subtarget;
301416af 1713 }
bf2a98b3 1714
301416af 1715 if (target == NULL)
1716 target = gen_reg_rtx (mode);
1717 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1718 insn = gen_rtx_SET (VOIDmode, target, insn);
1719 emit_insn (insn);
1720 return target;
bf2a98b3 1721 }
1722 }
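
  /* Worked example of the two- and three-insn cases above
     (illustrative, not part of the original file).  For c == 0x1234abcd:

       low == -0x5433, high == 0x1235, extra == 0

     giving ldah 0x1235 then lda -0x5433 (0x12350000 - 0x5433 == c).
     For c == 0x7fff8000, high initially comes out negative while
     c >= 0, so extra == 0x4000 kicks in and three insns are used: two
     ldah's of 0x4000 and an lda of -0x8000, since
     0x40000000 + 0x40000000 - 0x8000 == 0x7fff8000.  */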
1723
dacd345b 1724 /* If we couldn't do it that way, try some other methods. But if we have
07014ed9 1725 no instructions left, don't bother. Likewise, if this is SImode and
1726 we can't make pseudos, we can't do anything since the expand_binop
1727 and expand_unop calls will widen and try to make pseudos. */
bf2a98b3 1728
e1ba4a27 1729 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
bf2a98b3 1730 return 0;
1731
dacd345b 1732 /* Next, see if we can load a related constant and then shift and possibly
bf2a98b3 1733 negate it to get the constant we want. Try this once each increasing
1734 numbers of insns. */
1735
1736 for (i = 1; i < n; i++)
1737 {
bdb19034 1738      /* First, see if, minus some low bits, we have an easy load of
 1739	 the high bits. */
1740
1741 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
91bc47b0 1742 if (new != 0)
1743 {
1744 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1745 if (temp)
1746 {
1747 if (no_output)
1748 return temp;
1749 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1750 target, 0, OPTAB_WIDEN);
1751 }
1752 }
bdb19034 1753
1754 /* Next try complementing. */
91bc47b0 1755 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1756 if (temp)
1757 {
1758 if (no_output)
1759 return temp;
1760 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1761 }
bf2a98b3 1762
ea5db00c 1763 /* Next try to form a constant and do a left shift. We can do this
bf2a98b3 1764 if some low-order bits are zero; the exact_log2 call below tells
1765 us that information. The bits we are shifting out could be any
1766 value, but here we'll just try the 0- and sign-extended forms of
1767 the constant. To try to increase the chance of having the same
1768 constant in more than one insn, start at the highest number of
1769 bits to shift, but try all possibilities in case a ZAPNOT will
1770 be useful. */
1771
91bc47b0 1772 bits = exact_log2 (c & -c);
1773 if (bits > 0)
bf2a98b3 1774 for (; bits > 0; bits--)
91bc47b0 1775 {
1776 new = c >> bits;
1777 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1778 if (!temp && c < 0)
1779 {
1780 new = (unsigned HOST_WIDE_INT)c >> bits;
1781 temp = alpha_emit_set_const (subtarget, mode, new,
1782 i, no_output);
1783 }
1784 if (temp)
1785 {
1786 if (no_output)
1787 return temp;
1788 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1789 target, 0, OPTAB_WIDEN);
1790 }
1791 }
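      /* Editorial illustration: for c = (HOST_WIDE_INT) 0x12345 << 32,
	 the low 32 bits are zero, so the recursive call builds 0x12345
	 with ldah/lda and a single sll by 32 then finishes the job,
	 three insns in all.  */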
bf2a98b3 1792
1793 /* Now try high-order zero bits. Here we try the shifted-in bits as
066efb8d 1794 all zero and all ones. Be careful to avoid shifting outside the
1795 mode and to avoid shifting outside the host wide int size. */
3bc2043a 1796 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1797 confuse the recursive call and set all of the high 32 bits. */
bf2a98b3 1798
91bc47b0 1799 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1800 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1801 if (bits > 0)
bf2a98b3 1802 for (; bits > 0; bits--)
91bc47b0 1803 {
1804 new = c << bits;
1805 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1806 if (!temp)
1807 {
1808 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1809 temp = alpha_emit_set_const (subtarget, mode, new,
1810 i, no_output);
1811 }
1812 if (temp)
1813 {
1814 if (no_output)
1815 return temp;
1816 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1817 target, 1, OPTAB_WIDEN);
1818 }
1819 }
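      /* Editorial illustration: for c = 0xffffffff on a 64-bit host, the
	 all-ones variant (c << 32) | 0xffffffff is -1, a one-insn
	 constant, and the logical right shift by 32 restores c, two
	 insns in all.  */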
bf2a98b3 1820
1821 /* Now try high-order 1 bits. We get that with a sign-extension.
066efb8d 1822 But one bit isn't enough here. Be careful to avoid shifting outside
65abff06 1823 the mode and to avoid shifting outside the host wide int size. */
9caef960 1824
91bc47b0 1825 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1826 - floor_log2 (~ c) - 2);
1827 if (bits > 0)
bf2a98b3 1828 for (; bits > 0; bits--)
91bc47b0 1829 {
1830 new = c << bits;
1831 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1832 if (!temp)
1833 {
1834 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1835 temp = alpha_emit_set_const (subtarget, mode, new,
1836 i, no_output);
1837 }
1838 if (temp)
1839 {
1840 if (no_output)
1841 return temp;
1842 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1843 target, 0, OPTAB_WIDEN);
1844 }
1845 }
bf2a98b3 1846 }
1847
bdb19034 1848#if HOST_BITS_PER_WIDE_INT == 64
 1849  /* Finally, see if we can load a value into the target that is the same as the
1850 constant except that all bytes that are 0 are changed to be 0xff. If we
1851 can, then we can do a ZAPNOT to obtain the desired constant. */
1852
1853 new = c;
1854 for (i = 0; i < 64; i += 8)
1855 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1856 new |= (HOST_WIDE_INT) 0xff << i;
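  /* Editorial illustration: for c = 0x00ff000000000012, filling the zero
     bytes gives new = 0xffffffffffffff12 = -238, loadable with a single
     lda; the AND below then uses the 0x00/0xff byte mask (c | ~new),
     which matches a zapnot, two insns in all.  */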
e52799e9 1857
bdb19034 1858 /* We are only called for SImode and DImode. If this is SImode, ensure that
1859 we are sign extended to a full word. */
1860
1861 if (mode == SImode)
1862 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1863
91bc47b0 1864 if (new != c)
1865 {
1866 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1867 if (temp)
1868 {
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1872 target, 0, OPTAB_WIDEN);
1873 }
1874 }
bdb19034 1875#endif
e52799e9 1876
bf2a98b3 1877 return 0;
1878}
996a379d 1879
92643d95 1880/* Try to output insns to set TARGET equal to the constant C if it can be
 1881   done in at most N insns. Do all computations in MODE. Returns the place
1882 where the output has been placed if it can be done and the insns have been
1883 emitted. If it would take more than N insns, zero is returned and no
 1884   insns are emitted. */
1885
91bc47b0 1886static rtx
92643d95 1887alpha_emit_set_const (rtx target, enum machine_mode mode,
91bc47b0 1888 HOST_WIDE_INT c, int n, bool no_output)
92643d95 1889{
91bc47b0 1890 enum machine_mode orig_mode = mode;
92643d95 1891 rtx orig_target = target;
91bc47b0 1892 rtx result = 0;
92643d95 1893 int i;
1894
 1895  /* If we can't make any pseudos, TARGET is an SImode hard register, and
 1896     we can't load this constant in one insn, do the work in DImode. */
e1ba4a27 1897 if (!can_create_pseudo_p () && mode == SImode
91bc47b0 1898 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
92643d95 1899 {
91bc47b0 1900 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1901 if (result)
1902 return result;
1903
1904 target = no_output ? NULL : gen_lowpart (DImode, target);
1905 mode = DImode;
1906 }
1907 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1908 {
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
92643d95 1910 mode = DImode;
1911 }
1912
1913 /* Try 1 insn, then 2, then up to N. */
1914 for (i = 1; i <= n; i++)
1915 {
91bc47b0 1916 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
92643d95 1917 if (result)
1918 {
91bc47b0 1919 rtx insn, set;
1920
1921 if (no_output)
1922 return result;
1923
1924 insn = get_last_insn ();
1925 set = single_set (insn);
92643d95 1926 if (! CONSTANT_P (SET_SRC (set)))
1927 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1928 break;
1929 }
1930 }
1931
1932 /* Allow for the case where we changed the mode of TARGET. */
91bc47b0 1933 if (result)
1934 {
1935 if (result == target)
1936 result = orig_target;
1937 else if (mode != orig_mode)
1938 result = gen_lowpart (orig_mode, result);
1939 }
92643d95 1940
1941 return result;
1942}
1943
2612f626 1944/* Having failed to find a 3-insn sequence in alpha_emit_set_const,
 1945   fall back to a straightforward decomposition. We do this to avoid
1946 exponential run times encountered when looking for longer sequences
1947 with alpha_emit_set_const. */
1948
91bc47b0 1949static rtx
92643d95 1950alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2612f626 1951{
2612f626 1952 HOST_WIDE_INT d1, d2, d3, d4;
2612f626 1953
 1954  /* Decompose the entire word. */
af792316 1955#if HOST_BITS_PER_WIDE_INT >= 64
4d10b463 1956 gcc_assert (c2 == -(c1 < 0));
af792316 1957 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1958 c1 -= d1;
1959 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1960 c1 = (c1 - d2) >> 32;
1961 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1962 c1 -= d3;
1963 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
4d10b463 1964 gcc_assert (c1 == d4);
af792316 1965#else
1966 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d1;
1968 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
4d10b463 1969 gcc_assert (c1 == d2);
af792316 1970 c2 += (d2 < 0);
1971 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1972 c2 -= d3;
1973 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
4d10b463 1974 gcc_assert (c2 == d4);
af792316 1975#endif
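  /* Editorial trace of the decomposition: for c1 = 0x1234567890abcdef we
     get d1 = -0x3211, d2 = -0x6f540000, d3 = 0x5679 and d4 = 0x12340000,
     and the moves below rebuild the value as
     ((d4 + d3) << 32) + d2 + d1.  */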
2612f626 1976
 1977  /* Construct the high word. */
af792316 1978 if (d4)
1979 {
1980 emit_move_insn (target, GEN_INT (d4));
1981 if (d3)
1982 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1983 }
2612f626 1984 else
af792316 1985 emit_move_insn (target, GEN_INT (d3));
2612f626 1986
 1987  /* Shift it into place. */
af792316 1988 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2612f626 1989
af792316 1990 /* Add in the low bits. */
1991 if (d2)
1992 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1993 if (d1)
1994 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2612f626 1995
af792316 1996 return target;
2612f626 1997}
2612f626 1998
91bc47b0 1999/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2000 the low 64 bits. */
2001
2002static void
2003alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2004{
2005 HOST_WIDE_INT i0, i1;
2006
2007 if (GET_CODE (x) == CONST_VECTOR)
2008 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2009
2011 if (GET_CODE (x) == CONST_INT)
2012 {
2013 i0 = INTVAL (x);
2014 i1 = -(i0 < 0);
2015 }
2016 else if (HOST_BITS_PER_WIDE_INT >= 64)
2017 {
2018 i0 = CONST_DOUBLE_LOW (x);
2019 i1 = -(i0 < 0);
2020 }
2021 else
2022 {
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = CONST_DOUBLE_HIGH (x);
2025 }
2026
2027 *p0 = i0;
2028 *p1 = i1;
2029}
2030
2031/* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2032 are willing to load the value into a register via a move pattern.
2033 Normally this is all symbolic constants, integral constants that
2034 take three or fewer instructions, and floating-point zero. */
2035
2036bool
2037alpha_legitimate_constant_p (rtx x)
2038{
2039 enum machine_mode mode = GET_MODE (x);
2040 HOST_WIDE_INT i0, i1;
2041
2042 switch (GET_CODE (x))
2043 {
2044 case CONST:
2045 case LABEL_REF:
91bc47b0 2046 case HIGH:
2047 return true;
2048
b5c0ec3d 2049 case SYMBOL_REF:
2050 /* TLS symbols are never valid. */
2051 return SYMBOL_REF_TLS_MODEL (x) == 0;
2052
91bc47b0 2053 case CONST_DOUBLE:
2054 if (x == CONST0_RTX (mode))
2055 return true;
2056 if (FLOAT_MODE_P (mode))
2057 return false;
2058 goto do_integer;
2059
2060 case CONST_VECTOR:
2061 if (x == CONST0_RTX (mode))
2062 return true;
2063 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2064 return false;
2065 if (GET_MODE_SIZE (mode) != 8)
2066 return false;
2067 goto do_integer;
2068
2069 case CONST_INT:
2070 do_integer:
2071 if (TARGET_BUILD_CONSTANTS)
2072 return true;
2073 alpha_extract_integer (x, &i0, &i1);
 2074      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2075 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2076 return false;
2077
2078 default:
2079 return false;
2080 }
2081}
2082
2083/* Operand 1 is known to be a constant, and should require more than one
2084 instruction to load. Emit that multi-part load. */
2085
2086bool
2087alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2088{
2089 HOST_WIDE_INT i0, i1;
2090 rtx temp = NULL_RTX;
2091
2092 alpha_extract_integer (operands[1], &i0, &i1);
2093
2094 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2095 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2096
2097 if (!temp && TARGET_BUILD_CONSTANTS)
2098 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2099
2100 if (temp)
2101 {
2102 if (!rtx_equal_p (operands[0], temp))
2103 emit_move_insn (operands[0], temp);
2104 return true;
2105 }
2106
2107 return false;
2108}
2109
cb6e3ae1 2110/* Expand a move instruction; return true if all work is done.
2111 We don't handle non-bwx subword loads here. */
2112
2113bool
92643d95 2114alpha_expand_mov (enum machine_mode mode, rtx *operands)
cb6e3ae1 2115{
2116 /* If the output is not a register, the input must be. */
2117 if (GET_CODE (operands[0]) == MEM
2118 && ! reg_or_0_operand (operands[1], mode))
2119 operands[1] = force_reg (mode, operands[1]);
2120
f5a60074 2121 /* Allow legitimize_address to perform some simplifications. */
62e050c6 2122 if (mode == Pmode && symbolic_operand (operands[1], mode))
1f0ce6a6 2123 {
05b07808 2124 rtx tmp;
2125
05b07808 2126 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
f5a60074 2127 if (tmp)
5dcb037d 2128 {
5f7b9df8 2129 if (tmp == operands[0])
2130 return true;
f5a60074 2131 operands[1] = tmp;
8afb6db4 2132 return false;
2133 }
1f0ce6a6 2134 }
2135
cb6e3ae1 2136 /* Early out for non-constants and valid constants. */
2137 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2138 return false;
2139
2140 /* Split large integers. */
2141 if (GET_CODE (operands[1]) == CONST_INT
91bc47b0 2142 || GET_CODE (operands[1]) == CONST_DOUBLE
2143 || GET_CODE (operands[1]) == CONST_VECTOR)
cb6e3ae1 2144 {
91bc47b0 2145 if (alpha_split_const_mov (mode, operands))
2146 return true;
cb6e3ae1 2147 }
2148
2149 /* Otherwise we've nothing left but to drop the thing to memory. */
d18a1a2b 2150 operands[1] = force_const_mem (mode, operands[1]);
cb6e3ae1 2151 if (reload_in_progress)
2152 {
2153 emit_move_insn (operands[0], XEXP (operands[1], 0));
0d96cd2b 2154 operands[1] = replace_equiv_address (operands[1], operands[0]);
cb6e3ae1 2155 }
2156 else
2157 operands[1] = validize_mem (operands[1]);
2158 return false;
2159}
2160
2161/* Expand a non-bwx QImode or HImode move instruction;
2162 return true if all work is done. */
2163
2164bool
92643d95 2165alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
cb6e3ae1 2166{
0d96cd2b 2167 rtx seq;
2168
cb6e3ae1 2169 /* If the output is not a register, the input must be. */
0d96cd2b 2170 if (MEM_P (operands[0]))
cb6e3ae1 2171 operands[1] = force_reg (mode, operands[1]);
2172
2173 /* Handle four memory cases, unaligned and aligned for either the input
2174 or the output. The only case where we can be called during reload is
2175 for aligned loads; all other cases require temporaries. */
2176
0d96cd2b 2177 if (any_memory_operand (operands[1], mode))
cb6e3ae1 2178 {
2179 if (aligned_memory_operand (operands[1], mode))
2180 {
2181 if (reload_in_progress)
2182 {
0d96cd2b 2183 if (mode == QImode)
2184 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2185 else
2186 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2187 emit_insn (seq);
cb6e3ae1 2188 }
2189 else
2190 {
2191 rtx aligned_mem, bitnum;
2192 rtx scratch = gen_reg_rtx (SImode);
d67e1866 2193 rtx subtarget;
2194 bool copyout;
cb6e3ae1 2195
2196 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2197
d67e1866 2198 subtarget = operands[0];
2199 if (GET_CODE (subtarget) == REG)
2200 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 else
2202 subtarget = gen_reg_rtx (DImode), copyout = true;
2203
0d96cd2b 2204 if (mode == QImode)
2205 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2206 bitnum, scratch);
2207 else
2208 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2209 bitnum, scratch);
2210 emit_insn (seq);
d67e1866 2211
2212 if (copyout)
2213 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
cb6e3ae1 2214 }
2215 }
2216 else
2217 {
2218 /* Don't pass these as parameters since that makes the generated
2219 code depend on parameter evaluation order which will cause
2220 bootstrap failures. */
2221
0d96cd2b 2222 rtx temp1, temp2, subtarget, ua;
d67e1866 2223 bool copyout;
2224
2225 temp1 = gen_reg_rtx (DImode);
2226 temp2 = gen_reg_rtx (DImode);
cb6e3ae1 2227
d67e1866 2228 subtarget = operands[0];
2229 if (GET_CODE (subtarget) == REG)
2230 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2231 else
2232 subtarget = gen_reg_rtx (DImode), copyout = true;
2233
0d96cd2b 2234 ua = get_unaligned_address (operands[1]);
2235 if (mode == QImode)
2236 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2237 else
2238 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2239
cb6e3ae1 2240 alpha_set_memflags (seq, operands[1]);
2241 emit_insn (seq);
d67e1866 2242
2243 if (copyout)
2244 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
cb6e3ae1 2245 }
2246 return true;
2247 }
2248
0d96cd2b 2249 if (any_memory_operand (operands[0], mode))
cb6e3ae1 2250 {
2251 if (aligned_memory_operand (operands[0], mode))
2252 {
2253 rtx aligned_mem, bitnum;
2254 rtx temp1 = gen_reg_rtx (SImode);
2255 rtx temp2 = gen_reg_rtx (SImode);
2256
2257 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2258
2259 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2260 temp1, temp2));
2261 }
2262 else
2263 {
2264 rtx temp1 = gen_reg_rtx (DImode);
2265 rtx temp2 = gen_reg_rtx (DImode);
2266 rtx temp3 = gen_reg_rtx (DImode);
0d96cd2b 2267 rtx ua = get_unaligned_address (operands[0]);
2268
2269 if (mode == QImode)
2270 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2271 else
2272 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
cb6e3ae1 2273
2274 alpha_set_memflags (seq, operands[0]);
2275 emit_insn (seq);
2276 }
2277 return true;
2278 }
2279
2280 return false;
2281}
2282
a31688d7 2283/* Implement the movmisalign patterns. One of the operands is a memory
84846cff 2284 that is not naturally aligned. Emit instructions to load it. */
a31688d7 2285
2286void
2287alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2288{
2289 /* Honor misaligned loads, for those we promised to do so. */
2290 if (MEM_P (operands[1]))
2291 {
2292 rtx tmp;
2293
2294 if (register_operand (operands[0], mode))
2295 tmp = operands[0];
2296 else
2297 tmp = gen_reg_rtx (mode);
2298
2299 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2300 if (tmp != operands[0])
2301 emit_move_insn (operands[0], tmp);
2302 }
2303 else if (MEM_P (operands[0]))
2304 {
2305 if (!reg_or_0_operand (operands[1], mode))
2306 operands[1] = force_reg (mode, operands[1]);
2307 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2308 }
2309 else
2310 gcc_unreachable ();
2311}
2312
2a42ba09 2313/* Generate an unsigned DImode to FP conversion. This is the same code
2314 optabs would emit if we didn't have TFmode patterns.
2315
2316 For SFmode, this is the only construction I've found that can pass
2317 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2318 intermediates will work, because you'll get intermediate rounding
2319 that ruins the end result. Some of this could be fixed by turning
2320 on round-to-positive-infinity, but that requires diddling the fpsr,
2321 which kills performance. I tried turning this around and converting
2322 to a negative number, so that I could turn on /m, but either I did
 2323   it wrong or there's something else, because I wound up with the exact
2324 same single-bit error. There is a branch-less form of this same code:
2325
2326 srl $16,1,$1
2327 and $16,1,$2
2328 cmplt $16,0,$3
2329 or $1,$2,$2
2330 cmovge $16,$16,$2
2331 itoft $3,$f10
2332 itoft $2,$f11
2333 cvtqs $f11,$f11
2334 adds $f11,$f11,$f0
2335 fcmoveq $f10,$f11,$f0
2336
2337 I'm not using it because it's the same number of instructions as
2338 this branch-full form, and it has more serialized long latency
2339 instructions on the critical path.
2340
2341 For DFmode, we can avoid rounding errors by breaking up the word
2342 into two pieces, converting them separately, and adding them back:
2343
2344 LC0: .long 0,0x5f800000
2345
2346 itoft $16,$f11
2347 lda $2,LC0
093c0196 2348 cmplt $16,0,$1
2a42ba09 2349 cpyse $f11,$f31,$f10
2350 cpyse $f31,$f11,$f11
2351 s4addq $1,$2,$1
2352 lds $f12,0($1)
2353 cvtqt $f10,$f10
2354 cvtqt $f11,$f11
2355 addt $f12,$f10,$f0
2356 addt $f0,$f11,$f0
2357
2358 This doesn't seem to be a clear-cut win over the optabs form.
2359 It probably all depends on the distribution of numbers being
 2360   converted -- in the optabs form, everything but the high-bit-set case has a
2361 much lower minimum execution time. */
2362
2363void
92643d95 2364alpha_emit_floatuns (rtx operands[2])
2a42ba09 2365{
2366 rtx neglab, donelab, i0, i1, f0, in, out;
2367 enum machine_mode mode;
2368
2369 out = operands[0];
8e2025b4 2370 in = force_reg (DImode, operands[1]);
2a42ba09 2371 mode = GET_MODE (out);
2372 neglab = gen_label_rtx ();
2373 donelab = gen_label_rtx ();
2374 i0 = gen_reg_rtx (DImode);
2375 i1 = gen_reg_rtx (DImode);
2376 f0 = gen_reg_rtx (mode);
2377
7e69f45b 2378 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2a42ba09 2379
2380 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2381 emit_jump_insn (gen_jump (donelab));
093c0196 2382 emit_barrier ();
2a42ba09 2383
2384 emit_label (neglab);
2385
2386 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2387 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2388 emit_insn (gen_iordi3 (i0, i0, i1));
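  /* The shift/and/or sequence above folds the discarded low bit back in
     as a sticky bit.  Editorial illustration: for IN = 2^63 + 3, I0
     becomes 2^62 + 1, and FLOAT (I0) + FLOAT (I0) rounds to the same
     value a direct unsigned conversion of IN would.  */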
2389 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2390 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2391
2392 emit_label (donelab);
2393}
2394
3a2a3a7f 2395/* Generate the comparison for a conditional branch. */
2396
2397rtx
92643d95 2398alpha_emit_conditional_branch (enum rtx_code code)
3a2a3a7f 2399{
2400 enum rtx_code cmp_code, branch_code;
2401 enum machine_mode cmp_mode, branch_mode = VOIDmode;
b18b881f 2402 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3a2a3a7f 2403 rtx tem;
2404
915c336f 2405 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2406 {
8c3428a6 2407 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
915c336f 2408 op1 = const0_rtx;
2409 alpha_compare.fp_p = 0;
915c336f 2410 }
2411
3a2a3a7f 2412 /* The general case: fold the comparison code to the types of compares
2413 that we have, choosing the branch as necessary. */
2414 switch (code)
2415 {
2416 case EQ: case LE: case LT: case LEU: case LTU:
a4110d9a 2417 case UNORDERED:
3a2a3a7f 2418 /* We have these compares: */
2419 cmp_code = code, branch_code = NE;
2420 break;
2421
2422 case NE:
a4110d9a 2423 case ORDERED:
65abff06 2424 /* These must be reversed. */
a4110d9a 2425 cmp_code = reverse_condition (code), branch_code = EQ;
3a2a3a7f 2426 break;
2427
2428 case GE: case GT: case GEU: case GTU:
2429 /* For FP, we swap them, for INT, we reverse them. */
b18b881f 2430 if (alpha_compare.fp_p)
3a2a3a7f 2431 {
2432 cmp_code = swap_condition (code);
2433 branch_code = NE;
2434 tem = op0, op0 = op1, op1 = tem;
2435 }
2436 else
2437 {
2438 cmp_code = reverse_condition (code);
2439 branch_code = EQ;
2440 }
2441 break;
2442
2443 default:
4d10b463 2444 gcc_unreachable ();
3a2a3a7f 2445 }
2446
b18b881f 2447 if (alpha_compare.fp_p)
3a2a3a7f 2448 {
2449 cmp_mode = DFmode;
7f3be425 2450 if (flag_unsafe_math_optimizations)
3a2a3a7f 2451 {
2452 /* When we are not as concerned about non-finite values, and we
2453 are comparing against zero, we can branch directly. */
2454 if (op1 == CONST0_RTX (DFmode))
21f1e711 2455 cmp_code = UNKNOWN, branch_code = code;
3a2a3a7f 2456 else if (op0 == CONST0_RTX (DFmode))
2457 {
2458 /* Undo the swap we probably did just above. */
2459 tem = op0, op0 = op1, op1 = tem;
4899654e 2460 branch_code = swap_condition (cmp_code);
21f1e711 2461 cmp_code = UNKNOWN;
3a2a3a7f 2462 }
2463 }
2464 else
2465 {
d30e015b 2466 /* ??? We mark the branch mode to be CCmode to prevent the
9e7454d0 2467 compare and branch from being combined, since the compare
3a2a3a7f 2468 insn follows IEEE rules that the branch does not. */
2469 branch_mode = CCmode;
2470 }
2471 }
2472 else
2473 {
2474 cmp_mode = DImode;
2475
2476 /* The following optimizations are only for signed compares. */
2477 if (code != LEU && code != LTU && code != GEU && code != GTU)
2478 {
2479 /* Whee. Compare and branch against 0 directly. */
2480 if (op1 == const0_rtx)
21f1e711 2481 cmp_code = UNKNOWN, branch_code = code;
3a2a3a7f 2482
3a2f3420 2483	  /* If the constant doesn't fit into an immediate but can
2484 be generated by lda/ldah, we adjust the argument and
2485 compare against zero, so we can use beq/bne directly. */
62350d6c 2486 /* ??? Don't do this when comparing against symbols, otherwise
2487 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2488 be declared false out of hand (at least for non-weak). */
2489 else if (GET_CODE (op1) == CONST_INT
2490 && (code == EQ || code == NE)
2491 && !(symbolic_operand (op0, VOIDmode)
2492 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
3a2a3a7f 2493 {
1dffd068 2494 rtx n_op1 = GEN_INT (-INTVAL (op1));
2495
2496 if (! satisfies_constraint_I (op1)
2497 && (satisfies_constraint_K (n_op1)
2498 || satisfies_constraint_L (n_op1)))
2499 cmp_code = PLUS, branch_code = code, op1 = n_op1;
3a2a3a7f 2500 }
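	      /* Editorial illustration: op1 = 0x4567 is too wide for an
		 8-bit immediate, but -0x4567 fits an lda displacement, so
		 an equality test against 0x4567 becomes
		 "lda tmp,-0x4567(op0); beq/bne tmp".  */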
2501 }
3a2a3a7f 2502
d74ce6fa 2503 if (!reg_or_0_operand (op0, DImode))
2504 op0 = force_reg (DImode, op0);
2505 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2506 op1 = force_reg (DImode, op1);
2507 }
3a2a3a7f 2508
2509 /* Emit an initial compare instruction, if necessary. */
2510 tem = op0;
21f1e711 2511 if (cmp_code != UNKNOWN)
3a2a3a7f 2512 {
2513 tem = gen_reg_rtx (cmp_mode);
2514 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2515 }
2516
b18b881f 2517 /* Zero the operands. */
2518 memset (&alpha_compare, 0, sizeof (alpha_compare));
2519
3a2a3a7f 2520 /* Return the branch comparison. */
2521 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2522}
2523
d74ce6fa 2524/* Certain simplifications can be done to make invalid setcc operations
2525 valid. Return the final comparison, or NULL if we can't work. */
2526
2527rtx
92643d95 2528alpha_emit_setcc (enum rtx_code code)
d74ce6fa 2529{
2530 enum rtx_code cmp_code;
2531 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2532 int fp_p = alpha_compare.fp_p;
2533 rtx tmp;
2534
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2537
2538 if (fp_p && GET_MODE (op0) == TFmode)
2539 {
8c3428a6 2540 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
d74ce6fa 2541 op1 = const0_rtx;
2542 fp_p = 0;
d74ce6fa 2543 }
2544
2545 if (fp_p && !TARGET_FIX)
2546 return NULL_RTX;
2547
2548 /* The general case: fold the comparison code to the types of compares
2549 that we have, choosing the branch as necessary. */
2550
21f1e711 2551 cmp_code = UNKNOWN;
d74ce6fa 2552 switch (code)
2553 {
2554 case EQ: case LE: case LT: case LEU: case LTU:
2555 case UNORDERED:
2556 /* We have these compares. */
2557 if (fp_p)
2558 cmp_code = code, code = NE;
2559 break;
2560
2561 case NE:
2562 if (!fp_p && op1 == const0_rtx)
2563 break;
8e262b5e 2564 /* FALLTHRU */
d74ce6fa 2565
2566 case ORDERED:
2567 cmp_code = reverse_condition (code);
2568 code = EQ;
2569 break;
2570
2571 case GE: case GT: case GEU: case GTU:
75b3314a 2572 /* These normally need swapping, but for integer zero we have
bc882521 2573 special patterns that recognize swapped operands. */
2574 if (!fp_p && op1 == const0_rtx)
2575 break;
d74ce6fa 2576 code = swap_condition (code);
2577 if (fp_p)
2578 cmp_code = code, code = NE;
2579 tmp = op0, op0 = op1, op1 = tmp;
2580 break;
2581
2582 default:
4d10b463 2583 gcc_unreachable ();
d74ce6fa 2584 }
2585
2586 if (!fp_p)
2587 {
bc882521 2588 if (!register_operand (op0, DImode))
d74ce6fa 2589 op0 = force_reg (DImode, op0);
2590 if (!reg_or_8bit_operand (op1, DImode))
2591 op1 = force_reg (DImode, op1);
2592 }
2593
2594 /* Emit an initial compare instruction, if necessary. */
21f1e711 2595 if (cmp_code != UNKNOWN)
d74ce6fa 2596 {
2597 enum machine_mode mode = fp_p ? DFmode : DImode;
2598
2599 tmp = gen_reg_rtx (mode);
2600 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2601 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2602
2603 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2604 op1 = const0_rtx;
2605 }
2606
2607 /* Return the setcc comparison. */
2608 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2609}
2610
3a2a3a7f 2611
996a379d 2612/* Rewrite a comparison against zero CMP of the form
2613 (CODE (cc0) (const_int 0)) so it can be written validly in
2614 a conditional move (if_then_else CMP ...).
e3e08e7f 2615 If both of the operands that set cc0 are nonzero we must emit
996a379d 2616 an insn to perform the compare (it can't be done within
65abff06 2617 the conditional move). */
92643d95 2618
996a379d 2619rtx
92643d95 2620alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
996a379d 2621{
23be97c5 2622 enum rtx_code code = GET_CODE (cmp);
c60bc286 2623 enum rtx_code cmov_code = NE;
b18b881f 2624 rtx op0 = alpha_compare.op0;
2625 rtx op1 = alpha_compare.op1;
2626 int fp_p = alpha_compare.fp_p;
23be97c5 2627 enum machine_mode cmp_mode
2628 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
b18b881f 2629 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
3a2a3a7f 2630 enum machine_mode cmov_mode = VOIDmode;
7f3be425 2631 int local_fast_math = flag_unsafe_math_optimizations;
23be97c5 2632 rtx tem;
996a379d 2633
b18b881f 2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2636
2637 if (fp_p != FLOAT_MODE_P (mode))
d6cc9868 2638 {
2639 enum rtx_code cmp_code;
2640
2641 if (! TARGET_FIX)
2642 return 0;
2643
2644 /* If we have fp<->int register move instructions, do a cmov by
2645 performing the comparison in fp registers, and move the
e3e08e7f 2646 zero/nonzero value to integer registers, where we can then
d6cc9868 2647 use a normal cmov, or vice-versa. */
2648
2649 switch (code)
2650 {
2651 case EQ: case LE: case LT: case LEU: case LTU:
2652 /* We have these compares. */
2653 cmp_code = code, code = NE;
2654 break;
2655
2656 case NE:
2657 /* This must be reversed. */
2658 cmp_code = EQ, code = EQ;
2659 break;
2660
2661 case GE: case GT: case GEU: case GTU:
75b3314a 2662 /* These normally need swapping, but for integer zero we have
2663 special patterns that recognize swapped operands. */
2664 if (!fp_p && op1 == const0_rtx)
88f8f2a2 2665 cmp_code = code, code = NE;
2666 else
2667 {
2668 cmp_code = swap_condition (code);
2669 code = NE;
2670 tem = op0, op0 = op1, op1 = tem;
2671 }
d6cc9868 2672 break;
2673
2674 default:
4d10b463 2675 gcc_unreachable ();
d6cc9868 2676 }
2677
2678 tem = gen_reg_rtx (cmp_op_mode);
2679 emit_insn (gen_rtx_SET (VOIDmode, tem,
2680 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2681 op0, op1)));
2682
2683 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2684 op0 = gen_lowpart (cmp_op_mode, tem);
2685 op1 = CONST0_RTX (cmp_op_mode);
2686 fp_p = !fp_p;
2687 local_fast_math = 1;
2688 }
996a379d 2689
2690 /* We may be able to use a conditional move directly.
65abff06 2691 This avoids emitting spurious compares. */
2a42ba09 2692 if (signed_comparison_operator (cmp, VOIDmode)
d6cc9868 2693 && (!fp_p || local_fast_math)
23be97c5 2694 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
941522d6 2695 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
996a379d 2696
3029ee00 2697 /* We can't put the comparison inside the conditional move;
996a379d 2698 emit a compare instruction and put that inside the
23be97c5 2699 conditional move. Make sure we emit only comparisons we have;
2700 swap or reverse as necessary. */
996a379d 2701
e1ba4a27 2702 if (!can_create_pseudo_p ())
3029ee00 2703 return NULL_RTX;
2704
996a379d 2705 switch (code)
2706 {
23be97c5 2707 case EQ: case LE: case LT: case LEU: case LTU:
2708 /* We have these compares: */
996a379d 2709 break;
23be97c5 2710
996a379d 2711 case NE:
65abff06 2712 /* This must be reversed. */
23be97c5 2713 code = reverse_condition (code);
c60bc286 2714 cmov_code = EQ;
996a379d 2715 break;
23be97c5 2716
2717 case GE: case GT: case GEU: case GTU:
d74ce6fa 2718 /* These must be swapped. */
88f8f2a2 2719 if (op1 != CONST0_RTX (cmp_mode))
2720 {
2721 code = swap_condition (code);
2722 tem = op0, op0 = op1, op1 = tem;
2723 }
996a379d 2724 break;
23be97c5 2725
996a379d 2726 default:
4d10b463 2727 gcc_unreachable ();
996a379d 2728 }
2729
d74ce6fa 2730 if (!fp_p)
2731 {
2732 if (!reg_or_0_operand (op0, DImode))
2733 op0 = force_reg (DImode, op0);
2734 if (!reg_or_8bit_operand (op1, DImode))
2735 op1 = force_reg (DImode, op1);
2736 }
2737
b9b4428b 2738 /* ??? We mark the branch mode to be CCmode to prevent the compare
3a2a3a7f 2739 and cmov from being combined, since the compare insn follows IEEE
2740 rules that the cmov does not. */
d6cc9868 2741 if (fp_p && !local_fast_math)
3a2a3a7f 2742 cmov_mode = CCmode;
2743
23be97c5 2744 tem = gen_reg_rtx (cmp_op_mode);
941522d6 2745 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
3a2a3a7f 2746 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
996a379d 2747}
bbf31a61 2748
2749/* Simplify a conditional move of two constants into a setcc with
2750 arithmetic. This is done with a splitter since combine would
2751 just undo the work if done during code generation. It also catches
2752 cases we wouldn't have before cse. */
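/* Editorial illustration: for "dest = (cond == 0) ? 8 : 0" we have t = 8,
   f = 0 and diff = 8, so the splitter emits "cmpeq cond,0,tmp;
   sll tmp,3,dest" rather than keeping a conditional move of two
   constants.  */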
2753
2754int
92643d95 2755alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2756 rtx t_rtx, rtx f_rtx)
bbf31a61 2757{
2758 HOST_WIDE_INT t, f, diff;
2759 enum machine_mode mode;
2760 rtx target, subtarget, tmp;
2761
2762 mode = GET_MODE (dest);
2763 t = INTVAL (t_rtx);
2764 f = INTVAL (f_rtx);
2765 diff = t - f;
2766
2767 if (((code == NE || code == EQ) && diff < 0)
2768 || (code == GE || code == GT))
2769 {
2770 code = reverse_condition (code);
2771 diff = t, t = f, f = diff;
2772 diff = t - f;
2773 }
2774
2775 subtarget = target = dest;
2776 if (mode != DImode)
2777 {
2778 target = gen_lowpart (DImode, dest);
e1ba4a27 2779 if (can_create_pseudo_p ())
bbf31a61 2780 subtarget = gen_reg_rtx (DImode);
2781 else
2782 subtarget = target;
2783 }
64656695 2784 /* Below, we must be careful to use copy_rtx on target and subtarget
2785 in intermediate insns, as they may be a subreg rtx, which may not
2786 be shared. */
bbf31a61 2787
2788 if (f == 0 && exact_log2 (diff) > 0
8d232dc7 2789 /* On EV6, we've got enough shifters to make non-arithmetic shifts
bbf31a61 2790 viable over a longer latency cmove. On EV5, the E0 slot is a
65abff06 2791 scarce resource, and on EV4 shift has the same latency as a cmove. */
fb64edde 2792 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
bbf31a61 2793 {
2794 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
64656695 2795 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
bbf31a61 2796
64656695 2797 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2798 GEN_INT (exact_log2 (t)));
bbf31a61 2799 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2800 }
2801 else if (f == 0 && t == -1)
2802 {
2803 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
64656695 2804 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
bbf31a61 2805
64656695 2806 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
bbf31a61 2807 }
2808 else if (diff == 1 || diff == 4 || diff == 8)
2809 {
2810 rtx add_op;
2811
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
64656695 2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
bbf31a61 2814
2815 if (diff == 1)
64656695 2816 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
bbf31a61 2817 else
2818 {
2819 add_op = GEN_INT (f);
2820 if (sext_add_operand (add_op, mode))
2821 {
64656695 2822 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2823 GEN_INT (diff));
bbf31a61 2824 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2825 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2826 }
2827 else
2828 return 0;
2829 }
2830 }
2831 else
2832 return 0;
2833
2834 return 1;
2835}
34377880 2836\f
915c336f 2837/* Look up the function X_floating library function name for the
2838 given operation. */
2839
2d7c492e 2840struct xfloating_op GTY(())
2841{
2842 const enum rtx_code code;
7035b2ab 2843 const char *const GTY((skip)) osf_func;
2844 const char *const GTY((skip)) vms_func;
2d7c492e 2845 rtx libcall;
2846};
2847
9e7454d0 2848static GTY(()) struct xfloating_op xfloating_ops[] =
2d7c492e 2849{
2850 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2851 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2852 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2853 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2854 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2855 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2856 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2857 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2858 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2859 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2860 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2861 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2862 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2863 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2864 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2865};
2866
2867static GTY(()) struct xfloating_op vax_cvt_ops[] =
2868{
2869 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2870 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2871};
2872
2873static rtx
92643d95 2874alpha_lookup_xfloating_lib_func (enum rtx_code code)
915c336f 2875{
2d7c492e 2876 struct xfloating_op *ops = xfloating_ops;
2877 long n = ARRAY_SIZE (xfloating_ops);
915c336f 2878 long i;
2879
8c3428a6 2880 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2881
2d7c492e 2882 /* How irritating. Nothing to key off for the main table. */
2883 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
915c336f 2884 {
2d7c492e 2885 ops = vax_cvt_ops;
2886 n = ARRAY_SIZE (vax_cvt_ops);
915c336f 2887 }
2888
2d7c492e 2889 for (i = 0; i < n; ++i, ++ops)
2890 if (ops->code == code)
2891 {
2892 rtx func = ops->libcall;
2893 if (!func)
2894 {
2895 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2896 ? ops->vms_func : ops->osf_func);
2897 ops->libcall = func;
2898 }
2899 return func;
2900 }
915c336f 2901
4d10b463 2902 gcc_unreachable ();
915c336f 2903}
2904
2905/* Most X_floating operations take the rounding mode as an argument.
2906 Compute that here. */
2907
2908static int
92643d95 2909alpha_compute_xfloating_mode_arg (enum rtx_code code,
2910 enum alpha_fp_rounding_mode round)
915c336f 2911{
2912 int mode;
2913
2914 switch (round)
2915 {
2916 case ALPHA_FPRM_NORM:
2917 mode = 2;
2918 break;
2919 case ALPHA_FPRM_MINF:
2920 mode = 1;
2921 break;
2922 case ALPHA_FPRM_CHOP:
2923 mode = 0;
2924 break;
2925 case ALPHA_FPRM_DYN:
2926 mode = 4;
2927 break;
2928 default:
4d10b463 2929 gcc_unreachable ();
915c336f 2930
2931 /* XXX For reference, round to +inf is mode = 3. */
2932 }
2933
2934 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2935 mode |= 0x10000;
2936
2937 return mode;
2938}
2939
2940/* Emit an X_floating library function call.
2941
2942 Note that these functions do not follow normal calling conventions:
2943 TFmode arguments are passed in two integer registers (as opposed to
9e7454d0 2944 indirect); TFmode return values appear in R16+R17.
915c336f 2945
2d7c492e 2946 FUNC is the function to call.
915c336f 2947 TARGET is where the output belongs.
2948 OPERANDS are the inputs.
2949 NOPERANDS is the count of inputs.
2950 EQUIV is the expression equivalent for the function.
2951*/
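/* Editorial note, inferred from the code below: a TFmode x TFmode
   operation thus passes its first argument in $16-$17, its second in
   $18-$19, any trailing integer argument (such as the rounding mode) in
   the next integer register, and returns a TFmode result in $16-$17.  */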
2952
2953static void
2d7c492e 2954alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
92643d95 2955 int noperands, rtx equiv)
915c336f 2956{
2957 rtx usage = NULL_RTX, tmp, reg;
2958 int regno = 16, i;
2959
2960 start_sequence ();
2961
2962 for (i = 0; i < noperands; ++i)
2963 {
2964 switch (GET_MODE (operands[i]))
2965 {
2966 case TFmode:
2967 reg = gen_rtx_REG (TFmode, regno);
2968 regno += 2;
2969 break;
2970
2971 case DFmode:
2972 reg = gen_rtx_REG (DFmode, regno + 32);
2973 regno += 1;
2974 break;
2975
2976 case VOIDmode:
4d10b463 2977 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
8e262b5e 2978 /* FALLTHRU */
915c336f 2979 case DImode:
2980 reg = gen_rtx_REG (DImode, regno);
2981 regno += 1;
2982 break;
2983
2984 default:
4d10b463 2985 gcc_unreachable ();
915c336f 2986 }
2987
2988 emit_move_insn (reg, operands[i]);
2989 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2990 }
2991
2992 switch (GET_MODE (target))
2993 {
2994 case TFmode:
2995 reg = gen_rtx_REG (TFmode, 16);
2996 break;
2997 case DFmode:
2998 reg = gen_rtx_REG (DFmode, 32);
2999 break;
3000 case DImode:
3001 reg = gen_rtx_REG (DImode, 0);
3002 break;
3003 default:
4d10b463 3004 gcc_unreachable ();
915c336f 3005 }
3006
2d7c492e 3007 tmp = gen_rtx_MEM (QImode, func);
2c6f8e4d 3008 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
915c336f 3009 const0_rtx, const0_rtx));
3010 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2d7c492e 3011 CONST_OR_PURE_CALL_P (tmp) = 1;
915c336f 3012
3013 tmp = get_insns ();
3014 end_sequence ();
3015
3016 emit_libcall_block (tmp, target, reg, equiv);
3017}
3018
3019/* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3020
3021void
92643d95 3022alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
915c336f 3023{
2d7c492e 3024 rtx func;
915c336f 3025 int mode;
b90b6519 3026 rtx out_operands[3];
915c336f 3027
3028 func = alpha_lookup_xfloating_lib_func (code);
3029 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3030
b90b6519 3031 out_operands[0] = operands[1];
3032 out_operands[1] = operands[2];
3033 out_operands[2] = GEN_INT (mode);
9e7454d0 3034 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
915c336f 3035 gen_rtx_fmt_ee (code, TFmode, operands[1],
3036 operands[2]));
3037}
3038
3039/* Emit an X_floating library function call for a comparison. */
3040
3041static rtx
8c3428a6 3042alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
915c336f 3043{
8c3428a6 3044 enum rtx_code cmp_code, res_code;
3045 rtx func, out, operands[2];
915c336f 3046
8c3428a6 3047 /* X_floating library comparison functions return
3048 -1 unordered
3049 0 false
3050 1 true
3051 Convert the compare against the raw return value. */
3052
3053 cmp_code = *pcode;
3054 switch (cmp_code)
3055 {
3056 case UNORDERED:
3057 cmp_code = EQ;
3058 res_code = LT;
3059 break;
3060 case ORDERED:
3061 cmp_code = EQ;
3062 res_code = GE;
3063 break;
3064 case NE:
3065 res_code = NE;
3066 break;
3067 case EQ:
3068 case LT:
3069 case GT:
3070 case LE:
3071 case GE:
3072 res_code = GT;
3073 break;
3074 default:
3075 gcc_unreachable ();
3076 }
3077 *pcode = res_code;
3078
3079 func = alpha_lookup_xfloating_lib_func (cmp_code);
915c336f 3080
3081 operands[0] = op0;
3082 operands[1] = op1;
3083 out = gen_reg_rtx (DImode);
3084
d1324b4b 3085 /* ??? Strange mode for equiv because what's actually returned
3086 is -1,0,1, not a proper boolean value. */
3087 alpha_emit_xfloating_libcall (func, out, operands, 2,
8c3428a6 3088 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
915c336f 3089
3090 return out;
3091}
3092
3093/* Emit an X_floating library function call for a conversion. */
3094
3095void
caf6f044 3096alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
915c336f 3097{
3098 int noperands = 1, mode;
b90b6519 3099 rtx out_operands[2];
2d7c492e 3100 rtx func;
caf6f044 3101 enum rtx_code code = orig_code;
3102
3103 if (code == UNSIGNED_FIX)
3104 code = FIX;
915c336f 3105
3106 func = alpha_lookup_xfloating_lib_func (code);
3107
b90b6519 3108 out_operands[0] = operands[1];
3109
915c336f 3110 switch (code)
3111 {
3112 case FIX:
3113 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
b90b6519 3114 out_operands[1] = GEN_INT (mode);
8581412d 3115 noperands = 2;
915c336f 3116 break;
3117 case FLOAT_TRUNCATE:
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
b90b6519 3119 out_operands[1] = GEN_INT (mode);
8581412d 3120 noperands = 2;
915c336f 3121 break;
3122 default:
3123 break;
3124 }
3125
b90b6519 3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
caf6f044 3127 gen_rtx_fmt_e (orig_code,
3128 GET_MODE (operands[0]),
915c336f 3129 operands[1]));
3130}
3420680b 3131
10c77d2b 3132/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3133 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3134 guarantee that the sequence
3135 set (OP[0] OP[2])
3136 set (OP[1] OP[3])
3137 is valid. Naturally, output operand ordering is little-endian.
3138 This is used by *movtf_internal and *movti_internal. */
3139
3420680b 3140void
10c77d2b 3141alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3142 bool fixup_overlap)
3420680b 3143{
4d10b463 3144 switch (GET_CODE (operands[1]))
3420680b 3145 {
4d10b463 3146 case REG:
3420680b 3147 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3148 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
4d10b463 3149 break;
3150
3151 case MEM:
e513d163 3152 operands[3] = adjust_address (operands[1], DImode, 8);
3153 operands[2] = adjust_address (operands[1], DImode, 0);
4d10b463 3154 break;
3155
10c77d2b 3156 case CONST_INT:
e4a93d80 3157 case CONST_DOUBLE:
10c77d2b 3158 gcc_assert (operands[1] == CONST0_RTX (mode));
4d10b463 3159 operands[2] = operands[3] = const0_rtx;
3160 break;
3161
3162 default:
3163 gcc_unreachable ();
3420680b 3164 }
3420680b 3165
4d10b463 3166 switch (GET_CODE (operands[0]))
3420680b 3167 {
4d10b463 3168 case REG:
3420680b 3169 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3170 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
4d10b463 3171 break;
3172
3173 case MEM:
e513d163 3174 operands[1] = adjust_address (operands[0], DImode, 8);
3175 operands[0] = adjust_address (operands[0], DImode, 0);
4d10b463 3176 break;
3177
3178 default:
3179 gcc_unreachable ();
3420680b 3180 }
10c77d2b 3181
3182 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3183 {
3184 rtx tmp;
3185 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3186 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3187 }
3420680b 3188}
2267ca84 3189
9e7454d0 3190/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3191 op2 is a register containing the sign bit, operation is the
2267ca84 3192 logical operation to be performed. */
3193
3194void
92643d95 3195alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
2267ca84 3196{
3197 rtx high_bit = operands[2];
3198 rtx scratch;
3199 int move;
3200
10c77d2b 3201 alpha_split_tmode_pair (operands, TFmode, false);
2267ca84 3202
e3e08e7f 3203 /* Detect three flavors of operand overlap. */
2267ca84 3204 move = 1;
3205 if (rtx_equal_p (operands[0], operands[2]))
3206 move = 0;
3207 else if (rtx_equal_p (operands[1], operands[2]))
3208 {
3209 if (rtx_equal_p (operands[0], high_bit))
3210 move = 2;
3211 else
3212 move = -1;
3213 }
3214
3215 if (move < 0)
3216 emit_move_insn (operands[0], operands[2]);
3217
3218 /* ??? If the destination overlaps both source tf and high_bit, then
3219 assume source tf is dead in its entirety and use the other half
3220 for a scratch register. Otherwise "scratch" is just the proper
3221 destination register. */
3222 scratch = operands[move < 2 ? 1 : 3];
3223
3224 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3225
3226 if (move > 0)
3227 {
3228 emit_move_insn (operands[0], operands[2]);
3229 if (move > 1)
3230 emit_move_insn (operands[1], scratch);
3231 }
3232}
915c336f 3233\f
34377880 3234/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3235 unaligned data:
3236
3237 unsigned: signed:
3238 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3239 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3240 lda r3,X(r11) lda r3,X+2(r11)
3241 extwl r1,r3,r1 extql r1,r3,r1
3242 extwh r2,r3,r2 extqh r2,r3,r2
 3243	   or r1,r2,r1		or r1,r2,r1
3244 sra r1,48,r1
3245
3246 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3248 lda r3,X(r11) lda r3,X(r11)
3249 extll r1,r3,r1 extll r1,r3,r1
3250 extlh r2,r3,r2 extlh r2,r3,r2
 3251	   or r1,r2,r1		addl r1,r2,r1
3252
3253 quad: ldq_u r1,X(r11)
3254 ldq_u r2,X+7(r11)
3255 lda r3,X(r11)
3256 extql r1,r3,r1
3257 extqh r2,r3,r2
 3258	   or r1,r2,r1
3259*/
3260
3261void
92643d95 3262alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3263 HOST_WIDE_INT ofs, int sign)
34377880 3264{
1f0ce6a6 3265 rtx meml, memh, addr, extl, exth, tmp, mema;
2cc46ade 3266 enum machine_mode mode;
34377880 3267
9467fa25 3268 if (TARGET_BWX && size == 2)
3269 {
b83bea6c 3270 meml = adjust_address (mem, QImode, ofs);
3271 memh = adjust_address (mem, QImode, ofs+1);
9467fa25 3272 if (BYTES_BIG_ENDIAN)
3273 tmp = meml, meml = memh, memh = tmp;
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276 emit_insn (gen_zero_extendqidi2 (extl, meml));
3277 emit_insn (gen_zero_extendqidi2 (exth, memh));
3278 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3279 NULL, 1, OPTAB_LIB_WIDEN);
3280 addr = expand_simple_binop (DImode, IOR, extl, exth,
3281 NULL, 1, OPTAB_LIB_WIDEN);
3282
3283 if (sign && GET_MODE (tgt) != HImode)
3284 {
3285 addr = gen_lowpart (HImode, addr);
3286 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3287 }
3288 else
3289 {
3290 if (GET_MODE (tgt) != DImode)
3291 addr = gen_lowpart (GET_MODE (tgt), addr);
3292 emit_move_insn (tgt, addr);
3293 }
3294 return;
3295 }
3296
34377880 3297 meml = gen_reg_rtx (DImode);
3298 memh = gen_reg_rtx (DImode);
3299 addr = gen_reg_rtx (DImode);
3300 extl = gen_reg_rtx (DImode);
3301 exth = gen_reg_rtx (DImode);
3302
1f0ce6a6 3303 mema = XEXP (mem, 0);
3304 if (GET_CODE (mema) == LO_SUM)
3305 mema = force_reg (Pmode, mema);
3306
3024e9f8 3307 /* AND addresses cannot be in any alias set, since they may implicitly
9e7454d0 3308 alias surrounding code. Ideally we'd have some alias set that
3024e9f8 3309 covered all types except those with alignment 8 or higher. */
3310
3311 tmp = change_address (mem, DImode,
9e7454d0 3312 gen_rtx_AND (DImode,
1f0ce6a6 3313 plus_constant (mema, ofs),
3024e9f8 3314 GEN_INT (-8)));
ab6ab77e 3315 set_mem_alias_set (tmp, 0);
3024e9f8 3316 emit_move_insn (meml, tmp);
3317
3318 tmp = change_address (mem, DImode,
9e7454d0 3319 gen_rtx_AND (DImode,
1f0ce6a6 3320 plus_constant (mema, ofs + size - 1),
3024e9f8 3321 GEN_INT (-8)));
ab6ab77e 3322 set_mem_alias_set (tmp, 0);
3024e9f8 3323 emit_move_insn (memh, tmp);
34377880 3324
9caef960 3325 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3326 {
3327 emit_move_insn (addr, plus_constant (mema, -1));
3328
3329 emit_insn (gen_extqh_be (extl, meml, addr));
3330 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3331
3332 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3333 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3334 addr, 1, OPTAB_WIDEN);
3335 }
3336 else if (sign && size == 2)
34377880 3337 {
1f0ce6a6 3338 emit_move_insn (addr, plus_constant (mema, ofs+2));
34377880 3339
9caef960 3340 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3341 emit_insn (gen_extqh_le (exth, memh, addr));
34377880 3342
ba4a7733 3343      /* We must use tgt here for the target. The Alpha-vms port fails if we use
3344 addr for the target, because addr is marked as a pointer and combine
85c36fd1 3345 knows that pointers are always sign-extended 32-bit values. */
ba4a7733 3346 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
9e7454d0 3347 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
2cc46ade 3348 addr, 1, OPTAB_WIDEN);
34377880 3349 }
2cc46ade 3350 else
34377880 3351 {
9caef960 3352 if (WORDS_BIG_ENDIAN)
2cc46ade 3353 {
9caef960 3354 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3355 switch ((int) size)
3356 {
3357 case 2:
3358 emit_insn (gen_extwh_be (extl, meml, addr));
3359 mode = HImode;
3360 break;
34377880 3361
9caef960 3362 case 4:
3363 emit_insn (gen_extlh_be (extl, meml, addr));
3364 mode = SImode;
3365 break;
34377880 3366
9caef960 3367 case 8:
3368 emit_insn (gen_extqh_be (extl, meml, addr));
3369 mode = DImode;
3370 break;
915c336f 3371
9caef960 3372 default:
4d10b463 3373 gcc_unreachable ();
9caef960 3374 }
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3376 }
3377 else
3378 {
3379 emit_move_insn (addr, plus_constant (mema, ofs));
3380 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3381 switch ((int) size)
3382 {
3383 case 2:
3384 emit_insn (gen_extwh_le (exth, memh, addr));
3385 mode = HImode;
3386 break;
3387
3388 case 4:
3389 emit_insn (gen_extlh_le (exth, memh, addr));
3390 mode = SImode;
3391 break;
3392
3393 case 8:
3394 emit_insn (gen_extqh_le (exth, memh, addr));
3395 mode = DImode;
3396 break;
3397
3398 default:
4d10b463 3399 gcc_unreachable ();
9caef960 3400 }
2cc46ade 3401 }
3402
3403 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3404 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3405 sign, OPTAB_WIDEN);
34377880 3406 }
3407
2cc46ade 3408 if (addr != tgt)
9467fa25 3409 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
34377880 3410}
3411
3412/* Similarly, use ins and msk instructions to perform unaligned stores. */
3413
3414void
92643d95 3415alpha_expand_unaligned_store (rtx dst, rtx src,
3416 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
34377880 3417{
1f0ce6a6 3418 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
9e7454d0 3419
9467fa25 3420 if (TARGET_BWX && size == 2)
3421 {
3422 if (src != const0_rtx)
3423 {
3424 dstl = gen_lowpart (QImode, src);
3425 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3426 NULL, 1, OPTAB_LIB_WIDEN);
3427 dsth = gen_lowpart (QImode, dsth);
3428 }
3429 else
3430 dstl = dsth = const0_rtx;
3431
b83bea6c 3432 meml = adjust_address (dst, QImode, ofs);
3433 memh = adjust_address (dst, QImode, ofs+1);
9467fa25 3434 if (BYTES_BIG_ENDIAN)
3435 addr = meml, meml = memh, memh = addr;
3436
3437 emit_move_insn (meml, dstl);
3438 emit_move_insn (memh, dsth);
3439 return;
3440 }
3441
34377880 3442 dstl = gen_reg_rtx (DImode);
3443 dsth = gen_reg_rtx (DImode);
3444 insl = gen_reg_rtx (DImode);
3445 insh = gen_reg_rtx (DImode);
3446
1f0ce6a6 3447 dsta = XEXP (dst, 0);
3448 if (GET_CODE (dsta) == LO_SUM)
3449 dsta = force_reg (Pmode, dsta);
3450
3024e9f8 3451 /* AND addresses cannot be in any alias set, since they may implicitly
9e7454d0 3452 alias surrounding code. Ideally we'd have some alias set that
3024e9f8 3453 covered all types except those with alignment 8 or higher. */
3454
34377880 3455 meml = change_address (dst, DImode,
9e7454d0 3456 gen_rtx_AND (DImode,
1f0ce6a6 3457 plus_constant (dsta, ofs),
941522d6 3458 GEN_INT (-8)));
ab6ab77e 3459 set_mem_alias_set (meml, 0);
3024e9f8 3460
34377880 3461 memh = change_address (dst, DImode,
9e7454d0 3462 gen_rtx_AND (DImode,
1f0ce6a6 3463 plus_constant (dsta, ofs + size - 1),
941522d6 3464 GEN_INT (-8)));
ab6ab77e 3465 set_mem_alias_set (memh, 0);
34377880 3466
3467 emit_move_insn (dsth, memh);
3468 emit_move_insn (dstl, meml);
9caef960 3469 if (WORDS_BIG_ENDIAN)
34377880 3470 {
9caef960 3471 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3472
3473 if (src != const0_rtx)
3474 {
3475 switch ((int) size)
3476 {
3477 case 2:
3478 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3479 break;
3480 case 4:
3481 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3482 break;
3483 case 8:
3484 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3485 break;
3486 }
3487 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3488 GEN_INT (size*8), addr));
3489 }
34377880 3490
29768226 3491 switch ((int) size)
34377880 3492 {
3493 case 2:
9caef960 3494 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
34377880 3495 break;
3496 case 4:
9caef960 3497 {
ae4cd3a5 3498 rtx msk = immed_double_const (0xffffffff, 0, DImode);
9caef960 3499 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
ae4cd3a5 3500 break;
9caef960 3501 }
ae4cd3a5 3502 case 8:
3503 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
34377880 3504 break;
3505 }
9caef960 3506
3507 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
34377880 3508 }
9caef960 3509 else
3510 {
3511 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
34377880 3512
b739144f 3513 if (src != CONST0_RTX (GET_MODE (src)))
9caef960 3514 {
3515 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3516 GEN_INT (size*8), addr));
34377880 3517
9caef960 3518 switch ((int) size)
3519 {
3520 case 2:
3521 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3522 break;
3523 case 4:
3524 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3525 break;
3526 case 8:
3527 emit_insn (gen_insql_le (insl, src, addr));
3528 break;
3529 }
3530 }
3531
3532 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3533
3534 switch ((int) size)
3535 {
3536 case 2:
3537 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3538 break;
3539 case 4:
9caef960 3540 {
ae4cd3a5 3541 rtx msk = immed_double_const (0xffffffff, 0, DImode);
9caef960 3542 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
ae4cd3a5 3543 break;
9caef960 3544 }
ae4cd3a5 3545 case 8:
3546 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
9caef960 3547 break;
3548 }
34377880 3549 }
3550
b739144f 3551 if (src != CONST0_RTX (GET_MODE (src)))
34377880 3552 {
2cc46ade 3553 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3554 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
34377880 3555 }
9e7454d0 3556
9caef960 3557 if (WORDS_BIG_ENDIAN)
3558 {
3559 emit_move_insn (meml, dstl);
3560 emit_move_insn (memh, dsth);
3561 }
3562 else
3563 {
3564 /* Must store high before low for degenerate case of aligned. */
3565 emit_move_insn (memh, dsth);
3566 emit_move_insn (meml, dstl);
3567 }
34377880 3568}
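
/* For illustration only -- a user-level C sketch (a hypothetical helper,
   not a GCC interface) of the little-endian read-modify-write idiom the
   expander above emits for a 4-byte store, assuming a 64-bit unsigned
   long; a 128-bit intermediate stands in for the ins/msk pairs.  */

static void
store_unaligned_u32 (unsigned char *p, unsigned int val)
{
  unsigned long *lo = (unsigned long *) ((unsigned long) p & -8UL);
  unsigned long *hi = (unsigned long *) (((unsigned long) p + 3) & -8UL);
  int shift = ((unsigned long) p & 7) * 8;
  unsigned __int128 mask = (unsigned __int128) 0xffffffffUL << shift;
  unsigned __int128 ins = (unsigned __int128) val << shift;
  unsigned long dstl = (*lo & ~(unsigned long) mask) | (unsigned long) ins;
  unsigned long dsth = (*hi & ~(unsigned long) (mask >> 64))
		       | (unsigned long) (ins >> 64);
  *hi = dsth;	/* High before low: in the degenerate aligned case    */
  *lo = dstl;	/* (LO == HI) the fully merged low word must win.     */
}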
3569
2cc46ade 3570/* The block move code tries to maximize speed by separating loads and
3571 stores at the expense of register pressure: we load all of the data
 3572   before we store it back out.  There are two secondary effects worth
 3573   mentioning: it speeds copying to/from both aligned and unaligned
 3574   buffers, and it makes the code significantly easier to write.  */
34377880 3575
2cc46ade 3576#define MAX_MOVE_WORDS 8
3577
3578/* Load an integral number of consecutive unaligned quadwords. */
34377880 3579
3580static void
92643d95 3581alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3582 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
34377880 3583{
3584 rtx const im8 = GEN_INT (-8);
3585 rtx const i64 = GEN_INT (64);
2cc46ade 3586 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1f0ce6a6 3587 rtx sreg, areg, tmp, smema;
34377880 3588 HOST_WIDE_INT i;
3589
1f0ce6a6 3590 smema = XEXP (smem, 0);
3591 if (GET_CODE (smema) == LO_SUM)
3592 smema = force_reg (Pmode, smema);
3593
34377880 3594 /* Generate all the tmp registers we need. */
3595 for (i = 0; i < words; ++i)
2cc46ade 3596 {
3597 data_regs[i] = out_regs[i];
3598 ext_tmps[i] = gen_reg_rtx (DImode);
3599 }
3600 data_regs[words] = gen_reg_rtx (DImode);
3601
3602 if (ofs != 0)
e513d163 3603 smem = adjust_address (smem, GET_MODE (smem), ofs);
9e7454d0 3604
34377880 3605 /* Load up all of the source data. */
3606 for (i = 0; i < words; ++i)
3607 {
3024e9f8 3608 tmp = change_address (smem, DImode,
3609 gen_rtx_AND (DImode,
1f0ce6a6 3610 plus_constant (smema, 8*i),
3024e9f8 3611 im8));
ab6ab77e 3612 set_mem_alias_set (tmp, 0);
3024e9f8 3613 emit_move_insn (data_regs[i], tmp);
34377880 3614 }
3024e9f8 3615
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
1f0ce6a6 3618 plus_constant (smema, 8*words - 1),
3024e9f8 3619 im8));
ab6ab77e 3620 set_mem_alias_set (tmp, 0);
3024e9f8 3621 emit_move_insn (data_regs[words], tmp);
34377880 3622
3623 /* Extract the half-word fragments. Unfortunately DEC decided to make
9e7454d0 3624 extxh with offset zero a noop instead of zeroing the register, so
34377880 3625 we must take care of that edge condition ourselves with cmov. */
3626
1f0ce6a6 3627 sreg = copy_addr_to_reg (smema);
9e7454d0 3628 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
2cc46ade 3629 1, OPTAB_WIDEN);
9caef960 3630 if (WORDS_BIG_ENDIAN)
3631 emit_move_insn (sreg, plus_constant (sreg, 7));
34377880 3632 for (i = 0; i < words; ++i)
3633 {
9caef960 3634 if (WORDS_BIG_ENDIAN)
3635 {
3636 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3637 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3638 }
3639 else
3640 {
3641 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3642 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3643 }
941522d6 3644 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3645 gen_rtx_IF_THEN_ELSE (DImode,
2cc46ade 3646 gen_rtx_EQ (DImode, areg,
3647 const0_rtx),
941522d6 3648 const0_rtx, ext_tmps[i])));
34377880 3649 }
3650
3651 /* Merge the half-words into whole words. */
3652 for (i = 0; i < words; ++i)
3653 {
2cc46ade 3654 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3655 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
34377880 3656 }
3657}
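
/* For illustration -- the little-endian extract/merge step above as plain
   C for one unaligned quadword (a hypothetical helper, assuming a 64-bit
   unsigned long).  The explicit SHIFT test plays the role of the CMOV
   that guards the aligned case, where extqh with offset zero is a no-op
   rather than a source of zeros.  */

static unsigned long
load_unaligned_u64 (const unsigned char *p)
{
  const unsigned long *lo
    = (const unsigned long *) ((unsigned long) p & -8UL);
  const unsigned long *hi
    = (const unsigned long *) (((unsigned long) p + 7) & -8UL);
  int shift = ((unsigned long) p & 7) * 8;
  unsigned long ext_lo = *lo >> shift;			   /* extql */
  unsigned long ext_hi = shift ? *hi << (64 - shift) : 0;  /* extqh */
  return ext_lo | ext_hi;
}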
3658
3659/* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3660 may be NULL to store zeros. */
3661
3662static void
92643d95 3663alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3664 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
34377880 3665{
3666 rtx const im8 = GEN_INT (-8);
3667 rtx const i64 = GEN_INT (64);
34377880 3668 rtx ins_tmps[MAX_MOVE_WORDS];
2cc46ade 3669 rtx st_tmp_1, st_tmp_2, dreg;
1f0ce6a6 3670 rtx st_addr_1, st_addr_2, dmema;
34377880 3671 HOST_WIDE_INT i;
3672
1f0ce6a6 3673 dmema = XEXP (dmem, 0);
3674 if (GET_CODE (dmema) == LO_SUM)
3675 dmema = force_reg (Pmode, dmema);
3676
34377880 3677 /* Generate all the tmp registers we need. */
3678 if (data_regs != NULL)
3679 for (i = 0; i < words; ++i)
3680 ins_tmps[i] = gen_reg_rtx(DImode);
3681 st_tmp_1 = gen_reg_rtx(DImode);
3682 st_tmp_2 = gen_reg_rtx(DImode);
9e7454d0 3683
2cc46ade 3684 if (ofs != 0)
e513d163 3685 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
2cc46ade 3686
3687 st_addr_2 = change_address (dmem, DImode,
941522d6 3688 gen_rtx_AND (DImode,
1f0ce6a6 3689 plus_constant (dmema, words*8 - 1),
34377880 3690 im8));
ab6ab77e 3691 set_mem_alias_set (st_addr_2, 0);
3024e9f8 3692
2cc46ade 3693 st_addr_1 = change_address (dmem, DImode,
1f0ce6a6 3694 gen_rtx_AND (DImode, dmema, im8));
ab6ab77e 3695 set_mem_alias_set (st_addr_1, 0);
34377880 3696
3697 /* Load up the destination end bits. */
3698 emit_move_insn (st_tmp_2, st_addr_2);
3699 emit_move_insn (st_tmp_1, st_addr_1);
3700
3701 /* Shift the input data into place. */
1f0ce6a6 3702 dreg = copy_addr_to_reg (dmema);
9caef960 3703 if (WORDS_BIG_ENDIAN)
3704 emit_move_insn (dreg, plus_constant (dreg, 7));
34377880 3705 if (data_regs != NULL)
3706 {
3707 for (i = words-1; i >= 0; --i)
3708 {
9caef960 3709 if (WORDS_BIG_ENDIAN)
3710 {
3711 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3712 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3713 }
3714 else
3715 {
3716 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3718 }
34377880 3719 }
34377880 3720 for (i = words-1; i > 0; --i)
3721 {
2cc46ade 3722 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3723 ins_tmps[i-1], ins_tmps[i-1], 1,
3724 OPTAB_WIDEN);
34377880 3725 }
3726 }
3727
3728 /* Split and merge the ends with the destination data. */
9caef960 3729 if (WORDS_BIG_ENDIAN)
3730 {
ae4cd3a5 3731 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
9caef960 3732 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3733 }
3734 else
3735 {
3736 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
ae4cd3a5 3737 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
9caef960 3738 }
34377880 3739
3740 if (data_regs != NULL)
3741 {
2cc46ade 3742 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3743 st_tmp_2, 1, OPTAB_WIDEN);
3744 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3745 st_tmp_1, 1, OPTAB_WIDEN);
34377880 3746 }
3747
3748 /* Store it all. */
9caef960 3749 if (WORDS_BIG_ENDIAN)
3750 emit_move_insn (st_addr_1, st_tmp_1);
3751 else
3752 emit_move_insn (st_addr_2, st_tmp_2);
34377880 3753 for (i = words-1; i > 0; --i)
3754 {
3024e9f8 3755 rtx tmp = change_address (dmem, DImode,
3756 gen_rtx_AND (DImode,
9caef960 3757 plus_constant(dmema,
3758 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3024e9f8 3759 im8));
ab6ab77e 3760 set_mem_alias_set (tmp, 0);
3024e9f8 3761 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
34377880 3762 }
9caef960 3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (st_addr_2, st_tmp_2);
3765 else
3766 emit_move_insn (st_addr_1, st_tmp_1);
34377880 3767}
3768
3769
3770/* Expand string/block move operations.
3771
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3776
3777int
92643d95 3778alpha_expand_block_move (rtx operands[])
34377880 3779{
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
d94b545b 3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
a9aaae37 3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
80909c64 3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
2cc46ade 3789 rtx tmp;
1f0ce6a6 3790 unsigned int i, words, ofs, nregs = 0;
9e7454d0 3791
80909c64 3792 if (orig_bytes <= 0)
34377880 3793 return 1;
a9aaae37 3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
34377880 3795 return 0;
3796
2cc46ade 3797 /* Look for additional alignment information from recorded register info. */
3798
3799 tmp = XEXP (orig_src, 0);
3800 if (GET_CODE (tmp) == REG)
80909c64 3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
2cc46ade 3802 else if (GET_CODE (tmp) == PLUS
3803 && GET_CODE (XEXP (tmp, 0)) == REG
3804 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3805 {
80909c64 3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2cc46ade 3808
3809 if (a > src_align)
3810 {
80909c64 3811 if (a >= 64 && c % 8 == 0)
3812 src_align = 64;
3813 else if (a >= 32 && c % 4 == 0)
3814 src_align = 32;
3815 else if (a >= 16 && c % 2 == 0)
3816 src_align = 16;
2cc46ade 3817 }
3818 }
9e7454d0 3819
2cc46ade 3820 tmp = XEXP (orig_dst, 0);
3821 if (GET_CODE (tmp) == REG)
80909c64 3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
2cc46ade 3823 else if (GET_CODE (tmp) == PLUS
3824 && GET_CODE (XEXP (tmp, 0)) == REG
3825 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3826 {
80909c64 3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2cc46ade 3829
3830 if (a > dst_align)
3831 {
80909c64 3832 if (a >= 64 && c % 8 == 0)
3833 dst_align = 64;
3834 else if (a >= 32 && c % 4 == 0)
3835 dst_align = 32;
3836 else if (a >= 16 && c % 2 == 0)
3837 dst_align = 16;
2cc46ade 3838 }
3839 }
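
  /* Worked example of the refinement above: a copy from REG+12 where
     REG is known 64-bit aligned gives a = 64, c = 12, so src_align is
     raised only to 32 (12 % 8 != 0 but 12 % 4 == 0) and the loads
     below proceed in longwords rather than quadwords.  */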
3840
2cc46ade 3841 ofs = 0;
80909c64 3842 if (src_align >= 64 && bytes >= 8)
34377880 3843 {
3844 words = bytes / 8;
3845
34377880 3846 for (i = 0; i < words; ++i)
27d0c333 3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
34377880 3848
34377880 3849 for (i = 0; i < words; ++i)
80909c64 3850 emit_move_insn (data_regs[nregs + i],
e513d163 3851 adjust_address (orig_src, DImode, ofs + i * 8));
34377880 3852
2cc46ade 3853 nregs += words;
34377880 3854 bytes -= words * 8;
7597afe9 3855 ofs += words * 8;
34377880 3856 }
80909c64 3857
3858 if (src_align >= 32 && bytes >= 4)
34377880 3859 {
3860 words = bytes / 4;
3861
34377880 3862 for (i = 0; i < words; ++i)
27d0c333 3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
34377880 3864
34377880 3865 for (i = 0; i < words; ++i)
80909c64 3866 emit_move_insn (data_regs[nregs + i],
537ffcfc 3867 adjust_address (orig_src, SImode, ofs + i * 4));
34377880 3868
2cc46ade 3869 nregs += words;
34377880 3870 bytes -= words * 4;
7597afe9 3871 ofs += words * 4;
34377880 3872 }
80909c64 3873
a9aaae37 3874 if (bytes >= 8)
34377880 3875 {
3876 words = bytes / 8;
3877
34377880 3878 for (i = 0; i < words+1; ++i)
27d0c333 3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
34377880 3880
b47268cf 3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3882 words, ofs);
34377880 3883
2cc46ade 3884 nregs += words;
34377880 3885 bytes -= words * 8;
7597afe9 3886 ofs += words * 8;
34377880 3887 }
80909c64 3888
80909c64 3889 if (! TARGET_BWX && bytes >= 4)
34377880 3890 {
2cc46ade 3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
34377880 3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
34377880 3893 bytes -= 4;
3894 ofs += 4;
3895 }
80909c64 3896
34377880 3897 if (bytes >= 2)
3898 {
80909c64 3899 if (src_align >= 16)
34377880 3900 {
3901 do {
2cc46ade 3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
e513d163 3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
34377880 3904 bytes -= 2;
3905 ofs += 2;
3906 } while (bytes >= 2);
3907 }
80909c64 3908 else if (! TARGET_BWX)
34377880 3909 {
2cc46ade 3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
34377880 3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
34377880 3912 bytes -= 2;
3913 ofs += 2;
3914 }
3915 }
80909c64 3916
34377880 3917 while (bytes > 0)
3918 {
2cc46ade 3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
e513d163 3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
34377880 3921 bytes -= 1;
3922 ofs += 1;
3923 }
80909c64 3924
4d10b463 3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
2cc46ade 3926
80909c64 3927 /* Now save it back out again. */
2cc46ade 3928
3929 i = 0, ofs = 0;
3930
2cc46ade 3931 /* Write out the data in whatever chunks reading the source allowed. */
80909c64 3932 if (dst_align >= 64)
2cc46ade 3933 {
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3935 {
e513d163 3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
2cc46ade 3937 data_regs[i]);
3938 ofs += 8;
3939 i++;
3940 }
3941 }
80909c64 3942
3943 if (dst_align >= 32)
2cc46ade 3944 {
3945 /* If the source has remaining DImode regs, write them out in
3946 two pieces. */
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3948 {
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3951
e513d163 3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
2cc46ade 3953 gen_lowpart (SImode, data_regs[i]));
e513d163 3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
2cc46ade 3955 gen_lowpart (SImode, tmp));
3956 ofs += 8;
3957 i++;
3958 }
3959
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3961 {
e513d163 3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
2cc46ade 3963 data_regs[i]);
3964 ofs += 4;
3965 i++;
3966 }
3967 }
80909c64 3968
2cc46ade 3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3970 {
3971 /* Write out a remaining block of words using unaligned methods. */
3972
80909c64 3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
2cc46ade 3975 break;
3976
3977 if (words == 1)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3979 else
80909c64 3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3981 words, ofs);
9e7454d0 3982
2cc46ade 3983 i += words;
3984 ofs += words * 8;
3985 }
3986
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3991 {
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3993 ofs += 4;
3994 i++;
3995 }
3996
80909c64 3997 if (dst_align >= 16)
2cc46ade 3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3999 {
e513d163 4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
2cc46ade 4001 i++;
4002 ofs += 2;
4003 }
4004 else
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4006 {
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4008 i++;
4009 ofs += 2;
4010 }
80909c64 4011
4d10b463 4012 /* The remainder must be byte copies. */
4013 while (i < nregs)
2cc46ade 4014 {
4d10b463 4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
e513d163 4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
2cc46ade 4017 i++;
4018 ofs += 1;
4019 }
80909c64 4020
34377880 4021 return 1;
4022}
4023
4024int
92643d95 4025alpha_expand_block_clear (rtx operands[])
34377880 4026{
4027 rtx bytes_rtx = operands[1];
7a3e5564 4028 rtx align_rtx = operands[3];
80909c64 4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
a9aaae37 4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
80909c64 4033 rtx orig_dst = operands[0];
2cc46ade 4034 rtx tmp;
a9aaae37 4035 int i, words, ofs = 0;
9e7454d0 4036
80909c64 4037 if (orig_bytes <= 0)
34377880 4038 return 1;
a9aaae37 4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
34377880 4040 return 0;
4041
2cc46ade 4042 /* Look for stricter alignment. */
2cc46ade 4043 tmp = XEXP (orig_dst, 0);
4044 if (GET_CODE (tmp) == REG)
80909c64 4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
2cc46ade 4046 else if (GET_CODE (tmp) == PLUS
4047 && GET_CODE (XEXP (tmp, 0)) == REG
4048 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4049 {
a9aaae37 4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2cc46ade 4052
4053 if (a > align)
4054 {
a9aaae37 4055 if (a >= 64)
4056 align = a, alignofs = 8 - c % 8;
4057 else if (a >= 32)
4058 align = a, alignofs = 4 - c % 4;
4059 else if (a >= 16)
4060 align = a, alignofs = 2 - c % 2;
2cc46ade 4061 }
4062 }
4063
a9aaae37 4064 /* Handle an unaligned prefix first. */
4065
4066 if (alignofs > 0)
4067 {
4068#if HOST_BITS_PER_WIDE_INT >= 64
4069 /* Given that alignofs is bounded by align, the only time BWX could
4070 generate three stores is for a 7 byte fill. Prefer two individual
4071 stores over a load/mask/store sequence. */
4072 if ((!TARGET_BWX || alignofs == 7)
4073 && align >= 32
4074 && !(alignofs == 4 && bytes >= 4))
4075 {
4076 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4077 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4078 rtx mem, tmp;
4079 HOST_WIDE_INT mask;
4080
e513d163 4081 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
ab6ab77e 4082 set_mem_alias_set (mem, 0);
a9aaae37 4083
4084 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4085 if (bytes < alignofs)
4086 {
4087 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4088 ofs += bytes;
4089 bytes = 0;
4090 }
4091 else
4092 {
4093 bytes -= alignofs;
4094 ofs += alignofs;
4095 }
4096 alignofs = 0;
4097
4098 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4099 NULL_RTX, 1, OPTAB_WIDEN);
4100
4101 emit_move_insn (mem, tmp);
4102 }
4103#endif
4104
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4106 {
e513d163 4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
a9aaae37 4108 bytes -= 1;
4109 ofs += 1;
4110 alignofs -= 1;
4111 }
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4113 {
e513d163 4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
a9aaae37 4115 bytes -= 2;
4116 ofs += 2;
4117 alignofs -= 2;
4118 }
4119 if (alignofs == 4 && bytes >= 4)
4120 {
e513d163 4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
a9aaae37 4122 bytes -= 4;
4123 ofs += 4;
4124 alignofs = 0;
4125 }
4126
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4129 if (alignofs > 0)
4130 {
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4133 }
4134 }
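
  /* Worked example of the masked prefix store above: clearing 3 bytes
     at byte offset 5 from a 64-bit-aligned base gives alignofs = 3 and
     inv_alignofs = 5, so the covering quadword is ANDed with the mask
     0x000000ffffffffff -- keeping the five low-order (little-endian)
     bytes and zeroing bytes 5..7 in a single load/and/store.  */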
4135
4136 /* Handle a block of contiguous long-words. */
34377880 4137
80909c64 4138 if (align >= 64 && bytes >= 8)
34377880 4139 {
4140 words = bytes / 8;
4141
4142 for (i = 0; i < words; ++i)
1f0ce6a6 4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
e513d163 4144 const0_rtx);
34377880 4145
4146 bytes -= words * 8;
7597afe9 4147 ofs += words * 8;
34377880 4148 }
80909c64 4149
a9aaae37 4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4152
4153 if (align >= 32 && bytes > 16)
4154 {
1f0ce6a6 4155 rtx orig_dsta;
4156
e513d163 4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
a9aaae37 4158 bytes -= 4;
4159 ofs += 4;
4160
1f0ce6a6 4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4164
a9aaae37 4165 words = bytes / 8;
4166 for (i = 0; i < words; ++i)
4167 {
ab6ab77e 4168 rtx mem
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
1f0ce6a6 4171 plus_constant (orig_dsta, ofs + i*8),
ab6ab77e 4172 GEN_INT (-8)));
4173 set_mem_alias_set (mem, 0);
a9aaae37 4174 emit_move_insn (mem, const0_rtx);
4175 }
4176
4177 /* Depending on the alignment, the first stq_u may have overlapped
4178 with the initial stl, which means that the last stq_u didn't
4179 write as much as it would appear. Leave those questionable bytes
4180 unaccounted for. */
4181 bytes -= words * 8 - 4;
4182 ofs += words * 8 - 4;
4183 }
4184
4185 /* Handle a smaller block of aligned words. */
4186
4187 if ((align >= 64 && bytes == 4)
4188 || (align == 32 && bytes >= 4))
34377880 4189 {
4190 words = bytes / 4;
4191
4192 for (i = 0; i < words; ++i)
e513d163 4193 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
80909c64 4194 const0_rtx);
34377880 4195
4196 bytes -= words * 4;
7597afe9 4197 ofs += words * 4;
34377880 4198 }
80909c64 4199
a9aaae37 4200 /* An unaligned block uses stq_u stores for as much of the data as possible. */
4201
4202 if (bytes >= 8)
34377880 4203 {
4204 words = bytes / 8;
4205
7597afe9 4206 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
34377880 4207
4208 bytes -= words * 8;
7597afe9 4209 ofs += words * 8;
34377880 4210 }
4211
a9aaae37 4212 /* Next clean up any trailing pieces. */
34377880 4213
a9aaae37 4214#if HOST_BITS_PER_WIDE_INT >= 64
4215 /* Count the number of bits in BYTES for which aligned stores could
4216 be emitted. */
4217 words = 0;
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4219 if (bytes & i)
4220 words += 1;
4221
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4225 {
4226 if (align >= 64)
4227 {
4228 rtx mem, tmp;
4229 HOST_WIDE_INT mask;
4230
e513d163 4231 mem = adjust_address (orig_dst, DImode, ofs);
ab6ab77e 4232 set_mem_alias_set (mem, 0);
a9aaae37 4233
4234 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4235
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4238
4239 emit_move_insn (mem, tmp);
4240 return 1;
4241 }
4242 else if (align >= 32 && bytes < 4)
4243 {
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4246
e513d163 4247 mem = adjust_address (orig_dst, SImode, ofs);
ab6ab77e 4248 set_mem_alias_set (mem, 0);
a9aaae37 4249
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4251
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4254
4255 emit_move_insn (mem, tmp);
4256 return 1;
4257 }
34377880 4258 }
a9aaae37 4259#endif
80909c64 4260
34377880 4261 if (!TARGET_BWX && bytes >= 4)
4262 {
4263 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4264 bytes -= 4;
4265 ofs += 4;
4266 }
80909c64 4267
34377880 4268 if (bytes >= 2)
4269 {
80909c64 4270 if (align >= 16)
34377880 4271 {
4272 do {
e513d163 4273 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
34377880 4274 const0_rtx);
4275 bytes -= 2;
4276 ofs += 2;
4277 } while (bytes >= 2);
4278 }
80909c64 4279 else if (! TARGET_BWX)
34377880 4280 {
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4282 bytes -= 2;
4283 ofs += 2;
4284 }
4285 }
80909c64 4286
34377880 4287 while (bytes > 0)
4288 {
e513d163 4289 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
34377880 4290 bytes -= 1;
4291 ofs += 1;
4292 }
4293
4294 return 1;
4295}
f2cc13dc 4296
4297/* Returns a mask so that zap(x, value) == x & mask. */
4298
4299rtx
92643d95 4300alpha_expand_zap_mask (HOST_WIDE_INT value)
f2cc13dc 4301{
4302 rtx result;
4303 int i;
4304
4305 if (HOST_BITS_PER_WIDE_INT >= 64)
4306 {
4307 HOST_WIDE_INT mask = 0;
4308
4309 for (i = 7; i >= 0; --i)
4310 {
4311 mask <<= 8;
4312 if (!((value >> i) & 1))
4313 mask |= 0xff;
4314 }
4315
4316 result = gen_int_mode (mask, DImode);
4317 }
4d10b463 4318 else
f2cc13dc 4319 {
4320 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4321
4d10b463 4322 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4323
f2cc13dc 4324 for (i = 7; i >= 4; --i)
4325 {
4326 mask_hi <<= 8;
4327 if (!((value >> i) & 1))
4328 mask_hi |= 0xff;
4329 }
4330
4331 for (i = 3; i >= 0; --i)
4332 {
4333 mask_lo <<= 8;
4334 if (!((value >> i) & 1))
4335 mask_lo |= 0xff;
4336 }
4337
4338 result = immed_double_const (mask_lo, mask_hi, DImode);
4339 }
f2cc13dc 4340
4341 return result;
4342}
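
/* Example: alpha_expand_zap_mask (0x0f) yields the DImode constant
   0xffffffff00000000, since ZAP clears byte I of its input exactly when
   bit I of VALUE is set and the mask keeps the surviving bytes.  */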
4343
4344void
92643d95 4345alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4346 enum machine_mode mode,
4347 rtx op0, rtx op1, rtx op2)
f2cc13dc 4348{
4349 op0 = gen_lowpart (mode, op0);
4350
4351 if (op1 == const0_rtx)
4352 op1 = CONST0_RTX (mode);
4353 else
4354 op1 = gen_lowpart (mode, op1);
ae4cd3a5 4355
4356 if (op2 == const0_rtx)
f2cc13dc 4357 op2 = CONST0_RTX (mode);
4358 else
4359 op2 = gen_lowpart (mode, op2);
4360
4361 emit_insn ((*gen) (op0, op1, op2));
4362}
f155876e 4363
032caa7b 4364/* A subroutine of the atomic operation splitters. Jump to LABEL if
4365 COND is true. Mark the jump as unlikely to be taken. */
4366
4367static void
4368emit_unlikely_jump (rtx cond, rtx label)
4369{
4370 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4371 rtx x;
4372
4373 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4374 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4375 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4376}
4377
4378/* A subroutine of the atomic operation splitters. Emit a load-locked
4379 instruction in MODE. */
4380
4381static void
4382emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4383{
4384 rtx (*fn) (rtx, rtx) = NULL;
4385 if (mode == SImode)
4386 fn = gen_load_locked_si;
4387 else if (mode == DImode)
4388 fn = gen_load_locked_di;
4389 emit_insn (fn (reg, mem));
4390}
4391
4392/* A subroutine of the atomic operation splitters. Emit a store-conditional
4393 instruction in MODE. */
4394
4395static void
4396emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4397{
4398 rtx (*fn) (rtx, rtx, rtx) = NULL;
4399 if (mode == SImode)
4400 fn = gen_store_conditional_si;
4401 else if (mode == DImode)
4402 fn = gen_store_conditional_di;
4403 emit_insn (fn (res, mem, val));
4404}
4405
596d3184 4406/* A subroutine of the atomic operation splitters. Emit an insxl
4407 instruction in MODE. */
4408
4409static rtx
4410emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4411{
4412 rtx ret = gen_reg_rtx (DImode);
4413 rtx (*fn) (rtx, rtx, rtx);
4414
4415 if (WORDS_BIG_ENDIAN)
4416 {
4417 if (mode == QImode)
4418 fn = gen_insbl_be;
4419 else
4420 fn = gen_inswl_be;
4421 }
4422 else
4423 {
4424 if (mode == QImode)
4425 fn = gen_insbl_le;
4426 else
4427 fn = gen_inswl_le;
4428 }
9a6f4ddd 4429 /* The insbl and inswl patterns require a register operand. */
4430 op1 = force_reg (mode, op1);
596d3184 4431 emit_insn (fn (ret, op1, op2));
4432
4433 return ret;
4434}
4435
85694bac 4436/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
f155876e 4437 to perform. MEM is the memory on which to operate. VAL is the second
4438 operand of the binary operator. BEFORE and AFTER are optional locations to
4439 return the value of MEM either before of after the operation. SCRATCH is
4440 a scratch register. */
4441
4442void
4443alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4444 rtx before, rtx after, rtx scratch)
4445{
4446 enum machine_mode mode = GET_MODE (mem);
032caa7b 4447 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
f155876e 4448
4449 emit_insn (gen_memory_barrier ());
4450
4451 label = gen_label_rtx ();
4452 emit_label (label);
4453 label = gen_rtx_LABEL_REF (DImode, label);
4454
4455 if (before == NULL)
4456 before = scratch;
032caa7b 4457 emit_load_locked (mode, before, mem);
f155876e 4458
4459 if (code == NOT)
032caa7b 4460 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
f155876e 4461 else
4462 x = gen_rtx_fmt_ee (code, mode, before, val);
f155876e 4463 if (after)
4464 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
87121034 4465 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
f155876e 4466
032caa7b 4467 emit_store_conditional (mode, cond, mem, scratch);
4468
4469 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4470 emit_unlikely_jump (x, label);
4471
4472 emit_insn (gen_memory_barrier ());
4473}
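
/* For orientation, the splitter above yields a sequence like the
   following for a DImode fetch-and-add (register numbers here are
   illustrative only):

	mb
   1:	ldq_l	$1,0($16)	# BEFORE = *MEM, locked
	addq	$1,$17,$2	# SCRATCH = BEFORE + VAL
	stq_c	$2,0($16)	# $2 = 1 if the store committed
	beq	$2,1b		# unlikely: lost the reservation, retry
	mb
*/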
4474
4475/* Expand a compare and swap operation. */
4476
4477void
4478alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4479 rtx scratch)
4480{
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4483
4484 emit_insn (gen_memory_barrier ());
4485
4486 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4487 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 emit_label (XEXP (label1, 0));
4489
4490 emit_load_locked (mode, retval, mem);
4491
4492 x = gen_lowpart (DImode, retval);
4493 if (oldval == const0_rtx)
4494 x = gen_rtx_NE (DImode, x, const0_rtx);
f155876e 4495 else
032caa7b 4496 {
4497 x = gen_rtx_EQ (DImode, x, oldval);
4498 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4499 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4500 }
4501 emit_unlikely_jump (x, label2);
4502
4503 emit_move_insn (scratch, newval);
4504 emit_store_conditional (mode, cond, mem, scratch);
f155876e 4505
4506 x = gen_rtx_EQ (DImode, cond, const0_rtx);
032caa7b 4507 emit_unlikely_jump (x, label1);
4508
4509 emit_insn (gen_memory_barrier ());
4510 emit_label (XEXP (label2, 0));
4511}
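
/* Likewise, a sketch of the DImode compare-and-swap sequence emitted
   above (illustrative registers; note that the failure branch skips
   the trailing barrier):

	mb
   1:	ldq_l	$1,0($16)	# RETVAL = *MEM, locked
	cmpeq	$1,$17,$2	# COND = (RETVAL == OLDVAL)
	beq	$2,2f		# unlikely: mismatch, give up
	mov	$18,$2		# SCRATCH = NEWVAL
	stq_c	$2,0($16)	# $2 = 1 if the store committed
	beq	$2,1b		# unlikely: lost the reservation, retry
	mb
   2:
*/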
4512
596d3184 4513void
4514alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4515{
4516 enum machine_mode mode = GET_MODE (mem);
4517 rtx addr, align, wdst;
4518 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4519
4520 addr = force_reg (DImode, XEXP (mem, 0));
4521 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4522 NULL_RTX, 1, OPTAB_DIRECT);
4523
4524 oldval = convert_modes (DImode, mode, oldval, 1);
4525 newval = emit_insxl (mode, newval, addr);
4526
4527 wdst = gen_reg_rtx (DImode);
4528 if (mode == QImode)
4529 fn5 = gen_sync_compare_and_swapqi_1;
4530 else
4531 fn5 = gen_sync_compare_and_swaphi_1;
4532 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4533
4534 emit_move_insn (dst, gen_lowpart (mode, wdst));
4535}
4536
4537void
4538alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4539 rtx oldval, rtx newval, rtx align,
4540 rtx scratch, rtx cond)
4541{
4542 rtx label1, label2, mem, width, mask, x;
4543
4544 mem = gen_rtx_MEM (DImode, align);
4545 MEM_VOLATILE_P (mem) = 1;
4546
4547 emit_insn (gen_memory_barrier ());
4548 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4549 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 emit_label (XEXP (label1, 0));
4551
4552 emit_load_locked (DImode, scratch, mem);
4553
4554 width = GEN_INT (GET_MODE_BITSIZE (mode));
4555 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4556 if (WORDS_BIG_ENDIAN)
4557 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4558 else
4559 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4560
4561 if (oldval == const0_rtx)
4562 x = gen_rtx_NE (DImode, dest, const0_rtx);
4563 else
4564 {
4565 x = gen_rtx_EQ (DImode, dest, oldval);
4566 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4567 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4568 }
4569 emit_unlikely_jump (x, label2);
4570
4571 if (WORDS_BIG_ENDIAN)
4572 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4573 else
4574 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4575 emit_insn (gen_iordi3 (scratch, scratch, newval));
4576
4577 emit_store_conditional (DImode, scratch, mem, scratch);
4578
4579 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4580 emit_unlikely_jump (x, label1);
4581
4582 emit_insn (gen_memory_barrier ());
4583 emit_label (XEXP (label2, 0));
4584}
4585
032caa7b 4586/* Expand an atomic exchange operation. */
4587
4588void
4589alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4590{
4591 enum machine_mode mode = GET_MODE (mem);
4592 rtx label, x, cond = gen_lowpart (DImode, scratch);
f155876e 4593
4594 emit_insn (gen_memory_barrier ());
032caa7b 4595
4596 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 emit_label (XEXP (label, 0));
4598
4599 emit_load_locked (mode, retval, mem);
4600 emit_move_insn (scratch, val);
4601 emit_store_conditional (mode, cond, mem, scratch);
4602
4603 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4604 emit_unlikely_jump (x, label);
f155876e 4605}
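
/* And the atomic exchange emitted above, again with illustrative
   registers:

	mb
   1:	ldq_l	$1,0($16)	# RETVAL = *MEM, locked
	mov	$17,$2		# SCRATCH = VAL
	stq_c	$2,0($16)	# $2 = 1 if the store committed
	beq	$2,1b		# unlikely: retry
*/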
596d3184 4606
4607void
4608alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4609{
4610 enum machine_mode mode = GET_MODE (mem);
4611 rtx addr, align, wdst;
4612 rtx (*fn4) (rtx, rtx, rtx, rtx);
4613
4614 /* Force the address into a register. */
4615 addr = force_reg (DImode, XEXP (mem, 0));
4616
4617 /* Align it to a multiple of 8. */
4618 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4619 NULL_RTX, 1, OPTAB_DIRECT);
4620
4621 /* Insert val into the correct byte location within the word. */
4622 val = emit_insxl (mode, val, addr);
4623
4624 wdst = gen_reg_rtx (DImode);
4625 if (mode == QImode)
4626 fn4 = gen_sync_lock_test_and_setqi_1;
4627 else
4628 fn4 = gen_sync_lock_test_and_sethi_1;
4629 emit_insn (fn4 (wdst, addr, val, align));
4630
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4632}
4633
4634void
4635alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4636 rtx val, rtx align, rtx scratch)
4637{
4638 rtx label, mem, width, mask, x;
4639
4640 mem = gen_rtx_MEM (DImode, align);
4641 MEM_VOLATILE_P (mem) = 1;
4642
4643 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4646
4647 emit_load_locked (DImode, scratch, mem);
4648
4649 width = GEN_INT (GET_MODE_BITSIZE (mode));
4650 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4651 if (WORDS_BIG_ENDIAN)
4652 {
4653 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4655 }
4656 else
4657 {
4658 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4659 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4660 }
4661 emit_insn (gen_iordi3 (scratch, scratch, val));
4662
4663 emit_store_conditional (DImode, scratch, mem, scratch);
4664
4665 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4666 emit_unlikely_jump (x, label);
4667}
bf2a98b3 4668\f
4669/* Adjust the cost of a scheduling dependency. Return the new cost of
4670 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4671
747af5e7 4672static int
92643d95 4673alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
bf2a98b3 4674{
d2832bd8 4675 enum attr_type insn_type, dep_insn_type;
bf2a98b3 4676
4677 /* If the dependence is an anti-dependence, there is no cost. For an
4678 output dependence, there is sometimes a cost, but it doesn't seem
4679 worth handling those few cases. */
bf2a98b3 4680 if (REG_NOTE_KIND (link) != 0)
7eb0c947 4681 return cost;
bf2a98b3 4682
d2832bd8 4683 /* If we can't recognize the insns, we can't really do anything. */
4684 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4685 return cost;
4686
4687 insn_type = get_attr_type (insn);
4688 dep_insn_type = get_attr_type (dep_insn);
4689
07c1a295 4690 /* Bring in the user-defined memory latency. */
e7a69d05 4691 if (dep_insn_type == TYPE_ILD
4692 || dep_insn_type == TYPE_FLD
4693 || dep_insn_type == TYPE_LDSYM)
07c1a295 4694 cost += alpha_memory_latency-1;
4695
7eb0c947 4696 /* Everything else handled in DFA bypasses now. */
3680ac41 4697
bf2a98b3 4698 return cost;
4699}
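
/* Example: with the default alpha_memory_latency of 3, an insn that
   consumes the result of an integer or FP load (TYPE_ILD, TYPE_FLD)
   sees its dependency cost increased by 2 cycles; -mmemory-latency=5
   would add 4.  Anti-dependences stay free.  */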
747af5e7 4700
7eb0c947 4701/* The number of instructions that can be issued per cycle. */
4702
747af5e7 4703static int
92643d95 4704alpha_issue_rate (void)
747af5e7 4705{
fb64edde 4706 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
747af5e7 4707}
4708
7eb0c947 4709/* How many alternative schedules to try. This should be as wide as the
4710 scheduling freedom in the DFA, but no wider. Making this value too
 4711   large results in extra work for the scheduler.
4712
4713 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4714 alternative schedules. For EV5, we can choose between E0/E1 and
8d232dc7 4715 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
7eb0c947 4716
4717static int
92643d95 4718alpha_multipass_dfa_lookahead (void)
7eb0c947 4719{
fb64edde 4720 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
7eb0c947 4721}
0c0464e6 4722\f
5f7b9df8 4723/* Machine-specific function data. */
4724
1f3233d1 4725struct machine_function GTY(())
5f7b9df8 4726{
674a8f0b 4727 /* For unicosmk. */
5f7b9df8 4728 /* List of call information words for calls from this function. */
4729 struct rtx_def *first_ciw;
4730 struct rtx_def *last_ciw;
4731 int ciw_count;
4732
4733 /* List of deferred case vectors. */
4734 struct rtx_def *addr_list;
1f3233d1 4735
674a8f0b 4736 /* For OSF. */
5f7b9df8 4737 const char *some_ld_name;
a221313c 4738
4739 /* For TARGET_LD_BUGGY_LDGP. */
4740 struct rtx_def *gp_save_rtx;
5f7b9df8 4741};
4742
1f3233d1 4743/* How to allocate a 'struct machine_function'. */
9caef960 4744
1f3233d1 4745static struct machine_function *
92643d95 4746alpha_init_machine_status (void)
9caef960 4747{
9e7454d0 4748 return ((struct machine_function *)
1f3233d1 4749 ggc_alloc_cleared (sizeof (struct machine_function)));
9caef960 4750}
9caef960 4751
0c0464e6 4752/* Functions to save and restore alpha_return_addr_rtx. */
4753
0c0464e6 4754/* Start the ball rolling with RETURN_ADDR_RTX. */
4755
4756rtx
92643d95 4757alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
0c0464e6 4758{
0c0464e6 4759 if (count != 0)
4760 return const0_rtx;
4761
0f37b7a2 4762 return get_hard_reg_initial_val (Pmode, REG_RA);
0c0464e6 4763}
4764
a221313c 4765/* Return or create a memory slot containing the gp value for the current
66561750 4766 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4767
4768rtx
92643d95 4769alpha_gp_save_rtx (void)
66561750 4770{
a221313c 4771 rtx seq, m = cfun->machine->gp_save_rtx;
4772
4773 if (m == NULL)
4774 {
4775 start_sequence ();
4776
4777 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778 m = validize_mem (m);
4779 emit_move_insn (m, pic_offset_table_rtx);
4780
4781 seq = get_insns ();
4782 end_sequence ();
3e24a03e 4783 emit_insn_at_entry (seq);
a221313c 4784
4785 cfun->machine->gp_save_rtx = m;
4786 }
4787
4788 return m;
66561750 4789}
4790
0c0464e6 4791static int
92643d95 4792alpha_ra_ever_killed (void)
0c0464e6 4793{
5a965225 4794 rtx top;
4795
0f37b7a2 4796 if (!has_hard_reg_initial_val (Pmode, REG_RA))
3072d30e 4797 return (int)df_regs_ever_live_p (REG_RA);
0c0464e6 4798
5a965225 4799 push_topmost_sequence ();
4800 top = get_insns ();
4801 pop_topmost_sequence ();
4802
4803 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
0c0464e6 4804}
4805
bf2a98b3 4806\f
6fec94c5 4807/* Return the trap mode suffix applicable to the current
65abff06 4808 instruction, or NULL. */
bf2a98b3 4809
6fec94c5 4810static const char *
92643d95 4811get_trap_mode_suffix (void)
bf2a98b3 4812{
6fec94c5 4813 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
bf2a98b3 4814
6fec94c5 4815 switch (s)
bf2a98b3 4816 {
6fec94c5 4817 case TRAP_SUFFIX_NONE:
4818 return NULL;
c4622276 4819
6fec94c5 4820 case TRAP_SUFFIX_SU:
bc16f0c1 4821 if (alpha_fptm >= ALPHA_FPTM_SU)
6fec94c5 4822 return "su";
4823 return NULL;
c4622276 4824
6fec94c5 4825 case TRAP_SUFFIX_SUI:
4826 if (alpha_fptm >= ALPHA_FPTM_SUI)
4827 return "sui";
4828 return NULL;
4829
4830 case TRAP_SUFFIX_V_SV:
39344852 4831 switch (alpha_fptm)
4832 {
4833 case ALPHA_FPTM_N:
6fec94c5 4834 return NULL;
39344852 4835 case ALPHA_FPTM_U:
6fec94c5 4836 return "v";
39344852 4837 case ALPHA_FPTM_SU:
4838 case ALPHA_FPTM_SUI:
6fec94c5 4839 return "sv";
4d10b463 4840 default:
4841 gcc_unreachable ();
39344852 4842 }
39344852 4843
6fec94c5 4844 case TRAP_SUFFIX_V_SV_SVI:
b5ea3193 4845 switch (alpha_fptm)
4846 {
4847 case ALPHA_FPTM_N:
6fec94c5 4848 return NULL;
b5ea3193 4849 case ALPHA_FPTM_U:
6fec94c5 4850 return "v";
b5ea3193 4851 case ALPHA_FPTM_SU:
6fec94c5 4852 return "sv";
b5ea3193 4853 case ALPHA_FPTM_SUI:
6fec94c5 4854 return "svi";
4d10b463 4855 default:
4856 gcc_unreachable ();
b5ea3193 4857 }
4858 break;
4859
6fec94c5 4860 case TRAP_SUFFIX_U_SU_SUI:
c4622276 4861 switch (alpha_fptm)
4862 {
4863 case ALPHA_FPTM_N:
6fec94c5 4864 return NULL;
c4622276 4865 case ALPHA_FPTM_U:
6fec94c5 4866 return "u";
c4622276 4867 case ALPHA_FPTM_SU:
6fec94c5 4868 return "su";
c4622276 4869 case ALPHA_FPTM_SUI:
6fec94c5 4870 return "sui";
4d10b463 4871 default:
4872 gcc_unreachable ();
c4622276 4873 }
4874 break;
4d10b463 4875
4876 default:
4877 gcc_unreachable ();
6fec94c5 4878 }
4d10b463 4879 gcc_unreachable ();
6fec94c5 4880}
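
/* Example: under -mfp-trap-mode=su, an insn whose pattern carries
   TRAP_SUFFIX_U_SU_SUI is printed with the "su" qualifier (addt/su),
   while -mfp-trap-mode=n suppresses the suffix entirely.  */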
c4622276 4881
6fec94c5 4882/* Return the rounding mode suffix applicable to the current
65abff06 4883 instruction, or NULL. */
6fec94c5 4884
4885static const char *
92643d95 4886get_round_mode_suffix (void)
6fec94c5 4887{
4888 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4889
4890 switch (s)
4891 {
4892 case ROUND_SUFFIX_NONE:
4893 return NULL;
4894 case ROUND_SUFFIX_NORMAL:
4895 switch (alpha_fprm)
c4622276 4896 {
6fec94c5 4897 case ALPHA_FPRM_NORM:
4898 return NULL;
9e7454d0 4899 case ALPHA_FPRM_MINF:
6fec94c5 4900 return "m";
4901 case ALPHA_FPRM_CHOP:
4902 return "c";
4903 case ALPHA_FPRM_DYN:
4904 return "d";
4d10b463 4905 default:
4906 gcc_unreachable ();
c4622276 4907 }
4908 break;
4909
6fec94c5 4910 case ROUND_SUFFIX_C:
4911 return "c";
4d10b463 4912
4913 default:
4914 gcc_unreachable ();
6fec94c5 4915 }
4d10b463 4916 gcc_unreachable ();
6fec94c5 4917}
4918
5f7b9df8 4919/* Locate some local-dynamic symbol still in use by this function
4920 so that we can print its name in some movdi_er_tlsldm pattern. */
4921
92643d95 4922static int
4923get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4924{
4925 rtx x = *px;
4926
4927 if (GET_CODE (x) == SYMBOL_REF
4928 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4929 {
4930 cfun->machine->some_ld_name = XSTR (x, 0);
4931 return 1;
4932 }
4933
4934 return 0;
4935}
4936
5f7b9df8 4937static const char *
92643d95 4938get_some_local_dynamic_name (void)
5f7b9df8 4939{
4940 rtx insn;
4941
4942 if (cfun->machine->some_ld_name)
4943 return cfun->machine->some_ld_name;
4944
4945 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4946 if (INSN_P (insn)
4947 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4948 return cfun->machine->some_ld_name;
4949
4d10b463 4950 gcc_unreachable ();
5f7b9df8 4951}
4952
6fec94c5 4953/* Print an operand. Recognize special options, documented below. */
4954
4955void
92643d95 4956print_operand (FILE *file, rtx x, int code)
6fec94c5 4957{
4958 int i;
4959
4960 switch (code)
4961 {
4962 case '~':
4963 /* Print the assembler name of the current function. */
4964 assemble_name (file, alpha_fnname);
4965 break;
4966
5f7b9df8 4967 case '&':
4968 assemble_name (file, get_some_local_dynamic_name ());
4969 break;
4970
6fec94c5 4971 case '/':
4972 {
4973 const char *trap = get_trap_mode_suffix ();
4974 const char *round = get_round_mode_suffix ();
4975
4976 if (trap || round)
9caef960 4977 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4978 (trap ? trap : ""), (round ? round : ""));
6fec94c5 4979 break;
4980 }
4981
8df4a58b 4982 case ',':
4983 /* Generates single precision instruction suffix. */
6fec94c5 4984 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
8df4a58b 4985 break;
4986
4987 case '-':
4988 /* Generates double precision instruction suffix. */
6fec94c5 4989 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
8df4a58b 4990 break;
4991
1f0ce6a6 4992 case '#':
4993 if (alpha_this_literal_sequence_number == 0)
4994 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4995 fprintf (file, "%d", alpha_this_literal_sequence_number);
4996 break;
4997
4998 case '*':
4999 if (alpha_this_gpdisp_sequence_number == 0)
5000 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5001 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5002 break;
5003
5004 case 'H':
5005 if (GET_CODE (x) == HIGH)
5dcb037d 5006 output_addr_const (file, XEXP (x, 0));
1f0ce6a6 5007 else
5008 output_operand_lossage ("invalid %%H value");
5009 break;
5010
ad2ed779 5011 case 'J':
5f7b9df8 5012 {
5013 const char *lituse;
5014
5015 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5016 {
5017 x = XVECEXP (x, 0, 0);
5018 lituse = "lituse_tlsgd";
5019 }
5020 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5021 {
5022 x = XVECEXP (x, 0, 0);
5023 lituse = "lituse_tlsldm";
5024 }
5025 else if (GET_CODE (x) == CONST_INT)
5026 lituse = "lituse_jsr";
5027 else
5028 {
5029 output_operand_lossage ("invalid %%J value");
5030 break;
5031 }
5032
5033 if (x != const0_rtx)
5034 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5035 }
ad2ed779 5036 break;
5037
592222c2 5038 case 'j':
5039 {
5040 const char *lituse;
5041
5042#ifdef HAVE_AS_JSRDIRECT_RELOCS
5043 lituse = "lituse_jsrdirect";
5044#else
5045 lituse = "lituse_jsr";
5046#endif
5047
5048 gcc_assert (INTVAL (x) != 0);
5049 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5050 }
5051 break;
bf2a98b3 5052 case 'r':
5053 /* If this operand is the constant zero, write it as "$31". */
5054 if (GET_CODE (x) == REG)
5055 fprintf (file, "%s", reg_names[REGNO (x)]);
5056 else if (x == CONST0_RTX (GET_MODE (x)))
5057 fprintf (file, "$31");
5058 else
5059 output_operand_lossage ("invalid %%r value");
bf2a98b3 5060 break;
5061
5062 case 'R':
5063 /* Similar, but for floating-point. */
5064 if (GET_CODE (x) == REG)
5065 fprintf (file, "%s", reg_names[REGNO (x)]);
5066 else if (x == CONST0_RTX (GET_MODE (x)))
5067 fprintf (file, "$f31");
5068 else
5069 output_operand_lossage ("invalid %%R value");
bf2a98b3 5070 break;
5071
5072 case 'N':
5073 /* Write the 1's complement of a constant. */
5074 if (GET_CODE (x) != CONST_INT)
5075 output_operand_lossage ("invalid %%N value");
5076
61a63ca5 5077 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
bf2a98b3 5078 break;
5079
5080 case 'P':
5081 /* Write 1 << C, for a constant C. */
5082 if (GET_CODE (x) != CONST_INT)
5083 output_operand_lossage ("invalid %%P value");
5084
61a63ca5 5085 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
bf2a98b3 5086 break;
5087
5088 case 'h':
5089 /* Write the high-order 16 bits of a constant, sign-extended. */
5090 if (GET_CODE (x) != CONST_INT)
5091 output_operand_lossage ("invalid %%h value");
5092
61a63ca5 5093 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
bf2a98b3 5094 break;
5095
5096 case 'L':
5097 /* Write the low-order 16 bits of a constant, sign-extended. */
5098 if (GET_CODE (x) != CONST_INT)
5099 output_operand_lossage ("invalid %%L value");
5100
61a63ca5 5101 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5102 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
bf2a98b3 5103 break;
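
      /* Worked example for %L above: (INTVAL (x) & 0xffff)
	 - 2 * (INTVAL (x) & 0x8000) sign-extends the low 16 bits, so
	 x = 0x18000 prints as -32768 while x = 0x17fff prints 32767.  */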
5104
5105 case 'm':
5106 /* Write mask for ZAP insn. */
5107 if (GET_CODE (x) == CONST_DOUBLE)
5108 {
5109 HOST_WIDE_INT mask = 0;
5110 HOST_WIDE_INT value;
5111
5112 value = CONST_DOUBLE_LOW (x);
5113 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5114 i++, value >>= 8)
5115 if (value & 0xff)
5116 mask |= (1 << i);
5117
5118 value = CONST_DOUBLE_HIGH (x);
5119 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5120 i++, value >>= 8)
5121 if (value & 0xff)
5122 mask |= (1 << (i + sizeof (int)));
5123
61a63ca5 5124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
bf2a98b3 5125 }
5126
5127 else if (GET_CODE (x) == CONST_INT)
5128 {
5129 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5130
5131 for (i = 0; i < 8; i++, value >>= 8)
5132 if (value & 0xff)
5133 mask |= (1 << i);
5134
61a63ca5 5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
bf2a98b3 5136 }
5137 else
5138 output_operand_lossage ("invalid %%m value");
5139 break;
5140
5141 case 'M':
34377880 5142 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
bf2a98b3 5143 if (GET_CODE (x) != CONST_INT
34377880 5144 || (INTVAL (x) != 8 && INTVAL (x) != 16
5145 && INTVAL (x) != 32 && INTVAL (x) != 64))
bf2a98b3 5146 output_operand_lossage ("invalid %%M value");
5147
5148 fprintf (file, "%s",
34377880 5149 (INTVAL (x) == 8 ? "b"
5150 : INTVAL (x) == 16 ? "w"
5151 : INTVAL (x) == 32 ? "l"
5152 : "q"));
bf2a98b3 5153 break;
5154
5155 case 'U':
5156 /* Similar, except do it from the mask. */
ae4cd3a5 5157 if (GET_CODE (x) == CONST_INT)
5158 {
5159 HOST_WIDE_INT value = INTVAL (x);
5160
5161 if (value == 0xff)
5162 {
5163 fputc ('b', file);
5164 break;
5165 }
5166 if (value == 0xffff)
5167 {
5168 fputc ('w', file);
5169 break;
5170 }
5171 if (value == 0xffffffff)
5172 {
5173 fputc ('l', file);
5174 break;
5175 }
5176 if (value == -1)
5177 {
5178 fputc ('q', file);
5179 break;
5180 }
5181 }
5182 else if (HOST_BITS_PER_WIDE_INT == 32
5183 && GET_CODE (x) == CONST_DOUBLE
5184 && CONST_DOUBLE_LOW (x) == 0xffffffff
5185 && CONST_DOUBLE_HIGH (x) == 0)
5186 {
5187 fputc ('l', file);
5188 break;
5189 }
5190 output_operand_lossage ("invalid %%U value");
bf2a98b3 5191 break;
5192
5193 case 's':
9caef960 5194 /* Write the constant value divided by 8 for little-endian mode or
5195 (56 - value) / 8 for big-endian mode. */
5196
bf2a98b3 5197 if (GET_CODE (x) != CONST_INT
9caef960 5198 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5199 ? 56
9e7454d0 5200 : 64)
9caef960 5201 || (INTVAL (x) & 7) != 0)
bf2a98b3 5202 output_operand_lossage ("invalid %%s value");
5203
9caef960 5204 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5205 WORDS_BIG_ENDIAN
5206 ? (56 - INTVAL (x)) / 8
5207 : INTVAL (x) / 8);
bf2a98b3 5208 break;
5209
5210 case 'S':
 5211      /* Same, except compute (64 - c) / 8.  */
 5212
 5213      if (GET_CODE (x) != CONST_INT
 5214	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
 5215	  || (INTVAL (x) & 7) != 0)
 5216	output_operand_lossage ("invalid %%S value");
5217
61a63ca5 5218 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
bf2a98b3 5219 break;
5220
9caef960 5221 case 't':
5222 {
5223 /* On Unicos/Mk systems: use a DEX expression if the symbol
5224 clashes with a register name. */
5225 int dex = unicosmk_need_dex (x);
5226 if (dex)
5227 fprintf (file, "DEX(%d)", dex);
5228 else
5229 output_addr_const (file, x);
5230 }
5231 break;
5232
62dc3582 5233 case 'C': case 'D': case 'c': case 'd':
bf2a98b3 5234 /* Write out comparison name. */
62dc3582 5235 {
5236 enum rtx_code c = GET_CODE (x);
5237
6720e96c 5238 if (!COMPARISON_P (x))
62dc3582 5239 output_operand_lossage ("invalid %%C value");
5240
f3d263a7 5241 else if (code == 'D')
62dc3582 5242 c = reverse_condition (c);
5243 else if (code == 'c')
5244 c = swap_condition (c);
5245 else if (code == 'd')
5246 c = swap_condition (reverse_condition (c));
5247
5248 if (c == LEU)
5249 fprintf (file, "ule");
5250 else if (c == LTU)
5251 fprintf (file, "ult");
a4110d9a 5252 else if (c == UNORDERED)
5253 fprintf (file, "un");
62dc3582 5254 else
5255 fprintf (file, "%s", GET_RTX_NAME (c));
5256 }
8ad50a44 5257 break;
5258
bf2a98b3 5259 case 'E':
5260 /* Write the divide or modulus operator. */
5261 switch (GET_CODE (x))
5262 {
5263 case DIV:
5264 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5265 break;
5266 case UDIV:
5267 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5268 break;
5269 case MOD:
5270 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5271 break;
5272 case UMOD:
5273 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5274 break;
5275 default:
5276 output_operand_lossage ("invalid %%E value");
5277 break;
5278 }
5279 break;
5280
bf2a98b3 5281 case 'A':
5282 /* Write "_u" for unaligned access. */
5283 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5284 fprintf (file, "_u");
5285 break;
5286
5287 case 0:
5288 if (GET_CODE (x) == REG)
5289 fprintf (file, "%s", reg_names[REGNO (x)]);
5290 else if (GET_CODE (x) == MEM)
5291 output_address (XEXP (x, 0));
5f7b9df8 5292 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5293 {
5294 switch (XINT (XEXP (x, 0), 1))
5295 {
5296 case UNSPEC_DTPREL:
5297 case UNSPEC_TPREL:
5298 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5299 break;
5300 default:
5301 output_operand_lossage ("unknown relocation unspec");
5302 break;
5303 }
5304 }
bf2a98b3 5305 else
5306 output_addr_const (file, x);
5307 break;
5308
5309 default:
5310 output_operand_lossage ("invalid %%xn code");
5311 }
5312}
6e0fe99e 5313
5314void
92643d95 5315print_operand_address (FILE *file, rtx addr)
6e0fe99e 5316{
a3e39a24 5317 int basereg = 31;
6e0fe99e 5318 HOST_WIDE_INT offset = 0;
5319
5320 if (GET_CODE (addr) == AND)
5321 addr = XEXP (addr, 0);
6e0fe99e 5322
a3e39a24 5323 if (GET_CODE (addr) == PLUS
5324 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6e0fe99e 5325 {
5326 offset = INTVAL (XEXP (addr, 1));
a3e39a24 5327 addr = XEXP (addr, 0);
6e0fe99e 5328 }
1f0ce6a6 5329
5330 if (GET_CODE (addr) == LO_SUM)
5331 {
5f7b9df8 5332 const char *reloc16, *reloclo;
5333 rtx op1 = XEXP (addr, 1);
5334
5335 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5336 {
5337 op1 = XEXP (op1, 0);
5338 switch (XINT (op1, 1))
5339 {
5340 case UNSPEC_DTPREL:
5341 reloc16 = NULL;
5342 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5343 break;
5344 case UNSPEC_TPREL:
5345 reloc16 = NULL;
5346 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5347 break;
5348 default:
5349 output_operand_lossage ("unknown relocation unspec");
5350 return;
5351 }
5352
5353 output_addr_const (file, XVECEXP (op1, 0, 0));
5354 }
5355 else
5356 {
5357 reloc16 = "gprel";
5358 reloclo = "gprellow";
5359 output_addr_const (file, op1);
5360 }
5361
1f0ce6a6 5362 if (offset)
4840a03a 5363 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
9e7454d0 5364
1f0ce6a6 5365 addr = XEXP (addr, 0);
4d10b463 5366 switch (GET_CODE (addr))
5367 {
5368 case REG:
5369 basereg = REGNO (addr);
5370 break;
5371
5372 case SUBREG:
5373 basereg = subreg_regno (addr);
5374 break;
5375
5376 default:
5377 gcc_unreachable ();
5378 }
5dcb037d 5379
5380 fprintf (file, "($%d)\t\t!%s", basereg,
5f7b9df8 5381 (basereg == 29 ? reloc16 : reloclo));
1f0ce6a6 5382 return;
5383 }
5384
4d10b463 5385 switch (GET_CODE (addr))
5386 {
5387 case REG:
5388 basereg = REGNO (addr);
5389 break;
5390
5391 case SUBREG:
5392 basereg = subreg_regno (addr);
5393 break;
5394
5395 case CONST_INT:
5396 offset = INTVAL (addr);
5397 break;
cf73d31f 5398
5399#if TARGET_ABI_OPEN_VMS
4d10b463 5400 case SYMBOL_REF:
cf73d31f 5401 fprintf (file, "%s", XSTR (addr, 0));
5402 return;
4d10b463 5403
5404 case CONST:
5405 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5406 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
6433714e 5407 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
cf73d31f 5408 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5409 INTVAL (XEXP (XEXP (addr, 0), 1)));
5410 return;
4d10b463 5411
cf73d31f 5412#endif
4d10b463 5413 default:
5414 gcc_unreachable ();
5415 }
6e0fe99e 5416
4840a03a 5417 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
6e0fe99e 5418}
bf2a98b3 5419\f
9e042f31 5420/* Emit RTL insns to initialize the variable parts of a trampoline at
5421 TRAMP. FNADDR is an RTX for the address of the function's pure
5422 code. CXT is an RTX for the static chain value for the function.
96297568 5423
5424 The three offset parameters are for the individual template's
9e7454d0 5425 layout. A JMPOFS < 0 indicates that the trampoline does not
96297568 5426 contain instructions at all.
5427
9e042f31 5428 We assume here that a function will be called many more times than
5429 its address is taken (e.g., it might be passed to qsort), so we
5430 take the trouble to initialize the "hint" field in the JMP insn.
5431 Note that the hint field is PC (new) + 4 * bits 13:0. */
5432
5433void
92643d95 5434alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5435 int fnofs, int cxtofs, int jmpofs)
9e042f31 5436{
5437 rtx temp, temp1, addr;
17683b9f 5438 /* VMS really uses DImode pointers in memory at this point. */
1467e953 5439 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
9e042f31 5440
17683b9f 5441#ifdef POINTERS_EXTEND_UNSIGNED
5442 fnaddr = convert_memory_address (mode, fnaddr);
5443 cxt = convert_memory_address (mode, cxt);
5444#endif
5445
9e042f31 5446 /* Store function address and CXT. */
46ba8e1c 5447 addr = memory_address (mode, plus_constant (tramp, fnofs));
7014838c 5448 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
46ba8e1c 5449 addr = memory_address (mode, plus_constant (tramp, cxtofs));
7014838c 5450 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
96297568 5451
5452 /* This has been disabled since the hint only has a 32k range, and in
65abff06 5453 no existing OS is the stack within 32k of the text segment. */
96297568 5454 if (0 && jmpofs >= 0)
5455 {
5456 /* Compute hint value. */
5457 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5458 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5459 OPTAB_WIDEN);
5460 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
7016c612 5461 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
6de9716c 5462 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5463 GEN_INT (0x3fff), 0);
96297568 5464
5465 /* Merge in the hint. */
5466 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
7014838c 5467 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
6de9716c 5468 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
96297568 5469 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5470 OPTAB_WIDEN);
7014838c 5471 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
96297568 5472 }
9e042f31 5473
5577e296 5474#ifdef ENABLE_EXECUTE_STACK
09a1f342 5475 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
f0bdd254 5476 0, VOIDmode, 1, tramp, Pmode);
9e042f31 5477#endif
5478
96297568 5479 if (jmpofs >= 0)
5480 emit_insn (gen_imb ());
9e042f31 5481}
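/* A minimal sketch (not compiler code) of the disabled hint arithmetic
   above, assuming plain byte addresses for FNADDR and TRAMP: the JMP
   hint encodes (target - updated PC) / 4 in its low 14 bits.  */
#if 0
static unsigned int
example_jmp_hint (unsigned long fnaddr, unsigned long tramp, int jmpofs)
{
  unsigned long updated_pc = tramp + jmpofs + 4;  /* PC after the JMP.  */
  return (unsigned int) ((fnaddr - updated_pc) >> 2) & 0x3fff;
}
#endif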
5482\f
915c336f 5483/* Determine where to put an argument to a function.
5484 Value is zero to push the argument on the stack,
5485 or a hard register in which to store the argument.
5486
5487 MODE is the argument's machine mode.
5488 TYPE is the data type of the argument (as a tree).
5489 This is null for libcalls where that information may
5490 not be available.
5491 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5492 the preceding args and about the function being called.
5493 NAMED is nonzero if this argument is a named parameter
5494 (otherwise it is an extra parameter matching an ellipsis).
5495
5496 On Alpha the first 6 words of args are normally in registers
5497 and the rest are pushed. */
5498
5499rtx
92643d95 5500function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5501 int named ATTRIBUTE_UNUSED)
915c336f 5502{
5503 int basereg;
57e47080 5504 int num_args;
915c336f 5505
a685f5d8 5506 /* Don't get confused and pass small structures in FP registers. */
5507 if (type && AGGREGATE_TYPE_P (type))
9caef960 5508 basereg = 16;
a685f5d8 5509 else
5510 {
5511#ifdef ENABLE_CHECKING
92d40bc4 5512 /* With alpha_split_complex_arg, we shouldn't see any raw complex
a685f5d8 5513 values here. */
4d10b463 5514 gcc_assert (!COMPLEX_MODE_P (mode));
a685f5d8 5515#endif
5516
5517 /* Set up defaults for FP operands passed in FP registers, and
5518 integral operands passed in integer registers. */
5519 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5520 basereg = 32 + 16;
5521 else
5522 basereg = 16;
5523 }
9caef960 5524
5525 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5526 the three platforms, so we can't avoid conditional compilation. */
1467e953 5527#if TARGET_ABI_OPEN_VMS
9caef960 5528 {
5529 if (mode == VOIDmode)
5530 return alpha_arg_info_reg_val (cum);
1467e953 5531
9caef960 5532 num_args = cum.num_args;
0336f0f0 5533 if (num_args >= 6
5534 || targetm.calls.must_pass_in_stack (mode, type))
9caef960 5535 return NULL_RTX;
5536 }
a685f5d8 5537#elif TARGET_ABI_UNICOSMK
9caef960 5538 {
5539 int size;
915c336f 5540
9caef960 5541 /* If this is the last argument, generate the call info word (CIW). */
5542 /* ??? We don't include the caller's line number in the CIW because
 5543 I don't know how to determine it if debug info is turned off. */
5544 if (mode == VOIDmode)
5545 {
5546 int i;
5547 HOST_WIDE_INT lo;
5548 HOST_WIDE_INT hi;
5549 rtx ciw;
5550
5551 lo = 0;
5552
5553 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5554 if (cum.reg_args_type[i])
5555 lo |= (1 << (7 - i));
5556
5557 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5558 lo |= 7;
5559 else
5560 lo |= cum.num_reg_words;
5561
5562#if HOST_BITS_PER_WIDE_INT == 32
5563 hi = (cum.num_args << 20) | cum.num_arg_words;
5564#else
e162157f 5565 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5566 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
9caef960 5567 hi = 0;
5568#endif
5569 ciw = immed_double_const (lo, hi, DImode);
5570
5571 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5572 UNSPEC_UMK_LOAD_CIW);
5573 }
5574
5575 size = ALPHA_ARG_SIZE (mode, type, named);
5576 num_args = cum.num_reg_words;
0336f0f0 5577 if (cum.force_stack
5578 || cum.num_reg_words + size > 6
5579 || targetm.calls.must_pass_in_stack (mode, type))
9caef960 5580 return NULL_RTX;
5581 else if (type && TYPE_MODE (type) == BLKmode)
5582 {
5583 rtx reg1, reg2;
5584
5585 reg1 = gen_rtx_REG (DImode, num_args + 16);
5586 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5587
5588 /* The argument fits in two registers. Note that we still need to
5589 reserve a register for empty structures. */
5590 if (size == 0)
5591 return NULL_RTX;
5592 else if (size == 1)
5593 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5594 else
5595 {
5596 reg2 = gen_rtx_REG (DImode, num_args + 17);
5597 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5598 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5599 }
5600 }
5601 }
a685f5d8 5602#elif TARGET_ABI_OSF
9caef960 5603 {
5604 if (cum >= 6)
5605 return NULL_RTX;
5606 num_args = cum;
5607
5608 /* VOID is passed as a special flag for "last argument". */
5609 if (type == void_type_node)
5610 basereg = 16;
0336f0f0 5611 else if (targetm.calls.must_pass_in_stack (mode, type))
9caef960 5612 return NULL_RTX;
9caef960 5613 }
a685f5d8 5614#else
5615#error Unhandled ABI
5616#endif
915c336f 5617
57e47080 5618 return gen_rtx_REG (mode, num_args + basereg);
915c336f 5619}
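/* A sketch of the OSF register mapping computed above; the helper
   below is hypothetical and only illustrates the arithmetic.
   Argument word N, for N < 6, lands in $(16+N), or in $f(16+N) when
   it is a floating-point scalar and FP registers are enabled.  */
#if 0
static int
example_osf_arg_regno (int num_args, int is_float)
{
  return (is_float ? 32 + 16 : 16) + num_args;  /* $f16-$f21 or $16-$21.  */
}
#endif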
5620
f054eb3c 5621static int
5622alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5623 enum machine_mode mode ATTRIBUTE_UNUSED,
5624 tree type ATTRIBUTE_UNUSED,
5625 bool named ATTRIBUTE_UNUSED)
5626{
5627 int words = 0;
5628
5629#if TARGET_ABI_OPEN_VMS
5630 if (cum->num_args < 6
5631 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5c5b637a 5632 words = 6 - cum->num_args;
f054eb3c 5633#elif TARGET_ABI_UNICOSMK
5634 /* Never any split arguments. */
5635#elif TARGET_ABI_OSF
5636 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5637 words = 6 - *cum;
5638#else
5639#error Unhandled ABI
5640#endif
5641
5642 return words * UNITS_PER_WORD;
5643}
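/* Worked sketch of the OSF case above (hypothetical helper, not
   compiler code): a two-word argument starting at *CUM == 5 straddles
   the register/stack boundary, so one word travels in $21 and the
   rest goes on the stack.  */
#if 0
static int
example_partial_bytes (int cum, int arg_size_words)
{
  if (cum < 6 && 6 < cum + arg_size_words)
    return (6 - cum) * 8;             /* UNITS_PER_WORD is 8 on Alpha.  */
  return 0;                           /* Fully in registers or on stack.  */
}
/* example_partial_bytes (5, 2) == 8.  */
#endif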
5644
5645
a685f5d8 5646/* Return true if TYPE must be returned in memory, instead of in registers. */
5647
dd9f3024 5648static bool
5649alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
a685f5d8 5650{
dd9f3024 5651 enum machine_mode mode = VOIDmode;
a685f5d8 5652 int size;
5653
5654 if (type)
5655 {
5656 mode = TYPE_MODE (type);
5657
5658 /* All aggregates are returned in memory. */
5659 if (AGGREGATE_TYPE_P (type))
5660 return true;
5661 }
5662
5663 size = GET_MODE_SIZE (mode);
5664 switch (GET_MODE_CLASS (mode))
5665 {
5666 case MODE_VECTOR_FLOAT:
5667 /* Pass all float vectors in memory, like an aggregate. */
5668 return true;
5669
5670 case MODE_COMPLEX_FLOAT:
5671 /* We judge complex floats on the size of their element,
5672 not the size of the whole type. */
5673 size = GET_MODE_UNIT_SIZE (mode);
5674 break;
5675
5676 case MODE_INT:
5677 case MODE_FLOAT:
5678 case MODE_COMPLEX_INT:
5679 case MODE_VECTOR_INT:
5680 break;
5681
5682 default:
9e7454d0 5683 /* ??? We get called on all sorts of random stuff from
4d10b463 5684 aggregate_value_p. We must return something, but it's not
5685 clear what's safe to return. Pretend it's a struct I
5686 guess. */
a685f5d8 5687 return true;
5688 }
5689
5690 /* Otherwise types must fit in one register. */
5691 return size > UNITS_PER_WORD;
5692}
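/* Worked sketch of the rules above: 'complex double' (8-byte element)
   passes the one-register test and is returned in registers, while
   'complex long double' (TCmode, 16-byte element), every aggregate
   and every float vector is returned in memory.  */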
5693
b981d932 5694/* Return true if TYPE should be passed by invisible reference. */
5695
5696static bool
5697alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5698 enum machine_mode mode,
5699 tree type ATTRIBUTE_UNUSED,
5700 bool named ATTRIBUTE_UNUSED)
5701{
5702 return mode == TFmode || mode == TCmode;
5703}
5704
a685f5d8 5705/* Define how to find the value returned by a function. VALTYPE is the
5706 data type of the value (as a tree). If the precise function being
5707 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5708 MODE is set instead of VALTYPE for libcalls.
5709
5710 On Alpha the value is found in $0 for integer functions and
5711 $f0 for floating-point functions. */
5712
5713rtx
5714function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5715 enum machine_mode mode)
5716{
79db42ad 5717 unsigned int regnum, dummy;
a685f5d8 5718 enum mode_class class;
5719
4d10b463 5720 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
a685f5d8 5721
5722 if (valtype)
5723 mode = TYPE_MODE (valtype);
5724
5725 class = GET_MODE_CLASS (mode);
5726 switch (class)
5727 {
5728 case MODE_INT:
79db42ad 5729 PROMOTE_MODE (mode, dummy, valtype);
8e262b5e 5730 /* FALLTHRU */
a685f5d8 5731
5732 case MODE_COMPLEX_INT:
5733 case MODE_VECTOR_INT:
5734 regnum = 0;
5735 break;
5736
5737 case MODE_FLOAT:
5738 regnum = 32;
5739 break;
5740
5741 case MODE_COMPLEX_FLOAT:
5742 {
5743 enum machine_mode cmode = GET_MODE_INNER (mode);
5744
5745 return gen_rtx_PARALLEL
5746 (VOIDmode,
5747 gen_rtvec (2,
5748 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
bcd9bd66 5749 const0_rtx),
a685f5d8 5750 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5751 GEN_INT (GET_MODE_SIZE (cmode)))));
5752 }
5753
5754 default:
4d10b463 5755 gcc_unreachable ();
a685f5d8 5756 }
5757
5758 return gen_rtx_REG (mode, regnum);
5759}
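/* Worked sketch: under the switch above, 'int' is promoted to DImode
   and returned in $0 (regnum 0), 'double' comes back in $f0 (regnum
   32), and 'complex double' as the ($f0, $f1) PARALLEL built in the
   MODE_COMPLEX_FLOAT case.  */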
5760
9e7454d0 5761/* TCmode complex values are passed by invisible reference. We
92d40bc4 5762 should not split these values. */
5763
5764static bool
5765alpha_split_complex_arg (tree type)
5766{
5767 return TYPE_MODE (type) != TCmode;
5768}
5769
2e15d750 5770static tree
5771alpha_build_builtin_va_list (void)
bf2a98b3 5772{
7ba21c9f 5773 tree base, ofs, space, record, type_decl;
bf2a98b3 5774
9caef960 5775 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
e7aabeab 5776 return ptr_type_node;
5777
a1f71e15 5778 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
0054fd98 5779 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5780 TREE_CHAIN (record) = type_decl;
5781 TYPE_NAME (record) = type_decl;
5782
e7aabeab 5783 /* C++? SET_IS_AGGR_TYPE (record, 1); */
bf2a98b3 5784
7ba21c9f 5785 /* Dummy field to prevent alignment warnings. */
5786 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5787 DECL_FIELD_CONTEXT (space) = record;
5788 DECL_ARTIFICIAL (space) = 1;
5789 DECL_IGNORED_P (space) = 1;
5790
e7aabeab 5791 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5792 integer_type_node);
5793 DECL_FIELD_CONTEXT (ofs) = record;
7ba21c9f 5794 TREE_CHAIN (ofs) = space;
fc4c89ed 5795
e7aabeab 5796 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5797 ptr_type_node);
5798 DECL_FIELD_CONTEXT (base) = record;
5799 TREE_CHAIN (base) = ofs;
fc4c89ed 5800
e7aabeab 5801 TYPE_FIELDS (record) = base;
5802 layout_type (record);
5803
a6c787e5 5804 va_list_gpr_counter_field = ofs;
e7aabeab 5805 return record;
5806}
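/* For reference, a sketch of the OSF record laid out above; this is
   an assumed source-level equivalent only, never compiled:

     struct __va_list_tag
     {
       void *__base;        start of the register save area
       int __offset;        bytes of arguments consumed so far
       int <unnamed>;       dummy field avoiding alignment warnings
     };

   VMS and Unicos/Mk use a plain pointer instead.  */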
5807
7955d282 5808#if TARGET_ABI_OSF
a6c787e5 5809/* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5810 and constant additions. */
5811
5812static tree
5813va_list_skip_additions (tree lhs)
5814{
5815 tree rhs, stmt;
5816
5817 if (TREE_CODE (lhs) != SSA_NAME)
5818 return lhs;
5819
5820 for (;;)
5821 {
5822 stmt = SSA_NAME_DEF_STMT (lhs);
5823
5824 if (TREE_CODE (stmt) == PHI_NODE)
5825 return stmt;
5826
35cc02b5 5827 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5828 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
a6c787e5 5829 return lhs;
5830
35cc02b5 5831 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
a6c787e5 5832 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5833 rhs = TREE_OPERAND (rhs, 0);
5834
5835 if ((TREE_CODE (rhs) != NOP_EXPR
5836 && TREE_CODE (rhs) != CONVERT_EXPR
5837 && (TREE_CODE (rhs) != PLUS_EXPR
5838 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5839 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5840 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5841 return rhs;
5842
5843 lhs = TREE_OPERAND (rhs, 0);
5844 }
5845}
5846
5847/* Check if LHS = RHS statement is
5848 LHS = *(ap.__base + ap.__offset + cst)
5849 or
5850 LHS = *(ap.__base
5851 + ((ap.__offset + cst <= 47)
5852 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5853 If the former, indicate that GPR registers are needed,
5854 if the latter, indicate that FPR registers are needed.
adde8f91 5855
5856 Also look for LHS = (*ptr).field, where ptr is one of the forms
5857 listed above.
5858
a6c787e5 5859 On alpha, cfun->va_list_gpr_size is used as the size of the needed
adde8f91 5860 regs, and cfun->va_list_fpr_size is a bitmask: bit 0 is set if GPR
 5861 registers are needed and bit 1 is set if FPR registers are needed.
5862 Return true if va_list references should not be scanned for the
5863 current statement. */
a6c787e5 5864
5865static bool
5866alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5867{
5868 tree base, offset, arg1, arg2;
5869 int offset_arg = 1;
5870
adde8f91 5871 while (handled_component_p (rhs))
5872 rhs = TREE_OPERAND (rhs, 0);
a6c787e5 5873 if (TREE_CODE (rhs) != INDIRECT_REF
5874 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5875 return false;
5876
5877 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5878 if (lhs == NULL_TREE
5879 || TREE_CODE (lhs) != PLUS_EXPR)
5880 return false;
5881
5882 base = TREE_OPERAND (lhs, 0);
5883 if (TREE_CODE (base) == SSA_NAME)
5884 base = va_list_skip_additions (base);
5885
5886 if (TREE_CODE (base) != COMPONENT_REF
5887 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5888 {
5889 base = TREE_OPERAND (lhs, 0);
5890 if (TREE_CODE (base) == SSA_NAME)
5891 base = va_list_skip_additions (base);
5892
5893 if (TREE_CODE (base) != COMPONENT_REF
5894 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5895 return false;
5896
5897 offset_arg = 0;
5898 }
5899
5900 base = get_base_address (base);
5901 if (TREE_CODE (base) != VAR_DECL
dda53cd5 5902 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
a6c787e5 5903 return false;
5904
5905 offset = TREE_OPERAND (lhs, offset_arg);
5906 if (TREE_CODE (offset) == SSA_NAME)
5907 offset = va_list_skip_additions (offset);
5908
5909 if (TREE_CODE (offset) == PHI_NODE)
5910 {
5911 HOST_WIDE_INT sub;
5912
5913 if (PHI_NUM_ARGS (offset) != 2)
5914 goto escapes;
5915
5916 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5917 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
7955d282 5918 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
a6c787e5 5919 {
5920 tree tem = arg1;
a6c787e5 5921 arg1 = arg2;
5922 arg2 = tem;
a6c787e5 5923
7955d282 5924 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5925 goto escapes;
5926 }
5927 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
a6c787e5 5928 goto escapes;
5929
5930 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5931 if (TREE_CODE (arg2) == MINUS_EXPR)
5932 sub = -sub;
5933 if (sub < -48 || sub > -32)
5934 goto escapes;
5935
5936 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
7955d282 5937 if (arg1 != arg2)
5938 goto escapes;
5939
5940 if (TREE_CODE (arg1) == SSA_NAME)
5941 arg1 = va_list_skip_additions (arg1);
5942
5943 if (TREE_CODE (arg1) != COMPONENT_REF
a6c787e5 5944 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5945 || get_base_address (arg1) != base)
5946 goto escapes;
5947
5948 /* Need floating point regs. */
5949 cfun->va_list_fpr_size |= 2;
5950 }
5951 else if (TREE_CODE (offset) != COMPONENT_REF
5952 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5953 || get_base_address (offset) != base)
5954 goto escapes;
5955 else
5956 /* Need general regs. */
5957 cfun->va_list_fpr_size |= 1;
5958 return false;
5959
5960escapes:
5961 si->va_list_escapes = true;
5962 return false;
5963}
7955d282 5964#endif
a6c787e5 5965
4310aa50 5966/* Perform any actions needed for a function that is receiving a
dd9f3024 5967 variable number of arguments. */
4310aa50 5968
dd9f3024 5969static void
2dc656b7 5970alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5971 tree type, int *pretend_size, int no_rtl)
dd9f3024 5972{
2dc656b7 5973 CUMULATIVE_ARGS cum = *pcum;
5974
5975 /* Skip the current argument. */
5976 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5977
dd9f3024 5978#if TARGET_ABI_UNICOSMK
5979 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5980 arguments on the stack. Unfortunately, it doesn't always store the first
5981 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5982 with stdargs as we always have at least one named argument there. */
2dc656b7 5983 if (cum.num_reg_words < 6)
dd9f3024 5984 {
5985 if (!no_rtl)
5986 {
2dc656b7 5987 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
dd9f3024 5988 emit_insn (gen_arg_home_umk ());
5989 }
5990 *pretend_size = 0;
5991 }
5992#elif TARGET_ABI_OPEN_VMS
5993 /* For VMS, we allocate space for all 6 arg registers plus a count.
4310aa50 5994
dd9f3024 5995 However, if NO registers need to be saved, don't allocate any space.
5996 This is not only because we won't need the space, but because AP
5997 includes the current_pretend_args_size and we don't want to mess up
5998 any ap-relative addresses already made. */
2dc656b7 5999 if (cum.num_args < 6)
dd9f3024 6000 {
6001 if (!no_rtl)
6002 {
6003 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6004 emit_insn (gen_arg_home ());
6005 }
6006 *pretend_size = 7 * UNITS_PER_WORD;
6007 }
6008#else
6009 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6010 only push those that are remaining. However, if NO registers need to
6011 be saved, don't allocate any space. This is not only because we won't
6012 need the space, but because AP includes the current_pretend_args_size
6013 and we don't want to mess up any ap-relative addresses already made.
6014
6015 If we are not to use the floating-point registers, save the integer
6016 registers where we would put the floating-point registers. This is
6017 not the most efficient way to implement varargs with just one register
6018 class, but it isn't worth doing anything more efficient in this rare
6019 case. */
4310aa50 6020 if (cum >= 6)
6021 return;
6022
6023 if (!no_rtl)
6024 {
32c2fdea 6025 int count;
6026 alias_set_type set = get_varargs_alias_set ();
4310aa50 6027 rtx tmp;
6028
7955d282 6029 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6030 if (count > 6 - cum)
6031 count = 6 - cum;
4310aa50 6032
7955d282 6033 /* Detect whether integer registers or floating-point registers
6034 are needed by the detected va_arg statements. See above for
6035 how these values are computed. Note that the "escape" value
6036 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6037 these bits set. */
6038 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6039
6040 if (cfun->va_list_fpr_size & 1)
6041 {
6042 tmp = gen_rtx_MEM (BLKmode,
6043 plus_constant (virtual_incoming_args_rtx,
6044 (cum + 6) * UNITS_PER_WORD));
ae2dd339 6045 MEM_NOTRAP_P (tmp) = 1;
7955d282 6046 set_mem_alias_set (tmp, set);
6047 move_block_from_reg (16 + cum, tmp, count);
6048 }
6049
6050 if (cfun->va_list_fpr_size & 2)
6051 {
6052 tmp = gen_rtx_MEM (BLKmode,
6053 plus_constant (virtual_incoming_args_rtx,
6054 cum * UNITS_PER_WORD));
ae2dd339 6055 MEM_NOTRAP_P (tmp) = 1;
7955d282 6056 set_mem_alias_set (tmp, set);
6057 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6058 }
6059 }
4310aa50 6060 *pretend_size = 12 * UNITS_PER_WORD;
f6940372 6061#endif
dd9f3024 6062}
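/* Worked sketch of the OSF layout established above, for CUM == 0 and
   assuming both register classes are live so the full count is saved:
   $f16-$f21 are stored at offsets 0..47 from the incoming-args base
   and $16-$21 at offsets 48..95, which is the 12-word pretend size
   reported just above.  */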
4310aa50 6063
e7aabeab 6064void
92643d95 6065alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
e7aabeab 6066{
6067 HOST_WIDE_INT offset;
6068 tree t, offset_field, base_field;
fc4c89ed 6069
80909c64 6070 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6071 return;
6072
fc264da3 6073 if (TARGET_ABI_UNICOSMK)
7df226a2 6074 std_expand_builtin_va_start (valist, nextarg);
e7aabeab 6075
6644435d 6076 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
e7aabeab 6077 up by 48, storing fp arg registers in the first 48 bytes, and the
6078 integer arg registers in the next 48 bytes. This is only done,
6079 however, if any integer registers need to be stored.
6080
6081 If no integer registers need be stored, then we must subtract 48
6082 in order to account for the integer arg registers which are counted
4310aa50 6083 in argsize above, but which are not actually stored on the stack.
6084 Must further be careful here about structures straddling the last
9e7454d0 6085 integer argument register; that futzes with pretend_args_size,
4310aa50 6086 which changes the meaning of AP. */
e7aabeab 6087
2dc656b7 6088 if (NUM_ARGS < 6)
fc264da3 6089 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
8df4a58b 6090 else
4310aa50 6091 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
e7aabeab 6092
fc264da3 6093 if (TARGET_ABI_OPEN_VMS)
6094 {
6095 nextarg = plus_constant (nextarg, offset);
6096 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
35cc02b5 6097 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
ed03eadb 6098 make_tree (ptr_type_node, nextarg));
fc264da3 6099 TREE_SIDE_EFFECTS (t) = 1;
e7aabeab 6100
fc264da3 6101 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6102 }
6103 else
6104 {
6105 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6106 offset_field = TREE_CHAIN (base_field);
6107
ed03eadb 6108 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6109 valist, base_field, NULL_TREE);
6110 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6111 valist, offset_field, NULL_TREE);
fc264da3 6112
6113 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
ed03eadb 6114 t = build2 (PLUS_EXPR, ptr_type_node, t,
6115 build_int_cst (NULL_TREE, offset));
35cc02b5 6116 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
fc264da3 6117 TREE_SIDE_EFFECTS (t) = 1;
6118 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6119
7016c612 6120 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
35cc02b5 6121 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6122 offset_field, t);
fc264da3 6123 TREE_SIDE_EFFECTS (t) = 1;
6124 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6125 }
e7aabeab 6126}
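/* Worked sketch of the OSF case above: with two named arguments,
   NUM_ARGS == 2, so __base is set 48 bytes past the incoming-args
   base (skipping the FP save block) and __offset to 16.  An integer
   va_arg then reads the saved $18 at __base + 16, while a floating
   va_arg with __offset < 48 reads __base + __offset - 48, i.e. the
   saved $f18 (see alpha_gimplify_va_arg_1 below).  */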
6127
de8f9b94 6128static tree
c7b3f103 6129alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
de8f9b94 6130{
c7b3f103 6131 tree type_size, ptr_type, addend, t, addr, internal_post;
de8f9b94 6132
de8f9b94 6133 /* If the type could not be passed in registers, skip the block
6134 reserved for the registers. */
0336f0f0 6135 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
de8f9b94 6136 {
7016c612 6137 t = build_int_cst (TREE_TYPE (offset), 6*8);
35cc02b5 6138 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
ed03eadb 6139 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
de8f9b94 6140 gimplify_and_add (t, pre_p);
6141 }
6142
6143 addend = offset;
6144 ptr_type = build_pointer_type (type);
de8f9b94 6145
2cd7bb84 6146 if (TREE_CODE (type) == COMPLEX_TYPE)
de8f9b94 6147 {
6148 tree real_part, imag_part, real_temp;
6149
c7b3f103 6150 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6151 offset, pre_p);
6152
6153 /* Copy the value into a new temporary, lest the formal temporary
de8f9b94 6154 be reused out from under us. */
c7b3f103 6155 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
de8f9b94 6156
c7b3f103 6157 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6158 offset, pre_p);
de8f9b94 6159
ed03eadb 6160 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
de8f9b94 6161 }
6162 else if (TREE_CODE (type) == REAL_TYPE)
6163 {
6164 tree fpaddend, cond, fourtyeight;
6165
7016c612 6166 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
ed03eadb 6167 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6168 addend, fourtyeight);
6169 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6170 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6171 fpaddend, addend);
de8f9b94 6172 }
6173
6174 /* Build the final address and force that value into a temporary. */
ed03eadb 6175 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6176 fold_convert (ptr_type, addend));
c7b3f103 6177 internal_post = NULL;
6178 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6179 append_to_statement_list (internal_post, pre_p);
de8f9b94 6180
6181 /* Update the offset field. */
c7b3f103 6182 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6183 if (type_size == NULL || TREE_OVERFLOW (type_size))
6184 t = size_zero_node;
6185 else
6186 {
6187 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6188 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6189 t = size_binop (MULT_EXPR, t, size_int (8));
6190 }
6191 t = fold_convert (TREE_TYPE (offset), t);
35cc02b5 6192 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
ed03eadb 6193 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
de8f9b94 6194 gimplify_and_add (t, pre_p);
6195
063f5fdd 6196 return build_va_arg_indirect_ref (addr);
de8f9b94 6197}
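/* Sketch of the rounding above (hypothetical helper, host arithmetic
   only): each argument advances __offset by its size rounded up to
   whole 8-byte words.  */
#if 0
static long
example_va_arg_size (long type_size)
{
  return (type_size + 7) / 8 * 8;     /* e.g. 12 -> 16, 8 -> 8.  */
}
#endif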
6198
e0eca1fa 6199static tree
6200alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
de8f9b94 6201{
e0eca1fa 6202 tree offset_field, base_field, offset, base, t, r;
2cd7bb84 6203 bool indirect;
de8f9b94 6204
6205 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
e0eca1fa 6206 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
de8f9b94 6207
6208 base_field = TYPE_FIELDS (va_list_type_node);
6209 offset_field = TREE_CHAIN (base_field);
ed03eadb 6210 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6211 valist, base_field, NULL_TREE);
6212 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6213 valist, offset_field, NULL_TREE);
de8f9b94 6214
c7b3f103 6215 /* Pull the fields of the structure out into temporaries. Since we never
6216 modify the base field, we can use a formal temporary. Sign-extend the
6217 offset field so that it's the proper width for pointer arithmetic. */
6218 base = get_formal_tmp_var (base_field, pre_p);
de8f9b94 6219
c7b3f103 6220 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6221 offset = get_initialized_tmp_var (t, pre_p, NULL);
de8f9b94 6222
2cd7bb84 6223 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6224 if (indirect)
6225 type = build_pointer_type (type);
6226
de8f9b94 6227 /* Find the value. Note that this will be a stable indirection, or
6228 a composite of stable indirections in the case of complex. */
c7b3f103 6229 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
de8f9b94 6230
6231 /* Stuff the offset temporary back into its field. */
35cc02b5 6232 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
ed03eadb 6233 fold_convert (TREE_TYPE (offset_field), offset));
de8f9b94 6234 gimplify_and_add (t, pre_p);
e0eca1fa 6235
2cd7bb84 6236 if (indirect)
063f5fdd 6237 r = build_va_arg_indirect_ref (r);
2cd7bb84 6238
e0eca1fa 6239 return r;
de8f9b94 6240}
bf2a98b3 6241\f
f2cc13dc 6242/* Builtins. */
6243
6244enum alpha_builtin
6245{
6246 ALPHA_BUILTIN_CMPBGE,
ae4cd3a5 6247 ALPHA_BUILTIN_EXTBL,
6248 ALPHA_BUILTIN_EXTWL,
6249 ALPHA_BUILTIN_EXTLL,
f2cc13dc 6250 ALPHA_BUILTIN_EXTQL,
ae4cd3a5 6251 ALPHA_BUILTIN_EXTWH,
6252 ALPHA_BUILTIN_EXTLH,
f2cc13dc 6253 ALPHA_BUILTIN_EXTQH,
ae4cd3a5 6254 ALPHA_BUILTIN_INSBL,
6255 ALPHA_BUILTIN_INSWL,
6256 ALPHA_BUILTIN_INSLL,
6257 ALPHA_BUILTIN_INSQL,
6258 ALPHA_BUILTIN_INSWH,
6259 ALPHA_BUILTIN_INSLH,
6260 ALPHA_BUILTIN_INSQH,
6261 ALPHA_BUILTIN_MSKBL,
6262 ALPHA_BUILTIN_MSKWL,
6263 ALPHA_BUILTIN_MSKLL,
6264 ALPHA_BUILTIN_MSKQL,
6265 ALPHA_BUILTIN_MSKWH,
6266 ALPHA_BUILTIN_MSKLH,
6267 ALPHA_BUILTIN_MSKQH,
6268 ALPHA_BUILTIN_UMULH,
f2cc13dc 6269 ALPHA_BUILTIN_ZAP,
6270 ALPHA_BUILTIN_ZAPNOT,
6271 ALPHA_BUILTIN_AMASK,
6272 ALPHA_BUILTIN_IMPLVER,
6273 ALPHA_BUILTIN_RPCC,
938e069b 6274 ALPHA_BUILTIN_THREAD_POINTER,
6275 ALPHA_BUILTIN_SET_THREAD_POINTER,
f2cc13dc 6276
6277 /* TARGET_MAX */
6278 ALPHA_BUILTIN_MINUB8,
6279 ALPHA_BUILTIN_MINSB8,
6280 ALPHA_BUILTIN_MINUW4,
6281 ALPHA_BUILTIN_MINSW4,
6282 ALPHA_BUILTIN_MAXUB8,
6283 ALPHA_BUILTIN_MAXSB8,
6284 ALPHA_BUILTIN_MAXUW4,
6285 ALPHA_BUILTIN_MAXSW4,
6286 ALPHA_BUILTIN_PERR,
6287 ALPHA_BUILTIN_PKLB,
6288 ALPHA_BUILTIN_PKWB,
6289 ALPHA_BUILTIN_UNPKBL,
6290 ALPHA_BUILTIN_UNPKBW,
6291
ae4cd3a5 6292 /* TARGET_CIX */
6293 ALPHA_BUILTIN_CTTZ,
6294 ALPHA_BUILTIN_CTLZ,
6295 ALPHA_BUILTIN_CTPOP,
6296
f2cc13dc 6297 ALPHA_BUILTIN_max
6298};
6299
ae4cd3a5 6300static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6301 CODE_FOR_builtin_cmpbge,
6302 CODE_FOR_builtin_extbl,
6303 CODE_FOR_builtin_extwl,
6304 CODE_FOR_builtin_extll,
6305 CODE_FOR_builtin_extql,
6306 CODE_FOR_builtin_extwh,
6307 CODE_FOR_builtin_extlh,
6308 CODE_FOR_builtin_extqh,
6309 CODE_FOR_builtin_insbl,
6310 CODE_FOR_builtin_inswl,
6311 CODE_FOR_builtin_insll,
6312 CODE_FOR_builtin_insql,
6313 CODE_FOR_builtin_inswh,
6314 CODE_FOR_builtin_inslh,
6315 CODE_FOR_builtin_insqh,
6316 CODE_FOR_builtin_mskbl,
6317 CODE_FOR_builtin_mskwl,
6318 CODE_FOR_builtin_mskll,
6319 CODE_FOR_builtin_mskql,
6320 CODE_FOR_builtin_mskwh,
6321 CODE_FOR_builtin_msklh,
6322 CODE_FOR_builtin_mskqh,
6323 CODE_FOR_umuldi3_highpart,
6324 CODE_FOR_builtin_zap,
6325 CODE_FOR_builtin_zapnot,
6326 CODE_FOR_builtin_amask,
6327 CODE_FOR_builtin_implver,
6328 CODE_FOR_builtin_rpcc,
938e069b 6329 CODE_FOR_load_tp,
6330 CODE_FOR_set_tp,
ae4cd3a5 6331
6332 /* TARGET_MAX */
6333 CODE_FOR_builtin_minub8,
6334 CODE_FOR_builtin_minsb8,
6335 CODE_FOR_builtin_minuw4,
6336 CODE_FOR_builtin_minsw4,
6337 CODE_FOR_builtin_maxub8,
6338 CODE_FOR_builtin_maxsb8,
6339 CODE_FOR_builtin_maxuw4,
6340 CODE_FOR_builtin_maxsw4,
6341 CODE_FOR_builtin_perr,
6342 CODE_FOR_builtin_pklb,
6343 CODE_FOR_builtin_pkwb,
6344 CODE_FOR_builtin_unpkbl,
6345 CODE_FOR_builtin_unpkbw,
6346
6347 /* TARGET_CIX */
849c7bc6 6348 CODE_FOR_ctzdi2,
6349 CODE_FOR_clzdi2,
6350 CODE_FOR_popcountdi2
ae4cd3a5 6351};
6352
f2cc13dc 6353struct alpha_builtin_def
6354{
6355 const char *name;
6356 enum alpha_builtin code;
6357 unsigned int target_mask;
849c7bc6 6358 bool is_const;
f2cc13dc 6359};
6360
6361static struct alpha_builtin_def const zero_arg_builtins[] = {
849c7bc6 6362 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6363 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
f2cc13dc 6364};
6365
6366static struct alpha_builtin_def const one_arg_builtins[] = {
849c7bc6 6367 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6368 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6369 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6370 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6371 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6372 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6373 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6374 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
f2cc13dc 6375};
6376
6377static struct alpha_builtin_def const two_arg_builtins[] = {
849c7bc6 6378 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6379 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6380 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6381 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6382 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6383 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6384 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6385 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6386 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6387 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6388 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6389 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6390 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6391 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6392 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6393 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6394 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6395 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6396 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6397 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6398 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6399 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6400 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6401 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6402 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6403 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6404 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6405 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6406 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6407 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6408 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6409 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6410 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6411 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
f2cc13dc 6412};
6413
849c7bc6 6414static GTY(()) tree alpha_v8qi_u;
6415static GTY(()) tree alpha_v8qi_s;
6416static GTY(()) tree alpha_v4hi_u;
6417static GTY(()) tree alpha_v4hi_s;
6418
b657e73a 6419/* Helper function of alpha_init_builtins. Add the COUNT built-in
6420 functions pointed to by P, with function type FTYPE. */
6421
6422static void
6423alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6424 tree ftype)
6425{
6426 tree decl;
6427 size_t i;
6428
6429 for (i = 0; i < count; ++i, ++p)
6430 if ((target_flags & p->target_mask) == p->target_mask)
6431 {
6432 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6433 NULL, NULL);
6434 if (p->is_const)
6435 TREE_READONLY (decl) = 1;
6436 TREE_NOTHROW (decl) = 1;
6437 }
6438}
6439
6440
f2cc13dc 6441static void
92643d95 6442alpha_init_builtins (void)
f2cc13dc 6443{
e2dc233c 6444 tree dimode_integer_type_node;
b657e73a 6445 tree ftype, decl;
f2cc13dc 6446
e2dc233c 6447 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6448
e2dc233c 6449 ftype = build_function_type (dimode_integer_type_node, void_list_node);
b657e73a 6450 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6451 ftype);
f2cc13dc 6452
e2dc233c 6453 ftype = build_function_type_list (dimode_integer_type_node,
6454 dimode_integer_type_node, NULL_TREE);
b657e73a 6455 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6456 ftype);
f2cc13dc 6457
e2dc233c 6458 ftype = build_function_type_list (dimode_integer_type_node,
6459 dimode_integer_type_node,
6460 dimode_integer_type_node, NULL_TREE);
b657e73a 6461 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6462 ftype);
938e069b 6463
6464 ftype = build_function_type (ptr_type_node, void_list_node);
b657e73a 6465 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6466 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6467 NULL, NULL);
6468 TREE_NOTHROW (decl) = 1;
938e069b 6469
8b55c4ba 6470 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
b657e73a 6471 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6472 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6473 NULL, NULL);
6474 TREE_NOTHROW (decl) = 1;
849c7bc6 6475
6476 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6477 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6478 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6479 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
f2cc13dc 6480}
6481
6482/* Expand an expression EXP that calls a built-in function,
6483 with result going to TARGET if that's convenient
6484 (and in mode MODE if that's convenient).
6485 SUBTARGET may be used as the target for computing one of EXP's operands.
6486 IGNORE is nonzero if the value is to be ignored. */
6487
6488static rtx
92643d95 6489alpha_expand_builtin (tree exp, rtx target,
6490 rtx subtarget ATTRIBUTE_UNUSED,
6491 enum machine_mode mode ATTRIBUTE_UNUSED,
6492 int ignore ATTRIBUTE_UNUSED)
f2cc13dc 6493{
f2cc13dc 6494#define MAX_ARGS 2
6495
c2f47e15 6496 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
f2cc13dc 6497 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
c2f47e15 6498 tree arg;
6499 call_expr_arg_iterator iter;
f2cc13dc 6500 enum insn_code icode;
6501 rtx op[MAX_ARGS], pat;
6502 int arity;
938e069b 6503 bool nonvoid;
f2cc13dc 6504
6505 if (fcode >= ALPHA_BUILTIN_max)
6506 internal_error ("bad builtin fcode");
6507 icode = code_for_builtin[fcode];
6508 if (icode == 0)
6509 internal_error ("bad builtin fcode");
6510
938e069b 6511 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6512
c2f47e15 6513 arity = 0;
6514 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
f2cc13dc 6515 {
6516 const struct insn_operand_data *insn_op;
6517
f2cc13dc 6518 if (arg == error_mark_node)
6519 return NULL_RTX;
6520 if (arity > MAX_ARGS)
6521 return NULL_RTX;
6522
938e069b 6523 insn_op = &insn_data[icode].operand[arity + nonvoid];
6524
6525 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
f2cc13dc 6526
f2cc13dc 6527 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6528 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
c2f47e15 6529 arity++;
f2cc13dc 6530 }
6531
938e069b 6532 if (nonvoid)
6533 {
6534 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6535 if (!target
6536 || GET_MODE (target) != tmode
6537 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6538 target = gen_reg_rtx (tmode);
6539 }
f2cc13dc 6540
6541 switch (arity)
6542 {
6543 case 0:
6544 pat = GEN_FCN (icode) (target);
6545 break;
6546 case 1:
938e069b 6547 if (nonvoid)
6548 pat = GEN_FCN (icode) (target, op[0]);
6549 else
6550 pat = GEN_FCN (icode) (op[0]);
f2cc13dc 6551 break;
6552 case 2:
6553 pat = GEN_FCN (icode) (target, op[0], op[1]);
6554 break;
6555 default:
4d10b463 6556 gcc_unreachable ();
f2cc13dc 6557 }
6558 if (!pat)
6559 return NULL_RTX;
6560 emit_insn (pat);
6561
938e069b 6562 if (nonvoid)
6563 return target;
6564 else
6565 return const0_rtx;
f2cc13dc 6566}
849c7bc6 6567
6568
6569/* Several bits below assume HWI >= 64 bits. This should be enforced
6570 by config.gcc. */
6571#if HOST_BITS_PER_WIDE_INT < 64
6572# error "HOST_WIDE_INT too small"
6573#endif
6574
6575/* Fold the builtin for the CMPBGE instruction. This is a vector comparison
85c36fd1 6576 with an 8-bit output vector. OPINT contains the integer operands; bit N
849c7bc6 6577 of OP_CONST is set if OPINT[N] is valid. */
6578
6579static tree
6580alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6581{
6582 if (op_const == 3)
6583 {
6584 int i, val;
6585 for (i = 0, val = 0; i < 8; ++i)
6586 {
6587 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6588 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6589 if (c0 >= c1)
6590 val |= 1 << i;
6591 }
6592 return build_int_cst (long_integer_type_node, val);
6593 }
3def9653 6594 else if (op_const == 2 && opint[1] == 0)
849c7bc6 6595 return build_int_cst (long_integer_type_node, 0xff);
6596 return NULL;
6597}
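/* Worked sketch of the fold above:
   __builtin_alpha_cmpbge (0x0102030405060708, 0x0808080808080808)
   compares the operands byte by byte; only byte 0 satisfies
   0x08 >= 0x08, so the call folds to 1.  With a constant zero second
   operand every byte comparison succeeds, giving the 0xff special
   case.  */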
6598
6599/* Fold the builtin for the ZAPNOT instruction. This is essentially a
6600 specialized form of an AND operation. Other byte manipulation instructions
6601 are defined in terms of this instruction, so this is also used as a
6602 subroutine for other builtins.
6603
6604 OP contains the tree operands; OPINT contains the extracted integer values.
 6605 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6606 OPINT may be considered. */
6607
6608static tree
6609alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6610 long op_const)
6611{
6612 if (op_const & 2)
6613 {
6614 unsigned HOST_WIDE_INT mask = 0;
6615 int i;
6616
6617 for (i = 0; i < 8; ++i)
6618 if ((opint[1] >> i) & 1)
6619 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6620
6621 if (op_const & 1)
6622 return build_int_cst (long_integer_type_node, opint[0] & mask);
6623
6624 if (op)
b3da1868 6625 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6626 build_int_cst (long_integer_type_node, mask));
849c7bc6 6627 }
6628 else if ((op_const & 1) && opint[0] == 0)
6629 return build_int_cst (long_integer_type_node, 0);
6630 return NULL;
6631}
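/* Worked sketch: the byte mask 0x0f expands above to the bit mask
   0x00000000ffffffff, so __builtin_alpha_zapnot (x, 0x0f) folds to
   x & 0xffffffff, and __builtin_alpha_zap (x, 0x0f) -- which reaches
   here with the mask complemented by the dispatcher below -- folds
   to x & 0xffffffff00000000.  */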
6632
6633/* Fold the builtins for the EXT family of instructions. */
6634
6635static tree
6636alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6637 long op_const, unsigned HOST_WIDE_INT bytemask,
6638 bool is_high)
6639{
6640 long zap_const = 2;
6641 tree *zap_op = NULL;
6642
6643 if (op_const & 2)
6644 {
6645 unsigned HOST_WIDE_INT loc;
6646
6647 loc = opint[1] & 7;
6648 if (BYTES_BIG_ENDIAN)
6649 loc ^= 7;
6650 loc *= 8;
6651
6652 if (loc != 0)
6653 {
6654 if (op_const & 1)
6655 {
6656 unsigned HOST_WIDE_INT temp = opint[0];
6657 if (is_high)
6658 temp <<= loc;
6659 else
6660 temp >>= loc;
6661 opint[0] = temp;
6662 zap_const = 3;
6663 }
6664 }
6665 else
6666 zap_op = op;
6667 }
6668
6669 opint[1] = bytemask;
6670 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6671}
6672
6673/* Fold the builtins for the INS family of instructions. */
6674
6675static tree
6676alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6677 long op_const, unsigned HOST_WIDE_INT bytemask,
6678 bool is_high)
6679{
6680 if ((op_const & 1) && opint[0] == 0)
6681 return build_int_cst (long_integer_type_node, 0);
6682
6683 if (op_const & 2)
6684 {
6685 unsigned HOST_WIDE_INT temp, loc, byteloc;
6686 tree *zap_op = NULL;
6687
6688 loc = opint[1] & 7;
6689 if (BYTES_BIG_ENDIAN)
6690 loc ^= 7;
6691 bytemask <<= loc;
6692
6693 temp = opint[0];
6694 if (is_high)
6695 {
6696 byteloc = (64 - (loc * 8)) & 0x3f;
6697 if (byteloc == 0)
6698 zap_op = op;
6699 else
6700 temp >>= byteloc;
6701 bytemask >>= 8;
6702 }
6703 else
6704 {
6705 byteloc = loc * 8;
6706 if (byteloc == 0)
6707 zap_op = op;
6708 else
6709 temp <<= byteloc;
6710 }
6711
6712 opint[0] = temp;
6713 opint[1] = bytemask;
6714 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6715 }
6716
6717 return NULL;
6718}
6719
6720static tree
6721alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6722 long op_const, unsigned HOST_WIDE_INT bytemask,
6723 bool is_high)
6724{
6725 if (op_const & 2)
6726 {
6727 unsigned HOST_WIDE_INT loc;
6728
6729 loc = opint[1] & 7;
6730 if (BYTES_BIG_ENDIAN)
6731 loc ^= 7;
6732 bytemask <<= loc;
6733
6734 if (is_high)
6735 bytemask >>= 8;
6736
6737 opint[1] = bytemask ^ 0xff;
6738 }
6739
6740 return alpha_fold_builtin_zapnot (op, opint, op_const);
6741}
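/* Worked sketch tying the three families above together, for a
   constant byte offset of 2:
     extwl (x, 2) == (x >> 16) & 0xffff          bytemask 0x03
     inswl (x, 2) == (x & 0xffff) << 16          bytemask 0x03 << 2
     mskwl (x, 2) == x & ~(0xffffUL << 16)       zapnot of ~bytemask  */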
6742
6743static tree
6744alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6745{
6746 switch (op_const)
6747 {
6748 case 3:
6749 {
6750 unsigned HOST_WIDE_INT l;
6751 HOST_WIDE_INT h;
6752
6753 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6754
6755#if HOST_BITS_PER_WIDE_INT > 64
6756# error fixme
6757#endif
6758
6759 return build_int_cst (long_integer_type_node, h);
6760 }
6761
6762 case 1:
6763 opint[1] = opint[0];
6764 /* FALLTHRU */
6765 case 2:
6766 /* Note that (X*1) >> 64 == 0. */
6767 if (opint[1] == 0 || opint[1] == 1)
6768 return build_int_cst (long_integer_type_node, 0);
6769 break;
6770 }
6771 return NULL;
6772}
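/* Worked sketch: UMULH yields the high 64 bits of the full 128-bit
   product, so __builtin_alpha_umulh (1UL << 32, 1UL << 32) folds to
   2^64 >> 64 == 1, and a product by 0 or 1 always folds to 0.  */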
6773
6774static tree
6775alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6776{
6777 tree op0 = fold_convert (vtype, op[0]);
6778 tree op1 = fold_convert (vtype, op[1]);
b3da1868 6779 tree val = fold_build2 (code, vtype, op0, op1);
849c7bc6 6780 return fold_convert (long_integer_type_node, val);
6781}
6782
6783static tree
6784alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6785{
6786 unsigned HOST_WIDE_INT temp = 0;
6787 int i;
6788
6789 if (op_const != 3)
6790 return NULL;
6791
6792 for (i = 0; i < 8; ++i)
6793 {
6794 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6795 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6796 if (a >= b)
6797 temp += a - b;
6798 else
6799 temp += b - a;
6800 }
6801
6802 return build_int_cst (long_integer_type_node, temp);
6803}
6804
6805static tree
6806alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6807{
6808 unsigned HOST_WIDE_INT temp;
6809
6810 if (op_const == 0)
6811 return NULL;
6812
6813 temp = opint[0] & 0xff;
6814 temp |= (opint[0] >> 24) & 0xff00;
6815
6816 return build_int_cst (long_integer_type_node, temp);
6817}
6818
6819static tree
6820alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6821{
6822 unsigned HOST_WIDE_INT temp;
6823
6824 if (op_const == 0)
6825 return NULL;
6826
6827 temp = opint[0] & 0xff;
6828 temp |= (opint[0] >> 8) & 0xff00;
6829 temp |= (opint[0] >> 16) & 0xff0000;
6830 temp |= (opint[0] >> 24) & 0xff000000;
6831
6832 return build_int_cst (long_integer_type_node, temp);
6833}
6834
6835static tree
6836alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6837{
6838 unsigned HOST_WIDE_INT temp;
6839
6840 if (op_const == 0)
6841 return NULL;
6842
6843 temp = opint[0] & 0xff;
6844 temp |= (opint[0] & 0xff00) << 24;
6845
6846 return build_int_cst (long_integer_type_node, temp);
6847}
6848
6849static tree
6850alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6851{
6852 unsigned HOST_WIDE_INT temp;
6853
6854 if (op_const == 0)
6855 return NULL;
6856
6857 temp = opint[0] & 0xff;
6858 temp |= (opint[0] & 0x0000ff00) << 8;
6859 temp |= (opint[0] & 0x00ff0000) << 16;
6860 temp |= (opint[0] & 0xff000000) << 24;
6861
6862 return build_int_cst (long_integer_type_node, temp);
6863}
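/* Worked sketch: PKLB packs source bytes 0 and 4 into result bytes
   0 and 1, and UNPKBL inverts it, e.g.
     pklb (0x0000001100000022) == 0x1122
     unpkbl (0x1122) == 0x0000001100000022
   PKWB/UNPKBW do the same with source bytes 0, 2, 4 and 6.  */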
6864
6865static tree
6866alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6867{
6868 unsigned HOST_WIDE_INT temp;
6869
6870 if (op_const == 0)
6871 return NULL;
6872
6873 if (opint[0] == 0)
6874 temp = 64;
6875 else
6876 temp = exact_log2 (opint[0] & -opint[0]);
6877
6878 return build_int_cst (long_integer_type_node, temp);
6879}
6880
6881static tree
6882alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6883{
6884 unsigned HOST_WIDE_INT temp;
6885
6886 if (op_const == 0)
6887 return NULL;
6888
6889 if (opint[0] == 0)
6890 temp = 64;
6891 else
6892 temp = 64 - floor_log2 (opint[0]) - 1;
6893
6894 return build_int_cst (long_integer_type_node, temp);
6895}
6896
6897static tree
6898alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6899{
6900 unsigned HOST_WIDE_INT temp, op;
6901
6902 if (op_const == 0)
6903 return NULL;
6904
6905 op = opint[0];
6906 temp = 0;
6907 while (op)
6908 temp++, op &= op - 1;
6909
6910 return build_int_cst (long_integer_type_node, temp);
6911}
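/* Worked sketches for the three CIX folds above (each defined to
   give 64 for a zero operand, matching the hardware):
     cttz (0x10) == 4       exact_log2 of the lowest set bit
     ctlz (0x10) == 59      64 - floor_log2 (0x10) - 1
     ctpop (0xff) == 8      one loop iteration per set bit  */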
6912
6913/* Fold one of our builtin functions. */
6914
6915static tree
0ab8af67 6916alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
849c7bc6 6917{
849c7bc6 6918 tree op[MAX_ARGS], t;
6919 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6920 long op_const = 0, arity = 0;
6921
0ab8af67 6922 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
849c7bc6 6923 {
6924 tree arg = TREE_VALUE (t);
6925 if (arg == error_mark_node)
6926 return NULL;
6927 if (arity >= MAX_ARGS)
6928 return NULL;
6929
6930 op[arity] = arg;
6931 opint[arity] = 0;
6932 if (TREE_CODE (arg) == INTEGER_CST)
6933 {
6934 op_const |= 1L << arity;
6935 opint[arity] = int_cst_value (arg);
6936 }
6937 }
6938
6939 switch (DECL_FUNCTION_CODE (fndecl))
6940 {
6941 case ALPHA_BUILTIN_CMPBGE:
6942 return alpha_fold_builtin_cmpbge (opint, op_const);
6943
6944 case ALPHA_BUILTIN_EXTBL:
6945 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6946 case ALPHA_BUILTIN_EXTWL:
6947 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6948 case ALPHA_BUILTIN_EXTLL:
6949 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6950 case ALPHA_BUILTIN_EXTQL:
6951 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6952 case ALPHA_BUILTIN_EXTWH:
6953 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6954 case ALPHA_BUILTIN_EXTLH:
6955 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6956 case ALPHA_BUILTIN_EXTQH:
6957 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6958
6959 case ALPHA_BUILTIN_INSBL:
6960 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6961 case ALPHA_BUILTIN_INSWL:
6962 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6963 case ALPHA_BUILTIN_INSLL:
6964 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6965 case ALPHA_BUILTIN_INSQL:
6966 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6967 case ALPHA_BUILTIN_INSWH:
6968 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6969 case ALPHA_BUILTIN_INSLH:
6970 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6971 case ALPHA_BUILTIN_INSQH:
6972 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6973
6974 case ALPHA_BUILTIN_MSKBL:
6975 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6976 case ALPHA_BUILTIN_MSKWL:
6977 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6978 case ALPHA_BUILTIN_MSKLL:
6979 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6980 case ALPHA_BUILTIN_MSKQL:
6981 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6982 case ALPHA_BUILTIN_MSKWH:
6983 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6984 case ALPHA_BUILTIN_MSKLH:
6985 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6986 case ALPHA_BUILTIN_MSKQH:
6987 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6988
6989 case ALPHA_BUILTIN_UMULH:
6990 return alpha_fold_builtin_umulh (opint, op_const);
6991
6992 case ALPHA_BUILTIN_ZAP:
6993 opint[1] ^= 0xff;
6994 /* FALLTHRU */
6995 case ALPHA_BUILTIN_ZAPNOT:
6996 return alpha_fold_builtin_zapnot (op, opint, op_const);
6997
6998 case ALPHA_BUILTIN_MINUB8:
6999 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7000 case ALPHA_BUILTIN_MINSB8:
7001 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7002 case ALPHA_BUILTIN_MINUW4:
7003 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7004 case ALPHA_BUILTIN_MINSW4:
7005 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7006 case ALPHA_BUILTIN_MAXUB8:
7007 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7008 case ALPHA_BUILTIN_MAXSB8:
7009 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7010 case ALPHA_BUILTIN_MAXUW4:
7011 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7012 case ALPHA_BUILTIN_MAXSW4:
7013 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7014
7015 case ALPHA_BUILTIN_PERR:
7016 return alpha_fold_builtin_perr (opint, op_const);
7017 case ALPHA_BUILTIN_PKLB:
7018 return alpha_fold_builtin_pklb (opint, op_const);
7019 case ALPHA_BUILTIN_PKWB:
7020 return alpha_fold_builtin_pkwb (opint, op_const);
7021 case ALPHA_BUILTIN_UNPKBL:
7022 return alpha_fold_builtin_unpkbl (opint, op_const);
7023 case ALPHA_BUILTIN_UNPKBW:
7024 return alpha_fold_builtin_unpkbw (opint, op_const);
7025
7026 case ALPHA_BUILTIN_CTTZ:
7027 return alpha_fold_builtin_cttz (opint, op_const);
7028 case ALPHA_BUILTIN_CTLZ:
7029 return alpha_fold_builtin_ctlz (opint, op_const);
7030 case ALPHA_BUILTIN_CTPOP:
7031 return alpha_fold_builtin_ctpop (opint, op_const);
7032
7033 case ALPHA_BUILTIN_AMASK:
7034 case ALPHA_BUILTIN_IMPLVER:
7035 case ALPHA_BUILTIN_RPCC:
7036 case ALPHA_BUILTIN_THREAD_POINTER:
7037 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7038 /* None of these are foldable at compile-time. */
7039 default:
7040 return NULL;
7041 }
7042}
f2cc13dc 7043\f
bf2a98b3 7044/* This page contains routines that are used to determine what the function
7045 prologue and epilogue code will do and write them out. */
7046
7047/* Compute the size of the save area in the stack. */
7048
8df4a58b 7049/* These variables are used for communication between the following functions.
7050 They indicate various things about the current function being compiled
7051 that are used to tell what kind of prologue, epilogue and procedure
efee20da 7052 descriptor to generate. */
8df4a58b 7053
7054/* The kind of procedure (null, register or stack frame) we need. */
b19d7ab1 7055enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7056static enum alpha_procedure_types alpha_procedure_type;
8df4a58b 7057
7058/* Register number (either FP or SP) that is used to unwind the frame. */
b9a5aa8e 7059static int vms_unwind_regno;
8df4a58b 7060
7061/* Register number used to save FP. We need not have one for RA since
7062 we don't modify it for register procedures. This is only defined
7063 for register frame procedures. */
b9a5aa8e 7064static int vms_save_fp_regno;
8df4a58b 7065
7066/* Register number used to reference objects off our PV. */
b9a5aa8e 7067static int vms_base_regno;
8df4a58b 7068
2cf1388a 7069/* Compute register masks for saved registers. */
8df4a58b 7070
7071static void
92643d95 7072alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
8df4a58b 7073{
7074 unsigned long imask = 0;
7075 unsigned long fmask = 0;
1f0ce6a6 7076 unsigned int i;
8df4a58b 7077
eaa112a0 7078 /* When outputting a thunk, we don't have valid register life info,
7079 but assemble_start_function wants to output .frame and .mask
7080 directives. */
7081 if (current_function_is_thunk)
2cf1388a 7082 {
961d6ddd 7083 *imaskP = 0;
7084 *fmaskP = 0;
7085 return;
7086 }
8df4a58b 7087
b19d7ab1 7088 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
df7d0d23 7089 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
8df4a58b 7090
961d6ddd 7091 /* One for every register we have to save. */
7092 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7093 if (! fixed_regs[i] && ! call_used_regs[i]
3072d30e 7094 && df_regs_ever_live_p (i) && i != REG_RA
961d6ddd 7095 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7096 {
7097 if (i < 32)
df7d0d23 7098 imask |= (1UL << i);
961d6ddd 7099 else
df7d0d23 7100 fmask |= (1UL << (i - 32));
961d6ddd 7101 }
7102
7103 /* We need to restore these for the handler. */
7104 if (current_function_calls_eh_return)
c49ad9ef 7105 {
7106 for (i = 0; ; ++i)
7107 {
7108 unsigned regno = EH_RETURN_DATA_REGNO (i);
7109 if (regno == INVALID_REGNUM)
7110 break;
7111 imask |= 1UL << regno;
7112 }
c49ad9ef 7113 }
9e7454d0 7114
961d6ddd 7115 /* If any register spilled, then spill the return address also. */
7116 /* ??? This is required by the Digital stack unwind specification
7117 and isn't needed if we're doing Dwarf2 unwinding. */
7118 if (imask || fmask || alpha_ra_ever_killed ())
df7d0d23 7119 imask |= (1UL << REG_RA);
b9a5aa8e 7120
8df4a58b 7121 *imaskP = imask;
7122 *fmaskP = fmask;
8df4a58b 7123}
7124
7125int
92643d95 7126alpha_sa_size (void)
8df4a58b 7127{
5aae9d06 7128 unsigned long mask[2];
8df4a58b 7129 int sa_size = 0;
5aae9d06 7130 int i, j;
8df4a58b 7131
5aae9d06 7132 alpha_sa_mask (&mask[0], &mask[1]);
7133
7134 if (TARGET_ABI_UNICOSMK)
7135 {
7136 if (mask[0] || mask[1])
7137 sa_size = 14;
7138 }
2cf1388a 7139 else
2cf1388a 7140 {
5aae9d06 7141 for (j = 0; j < 2; ++j)
7142 for (i = 0; i < 32; ++i)
7143 if ((mask[j] >> i) & 1)
7144 sa_size++;
2cf1388a 7145 }
8df4a58b 7146
9caef960 7147 if (TARGET_ABI_UNICOSMK)
7148 {
7149 /* We might not need to generate a frame if we don't make any calls
7150 (including calls to __T3E_MISMATCH if this is a vararg function),
7151 don't have any local variables which require stack slots, don't
7152 use alloca and have not determined that we need a frame for other
7153 reasons. */
7154
b19d7ab1 7155 alpha_procedure_type
7156 = (sa_size || get_frame_size() != 0
7ccc713a 7157 || current_function_outgoing_args_size
b19d7ab1 7158 || current_function_stdarg || current_function_calls_alloca
7159 || frame_pointer_needed)
7160 ? PT_STACK : PT_REGISTER;
9caef960 7161
7162 /* Always reserve space for saving callee-saved registers if we
7163 need a frame as required by the calling convention. */
b19d7ab1 7164 if (alpha_procedure_type == PT_STACK)
9caef960 7165 sa_size = 14;
7166 }
7167 else if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7168 {
7169 /* Start by assuming we can use a register procedure if we don't
7170 make any calls (REG_RA not used) or need to save any
7171 registers and a stack procedure if we do. */
b19d7ab1 7172 if ((mask[0] >> REG_RA) & 1)
7173 alpha_procedure_type = PT_STACK;
7174 else if (get_frame_size() != 0)
7175 alpha_procedure_type = PT_REGISTER;
7176 else
7177 alpha_procedure_type = PT_NULL;
5aae9d06 7178
2ab60bb1 7179 /* Don't reserve space for saving FP & RA yet. Do that later after we've
5aae9d06 7180 made the final decision on stack procedure vs register procedure. */
b19d7ab1 7181 if (alpha_procedure_type == PT_STACK)
2ab60bb1 7182 sa_size -= 2;
b9a5aa8e 7183
7184 /* Decide whether to refer to objects off our PV via FP or PV.
7185 If we need FP for something else or if we receive a nonlocal
7186 goto (which expects PV to contain the value), we must use PV.
7187 Otherwise, start by assuming we can use FP. */
b19d7ab1 7188
7189 vms_base_regno
7190 = (frame_pointer_needed
7191 || current_function_has_nonlocal_label
7192 || alpha_procedure_type == PT_STACK
7193 || current_function_outgoing_args_size)
7194 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
b9a5aa8e 7195
7196 /* If we want to copy PV into FP, we need to find some register
7197 in which to save FP. */
7198
7199 vms_save_fp_regno = -1;
7200 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7201 for (i = 0; i < 32; i++)
3072d30e 7202 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
b9a5aa8e 7203 vms_save_fp_regno = i;
7204
b19d7ab1 7205 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7206 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7207 else if (alpha_procedure_type == PT_NULL)
7208 vms_base_regno = REG_PV;
b9a5aa8e 7209
7210 /* Stack unwinding should be done via FP unless we use it for PV. */
7211 vms_unwind_regno = (vms_base_regno == REG_PV
7212 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7213
7214 /* If this is a stack procedure, allow space for saving FP and RA. */
b19d7ab1 7215 if (alpha_procedure_type == PT_STACK)
b9a5aa8e 7216 sa_size += 2;
7217 }
7218 else
7219 {
b9a5aa8e 7220 /* Our size must be even (multiple of 16 bytes). */
7221 if (sa_size & 1)
7222 sa_size++;
7223 }
8df4a58b 7224
7225 return sa_size * 8;
7226}
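
/* A worked example of the computation above (illustrative register
   choice, not from any particular function): an OSF function that
   must save $9, $10 and the return address $26 counts three
   registers in the mask loop; the parity fixup pads that to four so
   the save area stays a multiple of 16 bytes, and alpha_sa_size
   returns 4 * 8 = 32.  */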
7227
4310aa50 7228/* Define the offset between two registers, one to be eliminated,
7229 and the other its replacement, at the start of a routine. */
7230
7231HOST_WIDE_INT
92643d95 7232alpha_initial_elimination_offset (unsigned int from,
7233 unsigned int to ATTRIBUTE_UNUSED)
4310aa50 7234{
7235 HOST_WIDE_INT ret;
7236
7237 ret = alpha_sa_size ();
7238 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7239
4d10b463 7240 switch (from)
7241 {
7242 case FRAME_POINTER_REGNUM:
7243 break;
7244
7245 case ARG_POINTER_REGNUM:
7246 ret += (ALPHA_ROUND (get_frame_size ()
7247 + current_function_pretend_args_size)
7248 - current_function_pretend_args_size);
7249 break;
7250
7251 default:
7252 gcc_unreachable ();
7253 }
4310aa50 7254
7255 return ret;
7256}
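
/* Continuing the example above with assumed sizes: a 32 byte save
   area, 16 bytes of outgoing args, 40 bytes of locals and no pretend
   args give ret = 32 + 16 = 48 when eliminating to the frame
   pointer; eliminating the arg pointer adds ALPHA_ROUND (40) = 48
   more, for a total offset of 96.  */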
7257
8df4a58b 7258int
92643d95 7259alpha_pv_save_size (void)
8df4a58b 7260{
7261 alpha_sa_size ();
b19d7ab1 7262 return alpha_procedure_type == PT_STACK ? 8 : 0;
8df4a58b 7263}
7264
7265int
92643d95 7266alpha_using_fp (void)
8df4a58b 7267{
7268 alpha_sa_size ();
b9a5aa8e 7269 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
8df4a58b 7270}
7271
1467e953 7272#if TARGET_ABI_OPEN_VMS
2d280039 7273
e3c541f0 7274const struct attribute_spec vms_attribute_table[] =
bf2a98b3 7275{
e3c541f0 7276 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
c64a8830 7277 { "overlaid", 0, 0, true, false, false, NULL },
7278 { "global", 0, 0, true, false, false, NULL },
7279 { "initialize", 0, 0, true, false, false, NULL },
7280 { NULL, 0, 0, false, false, false, NULL }
e3c541f0 7281};
bf2a98b3 7282
2d280039 7283#endif
7284
1f0ce6a6 7285static int
92643d95 7286find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
1f0ce6a6 7287{
a3859c0f 7288 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7289}
7290
7291int
92643d95 7292alpha_find_lo_sum_using_gp (rtx insn)
a3859c0f 7293{
7294 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
1f0ce6a6 7295}
7296
b9a5aa8e 7297static int
92643d95 7298alpha_does_function_need_gp (void)
b9a5aa8e 7299{
7300 rtx insn;
bf2a98b3 7301
9caef960 7302 /* The GP being variable is an OSF ABI thing. */
7303 if (! TARGET_ABI_OSF)
b9a5aa8e 7304 return 0;
bf2a98b3 7305
008fdc59 7306 /* We need the gp to load the address of __mcount. */
7811c823 7307 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
b9a5aa8e 7308 return 1;
0e0a0e7a 7309
008fdc59 7310 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
2cf1388a 7311 if (current_function_is_thunk)
7312 return 1;
2cf1388a 7313
008fdc59 7314 /* The nonlocal receiver pattern assumes that the gp is valid for
7315 the nested function. Reasonable because it's almost always set
7316 correctly already. For the cases where that's wrong, make sure
7317 the nested function loads its gp on entry. */
7318 if (current_function_has_nonlocal_goto)
7319 return 1;
7320
9e7454d0 7321 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
b9a5aa8e 7322 Even if we are a static function, we still need to do this in case
7323 our address is taken and passed to something like qsort. */
bf2a98b3 7324
b9a5aa8e 7325 push_topmost_sequence ();
7326 insn = get_insns ();
7327 pop_topmost_sequence ();
8df4a58b 7328
b9a5aa8e 7329 for (; insn; insn = NEXT_INSN (insn))
9204e736 7330 if (INSN_P (insn)
449b6e20 7331 && ! JUMP_TABLE_DATA_P (insn)
b9a5aa8e 7332 && GET_CODE (PATTERN (insn)) != USE
a3859c0f 7333 && GET_CODE (PATTERN (insn)) != CLOBBER
7334 && get_attr_usegp (insn))
7335 return 1;
bf2a98b3 7336
b9a5aa8e 7337 return 0;
bf2a98b3 7338}
7339
7d73bc2a 7340\f
5a965225 7341/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7342 sequences. */
7343
7344static rtx
92643d95 7345set_frame_related_p (void)
5a965225 7346{
31d3e01c 7347 rtx seq = get_insns ();
7348 rtx insn;
7349
5a965225 7350 end_sequence ();
7351
31d3e01c 7352 if (!seq)
7353 return NULL_RTX;
7354
7355 if (INSN_P (seq))
5a965225 7356 {
31d3e01c 7357 insn = seq;
7358 while (insn != NULL_RTX)
7359 {
7360 RTX_FRAME_RELATED_P (insn) = 1;
7361 insn = NEXT_INSN (insn);
7362 }
7363 seq = emit_insn (seq);
5a965225 7364 }
7365 else
7366 {
7367 seq = emit_insn (seq);
7368 RTX_FRAME_RELATED_P (seq) = 1;
5a965225 7369 }
31d3e01c 7370 return seq;
5a965225 7371}
7372
7373#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
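
/* So, for example, FRP (emit_move_insn (hard_frame_pointer_rtx,
   stack_pointer_rtx)) both emits the move and marks it (or each insn
   of the sequence it expands to) with RTX_FRAME_RELATED_P, which is
   what lets dwarf2out see the CFA change.  */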
7374
fb0de38e 7375/* Generates a store with the proper unwind info attached. VALUE is
183f1993 7376 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
fb0de38e 7377 contains SP+FRAME_BIAS, and that is the unwind info that should be
7378 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7379 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7380
7381static void
7382emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7383 HOST_WIDE_INT base_ofs, rtx frame_reg)
7384{
7385 rtx addr, mem, insn;
7386
7387 addr = plus_constant (base_reg, base_ofs);
7388 mem = gen_rtx_MEM (DImode, addr);
7389 set_mem_alias_set (mem, alpha_sr_alias_set);
7390
7391 insn = emit_move_insn (mem, value);
7392 RTX_FRAME_RELATED_P (insn) = 1;
7393
7394 if (frame_bias || value != frame_reg)
7395 {
7396 if (frame_bias)
7397 {
7398 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7399 mem = gen_rtx_MEM (DImode, addr);
7400 }
7401
7402 REG_NOTES (insn)
7403 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7404 gen_rtx_SET (VOIDmode, mem, frame_reg),
7405 REG_NOTES (insn));
7406 }
7407}
7408
7409static void
7410emit_frame_store (unsigned int regno, rtx base_reg,
7411 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7412{
7413 rtx reg = gen_rtx_REG (DImode, regno);
7414 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7415}
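
/* For example, the prologue below calls
   emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset)
   to save the return address; when the save area is addressed
   through the $24 temporary, the nonzero SA_BIAS makes the unwind
   info still describe the slot relative to SP.  */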
7416
bf2a98b3 7417/* Write function prologue. */
7418
8df4a58b 7419/* On vms we have two kinds of functions:
7420
7421 - stack frame (PROC_STACK)
7422 these are 'normal' functions with local vars and which are
7423 calling other functions
7424 - register frame (PROC_REGISTER)
7425 keeps all data in registers, needs no stack
7426
7427 We must pass this to the assembler so it can generate the
7428 proper pdsc (procedure descriptor)
7429 This is done with the '.pdesc' command.
7430
b9a5aa8e 7431 On non-VMS targets, we don't really differentiate between the two, as we can
7432 simply allocate stack without saving registers. */
8df4a58b 7433
7434void
92643d95 7435alpha_expand_prologue (void)
8df4a58b 7436{
b9a5aa8e 7437 /* Registers to save. */
8df4a58b 7438 unsigned long imask = 0;
7439 unsigned long fmask = 0;
7440 /* Stack space needed for pushing registers clobbered by us. */
7441 HOST_WIDE_INT sa_size;
7442 /* Complete stack size needed. */
7443 HOST_WIDE_INT frame_size;
7444 /* Offset from base reg to register save area. */
b9a5aa8e 7445 HOST_WIDE_INT reg_offset;
fb0de38e 7446 rtx sa_reg;
8df4a58b 7447 int i;
7448
7449 sa_size = alpha_sa_size ();
8df4a58b 7450
b9a5aa8e 7451 frame_size = get_frame_size ();
1467e953 7452 if (TARGET_ABI_OPEN_VMS)
9e7454d0 7453 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7454 + (alpha_procedure_type == PT_STACK ? 8 : 0)
b9a5aa8e 7455 + frame_size
7456 + current_function_pretend_args_size);
9caef960 7457 else if (TARGET_ABI_UNICOSMK)
7458 /* We have to allocate space for the DSIB if we generate a frame. */
7459 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7460 + (alpha_procedure_type == PT_STACK ? 48 : 0))
9caef960 7461 + ALPHA_ROUND (frame_size
7462 + current_function_outgoing_args_size);
b9a5aa8e 7463 else
7464 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7465 + sa_size
7466 + ALPHA_ROUND (frame_size
7467 + current_function_pretend_args_size));
8df4a58b 7468
1467e953 7469 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7470 reg_offset = 8;
7471 else
7472 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8df4a58b 7473
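/* A worked OSF layout under the assumed sizes used earlier: 16 bytes
   of outgoing args, a 32 byte save area and 48 bytes of rounded
   locals give frame_size = 96 and reg_offset = 16, i.e.

	sp+48 .. sp+95	locals (and rounded pretend args)
	sp+16 .. sp+47	saved registers
	sp+0  .. sp+15	outgoing args  */
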
b9a5aa8e 7474 alpha_sa_mask (&imask, &fmask);
8df4a58b 7475
a314eb5e 7476 /* Emit an insn to reload GP, if needed. */
1467e953 7477 if (TARGET_ABI_OSF)
a314eb5e 7478 {
7479 alpha_function_needs_gp = alpha_does_function_need_gp ();
7480 if (alpha_function_needs_gp)
7481 emit_insn (gen_prologue_ldgp ());
7482 }
7483
30dceb30 7484 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7485 the call to mcount ourselves, rather than having the linker do it
7486 magically in response to -pg. Since _mcount has special linkage,
7487 don't represent the call as a call. */
7811c823 7488 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
30dceb30 7489 emit_insn (gen_prologue_mcount ());
9caef960 7490
7491 if (TARGET_ABI_UNICOSMK)
7492 unicosmk_gen_dsib (&imask);
7493
8df4a58b 7494 /* Adjust the stack by the frame size. If the frame size is > 4096
7495 bytes, we need to be sure we probe somewhere in the first and last
7496 4096 bytes (we can probably get away without the latter test) and
7497 every 8192 bytes in between. If the frame size is > 32768, we
7498 do this in a loop. Otherwise, we generate the explicit probe
9e7454d0 7499 instructions.
8df4a58b 7500
7501 Note that we are only allowed to adjust sp once in the prologue. */
7502
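/* To illustrate with assumed sizes: a 20000 byte frame takes the
   small-frame path below and probes at sp-4096 and sp-12288, plus a
   final probe at sp-20000 when no registers are being saved. A
   100000 byte frame instead uses the loop, with
   blocks = (100000 + 4096) / 8192 = 12 and leftover = 5792 bytes to
   adjust once the loop finishes.  */
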
b9a5aa8e 7503 if (frame_size <= 32768)
8df4a58b 7504 {
7505 if (frame_size > 4096)
7506 {
baf8b2cc 7507 int probed;
8df4a58b 7508
baf8b2cc 7509 for (probed = 4096; probed < frame_size; probed += 8192)
9caef960 7510 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7511 ? -probed + 64
7512 : -probed)));
8df4a58b 7513
7514 /* We only have to do this probe if we aren't saving registers. */
baf8b2cc 7515 if (sa_size == 0 && frame_size > probed - 4096)
b9a5aa8e 7516 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
8df4a58b 7517 }
7518
7519 if (frame_size != 0)
205b281f 7520 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9caef960 7521 GEN_INT (TARGET_ABI_UNICOSMK
7522 ? -frame_size + 64
7523 : -frame_size))));
8df4a58b 7524 }
7525 else
7526 {
b9a5aa8e 7527 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
8df4a58b 7528 number of 8192 byte blocks to probe. We then probe each block
7529 in the loop and then set SP to the proper location. If the
7530 amount remaining is > 4096, we have to do one more probe if we
7531 are not saving any registers. */
7532
7533 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7534 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
b9a5aa8e 7535 rtx ptr = gen_rtx_REG (DImode, 22);
7536 rtx count = gen_rtx_REG (DImode, 23);
cd28cb76 7537 rtx seq;
8df4a58b 7538
b9a5aa8e 7539 emit_move_insn (count, GEN_INT (blocks));
9caef960 7540 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7541 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
8df4a58b 7542
b9a5aa8e 7543 /* Because of the difficulty in emitting a new basic block this
7544 late in the compilation, generate the loop as a single insn. */
7545 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
8df4a58b 7546
7547 if (leftover > 4096 && sa_size == 0)
b9a5aa8e 7548 {
7549 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7550 MEM_VOLATILE_P (last) = 1;
7551 emit_move_insn (last, const0_rtx);
7552 }
8df4a58b 7553
1467e953 7554 if (TARGET_ABI_WINDOWS_NT)
f88f2646 7555 {
7556 /* For NT stack unwind (done by 'reverse execution'), it's
7557 not OK to take the result of a loop, even though the value
7558 is already in ptr, so we reload it via a single operation
9e7454d0 7559 and subtract it from sp.
cd28cb76 7560
7561 Yes, that's correct -- we have to reload the whole constant
df9e12ce 7562 into a temporary via ldah+lda then subtract from sp. */
f88f2646 7563
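/* A worked example of the split below (illustrative value): for
   frame_size = 0x12348000,
   lo = ((0x12348000 & 0xffff) ^ 0x8000) - 0x8000 = -0x8000
   fits the signed 16 bit lda field, and
   hi = 0x12348000 - (-0x8000) = 0x12350000
   is a multiple of 0x10000 that a single ldah can materialize.  */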
7564 HOST_WIDE_INT lo, hi;
05bea6dd 7565 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7566 hi = frame_size - lo;
5a965225 7567
cd28cb76 7568 emit_move_insn (ptr, GEN_INT (hi));
df9e12ce 7569 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
cd28cb76 7570 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7571 ptr));
f88f2646 7572 }
7573 else
7574 {
f88f2646 7575 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7576 GEN_INT (-leftover)));
f88f2646 7577 }
cd28cb76 7578
7579 /* This alternative is special, because the DWARF code cannot
7580 possibly intuit through the loop above. So we invent this
7581 note for it to look at instead. */
7582 RTX_FRAME_RELATED_P (seq) = 1;
7583 REG_NOTES (seq)
7584 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7585 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7586 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
9caef960 7587 GEN_INT (TARGET_ABI_UNICOSMK
7588 ? -frame_size + 64
7589 : -frame_size))),
cd28cb76 7590 REG_NOTES (seq));
8df4a58b 7591 }
7592
9caef960 7593 if (!TARGET_ABI_UNICOSMK)
8df4a58b 7594 {
fb0de38e 7595 HOST_WIDE_INT sa_bias = 0;
7596
9caef960 7597 /* Cope with very large offsets to the register save area. */
7598 sa_reg = stack_pointer_rtx;
7599 if (reg_offset + sa_size > 0x8000)
7600 {
7601 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
fb0de38e 7602 rtx sa_bias_rtx;
8df4a58b 7603
9caef960 7604 if (low + sa_size <= 0x8000)
fb0de38e 7605 sa_bias = reg_offset - low, reg_offset = low;
9e7454d0 7606 else
fb0de38e 7607 sa_bias = reg_offset, reg_offset = 0;
8df4a58b 7608
9caef960 7609 sa_reg = gen_rtx_REG (DImode, 24);
fb0de38e 7610 sa_bias_rtx = GEN_INT (sa_bias);
7611
7612 if (add_operand (sa_bias_rtx, DImode))
7613 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7614 else
7615 {
7616 emit_move_insn (sa_reg, sa_bias_rtx);
7617 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7618 }
9caef960 7619 }
9e7454d0 7620
9caef960 7621 /* Save regs in stack order, beginning with the VMS PV. */
b19d7ab1 7622 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
fb0de38e 7623 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
8df4a58b 7624
9caef960 7625 /* Save register RA next. */
df7d0d23 7626 if (imask & (1UL << REG_RA))
9caef960 7627 {
fb0de38e 7628 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
df7d0d23 7629 imask &= ~(1UL << REG_RA);
9caef960 7630 reg_offset += 8;
7631 }
8df4a58b 7632
9caef960 7633 /* Now save any other registers required to be saved. */
c49ad9ef 7634 for (i = 0; i < 31; i++)
df7d0d23 7635 if (imask & (1UL << i))
9caef960 7636 {
fb0de38e 7637 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
9caef960 7638 reg_offset += 8;
7639 }
8df4a58b 7640
c49ad9ef 7641 for (i = 0; i < 31; i++)
df7d0d23 7642 if (fmask & (1UL << i))
9caef960 7643 {
fb0de38e 7644 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
9caef960 7645 reg_offset += 8;
7646 }
7647 }
b19d7ab1 7648 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
9caef960 7649 {
7650 /* The standard frame on the T3E includes space for saving registers.
7651 We just have to use it. We don't have to save the return address and
7652 the old frame pointer here - they are saved in the DSIB. */
7653
7654 reg_offset = -56;
7655 for (i = 9; i < 15; i++)
df7d0d23 7656 if (imask & (1UL << i))
9caef960 7657 {
fb0de38e 7658 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
9caef960 7659 reg_offset -= 8;
7660 }
7661 for (i = 2; i < 10; i++)
df7d0d23 7662 if (fmask & (1UL << i))
9caef960 7663 {
fb0de38e 7664 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
9caef960 7665 reg_offset -= 8;
7666 }
7667 }
8df4a58b 7668
1467e953 7669 if (TARGET_ABI_OPEN_VMS)
8df4a58b 7670 {
b19d7ab1 7671 if (alpha_procedure_type == PT_REGISTER)
7672 /* Register frame procedures save the fp.
7673 ?? Ought to have a dwarf2 save for this. */
6d50e356 7674 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7675 hard_frame_pointer_rtx);
8df4a58b 7676
b19d7ab1 7677 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
6d50e356 7678 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7679 gen_rtx_REG (DImode, REG_PV)));
8df4a58b 7680
b19d7ab1 7681 if (alpha_procedure_type != PT_NULL
7682 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
205b281f 7683 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8df4a58b 7684
b9a5aa8e 7685 /* If we have to allocate space for outgoing args, do it now. */
7686 if (current_function_outgoing_args_size != 0)
81a5b286 7687 {
7688 rtx seq
9e7454d0 7689 = emit_move_insn (stack_pointer_rtx,
81a5b286 7690 plus_constant
7691 (hard_frame_pointer_rtx,
7692 - (ALPHA_ROUND
7693 (current_function_outgoing_args_size))));
9e7454d0 7694
81a5b286 7695 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7696 if ! frame_pointer_needed. Setting the bit will change the CFA
7697 computation rule to use sp again, which would be wrong if we had
7698 frame_pointer_needed, as this means sp might move unpredictably
7699 later on.
7700
7701 Also, note that
7702 frame_pointer_needed
7703 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7704 and
7705 current_function_outgoing_args_size != 0
7706 => alpha_procedure_type != PT_NULL,
7707
7708 so when we are not setting the bit here, we are guaranteed to
5910bb95 7709 have emitted an FRP frame pointer update just before. */
81a5b286 7710 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7711 }
b9a5aa8e 7712 }
9caef960 7713 else if (!TARGET_ABI_UNICOSMK)
b9a5aa8e 7714 {
7715 /* If we need a frame pointer, set it from the stack pointer. */
7716 if (frame_pointer_needed)
7717 {
7718 if (TARGET_CAN_FAULT_IN_PROLOGUE)
5a965225 7719 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8df4a58b 7720 else
205b281f 7721 /* This must always be the last instruction in the
7722 prologue, thus we emit a special move + clobber. */
5a965225 7723 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7724 stack_pointer_rtx, sa_reg)));
8df4a58b 7725 }
8df4a58b 7726 }
7727
b9a5aa8e 7728 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7729 the prologue, for exception handling reasons, we cannot do this for
7730 any insn that might fault. We could prevent this for mems with a
7731 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7732 have to prevent all such scheduling with a blockage.
8df4a58b 7733
9e7454d0 7734 Linux, on the other hand, never bothered to implement OSF/1's
b9a5aa8e 7735 exception handling, and so doesn't care about such things. Anyone
7736 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8df4a58b 7737
b9a5aa8e 7738 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7739 emit_insn (gen_blockage ());
1fce2e8a 7740}
7741
e3b8b697 7742/* Count the number of .file directives, so that .loc is up to date. */
d0de818d 7743int num_source_filenames = 0;
e3b8b697 7744
2cf1388a 7745/* Output the textual info surrounding the prologue. */
8df4a58b 7746
b9a5aa8e 7747void
92643d95 7748alpha_start_function (FILE *file, const char *fnname,
7749 tree decl ATTRIBUTE_UNUSED)
0c0464e6 7750{
b9a5aa8e 7751 unsigned long imask = 0;
7752 unsigned long fmask = 0;
7753 /* Stack space needed for pushing registers clobbered by us. */
7754 HOST_WIDE_INT sa_size;
7755 /* Complete stack size needed. */
f9e9d81d 7756 unsigned HOST_WIDE_INT frame_size;
6dbdfeeb 7757 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7758 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7759 ? 524288
7760 : 1UL << 31;
b9a5aa8e 7761 /* Offset from base reg to register save area. */
7762 HOST_WIDE_INT reg_offset;
2cf1388a 7763 char *entry_label = (char *) alloca (strlen (fnname) + 6);
b9a5aa8e 7764 int i;
0c0464e6 7765
9caef960 7766 /* Don't emit an extern directive for functions defined in the same file. */
7767 if (TARGET_ABI_UNICOSMK)
7768 {
7769 tree name_tree;
7770 name_tree = get_identifier (fnname);
7771 TREE_ASM_WRITTEN (name_tree) = 1;
7772 }
7773
a314eb5e 7774 alpha_fnname = fnname;
b9a5aa8e 7775 sa_size = alpha_sa_size ();
0c0464e6 7776
b9a5aa8e 7777 frame_size = get_frame_size ();
1467e953 7778 if (TARGET_ABI_OPEN_VMS)
9e7454d0 7779 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7780 + (alpha_procedure_type == PT_STACK ? 8 : 0)
b9a5aa8e 7781 + frame_size
7782 + current_function_pretend_args_size);
9caef960 7783 else if (TARGET_ABI_UNICOSMK)
7784 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7785 + (alpha_procedure_type == PT_STACK ? 48 : 0))
9caef960 7786 + ALPHA_ROUND (frame_size
7787 + current_function_outgoing_args_size);
b9a5aa8e 7788 else
7789 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7790 + sa_size
7791 + ALPHA_ROUND (frame_size
7792 + current_function_pretend_args_size));
0c0464e6 7793
1467e953 7794 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7795 reg_offset = 8;
7796 else
7797 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
0c0464e6 7798
b9a5aa8e 7799 alpha_sa_mask (&imask, &fmask);
bf2a98b3 7800
0e0a0e7a 7801 /* Ecoff can handle multiple .file directives, so put out file and lineno.
449b7f2d 7802 We have to do that before the .ent directive as we cannot switch
7803 files within procedures with native ecoff because line numbers are
7804 linked to procedure descriptors.
7805 Outputting the lineno helps debugging of one line functions as they
7806 would otherwise get no line number at all. Please note that we would
01cc3b75 7807 like to put out last_linenum from final.c, but it is not accessible. */
449b7f2d 7808
7809 if (write_symbols == SDB_DEBUG)
7810 {
9caef960 7811#ifdef ASM_OUTPUT_SOURCE_FILENAME
346064d9 7812 ASM_OUTPUT_SOURCE_FILENAME (file,
7813 DECL_SOURCE_FILE (current_function_decl));
9caef960 7814#endif
e3b8b697 7815#ifdef SDB_OUTPUT_SOURCE_LINE
449b7f2d 7816 if (debug_info_level != DINFO_LEVEL_TERSE)
e3b8b697 7817 SDB_OUTPUT_SOURCE_LINE (file,
7818 DECL_SOURCE_LINE (current_function_decl));
9caef960 7819#endif
449b7f2d 7820 }
7821
b9a5aa8e 7822 /* Issue function start and label. */
9caef960 7823 if (TARGET_ABI_OPEN_VMS
7824 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
f1fe649e 7825 {
b9a5aa8e 7826 fputs ("\t.ent ", file);
2cf1388a 7827 assemble_name (file, fnname);
b9a5aa8e 7828 putc ('\n', file);
a314eb5e 7829
7830 /* If the function needs GP, we'll write the "..ng" label there.
7831 Otherwise, do it here. */
961d6ddd 7832 if (TARGET_ABI_OSF
7833 && ! alpha_function_needs_gp
7834 && ! current_function_is_thunk)
a314eb5e 7835 {
7836 putc ('$', file);
7837 assemble_name (file, fnname);
7838 fputs ("..ng:\n", file);
7839 }
f1fe649e 7840 }
449b7f2d 7841
2cf1388a 7842 strcpy (entry_label, fnname);
1467e953 7843 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7844 strcat (entry_label, "..en");
9caef960 7845
7846 /* For public functions, the label must be globalized by appending an
7847 additional colon. */
7848 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7849 strcat (entry_label, ":");
7850
b9a5aa8e 7851 ASM_OUTPUT_LABEL (file, entry_label);
7852 inside_function = TRUE;
449b7f2d 7853
1467e953 7854 if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7855 fprintf (file, "\t.base $%d\n", vms_base_regno);
bf2a98b3 7856
9caef960 7857 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
b9a5aa8e 7858 && !flag_inhibit_size_directive)
9c0e5703 7859 {
b9a5aa8e 7860 /* Set flags in procedure descriptor to request IEEE-conformant
7861 math-library routines. The value we set it to is PDSC_EXC_IEEE
65abff06 7862 (/usr/include/pdsc.h). */
b9a5aa8e 7863 fputs ("\t.eflag 48\n", file);
9c0e5703 7864 }
bf2a98b3 7865
b9a5aa8e 7866 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7867 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7868 alpha_arg_offset = -frame_size + 48;
cb015df5 7869
b9a5aa8e 7870 /* Describe our frame. If the frame size is too large to express,
7871 print it as zero to avoid an assembler error. We won't be
7872 properly describing such a frame, but that's the best we can do. */
9caef960 7873 if (TARGET_ABI_UNICOSMK)
7874 ;
7875 else if (TARGET_ABI_OPEN_VMS)
4840a03a 7876 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7877 HOST_WIDE_INT_PRINT_DEC "\n",
7878 vms_unwind_regno,
7879 frame_size >= (1UL << 31) ? 0 : frame_size,
7880 reg_offset);
b9a5aa8e 7881 else if (!flag_inhibit_size_directive)
4840a03a 7882 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7883 (frame_pointer_needed
7884 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
6dbdfeeb 7885 frame_size >= max_frame_size ? 0 : frame_size,
4840a03a 7886 current_function_pretend_args_size);
15d5236f 7887
b9a5aa8e 7888 /* Describe which registers were spilled. */
9caef960 7889 if (TARGET_ABI_UNICOSMK)
7890 ;
7891 else if (TARGET_ABI_OPEN_VMS)
15d5236f 7892 {
b9a5aa8e 7893 if (imask)
9caef960 7894 /* ??? Does VMS care if mask contains ra? The old code didn't
b9a5aa8e 7895 set it, so I don't either. */
df7d0d23 7896 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
b9a5aa8e 7897 if (fmask)
769ea120 7898 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
b19d7ab1 7899 if (alpha_procedure_type == PT_REGISTER)
b9a5aa8e 7900 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7901 }
7902 else if (!flag_inhibit_size_directive)
7903 {
7904 if (imask)
15d5236f 7905 {
4840a03a 7906 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
6dbdfeeb 7907 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
b9a5aa8e 7908
7909 for (i = 0; i < 32; ++i)
df7d0d23 7910 if (imask & (1UL << i))
b9a5aa8e 7911 reg_offset += 8;
15d5236f 7912 }
b9a5aa8e 7913
7914 if (fmask)
4840a03a 7915 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
6dbdfeeb 7916 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
bf2a98b3 7917 }
7918
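/* Under the running example (assumed values, for illustration only),
   an OSF function saving $9, $10 and $26 in a 96 byte frame would
   produce:

	.frame $30,96,$26,0
	.mask 0x4000600,-80

   where -80 is reg_offset - frame_size = 16 - 96.  */
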
1467e953 7919#if TARGET_ABI_OPEN_VMS
6cde52a2 7920 /* Ifdef'ed because link_section is only available then. */
2f14b1f9 7921 switch_to_section (readonly_data_section);
b9a5aa8e 7922 fprintf (file, "\t.align 3\n");
2cf1388a 7923 assemble_name (file, fnname); fputs ("..na:\n", file);
b9a5aa8e 7924 fputs ("\t.ascii \"", file);
2cf1388a 7925 assemble_name (file, fnname);
b9a5aa8e 7926 fputs ("\\0\"\n", file);
2cf1388a 7927 alpha_need_linkage (fnname, 1);
2f14b1f9 7928 switch_to_section (text_section);
b9a5aa8e 7929#endif
7930}
bf2a98b3 7931
b9a5aa8e 7932/* Emit the .prologue note at the scheduled end of the prologue. */
16b3392b 7933
85ae73e8 7934static void
92643d95 7935alpha_output_function_end_prologue (FILE *file)
b9a5aa8e 7936{
9caef960 7937 if (TARGET_ABI_UNICOSMK)
7938 ;
7939 else if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 7940 fputs ("\t.prologue\n", file);
1467e953 7941 else if (TARGET_ABI_WINDOWS_NT)
b9a5aa8e 7942 fputs ("\t.prologue 0\n", file);
7943 else if (!flag_inhibit_size_directive)
961d6ddd 7944 fprintf (file, "\t.prologue %d\n",
7945 alpha_function_needs_gp || current_function_is_thunk);
bf2a98b3 7946}
7947
7948/* Write function epilogue. */
7949
9e7454d0 7950/* ??? At some point we will want to support full unwind, and so will
5a965225 7951 need to mark the epilogue as well. At the moment, we just confuse
7952 dwarf2out. */
7953#undef FRP
7954#define FRP(exp) exp
7955
bf2a98b3 7956void
92643d95 7957alpha_expand_epilogue (void)
bf2a98b3 7958{
b9a5aa8e 7959 /* Registers to save. */
7960 unsigned long imask = 0;
7961 unsigned long fmask = 0;
7962 /* Stack space needed for pushing registers clobbered by us. */
7963 HOST_WIDE_INT sa_size;
7964 /* Complete stack size needed. */
7965 HOST_WIDE_INT frame_size;
7966 /* Offset from base reg to register save area. */
7967 HOST_WIDE_INT reg_offset;
7968 int fp_is_frame_pointer, fp_offset;
7969 rtx sa_reg, sa_reg_exp = NULL;
849674a3 7970 rtx sp_adj1, sp_adj2, mem;
11016d99 7971 rtx eh_ofs;
bf2a98b3 7972 int i;
7973
b9a5aa8e 7974 sa_size = alpha_sa_size ();
bf2a98b3 7975
b9a5aa8e 7976 frame_size = get_frame_size ();
1467e953 7977 if (TARGET_ABI_OPEN_VMS)
9e7454d0 7978 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7979 + (alpha_procedure_type == PT_STACK ? 8 : 0)
b9a5aa8e 7980 + frame_size
7981 + current_function_pretend_args_size);
9caef960 7982 else if (TARGET_ABI_UNICOSMK)
7983 frame_size = ALPHA_ROUND (sa_size
b19d7ab1 7984 + (alpha_procedure_type == PT_STACK ? 48 : 0))
9caef960 7985 + ALPHA_ROUND (frame_size
7986 + current_function_outgoing_args_size);
b9a5aa8e 7987 else
7988 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7989 + sa_size
7990 + ALPHA_ROUND (frame_size
7991 + current_function_pretend_args_size));
bf2a98b3 7992
1467e953 7993 if (TARGET_ABI_OPEN_VMS)
b19d7ab1 7994 {
7995 if (alpha_procedure_type == PT_STACK)
7996 reg_offset = 8;
7997 else
7998 reg_offset = 0;
7999 }
b9a5aa8e 8000 else
8001 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8002
8003 alpha_sa_mask (&imask, &fmask);
8004
b19d7ab1 8005 fp_is_frame_pointer
8006 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8007 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
29768226 8008 fp_offset = 0;
8009 sa_reg = stack_pointer_rtx;
b9a5aa8e 8010
c92c328f 8011 if (current_function_calls_eh_return)
8012 eh_ofs = EH_RETURN_STACKADJ_RTX;
8013 else
8014 eh_ofs = NULL_RTX;
8015
9caef960 8016 if (!TARGET_ABI_UNICOSMK && sa_size)
b9a5aa8e 8017 {
8018 /* If we have a frame pointer, restore SP from it. */
1467e953 8019 if ((TARGET_ABI_OPEN_VMS
b9a5aa8e 8020 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
1467e953 8021 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
205b281f 8022 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
15d5236f 8023
b9a5aa8e 8024 /* Cope with very large offsets to the register save area. */
b9a5aa8e 8025 if (reg_offset + sa_size > 0x8000)
bf2a98b3 8026 {
b9a5aa8e 8027 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8028 HOST_WIDE_INT bias;
8029
8030 if (low + sa_size <= 0x8000)
8031 bias = reg_offset - low, reg_offset = low;
9e7454d0 8032 else
b9a5aa8e 8033 bias = reg_offset, reg_offset = 0;
8034
8035 sa_reg = gen_rtx_REG (DImode, 22);
8036 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8037
5a965225 8038 FRP (emit_move_insn (sa_reg, sa_reg_exp));
bf2a98b3 8039 }
9e7454d0 8040
65abff06 8041 /* Restore registers in order, excepting a true frame pointer. */
bf2a98b3 8042
c92c328f 8043 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
11016d99 8044 if (! eh_ofs)
ab6ab77e 8045 set_mem_alias_set (mem, alpha_sr_alias_set);
c92c328f 8046 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8047
b9a5aa8e 8048 reg_offset += 8;
df7d0d23 8049 imask &= ~(1UL << REG_RA);
16b3392b 8050
c49ad9ef 8051 for (i = 0; i < 31; ++i)
df7d0d23 8052 if (imask & (1UL << i))
bf2a98b3 8053 {
b9a5aa8e 8054 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
16b3392b 8055 fp_offset = reg_offset;
8056 else
b9a5aa8e 8057 {
849674a3 8058 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
ab6ab77e 8059 set_mem_alias_set (mem, alpha_sr_alias_set);
849674a3 8060 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
b9a5aa8e 8061 }
bf2a98b3 8062 reg_offset += 8;
8063 }
8064
c49ad9ef 8065 for (i = 0; i < 31; ++i)
df7d0d23 8066 if (fmask & (1UL << i))
bf2a98b3 8067 {
849674a3 8068 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
ab6ab77e 8069 set_mem_alias_set (mem, alpha_sr_alias_set);
849674a3 8070 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
bf2a98b3 8071 reg_offset += 8;
8072 }
b9a5aa8e 8073 }
b19d7ab1 8074 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
9caef960 8075 {
8076 /* Restore callee-saved general-purpose registers. */
8077
8078 reg_offset = -56;
8079
8080 for (i = 9; i < 15; i++)
df7d0d23 8081 if (imask & (1UL << i))
9caef960 8082 {
8083 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8084 reg_offset));
8085 set_mem_alias_set (mem, alpha_sr_alias_set);
8086 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8087 reg_offset -= 8;
8088 }
8089
8090 for (i = 2; i < 10; i++)
df7d0d23 8091 if (fmask & (1UL << i))
9caef960 8092 {
8093 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8094 reg_offset));
8095 set_mem_alias_set (mem, alpha_sr_alias_set);
8096 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8097 reg_offset -= 8;
8098 }
8099
8100 /* Restore the return address from the DSIB. */
8101
8102 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8103 set_mem_alias_set (mem, alpha_sr_alias_set);
8104 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8105 }
bf2a98b3 8106
11016d99 8107 if (frame_size || eh_ofs)
b9a5aa8e 8108 {
ec37ccb4 8109 sp_adj1 = stack_pointer_rtx;
8110
11016d99 8111 if (eh_ofs)
ec37ccb4 8112 {
8113 sp_adj1 = gen_rtx_REG (DImode, 23);
8114 emit_move_insn (sp_adj1,
11016d99 8115 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
ec37ccb4 8116 }
8117
b9a5aa8e 8118 /* If the stack size is large, begin computation into a temporary
8119 register so as not to interfere with a potential fp restore,
8120 which must be consecutive with an SP restore. */
9caef960 8121 if (frame_size < 32768
8122 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
ec37ccb4 8123 sp_adj2 = GEN_INT (frame_size);
9caef960 8124 else if (TARGET_ABI_UNICOSMK)
8125 {
8126 sp_adj1 = gen_rtx_REG (DImode, 23);
8127 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8128 sp_adj2 = const0_rtx;
8129 }
b9a5aa8e 8130 else if (frame_size < 0x40007fffL)
8131 {
8132 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8133
ec37ccb4 8134 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
b9a5aa8e 8135 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8136 sp_adj1 = sa_reg;
8137 else
8138 {
8139 sp_adj1 = gen_rtx_REG (DImode, 23);
5a965225 8140 FRP (emit_move_insn (sp_adj1, sp_adj2));
b9a5aa8e 8141 }
8142 sp_adj2 = GEN_INT (low);
8143 }
0e0a0e7a 8144 else
b9a5aa8e 8145 {
ec37ccb4 8146 rtx tmp = gen_rtx_REG (DImode, 23);
91bc47b0 8147 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8148 3, false));
ec37ccb4 8149 if (!sp_adj2)
b9a5aa8e 8150 {
8151 /* We can't drop new things to memory this late, afaik,
8152 so build it up by pieces. */
af792316 8153 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8154 -(frame_size < 0)));
4d10b463 8155 gcc_assert (sp_adj2);
b9a5aa8e 8156 }
b9a5aa8e 8157 }
bf2a98b3 8158
b9a5aa8e 8159 /* From now on, things must be in order. So emit blockages. */
8160
8161 /* Restore the frame pointer. */
9caef960 8162 if (TARGET_ABI_UNICOSMK)
8163 {
8164 emit_insn (gen_blockage ());
8165 mem = gen_rtx_MEM (DImode,
8166 plus_constant (hard_frame_pointer_rtx, -16));
8167 set_mem_alias_set (mem, alpha_sr_alias_set);
8168 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8169 }
8170 else if (fp_is_frame_pointer)
b9a5aa8e 8171 {
8172 emit_insn (gen_blockage ());
205b281f 8173 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
ab6ab77e 8174 set_mem_alias_set (mem, alpha_sr_alias_set);
849674a3 8175 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
b9a5aa8e 8176 }
1467e953 8177 else if (TARGET_ABI_OPEN_VMS)
b9a5aa8e 8178 {
8179 emit_insn (gen_blockage ());
5a965225 8180 FRP (emit_move_insn (hard_frame_pointer_rtx,
8181 gen_rtx_REG (DImode, vms_save_fp_regno)));
b9a5aa8e 8182 }
8183
8184 /* Restore the stack pointer. */
8185 emit_insn (gen_blockage ());
9caef960 8186 if (sp_adj2 == const0_rtx)
8187 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8188 else
8189 FRP (emit_move_insn (stack_pointer_rtx,
8190 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
b9a5aa8e 8191 }
9e7454d0 8192 else
b9a5aa8e 8193 {
b19d7ab1 8194 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
b9a5aa8e 8195 {
8196 emit_insn (gen_blockage ());
5a965225 8197 FRP (emit_move_insn (hard_frame_pointer_rtx,
8198 gen_rtx_REG (DImode, vms_save_fp_regno)));
b9a5aa8e 8199 }
b19d7ab1 8200 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
9caef960 8201 {
8202 /* Decrement the frame pointer if the function does not have a
8203 frame. */
8204
8205 emit_insn (gen_blockage ());
8206 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
bcd9bd66 8207 hard_frame_pointer_rtx, constm1_rtx)));
9caef960 8208 }
bf2a98b3 8209 }
b9a5aa8e 8210}
cf73d31f 8211\f
b9a5aa8e 8212/* Output the rest of the textual info surrounding the epilogue. */
8213
8214void
92643d95 8215alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
b9a5aa8e 8216{
32a8f747 8217 rtx insn;
8218
8219 /* We output a nop after noreturn calls at the very end of the function to
8220 ensure that the return address always remains in the caller's code range,
8221 as not doing so might confuse unwinding engines. */
8222 insn = get_last_insn ();
8223 if (!INSN_P (insn))
8224 insn = prev_active_insn (insn);
8225 if (GET_CODE (insn) == CALL_INSN)
8226 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8227
04b0d94a 8228#if TARGET_ABI_OPEN_VMS
8229 alpha_write_linkage (file, fnname, decl);
8230#endif
8231
bf2a98b3 8232 /* End the function. */
9caef960 8233 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
f1fe649e 8234 {
b9a5aa8e 8235 fputs ("\t.end ", file);
2cf1388a 8236 assemble_name (file, fnname);
b9a5aa8e 8237 putc ('\n', file);
f1fe649e 8238 }
449b7f2d 8239 inside_function = FALSE;
9c0e5703 8240
9caef960 8241 /* Output jump tables and the static subroutine information block. */
8242 if (TARGET_ABI_UNICOSMK)
8243 {
8244 unicosmk_output_ssib (file, fnname);
8245 unicosmk_output_deferred_case_vectors (file);
8246 }
bf2a98b3 8247}
961d6ddd 8248
6988553d 8249#if TARGET_ABI_OSF
8250/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
961d6ddd 8251
8252 In order to avoid the hordes of differences between generated code
8253 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8254 lots of code loading up large constants, generate rtl and emit it
8255 instead of going straight to text.
8256
8257 Not sure why this idea hasn't been explored before... */
8258
6988553d 8259static void
92643d95 8260alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8261 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8262 tree function)
961d6ddd 8263{
8264 HOST_WIDE_INT hi, lo;
8265 rtx this, insn, funexp;
8266
8267 /* We always require a valid GP. */
8268 emit_insn (gen_prologue_ldgp ());
31b97e8f 8269 emit_note (NOTE_INSN_PROLOGUE_END);
961d6ddd 8270
8271 /* Find the "this" pointer. If the function returns a structure,
8272 the structure return pointer is in $16. */
45550790 8273 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
961d6ddd 8274 this = gen_rtx_REG (Pmode, 17);
8275 else
8276 this = gen_rtx_REG (Pmode, 16);
8277
8278 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8279 entire constant for the add. */
8280 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8281 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8282 if (hi + lo == delta)
8283 {
8284 if (hi)
8285 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8286 if (lo)
8287 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8288 }
8289 else
8290 {
8291 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8292 delta, -(delta < 0));
8293 emit_insn (gen_adddi3 (this, this, tmp));
8294 }
8295
a19ec9da 8296 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8297 if (vcall_offset)
8298 {
8299 rtx tmp, tmp2;
8300
8301 tmp = gen_rtx_REG (Pmode, 0);
8302 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8303
8304 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8305 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8306 if (hi + lo == vcall_offset)
8307 {
8308 if (hi)
8309 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8310 }
8311 else
8312 {
8313 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8314 vcall_offset, -(vcall_offset < 0));
8315 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8316 lo = 0;
8317 }
8318 if (lo)
8319 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8320 else
8321 tmp2 = tmp;
8322 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8323
8324 emit_insn (gen_adddi3 (this, this, tmp));
8325 }
8326
961d6ddd 8327 /* Generate a tail call to the target function. */
8328 if (! TREE_USED (function))
8329 {
8330 assemble_external (function);
8331 TREE_USED (function) = 1;
8332 }
8333 funexp = XEXP (DECL_RTL (function), 0);
8334 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8335 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8336 SIBLING_CALL_P (insn) = 1;
8337
8338 /* Run just enough of rest_of_compilation to get the insns emitted.
8339 There's not really enough bulk here to make other passes such as
8340 instruction scheduling worth while. Note that use_thunk calls
8341 assemble_start_function and assemble_end_function. */
8342 insn = get_insns ();
375c1c8a 8343 insn_locators_alloc ();
961d6ddd 8344 shorten_branches (insn);
8345 final_start_function (insn, file, 1);
4bf029b0 8346 final (insn, file, 1);
961d6ddd 8347 final_end_function ();
8348}
6988553d 8349#endif /* TARGET_ABI_OSF */
449b7f2d 8350\f
8351/* Debugging support. */
8352
8353#include "gstab.h"
8354
8355/* Count the number of sdb-related labels that are generated (to find block
8356 start and end boundaries). */
8357
8358int sdb_label_count = 0;
8359
449b7f2d 8360/* Name of the file containing the current function. */
8361
ace75b22 8362static const char *current_function_file = "";
449b7f2d 8363
8364/* Offsets to alpha virtual arg/local debugging pointers. */
8365
8366long alpha_arg_offset;
8367long alpha_auto_offset;
8368\f
8369/* Emit a new filename to a stream. */
8370
8371void
92643d95 8372alpha_output_filename (FILE *stream, const char *name)
449b7f2d 8373{
8374 static int first_time = TRUE;
449b7f2d 8375
8376 if (first_time)
8377 {
8378 first_time = FALSE;
8379 ++num_source_filenames;
8380 current_function_file = name;
8381 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8382 output_quoted_string (stream, name);
8383 fprintf (stream, "\n");
8384 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8385 fprintf (stream, "\t#@stabs\n");
8386 }
8387
8763f243 8388 else if (write_symbols == DBX_DEBUG)
e3b8b697 8389 /* dbxout.c will emit an appropriate .stabs directive. */
8390 return;
449b7f2d 8391
8392 else if (name != current_function_file
be3797c1 8393 && strcmp (name, current_function_file) != 0)
449b7f2d 8394 {
8395 if (inside_function && ! TARGET_GAS)
8396 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8397 else
8398 {
8399 ++num_source_filenames;
8400 current_function_file = name;
8401 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8402 }
8403
8404 output_quoted_string (stream, name);
8405 fprintf (stream, "\n");
8406 }
8407}
c4622276 8408\f
8409/* Structure to show the current status of registers and memory. */
8410
8411struct shadow_summary
8412{
8413 struct {
495c4a78 8414 unsigned int i : 31; /* Mask of int regs */
8415 unsigned int fp : 31; /* Mask of fp regs */
8416 unsigned int mem : 1; /* mem == imem | fpmem */
c4622276 8417 } used, defd;
8418};
8419
8420/* Summarize the effects of expression X on the machine. Update SUM, a pointer
8421 to the summary structure. SET is nonzero if the insn is setting the
8422 object, otherwise zero. */
8423
8424static void
92643d95 8425summarize_insn (rtx x, struct shadow_summary *sum, int set)
c4622276 8426{
d2ca078f 8427 const char *format_ptr;
c4622276 8428 int i, j;
8429
8430 if (x == 0)
8431 return;
8432
8433 switch (GET_CODE (x))
8434 {
8435 /* ??? Note that this case would be incorrect if the Alpha had a
8436 ZERO_EXTRACT in SET_DEST. */
8437 case SET:
8438 summarize_insn (SET_SRC (x), sum, 0);
8439 summarize_insn (SET_DEST (x), sum, 1);
8440 break;
8441
8442 case CLOBBER:
8443 summarize_insn (XEXP (x, 0), sum, 1);
8444 break;
8445
8446 case USE:
8447 summarize_insn (XEXP (x, 0), sum, 0);
8448 break;
8449
a886cc41 8450 case ASM_OPERANDS:
8451 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8452 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8453 break;
8454
c4622276 8455 case PARALLEL:
3a5dbb5e 8456 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
c4622276 8457 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8458 break;
8459
a886cc41 8460 case SUBREG:
b9a5aa8e 8461 summarize_insn (SUBREG_REG (x), sum, 0);
8462 break;
a886cc41 8463
c4622276 8464 case REG:
8465 {
8466 int regno = REGNO (x);
f3d263a7 8467 unsigned long mask = ((unsigned long) 1) << (regno % 32);
c4622276 8468
8469 if (regno == 31 || regno == 63)
8470 break;
8471
8472 if (set)
8473 {
8474 if (regno < 32)
8475 sum->defd.i |= mask;
8476 else
8477 sum->defd.fp |= mask;
8478 }
8479 else
8480 {
8481 if (regno < 32)
8482 sum->used.i |= mask;
8483 else
8484 sum->used.fp |= mask;
8485 }
8486 }
8487 break;
8488
8489 case MEM:
8490 if (set)
8491 sum->defd.mem = 1;
8492 else
8493 sum->used.mem = 1;
8494
8495 /* Find the regs used in memory address computation: */
8496 summarize_insn (XEXP (x, 0), sum, 0);
8497 break;
8498
2d710b28 8499 case CONST_INT: case CONST_DOUBLE:
8500 case SYMBOL_REF: case LABEL_REF: case CONST:
5bdbf614 8501 case SCRATCH: case ASM_INPUT:
2d710b28 8502 break;
8503
c4622276 8504 /* Handle common unary and binary ops for efficiency. */
8505 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8506 case MOD: case UDIV: case UMOD: case AND: case IOR:
8507 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8508 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8509 case NE: case EQ: case GE: case GT: case LE:
8510 case LT: case GEU: case GTU: case LEU: case LTU:
8511 summarize_insn (XEXP (x, 0), sum, 0);
8512 summarize_insn (XEXP (x, 1), sum, 0);
8513 break;
8514
8515 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8516 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8517 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
9e7454d0 8518 case SQRT: case FFS:
c4622276 8519 summarize_insn (XEXP (x, 0), sum, 0);
8520 break;
8521
8522 default:
8523 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
3a5dbb5e 8524 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
cada32d3 8525 switch (format_ptr[i])
c4622276 8526 {
8527 case 'e':
8528 summarize_insn (XEXP (x, i), sum, 0);
8529 break;
8530
8531 case 'E':
3a5dbb5e 8532 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
c4622276 8533 summarize_insn (XVECEXP (x, i, j), sum, 0);
8534 break;
8535
1dc5f36f 8536 case 'i':
8537 break;
8538
c4622276 8539 default:
4d10b463 8540 gcc_unreachable ();
c4622276 8541 }
8542 }
8543}
c4622276 8544
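/* For example, summarizing (set (mem (plus $10 8)) $9) marks
   defd.mem for the stored location, the bit for $10 in used.i from
   walking the address computation, and the bit for $9 in used.i from
   the source; only a REG destination lands in defd.i or defd.fp.  */
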
b9a5aa8e 8545/* Ensure a sufficient number of `trapb' insns are in the code when
8546 the user requests code with a trap precision of functions or
8547 instructions.
8548
8549 In naive mode, when the user requests a trap-precision of
8550 "instruction", a trapb is needed after every instruction that may
8551 generate a trap. This ensures that the code is resumption safe but
8552 it is also slow.
8553
8554 When optimizations are turned on, we delay issuing a trapb as long
8555 as possible. In this context, a trap shadow is the sequence of
8556 instructions that starts with a (potentially) trap generating
8557 instruction and extends to the next trapb or call_pal instruction
8558 (but GCC never generates call_pal by itself). We can delay (and
8559 therefore sometimes omit) a trapb subject to the following
8560 conditions:
8561
8562 (a) On entry to the trap shadow, if any Alpha register or memory
8563 location contains a value that is used as an operand value by some
8564 instruction in the trap shadow (live on entry), then no instruction
8565 in the trap shadow may modify the register or memory location.
8566
8567 (b) Within the trap shadow, the computation of the base register
8568 for a memory load or store instruction may not involve using the
8569 result of an instruction that might generate an UNPREDICTABLE
8570 result.
8571
8572 (c) Within the trap shadow, no register may be used more than once
8573 as a destination register. (This is to make life easier for the
8574 trap-handler.)
c4622276 8575
18adf4f6 8576 (d) The trap shadow may not include any branch instructions. */
c4622276 8577
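/* For instance (assumed assembly, shown only for exposition):

	addt $f1,$f2,$f3	# may trap; opens a shadow
	mult $f3,$f4,$f5	# ok: new destination register
	addt $f5,$f6,$f3	# would redefine $f3, violating (c)

   so under -mtrap-precision=i a trapb must be emitted before the
   second addt; the close_shadow logic below handles this.  */
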
18adf4f6 8578static void
92643d95 8579alpha_handle_trap_shadows (void)
c4622276 8580{
18adf4f6 8581 struct shadow_summary shadow;
8582 int trap_pending, exception_nesting;
b9b4428b 8583 rtx i, n;
c4622276 8584
18adf4f6 8585 trap_pending = 0;
8586 exception_nesting = 0;
8587 shadow.used.i = 0;
8588 shadow.used.fp = 0;
8589 shadow.used.mem = 0;
8590 shadow.defd = shadow.used;
9e7454d0 8591
2efea8c0 8592 for (i = get_insns (); i ; i = NEXT_INSN (i))
18adf4f6 8593 {
8594 if (GET_CODE (i) == NOTE)
8595 {
ad4583d9 8596 switch (NOTE_KIND (i))
18adf4f6 8597 {
8598 case NOTE_INSN_EH_REGION_BEG:
8599 exception_nesting++;
8600 if (trap_pending)
8601 goto close_shadow;
8602 break;
8603
8604 case NOTE_INSN_EH_REGION_END:
8605 exception_nesting--;
8606 if (trap_pending)
8607 goto close_shadow;
8608 break;
8609
8610 case NOTE_INSN_EPILOGUE_BEG:
8611 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8612 goto close_shadow;
8613 break;
8614 }
8615 }
8616 else if (trap_pending)
8617 {
8618 if (alpha_tp == ALPHA_TP_FUNC)
8619 {
8620 if (GET_CODE (i) == JUMP_INSN
8621 && GET_CODE (PATTERN (i)) == RETURN)
8622 goto close_shadow;
8623 }
8624 else if (alpha_tp == ALPHA_TP_INSN)
8625 {
8626 if (optimize > 0)
8627 {
8628 struct shadow_summary sum;
8629
8630 sum.used.i = 0;
8631 sum.used.fp = 0;
8632 sum.used.mem = 0;
a886cc41 8633 sum.defd = sum.used;
18adf4f6 8634
8635 switch (GET_CODE (i))
8636 {
8637 case INSN:
4d10b463 8638 /* Annoyingly, get_attr_trap will die on these. */
fad0a39b 8639 if (GET_CODE (PATTERN (i)) == USE
8640 || GET_CODE (PATTERN (i)) == CLOBBER)
18adf4f6 8641 break;
8642
8643 summarize_insn (PATTERN (i), &sum, 0);
8644
8645 if ((sum.defd.i & shadow.defd.i)
8646 || (sum.defd.fp & shadow.defd.fp))
8647 {
8648 /* (c) would be violated */
8649 goto close_shadow;
8650 }
8651
8652 /* Combine shadow with summary of current insn: */
8653 shadow.used.i |= sum.used.i;
8654 shadow.used.fp |= sum.used.fp;
8655 shadow.used.mem |= sum.used.mem;
8656 shadow.defd.i |= sum.defd.i;
8657 shadow.defd.fp |= sum.defd.fp;
8658 shadow.defd.mem |= sum.defd.mem;
8659
8660 if ((sum.defd.i & shadow.used.i)
8661 || (sum.defd.fp & shadow.used.fp)
8662 || (sum.defd.mem & shadow.used.mem))
8663 {
8664 /* (a) would be violated (also takes care of (b)) */
4d10b463 8665 gcc_assert (get_attr_trap (i) != TRAP_YES
8666 || (!(sum.defd.i & sum.used.i)
8667 && !(sum.defd.fp & sum.used.fp)));
18adf4f6 8668
8669 goto close_shadow;
8670 }
8671 break;
8672
8673 case JUMP_INSN:
8674 case CALL_INSN:
8675 case CODE_LABEL:
8676 goto close_shadow;
8677
8678 default:
4d10b463 8679 gcc_unreachable ();
18adf4f6 8680 }
8681 }
8682 else
8683 {
8684 close_shadow:
b9b4428b 8685 n = emit_insn_before (gen_trapb (), i);
8686 PUT_MODE (n, TImode);
8687 PUT_MODE (i, TImode);
18adf4f6 8688 trap_pending = 0;
8689 shadow.used.i = 0;
8690 shadow.used.fp = 0;
8691 shadow.used.mem = 0;
8692 shadow.defd = shadow.used;
8693 }
8694 }
8695 }
c4622276 8696
609d4083 8697 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8698 && GET_CODE (i) == INSN
8699 && GET_CODE (PATTERN (i)) != USE
8700 && GET_CODE (PATTERN (i)) != CLOBBER
8701 && get_attr_trap (i) == TRAP_YES)
8702 {
8703 if (optimize && !trap_pending)
8704 summarize_insn (PATTERN (i), &shadow, 0);
8705 trap_pending = 1;
8706 }
c4622276 8707 }
8708}
b9b4428b 8709\f
b9b4428b 8710/* Alpha can only issue instruction groups simultaneously if they are
5910bb95 8711 suitably aligned. This is very processor-specific. */
07770f18 8712/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8713 that are marked "fake". These instructions do not exist on that target,
8714 but it is possible to see these insns with deranged combinations of
8715 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8716 choose a result at random. */
b9b4428b 8717
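/* To sketch how the issue masks below work (illustrative sequence):
   on EV5 an ILD is EV5_E01 and sets in_use = E0|E01; a following IST
   (EV5_E0) finds E0 taken but E01 set, so it issues as if the
   hardware swapped the load into E1; a second IST then finds both
   integer slots full and must start a new group.  */
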
849674a3 8718enum alphaev4_pipe {
8719 EV4_STOP = 0,
8720 EV4_IB0 = 1,
8721 EV4_IB1 = 2,
8722 EV4_IBX = 4
8723};
8724
b9b4428b 8725enum alphaev5_pipe {
8726 EV5_STOP = 0,
8727 EV5_NONE = 1,
8728 EV5_E01 = 2,
8729 EV5_E0 = 4,
8730 EV5_E1 = 8,
8731 EV5_FAM = 16,
8732 EV5_FA = 32,
8733 EV5_FM = 64
8734};
8735
849674a3 8736static enum alphaev4_pipe
92643d95 8737alphaev4_insn_pipe (rtx insn)
849674a3 8738{
8739 if (recog_memoized (insn) < 0)
8740 return EV4_STOP;
8741 if (get_attr_length (insn) != 4)
8742 return EV4_STOP;
8743
8744 switch (get_attr_type (insn))
8745 {
8746 case TYPE_ILD:
f155876e 8747 case TYPE_LDSYM:
849674a3 8748 case TYPE_FLD:
f155876e 8749 case TYPE_LD_L:
849674a3 8750 return EV4_IBX;
8751
849674a3 8752 case TYPE_IADD:
8753 case TYPE_ILOG:
8754 case TYPE_ICMOV:
8755 case TYPE_ICMP:
849674a3 8756 case TYPE_FST:
8757 case TYPE_SHIFT:
8758 case TYPE_IMUL:
8759 case TYPE_FBR:
07770f18 8760 case TYPE_MVI: /* fake */
849674a3 8761 return EV4_IB0;
8762
f155876e 8763 case TYPE_IST:
849674a3 8764 case TYPE_MISC:
8765 case TYPE_IBR:
8766 case TYPE_JSR:
1050b77e 8767 case TYPE_CALLPAL:
849674a3 8768 case TYPE_FCPYS:
8769 case TYPE_FCMOV:
8770 case TYPE_FADD:
8771 case TYPE_FDIV:
8772 case TYPE_FMUL:
f155876e 8773 case TYPE_ST_C:
8774 case TYPE_MB:
07770f18 8775 case TYPE_FSQRT: /* fake */
8776 case TYPE_FTOI: /* fake */
8777 case TYPE_ITOF: /* fake */
849674a3 8778 return EV4_IB1;
8779
8780 default:
4d10b463 8781 gcc_unreachable ();
849674a3 8782 }
8783}
8784
b9b4428b 8785static enum alphaev5_pipe
92643d95 8786alphaev5_insn_pipe (rtx insn)
b9b4428b 8787{
8788 if (recog_memoized (insn) < 0)
8789 return EV5_STOP;
8790 if (get_attr_length (insn) != 4)
8791 return EV5_STOP;
8792
8793 switch (get_attr_type (insn))
8794 {
8795 case TYPE_ILD:
8796 case TYPE_FLD:
8797 case TYPE_LDSYM:
8798 case TYPE_IADD:
8799 case TYPE_ILOG:
8800 case TYPE_ICMOV:
8801 case TYPE_ICMP:
8802 return EV5_E01;
8803
8804 case TYPE_IST:
8805 case TYPE_FST:
8806 case TYPE_SHIFT:
8807 case TYPE_IMUL:
8808 case TYPE_MISC:
8809 case TYPE_MVI:
f155876e 8810 case TYPE_LD_L:
8811 case TYPE_ST_C:
8812 case TYPE_MB:
07770f18 8813 case TYPE_FTOI: /* fake */
8814 case TYPE_ITOF: /* fake */
b9b4428b 8815 return EV5_E0;
8816
8817 case TYPE_IBR:
8818 case TYPE_JSR:
1050b77e 8819 case TYPE_CALLPAL:
b9b4428b 8820 return EV5_E1;
8821
8822 case TYPE_FCPYS:
8823 return EV5_FAM;
8824
8825 case TYPE_FBR:
8826 case TYPE_FCMOV:
8827 case TYPE_FADD:
8828 case TYPE_FDIV:
07770f18 8829 case TYPE_FSQRT: /* fake */
b9b4428b 8830 return EV5_FA;
8831
8832 case TYPE_FMUL:
8833 return EV5_FM;
ddca68f8 8834
8835 default:
4d10b463 8836 gcc_unreachable ();
b9b4428b 8837 }
b9b4428b 8838}
8839
9e7454d0 8840/* IN_USE is a mask of the slots currently filled within the insn group.
849674a3 8841 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9e7454d0 8842 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
849674a3 8843
8844 LEN is, of course, the length of the group in bytes. */
8845
8846static rtx
92643d95 8847alphaev4_next_group (rtx insn, int *pin_use, int *plen)
849674a3 8848{
8849 int len, in_use;
8850
8851 len = in_use = 0;
8852
9204e736 8853 if (! INSN_P (insn)
849674a3 8854 || GET_CODE (PATTERN (insn)) == CLOBBER
8855 || GET_CODE (PATTERN (insn)) == USE)
8856 goto next_and_done;
8857
8858 while (1)
8859 {
8860 enum alphaev4_pipe pipe;
8861
8862 pipe = alphaev4_insn_pipe (insn);
8863 switch (pipe)
8864 {
8865 case EV4_STOP:
8866 /* Force complex instructions to start new groups. */
8867 if (in_use)
8868 goto done;
8869
20833d12 8870 /* If this is a completely unrecognized insn, it's an asm.
849674a3 8871 We don't know how long it is, so record length as -1 to
8872 signal a needed realignment. */
8873 if (recog_memoized (insn) < 0)
8874 len = -1;
8875 else
8876 len = get_attr_length (insn);
8877 goto next_and_done;
8878
8879 case EV4_IBX:
8880 if (in_use & EV4_IB0)
8881 {
8882 if (in_use & EV4_IB1)
8883 goto done;
8884 in_use |= EV4_IB1;
8885 }
8886 else
8887 in_use |= EV4_IB0 | EV4_IBX;
8888 break;
8889
8890 case EV4_IB0:
8891 if (in_use & EV4_IB0)
8892 {
8893 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8894 goto done;
8895 in_use |= EV4_IB1;
8896 }
8897 in_use |= EV4_IB0;
8898 break;
8899
8900 case EV4_IB1:
8901 if (in_use & EV4_IB1)
8902 goto done;
8903 in_use |= EV4_IB1;
8904 break;
8905
8906 default:
4d10b463 8907 gcc_unreachable ();
849674a3 8908 }
8909 len += 4;
9e7454d0 8910
849674a3 8911 /* Haifa doesn't do well scheduling branches. */
8912 if (GET_CODE (insn) == JUMP_INSN)
8913 goto next_and_done;
8914
8915 next:
8916 insn = next_nonnote_insn (insn);
8917
9204e736 8918 if (!insn || ! INSN_P (insn))
849674a3 8919 goto done;
8920
8921 /* Let Haifa tell us where it thinks insn group boundaries are. */
8922 if (GET_MODE (insn) == TImode)
8923 goto done;
8924
8925 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8926 goto next;
8927 }
8928
8929 next_and_done:
8930 insn = next_nonnote_insn (insn);
8931
8932 done:
8933 *plen = len;
8934 *pin_use = in_use;
8935 return insn;
8936}
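/* A minimal sketch, not part of alpha.c: the EV4 dual-issue bookkeeping
   used above, isolated from the group-termination logic.  An EV4_IBX
   insn can issue in either slot; we claim IB0 first and leave the
   EV4_IBX bit set so that a later IB0-only insn may still bounce the
   earlier insn over to IB1.  The "group full" cases (the goto done
   paths above) are omitted here.  */

static int
ev4_claim_slot_sketch (int in_use, enum alphaev4_pipe pipe)
{
  switch (pipe)
    {
    case EV4_IBX:
      if (in_use & EV4_IB0)
        in_use |= EV4_IB1;              /* IB0 already taken; use IB1.  */
      else
        in_use |= EV4_IB0 | EV4_IBX;    /* Take IB0 but remain movable.  */
      break;

    case EV4_IB0:
      if ((in_use & (EV4_IB0 | EV4_IBX)) == (EV4_IB0 | EV4_IBX))
        in_use |= EV4_IB1;              /* Bounce the IBX insn to IB1.  */
      in_use |= EV4_IB0;
      break;

    case EV4_IB1:
      in_use |= EV4_IB1;
      break;

    default:
      break;
    }
  return in_use;
}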
8937
9e7454d0 8938/* IN_USE is a mask of the slots currently filled within the insn group.
849674a3 8939 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9e7454d0 8940 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
b9b4428b 8941
8942 LEN is, of course, the length of the group in bytes. */
8943
8944static rtx
92643d95 8945alphaev5_next_group (rtx insn, int *pin_use, int *plen)
b9b4428b 8946{
8947 int len, in_use;
8948
8949 len = in_use = 0;
8950
9204e736 8951 if (! INSN_P (insn)
ddca68f8 8952 || GET_CODE (PATTERN (insn)) == CLOBBER
8953 || GET_CODE (PATTERN (insn)) == USE)
8954 goto next_and_done;
b9b4428b 8955
ddca68f8 8956 while (1)
b9b4428b 8957 {
8958 enum alphaev5_pipe pipe;
b9b4428b 8959
8960 pipe = alphaev5_insn_pipe (insn);
8961 switch (pipe)
8962 {
8963 case EV5_STOP:
8964 /* Force complex instructions to start new groups. */
8965 if (in_use)
8966 goto done;
8967
20833d12 8968 /* If this is a completely unrecognized insn, it's an asm.
b9b4428b 8969 We don't know how long it is, so record length as -1 to
8970 signal a needed realignment. */
8971 if (recog_memoized (insn) < 0)
8972 len = -1;
8973 else
8974 len = get_attr_length (insn);
ddca68f8 8975 goto next_and_done;
b9b4428b 8976
4d10b463 8977 /* ??? Most of the places below, we would like to assert never
8978 happen, as it would indicate an error either in Haifa, or
8979 in the scheduling description. Unfortunately, Haifa never
8980 schedules the last instruction of the BB, so we don't have
8981 an accurate TI bit to go off. */
b9b4428b 8982 case EV5_E01:
8983 if (in_use & EV5_E0)
8984 {
8985 if (in_use & EV5_E1)
8986 goto done;
8987 in_use |= EV5_E1;
8988 }
8989 else
8990 in_use |= EV5_E0 | EV5_E01;
8991 break;
8992
8993 case EV5_E0:
8994 if (in_use & EV5_E0)
8995 {
849674a3 8996 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
b9b4428b 8997 goto done;
8998 in_use |= EV5_E1;
8999 }
9000 in_use |= EV5_E0;
9001 break;
9002
9003 case EV5_E1:
9004 if (in_use & EV5_E1)
9005 goto done;
9006 in_use |= EV5_E1;
9007 break;
9008
9009 case EV5_FAM:
9010 if (in_use & EV5_FA)
9011 {
9012 if (in_use & EV5_FM)
9013 goto done;
9014 in_use |= EV5_FM;
9015 }
9016 else
9017 in_use |= EV5_FA | EV5_FAM;
9018 break;
9019
9020 case EV5_FA:
9021 if (in_use & EV5_FA)
9022 goto done;
9023 in_use |= EV5_FA;
9024 break;
9025
9026 case EV5_FM:
9027 if (in_use & EV5_FM)
9028 goto done;
9029 in_use |= EV5_FM;
9030 break;
9031
9032 case EV5_NONE:
9033 break;
9034
9035 default:
4d10b463 9036 gcc_unreachable ();
b9b4428b 9037 }
9038 len += 4;
9e7454d0 9039
b9b4428b 9040 /* Haifa doesn't do well scheduling branches. */
9041 /* ??? If this is predicted not-taken, slotting continues, except
9042 that no more IBR, FBR, or JSR insns may be slotted. */
9043 if (GET_CODE (insn) == JUMP_INSN)
ddca68f8 9044 goto next_and_done;
b9b4428b 9045
ddca68f8 9046 next:
b9b4428b 9047 insn = next_nonnote_insn (insn);
9048
9204e736 9049 if (!insn || ! INSN_P (insn))
b9b4428b 9050 goto done;
f9137da0 9051
b9b4428b 9052 /* Let Haifa tell us where it thinks insn group boundaries are. */
9053 if (GET_MODE (insn) == TImode)
9054 goto done;
9055
ddca68f8 9056 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9057 goto next;
b9b4428b 9058 }
ddca68f8 9059
9060 next_and_done:
9061 insn = next_nonnote_insn (insn);
b9b4428b 9062
9063 done:
9064 *plen = len;
9065 *pin_use = in_use;
9066 return insn;
b9b4428b 9067}
9068
849674a3 9069static rtx
92643d95 9070alphaev4_next_nop (int *pin_use)
849674a3 9071{
9072 int in_use = *pin_use;
9073 rtx nop;
9074
9075 if (!(in_use & EV4_IB0))
9076 {
9077 in_use |= EV4_IB0;
9078 nop = gen_nop ();
9079 }
9080 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9081 {
9082 in_use |= EV4_IB1;
9083 nop = gen_nop ();
9084 }
9085 else if (TARGET_FP && !(in_use & EV4_IB1))
9086 {
9087 in_use |= EV4_IB1;
9088 nop = gen_fnop ();
9089 }
9090 else
9091 nop = gen_unop ();
9092
9093 *pin_use = in_use;
9094 return nop;
9095}
9096
9097static rtx
92643d95 9098alphaev5_next_nop (int *pin_use)
849674a3 9099{
9100 int in_use = *pin_use;
9101 rtx nop;
9102
9103 if (!(in_use & EV5_E1))
9104 {
9105 in_use |= EV5_E1;
9106 nop = gen_nop ();
9107 }
9108 else if (TARGET_FP && !(in_use & EV5_FA))
9109 {
9110 in_use |= EV5_FA;
9111 nop = gen_fnop ();
9112 }
9113 else if (TARGET_FP && !(in_use & EV5_FM))
9114 {
9115 in_use |= EV5_FM;
9116 nop = gen_fnop ();
9117 }
9118 else
9119 nop = gen_unop ();
9120
9121 *pin_use = in_use;
9122 return nop;
9123}
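/* For reference (patterns assumed from alpha.md, not defined in this
   file): gen_nop emits the integer no-op "bis $31,$31,$31", gen_fnop
   the floating-point no-op "cpys $f31,$f31,$f31", and gen_unop the
   load-format no-op "ldq_u $31,0($30)".  The two choosers above thus
   prefer whichever slot class is still free and fall back to the
   load-format unop as a last resort.  */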
9124
9125/* The instruction group alignment main loop. */
9126
b9b4428b 9127static void
92643d95 9128alpha_align_insns (unsigned int max_align,
9129 rtx (*next_group) (rtx, int *, int *),
9130 rtx (*next_nop) (int *))
b9b4428b 9131{
9132 /* ALIGN is the known alignment for the insn group. */
b53f315c 9133 unsigned int align;
b9b4428b 9134 /* OFS is the offset of the current insn in the insn group. */
9135 int ofs;
fd1ace94 9136 int prev_in_use, in_use, len, ldgp;
b9b4428b 9137 rtx i, next;
9138
9139 /* Let shorten branches care for assigning alignments to code labels. */
2efea8c0 9140 shorten_branches (get_insns ());
b9b4428b 9141
d815ce59 9142 if (align_functions < 4)
9143 align = 4;
eeca3ba1 9144 else if ((unsigned int) align_functions < max_align)
d815ce59 9145 align = align_functions;
9146 else
9147 align = max_align;
e2c8a34a 9148
b9b4428b 9149 ofs = prev_in_use = 0;
2efea8c0 9150 i = get_insns ();
b9b4428b 9151 if (GET_CODE (i) == NOTE)
9152 i = next_nonnote_insn (i);
9153
fd1ace94 9154 ldgp = alpha_function_needs_gp ? 8 : 0;
9155
b9b4428b 9156 while (i)
9157 {
b53f315c 9158 next = (*next_group) (i, &in_use, &len);
b9b4428b 9159
9160 /* When we see a label, resync alignment etc. */
9161 if (GET_CODE (i) == CODE_LABEL)
9162 {
b53f315c 9163 unsigned int new_align = 1 << label_to_alignment (i);
9164
b9b4428b 9165 if (new_align >= align)
9166 {
849674a3 9167 align = new_align < max_align ? new_align : max_align;
b9b4428b 9168 ofs = 0;
9169 }
b53f315c 9170
b9b4428b 9171 else if (ofs & (new_align-1))
9172 ofs = (ofs | (new_align-1)) + 1;
4d10b463 9173 gcc_assert (!len);
b9b4428b 9174 }
9175
 9176 /* Handle complex instructions specially. */
9177 else if (in_use == 0)
9178 {
9179 /* Asms will have length < 0. This is a signal that we have
9180 lost alignment knowledge. Assume, however, that the asm
9181 will not mis-align instructions. */
9182 if (len < 0)
9183 {
9184 ofs = 0;
9185 align = 4;
9186 len = 0;
9187 }
9188 }
9189
9190 /* If the known alignment is smaller than the recognized insn group,
9191 realign the output. */
1f0ce6a6 9192 else if ((int) align < len)
b9b4428b 9193 {
b53f315c 9194 unsigned int new_log_align = len > 8 ? 4 : 3;
943a1b57 9195 rtx prev, where;
b9b4428b 9196
943a1b57 9197 where = prev = prev_nonnote_insn (i);
b9b4428b 9198 if (!where || GET_CODE (where) != CODE_LABEL)
9199 where = i;
9200
943a1b57 9201 /* Can't realign between a call and its gp reload. */
9202 if (! (TARGET_EXPLICIT_RELOCS
9203 && prev && GET_CODE (prev) == CALL_INSN))
9204 {
9205 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9206 align = 1 << new_log_align;
9207 ofs = 0;
9208 }
b9b4428b 9209 }
9210
fd1ace94 9211 /* We may not insert padding inside the initial ldgp sequence. */
9212 else if (ldgp > 0)
9213 ldgp -= len;
9214
b9b4428b 9215 /* If the group won't fit in the same INT16 as the previous,
9216 we need to add padding to keep the group together. Rather
9217 than simply leaving the insn filling to the assembler, we
9218 can make use of the knowledge of what sorts of instructions
9219 were issued in the previous group to make sure that all of
9220 the added nops are really free. */
1f0ce6a6 9221 else if (ofs + len > (int) align)
b9b4428b 9222 {
9223 int nop_count = (align - ofs) / 4;
9224 rtx where;
9225
efee20da 9226 /* Insert nops before labels, branches, and calls to truly merge
943a1b57 9227 the execution of the nops with the previous instruction group. */
b9b4428b 9228 where = prev_nonnote_insn (i);
849674a3 9229 if (where)
b9b4428b 9230 {
849674a3 9231 if (GET_CODE (where) == CODE_LABEL)
b9b4428b 9232 {
849674a3 9233 rtx where2 = prev_nonnote_insn (where);
9234 if (where2 && GET_CODE (where2) == JUMP_INSN)
9235 where = where2;
b9b4428b 9236 }
943a1b57 9237 else if (GET_CODE (where) == INSN)
849674a3 9238 where = i;
b9b4428b 9239 }
849674a3 9240 else
9241 where = i;
9242
9e7454d0 9243 do
849674a3 9244 emit_insn_before ((*next_nop)(&prev_in_use), where);
b9b4428b 9245 while (--nop_count);
9246 ofs = 0;
9247 }
9248
9249 ofs = (ofs + len) & (align - 1);
9250 prev_in_use = in_use;
9251 i = next;
9252 }
9253}
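/* A worked example for the padding arithmetic above (numbers assumed,
   not taken from a real compilation): with MAX_ALIGN == 16 on EV5,
   suppose earlier groups left OFS == 8 and the next group has
   LEN == 12.  Since 8 + 12 > 16, the group would straddle an INT16,
   so we emit (16 - 8) / 4 == 2 nops, chosen by alphaev5_next_nop so
   that they fill slots the previous group left empty, and the new
   group restarts at OFS == 0.  */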
b9b4428b 9254\f
35a3065a 9255/* Machine dependent reorg pass. */
18adf4f6 9256
2efea8c0 9257static void
92643d95 9258alpha_reorg (void)
18adf4f6 9259{
b9b4428b 9260 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
2efea8c0 9261 alpha_handle_trap_shadows ();
b9b4428b 9262
b9b4428b 9263 /* Due to the number of extra trapb insns, don't bother fixing up
9264 alignment when trap precision is instruction. Moreover, we can
b53f315c 9265 only do our job when sched2 is run. */
b9b4428b 9266 if (optimize && !optimize_size
9267 && alpha_tp != ALPHA_TP_INSN
9268 && flag_schedule_insns_after_reload)
9269 {
fb64edde 9270 if (alpha_tune == PROCESSOR_EV4)
2efea8c0 9271 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
fb64edde 9272 else if (alpha_tune == PROCESSOR_EV5)
2efea8c0 9273 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
b9b4428b 9274 }
18adf4f6 9275}
18adf4f6 9276\f
92c473b8 9277#if !TARGET_ABI_UNICOSMK
9278
9279#ifdef HAVE_STAMP_H
9280#include <stamp.h>
9281#endif
9282
9283static void
9284alpha_file_start (void)
9285{
0fdc84d7 9286#ifdef OBJECT_FORMAT_ELF
9287 /* If emitting dwarf2 debug information, we cannot generate a .file
9288 directive to start the file, as it will conflict with dwarf2out
9289 file numbers. So it's only useful when emitting mdebug output. */
9290 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9291#endif
9292
92c473b8 9293 default_file_start ();
9294#ifdef MS_STAMP
bc964653 9295 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
92c473b8 9296#endif
9297
9298 fputs ("\t.set noreorder\n", asm_out_file);
9299 fputs ("\t.set volatile\n", asm_out_file);
9300 if (!TARGET_ABI_OPEN_VMS)
9301 fputs ("\t.set noat\n", asm_out_file);
9302 if (TARGET_EXPLICIT_RELOCS)
9303 fputs ("\t.set nomacro\n", asm_out_file);
9304 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
fb64edde 9305 {
9306 const char *arch;
9307
9308 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9309 arch = "ev6";
9310 else if (TARGET_MAX)
9311 arch = "pca56";
9312 else if (TARGET_BWX)
9313 arch = "ev56";
9314 else if (alpha_cpu == PROCESSOR_EV5)
9315 arch = "ev5";
9316 else
9317 arch = "ev4";
9318
9319 fprintf (asm_out_file, "\t.arch %s\n", arch);
9320 }
92c473b8 9321}
9322#endif
9323
bbfbe351 9324#ifdef OBJECT_FORMAT_ELF
4e151b05 9325/* Since we don't have a .dynbss section, we should not allow global
9326 relocations in the .rodata section. */
9327
9328static int
9329alpha_elf_reloc_rw_mask (void)
9330{
9331 return flag_pic ? 3 : 2;
9332}
bbfbe351 9333
2f14b1f9 9334/* Return a section for X. The only special thing we do here is to
9335 honor small data. */
bbfbe351 9336
2f14b1f9 9337static section *
92643d95 9338alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9339 unsigned HOST_WIDE_INT align)
bbfbe351 9340{
9341 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
5910bb95 9342 /* ??? Consider using mergeable sdata sections. */
2f14b1f9 9343 return sdata_section;
bbfbe351 9344 else
2f14b1f9 9345 return default_elf_select_rtx_section (mode, x, align);
bbfbe351 9346}
9347
cc2af183 9348static unsigned int
9349alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9350{
9351 unsigned int flags = 0;
9352
9353 if (strcmp (name, ".sdata") == 0
9354 || strncmp (name, ".sdata.", 7) == 0
9355 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9356 || strcmp (name, ".sbss") == 0
9357 || strncmp (name, ".sbss.", 6) == 0
9358 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9359 flags = SECTION_SMALL;
9360
9361 flags |= default_section_type_flags (decl, name, reloc);
9362 return flags;
9363}
bbfbe351 9364#endif /* OBJECT_FORMAT_ELF */
9365\f
9de382d9 9366/* Structure to collect function names for final output in the link section. */
9367/* Note that items marked with GTY can't be ifdef'ed out. */
573aba85 9368
9369enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9de382d9 9370enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
573aba85 9371
9372struct alpha_links GTY(())
9373{
9de382d9 9374 int num;
573aba85 9375 rtx linkage;
9de382d9 9376 enum links_kind lkind;
9377 enum reloc_kind rkind;
9378};
9379
9380struct alpha_funcs GTY(())
9381{
9382 int num;
9383 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9384 links;
573aba85 9385};
9386
9387static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9de382d9 9388 splay_tree alpha_links_tree;
9389static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9390 splay_tree alpha_funcs_tree;
9391
9392static GTY(()) int alpha_funcs_num;
573aba85 9393
1467e953 9394#if TARGET_ABI_OPEN_VMS
8df4a58b 9395
0dbd1c74 9396/* Return the VMS argument type corresponding to MODE. */
8df4a58b 9397
0dbd1c74 9398enum avms_arg_type
92643d95 9399alpha_arg_type (enum machine_mode mode)
0dbd1c74 9400{
9401 switch (mode)
8df4a58b 9402 {
0dbd1c74 9403 case SFmode:
9404 return TARGET_FLOAT_VAX ? FF : FS;
9405 case DFmode:
9406 return TARGET_FLOAT_VAX ? FD : FT;
9407 default:
9408 return I64;
8df4a58b 9409 }
0dbd1c74 9410}
8df4a58b 9411
0dbd1c74 9412/* Return an rtx for an integer representing the VMS Argument Information
9413 register value. */
8df4a58b 9414
1dd6c958 9415rtx
92643d95 9416alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
0dbd1c74 9417{
9418 unsigned HOST_WIDE_INT regval = cum.num_args;
9419 int i;
8df4a58b 9420
0dbd1c74 9421 for (i = 0; i < 6; i++)
9422 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
8df4a58b 9423
0dbd1c74 9424 return GEN_INT (regval);
9425}
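/* A standalone sketch, not part of alpha.c: modeling the Argument
   Information register value built above.  Bits 0..7 hold the argument
   count; each 3-bit field starting at bit 8 holds the avms_arg_type of
   one of the first six arguments.  The enum order (I64, FF, FD, FS, FT)
   is assumed from alpha.h.  */
#include <stdio.h>

enum avms_arg_type_sketch { I64_S, FF_S, FD_S, FS_S, FT_S };

static unsigned long
ai_value_sketch (unsigned num_args, const enum avms_arg_type_sketch t[6])
{
  unsigned long regval = num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((unsigned long) t[i]) << (i * 3 + 8);
  return regval;
}

int
main (void)
{
  /* foo (int, float) with VAX floats: an I64 slot then an FF slot.  */
  enum avms_arg_type_sketch t[6] = { I64_S, FF_S, I64_S, I64_S, I64_S, I64_S };

  /* Prints "AI = 0x802": count 2, FF (== 1) in the field at bit 11.  */
  printf ("AI = 0x%lx\n", ai_value_sketch (2, t));
  return 0;
}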
9426\f
8df4a58b 9427/* Make (or fake) .linkage entry for function call.
9428
57e47080 9429 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
8df4a58b 9430
57e47080 9431 Return a SYMBOL_REF rtx for the linkage. */
9432
9433rtx
92643d95 9434alpha_need_linkage (const char *name, int is_local)
8df4a58b 9435{
57e47080 9436 splay_tree_node node;
9437 struct alpha_links *al;
8df4a58b 9438
9439 if (name[0] == '*')
9440 name++;
9441
cf73d31f 9442 if (is_local)
9443 {
9de382d9 9444 struct alpha_funcs *cfaf;
9445
9446 if (!alpha_funcs_tree)
9447 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9448 splay_tree_compare_pointers);
9e7454d0 9449
9de382d9 9450 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
cf73d31f 9451
9452 cfaf->links = 0;
9453 cfaf->num = ++alpha_funcs_num;
9454
9455 splay_tree_insert (alpha_funcs_tree,
9456 (splay_tree_key) current_function_decl,
9457 (splay_tree_value) cfaf);
cf73d31f 9458 }
9459
9460 if (alpha_links_tree)
57e47080 9461 {
9462 /* Is this name already defined? */
8df4a58b 9463
cf73d31f 9464 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
57e47080 9465 if (node)
9466 {
9467 al = (struct alpha_links *) node->value;
9468 if (is_local)
9469 {
9470 /* Defined here but external assumed. */
cf73d31f 9471 if (al->lkind == KIND_EXTERN)
9472 al->lkind = KIND_LOCAL;
57e47080 9473 }
9474 else
9475 {
9476 /* Used here but unused assumed. */
cf73d31f 9477 if (al->lkind == KIND_UNUSED)
9478 al->lkind = KIND_LOCAL;
57e47080 9479 }
9480 return al->linkage;
9481 }
9482 }
9483 else
8482c296 9484 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8df4a58b 9485
573aba85 9486 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9487 name = ggc_strdup (name);
8df4a58b 9488
9489 /* Assume external if no definition. */
cf73d31f 9490 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
8df4a58b 9491
57e47080 9492 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
d2899e26 9493 get_identifier (name);
9494
57e47080 9495 /* Construct a SYMBOL_REF for us to call. */
9496 {
9497 size_t name_len = strlen (name);
44acf429 9498 char *linksym = alloca (name_len + 6);
57e47080 9499 linksym[0] = '$';
9500 memcpy (linksym + 1, name, name_len);
9501 memcpy (linksym + 1 + name_len, "..lk", 5);
44acf429 9502 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9503 ggc_alloc_string (linksym, name_len + 5));
57e47080 9504 }
9505
cf73d31f 9506 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
57e47080 9507 (splay_tree_value) al);
8df4a58b 9508
57e47080 9509 return al->linkage;
8df4a58b 9510}
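/* An illustrative sketch, not part of alpha.c: the linkage symbol built
   above is just the name bracketed by '$' and "..lk", so a call to foo
   goes through the linkage symbol "$foo..lk".  BUF must have room for
   strlen (name) + 6 bytes, matching the alloca above.  */
#include <string.h>

static void
make_linksym_sketch (char *buf, const char *name)
{
  size_t len = strlen (name);

  buf[0] = '$';
  memcpy (buf + 1, name, len);
  memcpy (buf + 1 + len, "..lk", 5);   /* Copies the trailing NUL too.  */
}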
9511
cf73d31f 9512rtx
92643d95 9513alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
cf73d31f 9514{
9515 splay_tree_node cfunnode;
9516 struct alpha_funcs *cfaf;
9517 struct alpha_links *al;
9518 const char *name = XSTR (linkage, 0);
9519
9520 cfaf = (struct alpha_funcs *) 0;
9521 al = (struct alpha_links *) 0;
9522
9523 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9524 cfaf = (struct alpha_funcs *) cfunnode->value;
9525
9526 if (cfaf->links)
9527 {
9528 splay_tree_node lnode;
9529
9530 /* Is this name already defined? */
9531
9532 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9533 if (lnode)
9534 al = (struct alpha_links *) lnode->value;
9535 }
9536 else
9de382d9 9537 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
cf73d31f 9538
9539 if (!al)
9540 {
9541 size_t name_len;
9542 size_t buflen;
9543 char buf [512];
9544 char *linksym;
9545 splay_tree_node node = 0;
9546 struct alpha_links *anl;
9547
9548 if (name[0] == '*')
9549 name++;
9550
9551 name_len = strlen (name);
9552
9de382d9 9553 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
cf73d31f 9554 al->num = cfaf->num;
9555
9556 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9557 if (node)
9558 {
9559 anl = (struct alpha_links *) node->value;
9560 al->lkind = anl->lkind;
9561 }
9562
9563 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9564 buflen = strlen (buf);
9565 linksym = alloca (buflen + 1);
9566 memcpy (linksym, buf, buflen + 1);
9567
9568 al->linkage = gen_rtx_SYMBOL_REF
9569 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9570
9571 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9572 (splay_tree_value) al);
9573 }
9574
9575 if (rflag)
9576 al->rkind = KIND_CODEADDR;
9577 else
9578 al->rkind = KIND_LINKAGE;
9e7454d0 9579
cf73d31f 9580 if (lflag)
9581 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9582 else
9583 return al->linkage;
9584}
9585
57e47080 9586static int
92643d95 9587alpha_write_one_linkage (splay_tree_node node, void *data)
57e47080 9588{
0d95286f 9589 const char *const name = (const char *) node->key;
cf73d31f 9590 struct alpha_links *link = (struct alpha_links *) node->value;
57e47080 9591 FILE *stream = (FILE *) data;
9592
cf73d31f 9593 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9594 if (link->rkind == KIND_CODEADDR)
57e47080 9595 {
cf73d31f 9596 if (link->lkind == KIND_LOCAL)
9597 {
9598 /* Local and used */
9599 fprintf (stream, "\t.quad %s..en\n", name);
9600 }
9601 else
9602 {
9603 /* External and used, request code address. */
9604 fprintf (stream, "\t.code_address %s\n", name);
9605 }
57e47080 9606 }
9607 else
9608 {
cf73d31f 9609 if (link->lkind == KIND_LOCAL)
9610 {
9611 /* Local and used, build linkage pair. */
9612 fprintf (stream, "\t.quad %s..en\n", name);
9613 fprintf (stream, "\t.quad %s\n", name);
9614 }
9615 else
9616 {
9617 /* External and used, request linkage pair. */
9618 fprintf (stream, "\t.linkage %s\n", name);
9619 }
57e47080 9620 }
9621
9622 return 0;
9623}
8df4a58b 9624
cf73d31f 9625static void
92643d95 9626alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
8df4a58b 9627{
cf73d31f 9628 splay_tree_node node;
9629 struct alpha_funcs *func;
9630
2f14b1f9 9631 fprintf (stream, "\t.link\n");
cf73d31f 9632 fprintf (stream, "\t.align 3\n");
2f14b1f9 9633 in_section = NULL;
9634
cf73d31f 9635 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9636 func = (struct alpha_funcs *) node->value;
9637
9638 fputs ("\t.name ", stream);
9639 assemble_name (stream, funname);
9640 fputs ("..na\n", stream);
9641 ASM_OUTPUT_LABEL (stream, funname);
9642 fprintf (stream, "\t.pdesc ");
9643 assemble_name (stream, funname);
9644 fprintf (stream, "..en,%s\n",
9645 alpha_procedure_type == PT_STACK ? "stack"
9646 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9647
9648 if (func->links)
c64a8830 9649 {
cf73d31f 9650 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9651 /* splay_tree_delete (func->links); */
c64a8830 9652 }
8df4a58b 9653}
9654
2cb4ac60 9655/* Given a decl, a section name, and whether the decl initializer
9656 has relocs, choose attributes for the section. */
9657
9658#define SECTION_VMS_OVERLAY SECTION_FORGET
c64a8830 9659#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9660#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
2cb4ac60 9661
9662static unsigned int
92643d95 9663vms_section_type_flags (tree decl, const char *name, int reloc)
2cb4ac60 9664{
9665 unsigned int flags = default_section_type_flags (decl, name, reloc);
9666
e3c541f0 9667 if (decl && DECL_ATTRIBUTES (decl)
9668 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
2cb4ac60 9669 flags |= SECTION_VMS_OVERLAY;
c64a8830 9670 if (decl && DECL_ATTRIBUTES (decl)
9671 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9672 flags |= SECTION_VMS_GLOBAL;
9673 if (decl && DECL_ATTRIBUTES (decl)
9674 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9675 flags |= SECTION_VMS_INITIALIZE;
2cb4ac60 9676
9677 return flags;
9678}
9679
9680/* Switch to an arbitrary section NAME with attributes as specified
9681 by FLAGS. ALIGN specifies any known alignment requirements for
9682 the section; 0 if the default should be used. */
9683
9684static void
537cd941 9685vms_asm_named_section (const char *name, unsigned int flags,
9686 tree decl ATTRIBUTE_UNUSED)
2cb4ac60 9687{
c64a8830 9688 fputc ('\n', asm_out_file);
9689 fprintf (asm_out_file, ".section\t%s", name);
2cb4ac60 9690
9691 if (flags & SECTION_VMS_OVERLAY)
c64a8830 9692 fprintf (asm_out_file, ",OVR");
9693 if (flags & SECTION_VMS_GLOBAL)
9694 fprintf (asm_out_file, ",GBL");
9695 if (flags & SECTION_VMS_INITIALIZE)
9696 fprintf (asm_out_file, ",NOMOD");
9697 if (flags & SECTION_DEBUG)
9698 fprintf (asm_out_file, ",NOWRT");
9699
9700 fputc ('\n', asm_out_file);
2cb4ac60 9701}
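/* An illustrative example, with assumed source code rather than anything
   taken from alpha.c: given the VMS attributes recognized by
   vms_section_type_flags above, a declaration such as

     int shared_buf[64] __attribute__ ((global, section ("MYDATA")));

   reaches vms_asm_named_section with SECTION_VMS_GLOBAL set and is
   announced as

     .section	MYDATA,GBL  */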
9702
01d15dc5 9703/* Record an element in the table of global constructors. SYMBOL is
9704 a SYMBOL_REF of the function to be called; PRIORITY is a number
9e7454d0 9705 between 0 and MAX_INIT_PRIORITY.
01d15dc5 9706
9707 Differs from default_ctors_section_asm_out_constructor in that the
9708 width of the .ctors entry is always 64 bits, rather than the 32 bits
9709 used by a normal pointer. */
9710
9711static void
92643d95 9712vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
01d15dc5 9713{
2f14b1f9 9714 switch_to_section (ctors_section);
09d688ff 9715 assemble_align (BITS_PER_WORD);
9716 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
01d15dc5 9717}
9718
9719static void
92643d95 9720vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
01d15dc5 9721{
2f14b1f9 9722 switch_to_section (dtors_section);
09d688ff 9723 assemble_align (BITS_PER_WORD);
9724 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
01d15dc5 9725}
8df4a58b 9726#else
9727
57e47080 9728rtx
92643d95 9729alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9730 int is_local ATTRIBUTE_UNUSED)
8df4a58b 9731{
57e47080 9732 return NULL_RTX;
8df4a58b 9733}
9734
cf73d31f 9735rtx
92643d95 9736alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9737 tree cfundecl ATTRIBUTE_UNUSED,
9738 int lflag ATTRIBUTE_UNUSED,
9739 int rflag ATTRIBUTE_UNUSED)
cf73d31f 9740{
9741 return NULL_RTX;
9742}
9743
1467e953 9744#endif /* TARGET_ABI_OPEN_VMS */
9caef960 9745\f
9746#if TARGET_ABI_UNICOSMK
9747
0336f0f0 9748/* This evaluates to true if we do not know how to pass TYPE solely in
9749 registers. This is the case for all arguments that do not fit in two
9750 registers. */
9751
9752static bool
9753unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9754{
9755 if (type == NULL)
9756 return false;
9757
9758 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9759 return true;
9760 if (TREE_ADDRESSABLE (type))
9761 return true;
9762
9763 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9764}
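/* Illustrative examples for the two-register limit above, assuming
   ALPHA_ARG_SIZE counts 8-byte argument words:

     struct s16 { long a, b; };      two words, still passed in registers
     struct s24 { long a, b, c; };   three words, forced onto the stack

   Variable-sized and TREE_ADDRESSABLE types go to the stack as well.  */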
9765
9caef960 9766/* Define the offset between two registers, one to be eliminated, and the
9767 other its replacement, at the start of a routine. */
9768
9769int
92643d95 9770unicosmk_initial_elimination_offset (int from, int to)
9caef960 9771{
9772 int fixed_size;
9e7454d0 9773
9caef960 9774 fixed_size = alpha_sa_size();
9775 if (fixed_size != 0)
9776 fixed_size += 48;
9777
9778 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9e7454d0 9779 return -fixed_size;
9caef960 9780 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9781 return 0;
9782 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9783 return (ALPHA_ROUND (current_function_outgoing_args_size)
9784 + ALPHA_ROUND (get_frame_size()));
9785 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9786 return (ALPHA_ROUND (fixed_size)
9e7454d0 9787 + ALPHA_ROUND (get_frame_size()
9caef960 9788 + current_function_outgoing_args_size));
9789 else
4d10b463 9790 gcc_unreachable ();
9caef960 9791}
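/* A worked example with assumed numbers, not from a real compilation:
   if alpha_sa_size () == 16, the fixed area is 16 + 48 == 64 bytes.
   With get_frame_size () == 32 and 16 bytes of outgoing arguments
   (all multiples of 16, so ALPHA_ROUND changes nothing), the offsets
   above come out as

     FRAME_POINTER -> HARD_FRAME_POINTER : -64
     FRAME_POINTER -> STACK_POINTER      : 16 + 32 == 48
     ARG_POINTER   -> STACK_POINTER      : 64 + (32 + 16) == 112  */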
9792
9793/* Output the module name for .ident and .end directives. We have to strip
 9794 directories and make sure that the module name starts with a letter
9795 or '$'. */
9796
9797static void
92643d95 9798unicosmk_output_module_name (FILE *file)
9caef960 9799{
8789d51c 9800 const char *name = lbasename (main_input_filename);
9801 unsigned len = strlen (name);
9802 char *clean_name = alloca (len + 2);
9803 char *ptr = clean_name;
9e7454d0 9804
9caef960 9805 /* CAM only accepts module names that start with a letter or '$'. We
9806 prefix the module name with a '$' if necessary. */
9807
9808 if (!ISALPHA (*name))
8789d51c 9809 *ptr++ = '$';
9810 memcpy (ptr, name, len + 1);
9811 clean_symbol_name (clean_name);
9812 fputs (clean_name, file);
9caef960 9813}
9814
92643d95 9815/* Output the definition of a common variable. */
9caef960 9816
92643d95 9817void
9818unicosmk_output_common (FILE *file, const char *name, int size, int align)
9caef960 9819{
92643d95 9820 tree name_tree;
9821 printf ("T3E__: common %s\n", name);
9caef960 9822
2f14b1f9 9823 in_section = NULL;
9caef960 9824 fputs ("\t.endp\n\n\t.psect ", file);
 9825 assemble_name (file, name);
 9826 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
 9827 fprintf (file, "\t.byte\t0:%d\n", size);
9828
9829 /* Mark the symbol as defined in this module. */
9830 name_tree = get_identifier (name);
9831 TREE_ASM_WRITTEN (name_tree) = 1;
9832}
9833
9834#define SECTION_PUBLIC SECTION_MACH_DEP
9835#define SECTION_MAIN (SECTION_PUBLIC << 1)
9836static int current_section_align;
9837
2f14b1f9 9838/* A get_unnamed_section callback for switching to the text section. */
9839
9840static void
9841unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9842{
9843 static int count = 0;
9844 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9845}
9846
9847/* A get_unnamed_section callback for switching to the data section. */
9848
9849static void
9850unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9851{
9852 static int count = 1;
9853 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9854}
9855
9856/* Implement TARGET_ASM_INIT_SECTIONS.
9857
9858 The Cray assembler is really weird with respect to sections. It has only
9859 named sections and you can't reopen a section once it has been closed.
9860 This means that we have to generate unique names whenever we want to
9861 reenter the text or the data section. */
9862
9863static void
9864unicosmk_init_sections (void)
9865{
9866 text_section = get_unnamed_section (SECTION_CODE,
9867 unicosmk_output_text_section_asm_op,
9868 NULL);
9869 data_section = get_unnamed_section (SECTION_WRITE,
9870 unicosmk_output_data_section_asm_op,
9871 NULL);
9872 readonly_data_section = data_section;
9873}
9874
9caef960 9875static unsigned int
92643d95 9876unicosmk_section_type_flags (tree decl, const char *name,
9877 int reloc ATTRIBUTE_UNUSED)
9caef960 9878{
9879 unsigned int flags = default_section_type_flags (decl, name, reloc);
9880
9881 if (!decl)
9882 return flags;
9883
9884 if (TREE_CODE (decl) == FUNCTION_DECL)
9885 {
9886 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9887 if (align_functions_log > current_section_align)
9888 current_section_align = align_functions_log;
9889
9890 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9891 flags |= SECTION_MAIN;
9892 }
9893 else
9894 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9895
9896 if (TREE_PUBLIC (decl))
9897 flags |= SECTION_PUBLIC;
9898
9899 return flags;
9900}
9901
9902/* Generate a section name for decl and associate it with the
9903 declaration. */
9904
52470889 9905static void
92643d95 9906unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9caef960 9907{
9908 const char *name;
9909 int len;
9910
4d10b463 9911 gcc_assert (decl);
9caef960 9912
9913 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
09a1f342 9914 name = default_strip_name_encoding (name);
9caef960 9915 len = strlen (name);
9916
9917 if (TREE_CODE (decl) == FUNCTION_DECL)
9918 {
9919 char *string;
9920
9e7454d0 9921 /* It is essential that we prefix the section name here because
9922 otherwise the section names generated for constructors and
9caef960 9923 destructors confuse collect2. */
9924
9925 string = alloca (len + 6);
9926 sprintf (string, "code@%s", name);
9927 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9928 }
9929 else if (TREE_PUBLIC (decl))
9930 DECL_SECTION_NAME (decl) = build_string (len, name);
9931 else
9932 {
9933 char *string;
9934
9935 string = alloca (len + 6);
9936 sprintf (string, "data@%s", name);
9937 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9938 }
9939}
9940
9941/* Switch to an arbitrary section NAME with attributes as specified
9942 by FLAGS. ALIGN specifies any known alignment requirements for
9943 the section; 0 if the default should be used. */
9944
9945static void
537cd941 9946unicosmk_asm_named_section (const char *name, unsigned int flags,
9947 tree decl ATTRIBUTE_UNUSED)
9caef960 9948{
9949 const char *kind;
9950
9951 /* Close the previous section. */
9952
9953 fputs ("\t.endp\n\n", asm_out_file);
9954
9955 /* Find out what kind of section we are opening. */
9956
9957 if (flags & SECTION_MAIN)
9958 fputs ("\t.start\tmain\n", asm_out_file);
9959
9960 if (flags & SECTION_CODE)
9961 kind = "code";
9962 else if (flags & SECTION_PUBLIC)
9963 kind = "common";
9964 else
9965 kind = "data";
9966
9967 if (current_section_align != 0)
9968 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9969 current_section_align, kind);
9970 else
9971 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9972}
9973
9974static void
92643d95 9975unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9caef960 9976{
9977 if (DECL_P (decl)
9978 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
52470889 9979 unicosmk_unique_section (decl, 0);
9caef960 9980}
9981
9982/* Output an alignment directive. We have to use the macro 'gcc@code@align'
 9983 in code sections because .align fills unused space with zeroes. */
9e7454d0 9984
9caef960 9985void
92643d95 9986unicosmk_output_align (FILE *file, int align)
9caef960 9987{
9988 if (inside_function)
9989 fprintf (file, "\tgcc@code@align\t%d\n", align);
9990 else
9991 fprintf (file, "\t.align\t%d\n", align);
9992}
9993
9994/* Add a case vector to the current function's list of deferred case
9995 vectors. Case vectors have to be put into a separate section because CAM
9996 does not allow data definitions in code sections. */
9997
9998void
92643d95 9999unicosmk_defer_case_vector (rtx lab, rtx vec)
9caef960 10000{
10001 struct machine_function *machine = cfun->machine;
9e7454d0 10002
9caef960 10003 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10004 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9e7454d0 10005 machine->addr_list);
9caef960 10006}
10007
10008/* Output a case vector. */
10009
10010static void
92643d95 10011unicosmk_output_addr_vec (FILE *file, rtx vec)
9caef960 10012{
10013 rtx lab = XEXP (vec, 0);
10014 rtx body = XEXP (vec, 1);
10015 int vlen = XVECLEN (body, 0);
10016 int idx;
10017
805e22b2 10018 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9caef960 10019
10020 for (idx = 0; idx < vlen; idx++)
10021 {
10022 ASM_OUTPUT_ADDR_VEC_ELT
10023 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10024 }
10025}
10026
10027/* Output current function's deferred case vectors. */
10028
10029static void
92643d95 10030unicosmk_output_deferred_case_vectors (FILE *file)
9caef960 10031{
10032 struct machine_function *machine = cfun->machine;
10033 rtx t;
10034
10035 if (machine->addr_list == NULL_RTX)
10036 return;
10037
2f14b1f9 10038 switch_to_section (data_section);
9caef960 10039 for (t = machine->addr_list; t; t = XEXP (t, 1))
10040 unicosmk_output_addr_vec (file, XEXP (t, 0));
10041}
10042
92643d95 10043/* Generate the name of the SSIB section for the current function. */
10044
10045#define SSIB_PREFIX "__SSIB_"
10046#define SSIB_PREFIX_LEN 7
10047
10048static const char *
10049unicosmk_ssib_name (void)
10050{
9e7454d0 10051 /* This is ok since CAM won't be able to deal with names longer than that
92643d95 10052 anyway. */
10053
10054 static char name[256];
10055
10056 rtx x;
10057 const char *fnname;
10058 int len;
10059
10060 x = DECL_RTL (cfun->decl);
4d10b463 10061 gcc_assert (GET_CODE (x) == MEM);
92643d95 10062 x = XEXP (x, 0);
4d10b463 10063 gcc_assert (GET_CODE (x) == SYMBOL_REF);
92643d95 10064 fnname = XSTR (x, 0);
10065
10066 len = strlen (fnname);
10067 if (len + SSIB_PREFIX_LEN > 255)
10068 len = 255 - SSIB_PREFIX_LEN;
10069
10070 strcpy (name, SSIB_PREFIX);
10071 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10072 name[len + SSIB_PREFIX_LEN] = 0;
10073
10074 return name;
10075}
10076
9e7454d0 10077/* Set up the dynamic subprogram information block (DSIB) and update the
10078 frame pointer register ($15) for subroutines which have a frame. If the
9caef960 10079 subroutine doesn't have a frame, simply increment $15. */
10080
10081static void
92643d95 10082unicosmk_gen_dsib (unsigned long *imaskP)
9caef960 10083{
b19d7ab1 10084 if (alpha_procedure_type == PT_STACK)
9caef960 10085 {
10086 const char *ssib_name;
10087 rtx mem;
10088
10089 /* Allocate 64 bytes for the DSIB. */
10090
10091 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10092 GEN_INT (-64))));
10093 emit_insn (gen_blockage ());
10094
10095 /* Save the return address. */
10096
10097 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10098 set_mem_alias_set (mem, alpha_sr_alias_set);
10099 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
df7d0d23 10100 (*imaskP) &= ~(1UL << REG_RA);
9caef960 10101
10102 /* Save the old frame pointer. */
10103
10104 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10105 set_mem_alias_set (mem, alpha_sr_alias_set);
10106 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
df7d0d23 10107 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
9caef960 10108
10109 emit_insn (gen_blockage ());
10110
10111 /* Store the SSIB pointer. */
10112
10113 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10114 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10115 set_mem_alias_set (mem, alpha_sr_alias_set);
10116
10117 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10118 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10119 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10120
10121 /* Save the CIW index. */
10122
10123 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10124 set_mem_alias_set (mem, alpha_sr_alias_set);
10125 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10126
10127 emit_insn (gen_blockage ());
10128
10129 /* Set the new frame pointer. */
10130
10131 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10132 stack_pointer_rtx, GEN_INT (64))));
10133
10134 }
10135 else
10136 {
10137 /* Increment the frame pointer register to indicate that we do not
10138 have a frame. */
10139
10140 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
bcd9bd66 10141 hard_frame_pointer_rtx, const1_rtx)));
9caef960 10142 }
10143}
10144
9caef960 10145/* Output the static subroutine information block for the current
10146 function. */
10147
10148static void
92643d95 10149unicosmk_output_ssib (FILE *file, const char *fnname)
9caef960 10150{
10151 int len;
10152 int i;
10153 rtx x;
10154 rtx ciw;
10155 struct machine_function *machine = cfun->machine;
10156
2f14b1f9 10157 in_section = NULL;
9caef960 10158 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10159 unicosmk_ssib_name ());
10160
10161 /* Some required stuff and the function name length. */
10162
10163 len = strlen (fnname);
10164 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10165
10166 /* Saved registers
10167 ??? We don't do that yet. */
10168
10169 fputs ("\t.quad\t0\n", file);
10170
10171 /* Function address. */
10172
10173 fputs ("\t.quad\t", file);
10174 assemble_name (file, fnname);
10175 putc ('\n', file);
10176
10177 fputs ("\t.quad\t0\n", file);
10178 fputs ("\t.quad\t0\n", file);
10179
10180 /* Function name.
10181 ??? We do it the same way Cray CC does it but this could be
10182 simplified. */
10183
 10184 for (i = 0; i < len; i++)
 10185 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
 10186 if ((len % 8) == 0)
10187 fputs ("\t.quad\t0\n", file);
10188 else
10189 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10190
10191 /* All call information words used in the function. */
10192
10193 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10194 {
10195 ciw = XEXP (x, 0);
9caef960 10196#if HOST_BITS_PER_WIDE_INT == 32
4840a03a 10197 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
9caef960 10198 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10199#else
4840a03a 10200 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
9caef960 10201#endif
9caef960 10202 }
10203}
10204
10205/* Add a call information word (CIW) to the list of the current function's
10206 CIWs and return its index.
10207
10208 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10209
10210rtx
92643d95 10211unicosmk_add_call_info_word (rtx x)
9caef960 10212{
10213 rtx node;
10214 struct machine_function *machine = cfun->machine;
10215
10216 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10217 if (machine->first_ciw == NULL_RTX)
10218 machine->first_ciw = node;
10219 else
10220 XEXP (machine->last_ciw, 1) = node;
10221
10222 machine->last_ciw = node;
10223 ++machine->ciw_count;
10224
10225 return GEN_INT (machine->ciw_count
35901471 10226 + strlen (current_function_name ())/8 + 5);
9caef960 10227}
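/* A worked example of the index computed above: the SSIB laid out by
   unicosmk_output_ssib above starts with five quadwords (length word,
   saved-register mask, function address and two zero words), followed
   by the function name padded to strlen (fnname) / 8 + 1 quadwords,
   and only then the CIWs.  For a function named "foobar" (length 6),
   the first CIW therefore sits at zero-based quadword offset
   5 + 1 + 0 == 6, which matches ciw_count + strlen (...) / 8 + 5 with
   ciw_count == 1.  */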
10228
9caef960 10229/* The Cray assembler doesn't accept extern declarations for symbols which
10230 are defined in the same file. We have to keep track of all global
10231 symbols which are referenced and/or defined in a source file and output
10232 extern declarations for those which are referenced but not defined at
 10233 the end of the file. */
10234
10235/* List of identifiers for which an extern declaration might have to be
10236 emitted. */
9de382d9 10237/* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9caef960 10238
10239struct unicosmk_extern_list
10240{
10241 struct unicosmk_extern_list *next;
10242 const char *name;
10243};
10244
10245static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10246
10247/* Output extern declarations which are required for every asm file. */
10248
10249static void
92643d95 10250unicosmk_output_default_externs (FILE *file)
9caef960 10251{
0d95286f 10252 static const char *const externs[] =
9caef960 10253 { "__T3E_MISMATCH" };
10254
10255 int i;
10256 int n;
10257
10258 n = ARRAY_SIZE (externs);
10259
10260 for (i = 0; i < n; i++)
10261 fprintf (file, "\t.extern\t%s\n", externs[i]);
10262}
10263
 10264/* Output extern declarations for global symbols which have been
10265 referenced but not defined. */
10266
10267static void
92643d95 10268unicosmk_output_externs (FILE *file)
9caef960 10269{
10270 struct unicosmk_extern_list *p;
10271 const char *real_name;
10272 int len;
10273 tree name_tree;
10274
10275 len = strlen (user_label_prefix);
10276 for (p = unicosmk_extern_head; p != 0; p = p->next)
10277 {
9e7454d0 10278 /* We have to strip the encoding and possibly remove user_label_prefix
9caef960 10279 from the identifier in order to handle -fleading-underscore and
10280 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
09a1f342 10281 real_name = default_strip_name_encoding (p->name);
9caef960 10282 if (len && p->name[0] == '*'
10283 && !memcmp (real_name, user_label_prefix, len))
10284 real_name += len;
9e7454d0 10285
9caef960 10286 name_tree = get_identifier (real_name);
10287 if (! TREE_ASM_WRITTEN (name_tree))
10288 {
10289 TREE_ASM_WRITTEN (name_tree) = 1;
10290 fputs ("\t.extern\t", file);
10291 assemble_name (file, p->name);
10292 putc ('\n', file);
10293 }
10294 }
10295}
9e7454d0 10296
9caef960 10297/* Record an extern. */
10298
10299void
92643d95 10300unicosmk_add_extern (const char *name)
9caef960 10301{
10302 struct unicosmk_extern_list *p;
10303
10304 p = (struct unicosmk_extern_list *)
92192583 10305 xmalloc (sizeof (struct unicosmk_extern_list));
9caef960 10306 p->next = unicosmk_extern_head;
10307 p->name = name;
10308 unicosmk_extern_head = p;
10309}
10310
10311/* The Cray assembler generates incorrect code if identifiers which
10312 conflict with register names are used as instruction operands. We have
10313 to replace such identifiers with DEX expressions. */
10314
10315/* Structure to collect identifiers which have been replaced by DEX
10316 expressions. */
9de382d9 10317/* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9caef960 10318
10319struct unicosmk_dex {
10320 struct unicosmk_dex *next;
10321 const char *name;
10322};
10323
9e7454d0 10324/* List of identifiers which have been replaced by DEX expressions. The DEX
9caef960 10325 number is determined by the position in the list. */
10326
9e7454d0 10327static struct unicosmk_dex *unicosmk_dex_list = NULL;
9caef960 10328
10329/* The number of elements in the DEX list. */
10330
10331static int unicosmk_dex_count = 0;
10332
10333/* Check if NAME must be replaced by a DEX expression. */
10334
10335static int
92643d95 10336unicosmk_special_name (const char *name)
9caef960 10337{
10338 if (name[0] == '*')
10339 ++name;
10340
10341 if (name[0] == '$')
10342 ++name;
10343
10344 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10345 return 0;
10346
10347 switch (name[1])
10348 {
10349 case '1': case '2':
10350 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10351
10352 case '3':
10353 return (name[2] == '\0'
10354 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10355
10356 default:
10357 return (ISDIGIT (name[1]) && name[2] == '\0');
10358 }
10359}
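/* A minimal sanity sketch, not part of alpha.c: names the predicate
   above treats as register names (r0-r31 / f0-f31, upper or lower
   case, optionally preceded by '*' or '$') versus names it passes
   through untouched.  */
#include <assert.h>

static void
check_special_names_sketch (void)
{
  assert (unicosmk_special_name ("r1"));     /* r1 */
  assert (unicosmk_special_name ("r29"));    /* r29 */
  assert (unicosmk_special_name ("$f31"));   /* '$' stripped, then f31 */
  assert (unicosmk_special_name ("R30"));    /* upper case is accepted */
  assert (! unicosmk_special_name ("r32"));  /* no such register */
  assert (! unicosmk_special_name ("f311")); /* too many digits */
  assert (! unicosmk_special_name ("foo"));  /* ordinary identifier */
}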
10360
10361/* Return the DEX number if X must be replaced by a DEX expression and 0
10362 otherwise. */
10363
10364static int
92643d95 10365unicosmk_need_dex (rtx x)
9caef960 10366{
10367 struct unicosmk_dex *dex;
10368 const char *name;
10369 int i;
9e7454d0 10370
9caef960 10371 if (GET_CODE (x) != SYMBOL_REF)
10372 return 0;
10373
 10374 name = XSTR (x, 0);
10375 if (! unicosmk_special_name (name))
10376 return 0;
10377
10378 i = unicosmk_dex_count;
10379 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10380 {
10381 if (! strcmp (name, dex->name))
10382 return i;
10383 --i;
10384 }
9e7454d0 10385
92192583 10386 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9caef960 10387 dex->name = name;
10388 dex->next = unicosmk_dex_list;
10389 unicosmk_dex_list = dex;
10390
10391 ++unicosmk_dex_count;
10392 return unicosmk_dex_count;
10393}
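/* A worked example of the numbering above: registering "r1", then
   "f2", then "r30" yields DEX numbers 1, 2 and 3.  The list is kept
   newest-first, so a later lookup of "r1" starts with
   i == unicosmk_dex_count (3), decrements past "r30" and "f2", and
   returns 1 again; the number is the name's position counted from the
   oldest end of the list, which is the order unicosmk_output_dex
   below walks when printing the DEX definitions.  */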
10394
10395/* Output the DEX definitions for this file. */
10396
10397static void
92643d95 10398unicosmk_output_dex (FILE *file)
9caef960 10399{
10400 struct unicosmk_dex *dex;
10401 int i;
10402
10403 if (unicosmk_dex_list == NULL)
10404 return;
10405
10406 fprintf (file, "\t.dexstart\n");
10407
10408 i = unicosmk_dex_count;
10409 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10410 {
10411 fprintf (file, "\tDEX (%d) = ", i);
10412 assemble_name (file, dex->name);
10413 putc ('\n', file);
10414 --i;
10415 }
9e7454d0 10416
9caef960 10417 fprintf (file, "\t.dexend\n");
10418}
10419
92643d95 10420/* Output text to appear at the beginning of an assembler file. */
10421
9e7454d0 10422static void
92c473b8 10423unicosmk_file_start (void)
92643d95 10424{
10425 int i;
10426
92c473b8 10427 fputs ("\t.ident\t", asm_out_file);
10428 unicosmk_output_module_name (asm_out_file);
10429 fputs ("\n\n", asm_out_file);
92643d95 10430
10431 /* The Unicos/Mk assembler uses different register names. Instead of trying
10432 to support them, we simply use micro definitions. */
10433
10434 /* CAM has different register names: rN for the integer register N and fN
10435 for the floating-point register N. Instead of trying to use these in
10436 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10437 register. */
10438
10439 for (i = 0; i < 32; ++i)
92c473b8 10440 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
92643d95 10441
10442 for (i = 0; i < 32; ++i)
92c473b8 10443 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
92643d95 10444
92c473b8 10445 putc ('\n', asm_out_file);
92643d95 10446
 10447 /* The .align directive fills unused space with zeroes, which does not work
10448 in code sections. We define the macro 'gcc@code@align' which uses nops
10449 instead. Note that it assumes that code sections always have the
10450 biggest possible alignment since . refers to the current offset from
10451 the beginning of the section. */
10452
92c473b8 10453 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10454 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10455 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10456 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10457 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10458 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10459 fputs ("\t.endr\n", asm_out_file);
10460 fputs ("\t.endif\n", asm_out_file);
10461 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
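  /* A worked example of the macro just emitted (offsets assumed):
     "gcc@code@align 4" at byte offset 20 of a code section gives
     gcc@n@bytes == 16 and gcc@here == 20 % 16 == 4, so the .repeat
     block emits (16 - 4) / 4 == 3 "bis r31,r31,r31" nops to reach
     the next 16-byte boundary.  */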
92643d95 10462
10463 /* Output extern declarations which should always be visible. */
92c473b8 10464 unicosmk_output_default_externs (asm_out_file);
92643d95 10465
10466 /* Open a dummy section. We always need to be inside a section for the
10467 section-switching code to work correctly.
10468 ??? This should be a module id or something like that. I still have to
10469 figure out what the rules for those are. */
92c473b8 10470 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
92643d95 10471}
10472
10473/* Output text to appear at the end of an assembler file. This includes all
10474 pending extern declarations and DEX expressions. */
10475
10476static void
10477unicosmk_file_end (void)
10478{
10479 fputs ("\t.endp\n\n", asm_out_file);
10480
10481 /* Output all pending externs. */
10482
10483 unicosmk_output_externs (asm_out_file);
10484
9e7454d0 10485 /* Output dex definitions used for functions whose names conflict with
92643d95 10486 register names. */
10487
10488 unicosmk_output_dex (asm_out_file);
10489
10490 fputs ("\t.end\t", asm_out_file);
10491 unicosmk_output_module_name (asm_out_file);
10492 putc ('\n', asm_out_file);
10493}
10494
9caef960 10495#else
10496
10497static void
92643d95 10498unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
9caef960 10499{}
10500
10501static void
92643d95 10502unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
9caef960 10503{}
10504
10505static void
92643d95 10506unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10507 const char * fnname ATTRIBUTE_UNUSED)
9caef960 10508{}
10509
10510rtx
92643d95 10511unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
9caef960 10512{
10513 return NULL_RTX;
10514}
10515
10516static int
92643d95 10517unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
9caef960 10518{
10519 return 0;
10520}
10521
10522#endif /* TARGET_ABI_UNICOSMK */
1f3233d1 10523
f2f543a3 10524static void
10525alpha_init_libfuncs (void)
10526{
10527 if (TARGET_ABI_UNICOSMK)
10528 {
10529 /* Prevent gcc from generating calls to __divsi3. */
10530 set_optab_libfunc (sdiv_optab, SImode, 0);
10531 set_optab_libfunc (udiv_optab, SImode, 0);
10532
10533 /* Use the functions provided by the system library
10534 for DImode integer division. */
10535 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10536 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10537 }
10538 else if (TARGET_ABI_OPEN_VMS)
10539 {
10540 /* Use the VMS runtime library functions for division and
10541 remainder. */
10542 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10543 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10544 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10545 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10546 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10547 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10548 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10549 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10550 }
10551}
10552
92643d95 10553\f
10554/* Initialize the GCC target structure. */
10555#if TARGET_ABI_OPEN_VMS
10556# undef TARGET_ATTRIBUTE_TABLE
10557# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10558# undef TARGET_SECTION_TYPE_FLAGS
10559# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10560#endif
10561
10562#undef TARGET_IN_SMALL_DATA_P
10563#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10564
10565#if TARGET_ABI_UNICOSMK
10566# undef TARGET_INSERT_ATTRIBUTES
10567# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10568# undef TARGET_SECTION_TYPE_FLAGS
10569# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10570# undef TARGET_ASM_UNIQUE_SECTION
10571# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
76aec42f 10572#undef TARGET_ASM_FUNCTION_RODATA_SECTION
10573#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
92643d95 10574# undef TARGET_ASM_GLOBALIZE_LABEL
10575# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
0336f0f0 10576# undef TARGET_MUST_PASS_IN_STACK
10577# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
92643d95 10578#endif
10579
10580#undef TARGET_ASM_ALIGNED_HI_OP
10581#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10582#undef TARGET_ASM_ALIGNED_DI_OP
10583#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10584
10585/* Default unaligned ops are provided for ELF systems. To get unaligned
10586 data for non-ELF systems, we have to turn off auto alignment. */
10587#ifndef OBJECT_FORMAT_ELF
10588#undef TARGET_ASM_UNALIGNED_HI_OP
10589#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10590#undef TARGET_ASM_UNALIGNED_SI_OP
10591#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10592#undef TARGET_ASM_UNALIGNED_DI_OP
10593#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10594#endif
10595
10596#ifdef OBJECT_FORMAT_ELF
4e151b05 10597#undef TARGET_ASM_RELOC_RW_MASK
10598#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
92643d95 10599#undef TARGET_ASM_SELECT_RTX_SECTION
10600#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
cc2af183 10601#undef TARGET_SECTION_TYPE_FLAGS
10602#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
92643d95 10603#endif
10604
10605#undef TARGET_ASM_FUNCTION_END_PROLOGUE
10606#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10607
f2f543a3 10608#undef TARGET_INIT_LIBFUNCS
10609#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10610
92c473b8 10611#if TARGET_ABI_UNICOSMK
10612#undef TARGET_ASM_FILE_START
10613#define TARGET_ASM_FILE_START unicosmk_file_start
10614#undef TARGET_ASM_FILE_END
10615#define TARGET_ASM_FILE_END unicosmk_file_end
10616#else
10617#undef TARGET_ASM_FILE_START
10618#define TARGET_ASM_FILE_START alpha_file_start
10619#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10620#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10621#endif
10622
92643d95 10623#undef TARGET_SCHED_ADJUST_COST
10624#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10625#undef TARGET_SCHED_ISSUE_RATE
10626#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
92643d95 10627#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10628#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10629 alpha_multipass_dfa_lookahead
10630
10631#undef TARGET_HAVE_TLS
10632#define TARGET_HAVE_TLS HAVE_AS_TLS
10633
10634#undef TARGET_INIT_BUILTINS
10635#define TARGET_INIT_BUILTINS alpha_init_builtins
10636#undef TARGET_EXPAND_BUILTIN
10637#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
849c7bc6 10638#undef TARGET_FOLD_BUILTIN
10639#define TARGET_FOLD_BUILTIN alpha_fold_builtin
92643d95 10640
10641#undef TARGET_FUNCTION_OK_FOR_SIBCALL
10642#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10643#undef TARGET_CANNOT_COPY_INSN_P
10644#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
c0da4391 10645#undef TARGET_CANNOT_FORCE_CONST_MEM
10646#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
92643d95 10647
10648#if TARGET_ABI_OSF
10649#undef TARGET_ASM_OUTPUT_MI_THUNK
10650#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10651#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10652#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
7955d282 10653#undef TARGET_STDARG_OPTIMIZE_HOOK
10654#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
92643d95 10655#endif
10656
10657#undef TARGET_RTX_COSTS
10658#define TARGET_RTX_COSTS alpha_rtx_costs
10659#undef TARGET_ADDRESS_COST
10660#define TARGET_ADDRESS_COST hook_int_rtx_0
10661
10662#undef TARGET_MACHINE_DEPENDENT_REORG
10663#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10664
dd9f3024 10665#undef TARGET_PROMOTE_FUNCTION_ARGS
10666#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10667#undef TARGET_PROMOTE_FUNCTION_RETURN
10668#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10669#undef TARGET_PROMOTE_PROTOTYPES
10670#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
dd9f3024 10671#undef TARGET_RETURN_IN_MEMORY
10672#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
b981d932 10673#undef TARGET_PASS_BY_REFERENCE
10674#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
dd9f3024 10675#undef TARGET_SETUP_INCOMING_VARARGS
10676#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10677#undef TARGET_STRICT_ARGUMENT_NAMING
10678#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10679#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10680#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
92d40bc4 10681#undef TARGET_SPLIT_COMPLEX_ARG
10682#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
de8f9b94 10683#undef TARGET_GIMPLIFY_VA_ARG_EXPR
10684#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
f054eb3c 10685#undef TARGET_ARG_PARTIAL_BYTES
10686#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
b2aef146 10687
0d96cd2b 10688#undef TARGET_SECONDARY_RELOAD
10689#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10690
b2aef146 10691#undef TARGET_SCALAR_MODE_SUPPORTED_P
10692#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
9e7454d0 10693#undef TARGET_VECTOR_MODE_SUPPORTED_P
10694#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
dd9f3024 10695
2e15d750 10696#undef TARGET_BUILD_BUILTIN_VA_LIST
10697#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10698
2ca3d426 10699/* The Alpha architecture does not require sequential consistency. See
10700 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10701 for an example of how it can be violated in practice. */
10702#undef TARGET_RELAXED_ORDERING
10703#define TARGET_RELAXED_ORDERING true
10704
fb64edde 10705#undef TARGET_DEFAULT_TARGET_FLAGS
10706#define TARGET_DEFAULT_TARGET_FLAGS \
10707 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10708#undef TARGET_HANDLE_OPTION
10709#define TARGET_HANDLE_OPTION alpha_handle_option
10710
4257b08a 10711#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
eddcdde1 10712#undef TARGET_MANGLE_TYPE
10713#define TARGET_MANGLE_TYPE alpha_mangle_type
4257b08a 10714#endif
10715
92643d95 10716struct gcc_target targetm = TARGET_INITIALIZER;
10717
10718\f
1f3233d1 10719#include "gt-alpha.h"