1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
63
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
66
67 static const char * const alpha_cpu_name[] =
68 {
69 "ev4", "ev5", "ev6"
70 };
71
72 /* Specify how accurate floating-point traps need to be. */
73
74 enum alpha_trap_precision alpha_tp;
75
76 /* Specify the floating-point rounding mode. */
77
78 enum alpha_fp_rounding_mode alpha_fprm;
79
80 /* Specify which things cause traps. */
81
82 enum alpha_fp_trap_mode alpha_fptm;
83
84 /* Save information from a "cmpxx" operation until the branch or scc is
85 emitted. */
86
87 struct alpha_compare alpha_compare;
88
89 /* Nonzero if inside of a function, because the Alpha asm can't
90 handle .files inside of functions. */
91
92 static int inside_function = FALSE;
93
94 /* The number of cycles of latency we should assume on memory reads. */
95
96 int alpha_memory_latency = 3;
97
98 /* Whether the function needs the GP. */
99
100 static int alpha_function_needs_gp;
101
102 /* The alias set for prologue/epilogue register save/restore. */
103
104 static GTY(()) alias_set_type alpha_sr_alias_set;
105
106 /* The assembler name of the current function. */
107
108 static const char *alpha_fnname;
109
110 /* The next explicit relocation sequence number. */
111 extern GTY(()) int alpha_next_sequence_number;
112 int alpha_next_sequence_number = 1;
113
114 /* The literal and gpdisp sequence numbers for this insn, as printed
115 by %# and %* respectively. */
116 extern GTY(()) int alpha_this_literal_sequence_number;
117 extern GTY(()) int alpha_this_gpdisp_sequence_number;
118 int alpha_this_literal_sequence_number;
119 int alpha_this_gpdisp_sequence_number;
120
121 /* Costs of various operations on the different architectures. */
122
123 struct alpha_rtx_cost_data
124 {
125 unsigned char fp_add;
126 unsigned char fp_mult;
127 unsigned char fp_div_sf;
128 unsigned char fp_div_df;
129 unsigned char int_mult_si;
130 unsigned char int_mult_di;
131 unsigned char int_shift;
132 unsigned char int_cmov;
133 unsigned short int_div;
134 };
135
136 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
137 {
138 { /* EV4 */
139 COSTS_N_INSNS (6), /* fp_add */
140 COSTS_N_INSNS (6), /* fp_mult */
141 COSTS_N_INSNS (34), /* fp_div_sf */
142 COSTS_N_INSNS (63), /* fp_div_df */
143 COSTS_N_INSNS (23), /* int_mult_si */
144 COSTS_N_INSNS (23), /* int_mult_di */
145 COSTS_N_INSNS (2), /* int_shift */
146 COSTS_N_INSNS (2), /* int_cmov */
147 COSTS_N_INSNS (97), /* int_div */
148 },
149 { /* EV5 */
150 COSTS_N_INSNS (4), /* fp_add */
151 COSTS_N_INSNS (4), /* fp_mult */
152 COSTS_N_INSNS (15), /* fp_div_sf */
153 COSTS_N_INSNS (22), /* fp_div_df */
154 COSTS_N_INSNS (8), /* int_mult_si */
155 COSTS_N_INSNS (12), /* int_mult_di */
156 COSTS_N_INSNS (1) + 1, /* int_shift */
157 COSTS_N_INSNS (1), /* int_cmov */
158 COSTS_N_INSNS (83), /* int_div */
159 },
160 { /* EV6 */
161 COSTS_N_INSNS (4), /* fp_add */
162 COSTS_N_INSNS (4), /* fp_mult */
163 COSTS_N_INSNS (12), /* fp_div_sf */
164 COSTS_N_INSNS (15), /* fp_div_df */
165 COSTS_N_INSNS (7), /* int_mult_si */
166 COSTS_N_INSNS (7), /* int_mult_di */
167 COSTS_N_INSNS (1), /* int_shift */
168 COSTS_N_INSNS (2), /* int_cmov */
169 COSTS_N_INSNS (86), /* int_div */
170 },
171 };
172
173 /* Similar but tuned for code size instead of execution latency. The
174 extra +N is fractional cost tuning based on latency. It's used to
175 encourage use of cheaper insns like shift, but only if there's just
176 one of them. */
177
178 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
179 {
180 COSTS_N_INSNS (1), /* fp_add */
181 COSTS_N_INSNS (1), /* fp_mult */
182 COSTS_N_INSNS (1), /* fp_div_sf */
183 COSTS_N_INSNS (1) + 1, /* fp_div_df */
184 COSTS_N_INSNS (1) + 1, /* int_mult_si */
185 COSTS_N_INSNS (1) + 2, /* int_mult_di */
186 COSTS_N_INSNS (1), /* int_shift */
187 COSTS_N_INSNS (1), /* int_cmov */
188 COSTS_N_INSNS (6), /* int_div */
189 };
190
191 /* Get the number of args of a function in one of two ways. */
192 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
193 #define NUM_ARGS crtl->args.info.num_args
194 #else
195 #define NUM_ARGS crtl->args.info
196 #endif
197
198 #define REG_PV 27
199 #define REG_RA 26
200
201 /* Declarations of static functions. */
202 static struct machine_function *alpha_init_machine_status (void);
203 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
204
205 #if TARGET_ABI_OPEN_VMS
206 static void alpha_write_linkage (FILE *, const char *, tree);
207 #endif
208
209 static void unicosmk_output_deferred_case_vectors (FILE *);
210 static void unicosmk_gen_dsib (unsigned long *);
211 static void unicosmk_output_ssib (FILE *, const char *);
212 static int unicosmk_need_dex (rtx);
213 \f
214 /* Implement TARGET_HANDLE_OPTION. */
215
216 static bool
217 alpha_handle_option (size_t code, const char *arg, int value)
218 {
219 switch (code)
220 {
221 case OPT_mfp_regs:
222 if (value == 0)
223 target_flags |= MASK_SOFT_FP;
224 break;
225
226 case OPT_mieee:
227 case OPT_mieee_with_inexact:
228 target_flags |= MASK_IEEE_CONFORMANT;
229 break;
230
231 case OPT_mtls_size_:
232 if (value != 16 && value != 32 && value != 64)
233 error ("bad value %qs for -mtls-size switch", arg);
234 break;
235 }
236
237 return true;
238 }
239
240 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
241 /* Implement TARGET_MANGLE_TYPE. */
242
243 static const char *
244 alpha_mangle_type (const_tree type)
245 {
246 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
247 && TARGET_LONG_DOUBLE_128)
248 return "g";
249
250 /* For all other types, use normal C++ mangling. */
251 return NULL;
252 }
253 #endif
254
255 /* Parse target option strings. */
256
257 void
258 override_options (void)
259 {
260 static const struct cpu_table {
261 const char *const name;
262 const enum processor_type processor;
263 const int flags;
264 } cpu_table[] = {
265 { "ev4", PROCESSOR_EV4, 0 },
266 { "ev45", PROCESSOR_EV4, 0 },
267 { "21064", PROCESSOR_EV4, 0 },
268 { "ev5", PROCESSOR_EV5, 0 },
269 { "21164", PROCESSOR_EV5, 0 },
270 { "ev56", PROCESSOR_EV5, MASK_BWX },
271 { "21164a", PROCESSOR_EV5, MASK_BWX },
272 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
275 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
277 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
279 { 0, 0, 0 }
280 };
281
282 int i;
283
284 /* Unicos/Mk doesn't have shared libraries. */
285 if (TARGET_ABI_UNICOSMK && flag_pic)
286 {
287 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
288 (flag_pic > 1) ? "PIC" : "pic");
289 flag_pic = 0;
290 }
291
292 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
293 floating-point instructions. Make that the default for this target. */
294 if (TARGET_ABI_UNICOSMK)
295 alpha_fprm = ALPHA_FPRM_DYN;
296 else
297 alpha_fprm = ALPHA_FPRM_NORM;
298
299 alpha_tp = ALPHA_TP_PROG;
300 alpha_fptm = ALPHA_FPTM_N;
301
302 /* We cannot use su and sui qualifiers for conversion instructions on
303 Unicos/Mk. I'm not sure if this is due to assembler or hardware
304 limitations. Right now, we issue a warning if -mieee is specified
305 and then ignore it; eventually, we should either get it right or
306 disable the option altogether. */
307
308 if (TARGET_IEEE)
309 {
310 if (TARGET_ABI_UNICOSMK)
311 warning (0, "-mieee not supported on Unicos/Mk");
312 else
313 {
314 alpha_tp = ALPHA_TP_INSN;
315 alpha_fptm = ALPHA_FPTM_SU;
316 }
317 }
318
319 if (TARGET_IEEE_WITH_INEXACT)
320 {
321 if (TARGET_ABI_UNICOSMK)
322 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
323 else
324 {
325 alpha_tp = ALPHA_TP_INSN;
326 alpha_fptm = ALPHA_FPTM_SUI;
327 }
328 }
329
330 if (alpha_tp_string)
331 {
332 if (! strcmp (alpha_tp_string, "p"))
333 alpha_tp = ALPHA_TP_PROG;
334 else if (! strcmp (alpha_tp_string, "f"))
335 alpha_tp = ALPHA_TP_FUNC;
336 else if (! strcmp (alpha_tp_string, "i"))
337 alpha_tp = ALPHA_TP_INSN;
338 else
339 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
340 }
341
342 if (alpha_fprm_string)
343 {
344 if (! strcmp (alpha_fprm_string, "n"))
345 alpha_fprm = ALPHA_FPRM_NORM;
346 else if (! strcmp (alpha_fprm_string, "m"))
347 alpha_fprm = ALPHA_FPRM_MINF;
348 else if (! strcmp (alpha_fprm_string, "c"))
349 alpha_fprm = ALPHA_FPRM_CHOP;
350 else if (! strcmp (alpha_fprm_string,"d"))
351 alpha_fprm = ALPHA_FPRM_DYN;
352 else
353 error ("bad value %qs for -mfp-rounding-mode switch",
354 alpha_fprm_string);
355 }
356
357 if (alpha_fptm_string)
358 {
359 if (strcmp (alpha_fptm_string, "n") == 0)
360 alpha_fptm = ALPHA_FPTM_N;
361 else if (strcmp (alpha_fptm_string, "u") == 0)
362 alpha_fptm = ALPHA_FPTM_U;
363 else if (strcmp (alpha_fptm_string, "su") == 0)
364 alpha_fptm = ALPHA_FPTM_SU;
365 else if (strcmp (alpha_fptm_string, "sui") == 0)
366 alpha_fptm = ALPHA_FPTM_SUI;
367 else
368 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
369 }
370
371 if (alpha_cpu_string)
372 {
373 for (i = 0; cpu_table [i].name; i++)
374 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
375 {
376 alpha_tune = alpha_cpu = cpu_table [i].processor;
377 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
378 target_flags |= cpu_table [i].flags;
379 break;
380 }
381 if (! cpu_table [i].name)
382 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
383 }
384
385 if (alpha_tune_string)
386 {
387 for (i = 0; cpu_table [i].name; i++)
388 if (! strcmp (alpha_tune_string, cpu_table [i].name))
389 {
390 alpha_tune = cpu_table [i].processor;
391 break;
392 }
393 if (! cpu_table [i].name)
394 error ("bad value %qs for -mcpu switch", alpha_tune_string);
395 }
396
397 /* Do some sanity checks on the above options. */
398
399 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
400 {
401 warning (0, "trap mode not supported on Unicos/Mk");
402 alpha_fptm = ALPHA_FPTM_N;
403 }
404
405 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
406 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
407 {
408 warning (0, "fp software completion requires -mtrap-precision=i");
409 alpha_tp = ALPHA_TP_INSN;
410 }
411
412 if (alpha_cpu == PROCESSOR_EV6)
413 {
414 /* Except for EV6 pass 1 (not released), we always have precise
415 arithmetic traps. Which means we can do software completion
416 without minding trap shadows. */
417 alpha_tp = ALPHA_TP_PROG;
418 }
419
420 if (TARGET_FLOAT_VAX)
421 {
422 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
423 {
424 warning (0, "rounding mode not supported for VAX floats");
425 alpha_fprm = ALPHA_FPRM_NORM;
426 }
427 if (alpha_fptm == ALPHA_FPTM_SUI)
428 {
429 warning (0, "trap mode not supported for VAX floats");
430 alpha_fptm = ALPHA_FPTM_SU;
431 }
432 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
433 warning (0, "128-bit long double not supported for VAX floats");
434 target_flags &= ~MASK_LONG_DOUBLE_128;
435 }
436
437 {
438 char *end;
439 int lat;
440
441 if (!alpha_mlat_string)
442 alpha_mlat_string = "L1";
443
444 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
445 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
446 ;
447 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
448 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
449 && alpha_mlat_string[2] == '\0')
450 {
451 static int const cache_latency[][4] =
452 {
453 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
454 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
455 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
456 };
457
458 lat = alpha_mlat_string[1] - '0';
459 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
460 {
461 warning (0, "L%d cache latency unknown for %s",
462 lat, alpha_cpu_name[alpha_tune]);
463 lat = 3;
464 }
465 else
466 lat = cache_latency[alpha_tune][lat-1];
467 }
468 else if (! strcmp (alpha_mlat_string, "main"))
469 {
470 /* Most current memories have about 370ns latency. This is
471 a reasonable guess for a fast cpu. */
472 lat = 150;
473 }
474 else
475 {
476 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
477 lat = 3;
478 }
479
480 alpha_memory_latency = lat;
481 }
482
483 /* Default the definition of "small data" to 8 bytes. */
484 if (!g_switch_set)
485 g_switch_value = 8;
486
487 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
488 if (flag_pic == 1)
489 target_flags |= MASK_SMALL_DATA;
490 else if (flag_pic == 2)
491 target_flags &= ~MASK_SMALL_DATA;
492
493 /* Align labels and loops for optimal branching. */
494 /* ??? Kludge these by not doing anything if we don't optimize and also if
495 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
496 if (optimize > 0 && write_symbols != SDB_DEBUG)
497 {
498 if (align_loops <= 0)
499 align_loops = 16;
500 if (align_jumps <= 0)
501 align_jumps = 16;
502 }
503 if (align_functions <= 0)
504 align_functions = 16;
505
506 /* Acquire a unique set number for our register saves and restores. */
507 alpha_sr_alias_set = new_alias_set ();
508
509 /* Register variables and functions with the garbage collector. */
510
511 /* Set up function hooks. */
512 init_machine_status = alpha_init_machine_status;
513
514 /* Tell the compiler when we're using VAX floating point. */
515 if (TARGET_FLOAT_VAX)
516 {
517 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
518 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
519 REAL_MODE_FORMAT (TFmode) = NULL;
520 }
521
522 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
523 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
524 target_flags |= MASK_LONG_DOUBLE_128;
525 #endif
526
527 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
528 can be optimized to ap = __builtin_next_arg (0). */
529 if (TARGET_ABI_UNICOSMK)
530 targetm.expand_builtin_va_start = NULL;
531 }
532 \f
533 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
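/* For example, 0x00000000ffff0000 qualifies (every byte is 0x00 or 0xff,
   i.e. a value realizable as a zap/zapnot byte mask), while
   0x0000000000001f00 does not, since its second byte is 0x1f. */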
534
535 int
536 zap_mask (HOST_WIDE_INT value)
537 {
538 int i;
539
540 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
541 i++, value >>= 8)
542 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
543 return 0;
544
545 return 1;
546 }
547
548 /* Return true if OP is valid for a particular TLS relocation.
549 We are already guaranteed that OP is a CONST. */
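/* Concretely, OP is expected to look like
   (const (unspec [(symbol_ref ...)] UNSPEC_DTPREL)) or the UNSPEC_TPREL
   equivalent. */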
550
551 int
552 tls_symbolic_operand_1 (rtx op, int size, int unspec)
553 {
554 op = XEXP (op, 0);
555
556 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
557 return 0;
558 op = XVECEXP (op, 0, 0);
559
560 if (GET_CODE (op) != SYMBOL_REF)
561 return 0;
562
563 switch (SYMBOL_REF_TLS_MODEL (op))
564 {
565 case TLS_MODEL_LOCAL_DYNAMIC:
566 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
567 case TLS_MODEL_INITIAL_EXEC:
568 return unspec == UNSPEC_TPREL && size == 64;
569 case TLS_MODEL_LOCAL_EXEC:
570 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
571 default:
572 gcc_unreachable ();
573 }
574 }
575
576 /* Used by aligned_memory_operand and unaligned_memory_operand to
577 resolve what reload is going to do with OP if it's a register. */
578
579 rtx
580 resolve_reload_operand (rtx op)
581 {
582 if (reload_in_progress)
583 {
584 rtx tmp = op;
585 if (GET_CODE (tmp) == SUBREG)
586 tmp = SUBREG_REG (tmp);
587 if (REG_P (tmp)
588 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
589 {
590 op = reg_equiv_memory_loc[REGNO (tmp)];
591 if (op == 0)
592 return 0;
593 }
594 }
595 return op;
596 }
597
598 /* The scalar modes supported differ from the default check-what-c-supports
599 version in that sometimes TFmode is available even when long double
600 indicates only DFmode. On unicosmk, we have the situation that HImode
601 doesn't map to any C type, but of course we still support that. */
602
603 static bool
604 alpha_scalar_mode_supported_p (enum machine_mode mode)
605 {
606 switch (mode)
607 {
608 case QImode:
609 case HImode:
610 case SImode:
611 case DImode:
612 case TImode: /* via optabs.c */
613 return true;
614
615 case SFmode:
616 case DFmode:
617 return true;
618
619 case TFmode:
620 return TARGET_HAS_XFLOATING_LIBS;
621
622 default:
623 return false;
624 }
625 }
626
627 /* Alpha implements a couple of integer vector mode operations when
628 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
629 which allows the vectorizer to operate on e.g. move instructions,
630 or when expand_vector_operations can do something useful. */
631
632 static bool
633 alpha_vector_mode_supported_p (enum machine_mode mode)
634 {
635 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
636 }
637
638 /* Return 1 if this function can directly return via $26. */
639
640 int
641 direct_return (void)
642 {
643 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
644 && reload_completed
645 && alpha_sa_size () == 0
646 && get_frame_size () == 0
647 && crtl->outgoing_args_size == 0
648 && crtl->args.pretend_args_size == 0);
649 }
650
651 /* Return the ADDR_VEC associated with a tablejump insn. */
652
653 rtx
654 alpha_tablejump_addr_vec (rtx insn)
655 {
656 rtx tmp;
657
658 tmp = JUMP_LABEL (insn);
659 if (!tmp)
660 return NULL_RTX;
661 tmp = NEXT_INSN (tmp);
662 if (!tmp)
663 return NULL_RTX;
664 if (JUMP_P (tmp)
665 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
666 return PATTERN (tmp);
667 return NULL_RTX;
668 }
669
670 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
671
672 rtx
673 alpha_tablejump_best_label (rtx insn)
674 {
675 rtx jump_table = alpha_tablejump_addr_vec (insn);
676 rtx best_label = NULL_RTX;
677
678 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
679 there for edge frequency counts from profile data. */
680
681 if (jump_table)
682 {
683 int n_labels = XVECLEN (jump_table, 1);
684 int best_count = -1;
685 int i, j;
686
687 for (i = 0; i < n_labels; i++)
688 {
689 int count = 1;
690
691 for (j = i + 1; j < n_labels; j++)
692 if (XEXP (XVECEXP (jump_table, 1, i), 0)
693 == XEXP (XVECEXP (jump_table, 1, j), 0))
694 count++;
695
696 if (count > best_count)
697 best_count = count, best_label = XVECEXP (jump_table, 1, i);
698 }
699 }
700
701 return best_label ? best_label : const0_rtx;
702 }
703
704 /* Return the TLS model to use for SYMBOL. */
705
706 static enum tls_model
707 tls_symbolic_operand_type (rtx symbol)
708 {
709 enum tls_model model;
710
711 if (GET_CODE (symbol) != SYMBOL_REF)
712 return 0;
713 model = SYMBOL_REF_TLS_MODEL (symbol);
714
715 /* Local-exec with a 64-bit size is the same code as initial-exec. */
716 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
717 model = TLS_MODEL_INITIAL_EXEC;
718
719 return model;
720 }
721 \f
722 /* Return true if the function DECL will share the same GP as any
723 function in the current unit of translation. */
724
725 static bool
726 decl_has_samegp (const_tree decl)
727 {
728 /* Functions that are not local can be overridden, and thus may
729 not share the same gp. */
730 if (!(*targetm.binds_local_p) (decl))
731 return false;
732
733 /* If -msmall-data is in effect, assume that there is only one GP
734 for the module, and so any local symbol has this property. We
735 need explicit relocations to be able to enforce this for symbols
736 not defined in this unit of translation, however. */
737 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
738 return true;
739
740 /* Functions that are not external are defined in this UoT. */
741 /* ??? Irritatingly, static functions not yet emitted are still
742 marked "external". Apply this to non-static functions only. */
743 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
744 }
745
746 /* Return true if EXP should be placed in the small data section. */
747
748 static bool
749 alpha_in_small_data_p (const_tree exp)
750 {
751 /* We want to merge strings, so we never consider them small data. */
752 if (TREE_CODE (exp) == STRING_CST)
753 return false;
754
755 /* Functions are never in the small data area. Duh. */
756 if (TREE_CODE (exp) == FUNCTION_DECL)
757 return false;
758
759 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
760 {
761 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
762 if (strcmp (section, ".sdata") == 0
763 || strcmp (section, ".sbss") == 0)
764 return true;
765 }
766 else
767 {
768 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
769
770 /* If this is an incomplete type with size 0, then we can't put it
771 in sdata because it might be too big when completed. */
772 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
773 return true;
774 }
775
776 return false;
777 }
778
779 #if TARGET_ABI_OPEN_VMS
780 static bool
781 alpha_linkage_symbol_p (const char *symname)
782 {
783 int symlen = strlen (symname);
784
785 if (symlen > 4)
786 return strcmp (&symname [symlen - 4], "..lk") == 0;
787
788 return false;
789 }
790
791 #define LINKAGE_SYMBOL_REF_P(X) \
792 ((GET_CODE (X) == SYMBOL_REF \
793 && alpha_linkage_symbol_p (XSTR (X, 0))) \
794 || (GET_CODE (X) == CONST \
795 && GET_CODE (XEXP (X, 0)) == PLUS \
796 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
797 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
798 #endif
799
800 /* legitimate_address_p recognizes an RTL expression that is a valid
801 memory address for an instruction. The MODE argument is the
802 machine mode for the MEM expression that wants to use this address.
803
804 For Alpha, we have either a constant address or the sum of a
805 register and a constant address, or just a register. For DImode,
806 any of those forms can be surrounded with an AND that clears the
807 low-order three bits; this is an "unaligned" access. */
808
809 bool
810 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
811 {
812 /* If this is an ldq_u type address, discard the outer AND. */
813 if (mode == DImode
814 && GET_CODE (x) == AND
815 && CONST_INT_P (XEXP (x, 1))
816 && INTVAL (XEXP (x, 1)) == -8)
817 x = XEXP (x, 0);
818
819 /* Discard non-paradoxical subregs. */
820 if (GET_CODE (x) == SUBREG
821 && (GET_MODE_SIZE (GET_MODE (x))
822 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
823 x = SUBREG_REG (x);
824
825 /* Unadorned general registers are valid. */
826 if (REG_P (x)
827 && (strict
828 ? STRICT_REG_OK_FOR_BASE_P (x)
829 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
830 return true;
831
832 /* Constant addresses (i.e. +/- 32k) are valid. */
833 if (CONSTANT_ADDRESS_P (x))
834 return true;
835
836 #if TARGET_ABI_OPEN_VMS
837 if (LINKAGE_SYMBOL_REF_P (x))
838 return true;
839 #endif
840
841 /* Register plus a small constant offset is valid. */
842 if (GET_CODE (x) == PLUS)
843 {
844 rtx ofs = XEXP (x, 1);
845 x = XEXP (x, 0);
846
847 /* Discard non-paradoxical subregs. */
848 if (GET_CODE (x) == SUBREG
849 && (GET_MODE_SIZE (GET_MODE (x))
850 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
851 x = SUBREG_REG (x);
852
853 if (REG_P (x))
854 {
855 if (! strict
856 && NONSTRICT_REG_OK_FP_BASE_P (x)
857 && CONST_INT_P (ofs))
858 return true;
859 if ((strict
860 ? STRICT_REG_OK_FOR_BASE_P (x)
861 : NONSTRICT_REG_OK_FOR_BASE_P (x))
862 && CONSTANT_ADDRESS_P (ofs))
863 return true;
864 }
865 }
866
867 /* If we're managing explicit relocations, LO_SUM is valid, as are small
868 data symbols. Avoid explicit relocations of modes larger than word
869 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
870 else if (TARGET_EXPLICIT_RELOCS
871 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
872 {
873 if (small_symbolic_operand (x, Pmode))
874 return true;
875
876 if (GET_CODE (x) == LO_SUM)
877 {
878 rtx ofs = XEXP (x, 1);
879 x = XEXP (x, 0);
880
881 /* Discard non-paradoxical subregs. */
882 if (GET_CODE (x) == SUBREG
883 && (GET_MODE_SIZE (GET_MODE (x))
884 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
885 x = SUBREG_REG (x);
886
887 /* Must have a valid base register. */
888 if (! (REG_P (x)
889 && (strict
890 ? STRICT_REG_OK_FOR_BASE_P (x)
891 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
892 return false;
893
894 /* The symbol must be local. */
895 if (local_symbolic_operand (ofs, Pmode)
896 || dtp32_symbolic_operand (ofs, Pmode)
897 || tp32_symbolic_operand (ofs, Pmode))
898 return true;
899 }
900 }
901
902 return false;
903 }
904
905 /* Build the SYMBOL_REF for __tls_get_addr. */
906
907 static GTY(()) rtx tls_get_addr_libfunc;
908
909 static rtx
910 get_tls_get_addr (void)
911 {
912 if (!tls_get_addr_libfunc)
913 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
914 return tls_get_addr_libfunc;
915 }
916
917 /* Try machine-dependent ways of modifying an illegitimate address
918 to be legitimate. If we find one, return the new, valid address. */
919
920 rtx
921 alpha_legitimize_address (rtx x, rtx scratch, enum machine_mode mode)
922 {
923 HOST_WIDE_INT addend;
924
925 /* If the address is (plus reg const_int) and the CONST_INT is not a
926 valid offset, compute the high part of the constant and add it to
927 the register. Then our address is (plus temp low-part-const). */
928 if (GET_CODE (x) == PLUS
929 && REG_P (XEXP (x, 0))
930 && CONST_INT_P (XEXP (x, 1))
931 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
932 {
933 addend = INTVAL (XEXP (x, 1));
934 x = XEXP (x, 0);
935 goto split_addend;
936 }
937
938 /* If the address is (const (plus FOO const_int)), find the low-order
939 part of the CONST_INT. Then load FOO plus any high-order part of the
940 CONST_INT into a register. Our address is (plus reg low-part-const).
941 This is done to reduce the number of GOT entries. */
942 if (can_create_pseudo_p ()
943 && GET_CODE (x) == CONST
944 && GET_CODE (XEXP (x, 0)) == PLUS
945 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
946 {
947 addend = INTVAL (XEXP (XEXP (x, 0), 1));
948 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
949 goto split_addend;
950 }
951
952 /* If we have a (plus reg const), emit the load as in (2), then add
953 the two registers, and finally generate (plus reg low-part-const) as
954 our address. */
955 if (can_create_pseudo_p ()
956 && GET_CODE (x) == PLUS
957 && REG_P (XEXP (x, 0))
958 && GET_CODE (XEXP (x, 1)) == CONST
959 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
960 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
961 {
962 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
963 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
964 XEXP (XEXP (XEXP (x, 1), 0), 0),
965 NULL_RTX, 1, OPTAB_LIB_WIDEN);
966 goto split_addend;
967 }
968
969 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
970 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
971 around +/- 32k offset. */
972 if (TARGET_EXPLICIT_RELOCS
973 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
974 && symbolic_operand (x, Pmode))
975 {
976 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
977
978 switch (tls_symbolic_operand_type (x))
979 {
980 case TLS_MODEL_NONE:
981 break;
982
983 case TLS_MODEL_GLOBAL_DYNAMIC:
984 start_sequence ();
985
986 r0 = gen_rtx_REG (Pmode, 0);
987 r16 = gen_rtx_REG (Pmode, 16);
988 tga = get_tls_get_addr ();
989 dest = gen_reg_rtx (Pmode);
990 seq = GEN_INT (alpha_next_sequence_number++);
991
992 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
993 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
994 insn = emit_call_insn (insn);
995 RTL_CONST_CALL_P (insn) = 1;
996 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
997
998 insn = get_insns ();
999 end_sequence ();
1000
1001 emit_libcall_block (insn, dest, r0, x);
1002 return dest;
1003
1004 case TLS_MODEL_LOCAL_DYNAMIC:
1005 start_sequence ();
1006
1007 r0 = gen_rtx_REG (Pmode, 0);
1008 r16 = gen_rtx_REG (Pmode, 16);
1009 tga = get_tls_get_addr ();
1010 scratch = gen_reg_rtx (Pmode);
1011 seq = GEN_INT (alpha_next_sequence_number++);
1012
1013 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1014 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1015 insn = emit_call_insn (insn);
1016 RTL_CONST_CALL_P (insn) = 1;
1017 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1018
1019 insn = get_insns ();
1020 end_sequence ();
1021
1022 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1023 UNSPEC_TLSLDM_CALL);
1024 emit_libcall_block (insn, scratch, r0, eqv);
1025
1026 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1027 eqv = gen_rtx_CONST (Pmode, eqv);
1028
1029 if (alpha_tls_size == 64)
1030 {
1031 dest = gen_reg_rtx (Pmode);
1032 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1033 emit_insn (gen_adddi3 (dest, dest, scratch));
1034 return dest;
1035 }
1036 if (alpha_tls_size == 32)
1037 {
1038 insn = gen_rtx_HIGH (Pmode, eqv);
1039 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1040 scratch = gen_reg_rtx (Pmode);
1041 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1042 }
1043 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1044
1045 case TLS_MODEL_INITIAL_EXEC:
1046 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1047 eqv = gen_rtx_CONST (Pmode, eqv);
1048 tp = gen_reg_rtx (Pmode);
1049 scratch = gen_reg_rtx (Pmode);
1050 dest = gen_reg_rtx (Pmode);
1051
1052 emit_insn (gen_load_tp (tp));
1053 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1054 emit_insn (gen_adddi3 (dest, tp, scratch));
1055 return dest;
1056
1057 case TLS_MODEL_LOCAL_EXEC:
1058 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1059 eqv = gen_rtx_CONST (Pmode, eqv);
1060 tp = gen_reg_rtx (Pmode);
1061
1062 emit_insn (gen_load_tp (tp));
1063 if (alpha_tls_size == 32)
1064 {
1065 insn = gen_rtx_HIGH (Pmode, eqv);
1066 insn = gen_rtx_PLUS (Pmode, tp, insn);
1067 tp = gen_reg_rtx (Pmode);
1068 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1069 }
1070 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1071
1072 default:
1073 gcc_unreachable ();
1074 }
1075
1076 if (local_symbolic_operand (x, Pmode))
1077 {
1078 if (small_symbolic_operand (x, Pmode))
1079 return x;
1080 else
1081 {
1082 if (can_create_pseudo_p ())
1083 scratch = gen_reg_rtx (Pmode);
1084 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1085 gen_rtx_HIGH (Pmode, x)));
1086 return gen_rtx_LO_SUM (Pmode, scratch, x);
1087 }
1088 }
1089 }
1090
1091 return NULL;
1092
1093 split_addend:
1094 {
1095 HOST_WIDE_INT low, high;
1096
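/* For example, an addend of 0x1234abcd is split as low = -0x5433 and
   high = 0x12350000 (0x12350000 - 0x5433 == 0x1234abcd), leaving no
   residual addend to add separately. */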
1097 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1098 addend -= low;
1099 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1100 addend -= high;
1101
1102 if (addend)
1103 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1104 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1105 1, OPTAB_LIB_WIDEN);
1106 if (high)
1107 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1108 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1109 1, OPTAB_LIB_WIDEN);
1110
1111 return plus_constant (x, low);
1112 }
1113 }
1114
1115 /* Primarily this is required for TLS symbols, but given that our move
1116 patterns *ought* to be able to handle any symbol at any time, we
1117 should never be spilling symbolic operands to the constant pool, ever. */
1118
1119 static bool
1120 alpha_cannot_force_const_mem (rtx x)
1121 {
1122 enum rtx_code code = GET_CODE (x);
1123 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1124 }
1125
1126 /* We do not allow indirect calls to be optimized into sibling calls, nor
1127 can we allow a call to a function with a different GP to be optimized
1128 into a sibcall. */
1129
1130 static bool
1131 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1132 {
1133 /* Can't do indirect tail calls, since we don't know if the target
1134 uses the same GP. */
1135 if (!decl)
1136 return false;
1137
1138 /* Otherwise, we can make a tail call if the target function shares
1139 the same GP. */
1140 return decl_has_samegp (decl);
1141 }
1142
1143 int
1144 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1145 {
1146 rtx x = *px;
1147
1148 /* Don't re-split. */
1149 if (GET_CODE (x) == LO_SUM)
1150 return -1;
1151
1152 return small_symbolic_operand (x, Pmode) != 0;
1153 }
1154
1155 static int
1156 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1157 {
1158 rtx x = *px;
1159
1160 /* Don't re-split. */
1161 if (GET_CODE (x) == LO_SUM)
1162 return -1;
1163
1164 if (small_symbolic_operand (x, Pmode))
1165 {
1166 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1167 *px = x;
1168 return -1;
1169 }
1170
1171 return 0;
1172 }
1173
1174 rtx
1175 split_small_symbolic_operand (rtx x)
1176 {
1177 x = copy_insn (x);
1178 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1179 return x;
1180 }
1181
1182 /* Indicate that INSN cannot be duplicated. This is true for any insn
1183 that we've marked with gpdisp relocs, since those have to stay in
1184 1-1 correspondence with one another.
1185
1186 Technically we could copy them if we could set up a mapping from one
1187 sequence number to another, across the set of insns to be duplicated.
1188 This seems overly complicated and error-prone since interblock motion
1189 from sched-ebb could move one of the pair of insns to a different block.
1190
1191 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1192 then they'll be in a different block from their ldgp. Which could lead
1193 the bb reorder code to think that it would be ok to copy just the block
1194 containing the call and branch to the block containing the ldgp. */
1195
1196 static bool
1197 alpha_cannot_copy_insn_p (rtx insn)
1198 {
1199 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1200 return false;
1201 if (recog_memoized (insn) >= 0)
1202 return get_attr_cannot_copy (insn);
1203 else
1204 return false;
1205 }
1206
1207
1208 /* Try a machine-dependent way of reloading an illegitimate address
1209 operand. If we find one, push the reload and return the new rtx. */
1210
1211 rtx
1212 alpha_legitimize_reload_address (rtx x,
1213 enum machine_mode mode ATTRIBUTE_UNUSED,
1214 int opnum, int type,
1215 int ind_levels ATTRIBUTE_UNUSED)
1216 {
1217 /* We must recognize output that we have already generated ourselves. */
1218 if (GET_CODE (x) == PLUS
1219 && GET_CODE (XEXP (x, 0)) == PLUS
1220 && REG_P (XEXP (XEXP (x, 0), 0))
1221 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1222 && CONST_INT_P (XEXP (x, 1)))
1223 {
1224 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1225 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1226 opnum, type);
1227 return x;
1228 }
1229
1230 /* We wish to handle large displacements off a base register by
1231 splitting the addend across an ldah and the mem insn. This
1232 cuts number of extra insns needed from 3 to 1. */
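/* For example, a displacement of 0x9000 becomes high = 0x10000 and
   low = -0x7000: the high part is reloaded into a base register via ldah,
   while -0x7000 remains as the in-range memory displacement. */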
1233 if (GET_CODE (x) == PLUS
1234 && REG_P (XEXP (x, 0))
1235 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1236 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1237 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1238 {
1239 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1240 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1241 HOST_WIDE_INT high
1242 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1243
1244 /* Check for 32-bit overflow. */
1245 if (high + low != val)
1246 return NULL_RTX;
1247
1248 /* Reload the high part into a base reg; leave the low part
1249 in the mem directly. */
1250 x = gen_rtx_PLUS (GET_MODE (x),
1251 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1252 GEN_INT (high)),
1253 GEN_INT (low));
1254
1255 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1256 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1257 opnum, type);
1258 return x;
1259 }
1260
1261 return NULL_RTX;
1262 }
1263 \f
1264 /* Compute a (partial) cost for rtx X. Return true if the complete
1265 cost has been computed, and false if subexpressions should be
1266 scanned. In either case, *TOTAL contains the cost result. */
1267
1268 static bool
1269 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1270 bool speed)
1271 {
1272 enum machine_mode mode = GET_MODE (x);
1273 bool float_mode_p = FLOAT_MODE_P (mode);
1274 const struct alpha_rtx_cost_data *cost_data;
1275
1276 if (!speed)
1277 cost_data = &alpha_rtx_cost_size;
1278 else
1279 cost_data = &alpha_rtx_cost_data[alpha_tune];
1280
1281 switch (code)
1282 {
1283 case CONST_INT:
1284 /* If this is an 8-bit constant, return zero since it can be used
1285 nearly anywhere with no cost. If it is a valid operand for an
1286 ADD or AND, likewise return 0 if we know it will be used in that
1287 context. Otherwise, return 2 since it might be used there later.
1288 All other constants take at least two insns. */
1289 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1290 {
1291 *total = 0;
1292 return true;
1293 }
1294 /* FALLTHRU */
1295
1296 case CONST_DOUBLE:
1297 if (x == CONST0_RTX (mode))
1298 *total = 0;
1299 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1300 || (outer_code == AND && and_operand (x, VOIDmode)))
1301 *total = 0;
1302 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1303 *total = 2;
1304 else
1305 *total = COSTS_N_INSNS (2);
1306 return true;
1307
1308 case CONST:
1309 case SYMBOL_REF:
1310 case LABEL_REF:
1311 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1312 *total = COSTS_N_INSNS (outer_code != MEM);
1313 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1314 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1315 else if (tls_symbolic_operand_type (x))
1316 /* Estimate of cost for call_pal rduniq. */
1317 /* ??? How many insns do we emit here? More than one... */
1318 *total = COSTS_N_INSNS (15);
1319 else
1320 /* Otherwise we do a load from the GOT. */
1321 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1322 return true;
1323
1324 case HIGH:
1325 /* This is effectively an add_operand. */
1326 *total = 2;
1327 return true;
1328
1329 case PLUS:
1330 case MINUS:
1331 if (float_mode_p)
1332 *total = cost_data->fp_add;
1333 else if (GET_CODE (XEXP (x, 0)) == MULT
1334 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1335 {
1336 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
1337 + rtx_cost (XEXP (x, 1), outer_code, speed) + COSTS_N_INSNS (1));
1338 return true;
1339 }
1340 return false;
1341
1342 case MULT:
1343 if (float_mode_p)
1344 *total = cost_data->fp_mult;
1345 else if (mode == DImode)
1346 *total = cost_data->int_mult_di;
1347 else
1348 *total = cost_data->int_mult_si;
1349 return false;
1350
1351 case ASHIFT:
1352 if (CONST_INT_P (XEXP (x, 1))
1353 && INTVAL (XEXP (x, 1)) <= 3)
1354 {
1355 *total = COSTS_N_INSNS (1);
1356 return false;
1357 }
1358 /* FALLTHRU */
1359
1360 case ASHIFTRT:
1361 case LSHIFTRT:
1362 *total = cost_data->int_shift;
1363 return false;
1364
1365 case IF_THEN_ELSE:
1366 if (float_mode_p)
1367 *total = cost_data->fp_add;
1368 else
1369 *total = cost_data->int_cmov;
1370 return false;
1371
1372 case DIV:
1373 case UDIV:
1374 case MOD:
1375 case UMOD:
1376 if (!float_mode_p)
1377 *total = cost_data->int_div;
1378 else if (mode == SFmode)
1379 *total = cost_data->fp_div_sf;
1380 else
1381 *total = cost_data->fp_div_df;
1382 return false;
1383
1384 case MEM:
1385 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1386 return true;
1387
1388 case NEG:
1389 if (! float_mode_p)
1390 {
1391 *total = COSTS_N_INSNS (1);
1392 return false;
1393 }
1394 /* FALLTHRU */
1395
1396 case ABS:
1397 if (! float_mode_p)
1398 {
1399 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1400 return false;
1401 }
1402 /* FALLTHRU */
1403
1404 case FLOAT:
1405 case UNSIGNED_FLOAT:
1406 case FIX:
1407 case UNSIGNED_FIX:
1408 case FLOAT_TRUNCATE:
1409 *total = cost_data->fp_add;
1410 return false;
1411
1412 case FLOAT_EXTEND:
1413 if (MEM_P (XEXP (x, 0)))
1414 *total = 0;
1415 else
1416 *total = cost_data->fp_add;
1417 return false;
1418
1419 default:
1420 return false;
1421 }
1422 }
1423 \f
1424 /* REF is an alignable memory location. Place an aligned SImode
1425 reference into *PALIGNED_MEM and the number of bits to shift into
1426 *PBITNUM. If reload is replacing an out-of-range stack slot address,
1427 the replacement is used instead. */
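/* For instance, on a little-endian target an unaligned reference two bytes
   into an aligned word yields *PALIGNED_MEM covering that whole word and
   *PBITNUM of 16. */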
1428
1429 void
1430 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1431 {
1432 rtx base;
1433 HOST_WIDE_INT disp, offset;
1434
1435 gcc_assert (MEM_P (ref));
1436
1437 if (reload_in_progress
1438 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1439 {
1440 base = find_replacement (&XEXP (ref, 0));
1441 gcc_assert (memory_address_p (GET_MODE (ref), base));
1442 }
1443 else
1444 base = XEXP (ref, 0);
1445
1446 if (GET_CODE (base) == PLUS)
1447 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1448 else
1449 disp = 0;
1450
1451 /* Find the byte offset within an aligned word. If the memory itself is
1452 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1453 will have examined the base register and determined it is aligned, and
1454 thus displacements from it are naturally alignable. */
1455 if (MEM_ALIGN (ref) >= 32)
1456 offset = 0;
1457 else
1458 offset = disp & 3;
1459
1460 /* Access the entire aligned word. */
1461 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1462
1463 /* Convert the byte offset within the word to a bit offset. */
1464 if (WORDS_BIG_ENDIAN)
1465 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1466 else
1467 offset *= 8;
1468 *pbitnum = GEN_INT (offset);
1469 }
1470
1471 /* Similar, but just get the address. Handle the two reload cases,
1472 using any replacement address when reload is in progress. */
1473
1474 rtx
1475 get_unaligned_address (rtx ref)
1476 {
1477 rtx base;
1478 HOST_WIDE_INT offset = 0;
1479
1480 gcc_assert (MEM_P (ref));
1481
1482 if (reload_in_progress
1483 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1484 {
1485 base = find_replacement (&XEXP (ref, 0));
1486
1487 gcc_assert (memory_address_p (GET_MODE (ref), base));
1488 }
1489 else
1490 base = XEXP (ref, 0);
1491
1492 if (GET_CODE (base) == PLUS)
1493 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1494
1495 return plus_constant (base, offset);
1496 }
1497
1498 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1499 X is always returned in a register. */
1500
1501 rtx
1502 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1503 {
1504 if (GET_CODE (addr) == PLUS)
1505 {
1506 ofs += INTVAL (XEXP (addr, 1));
1507 addr = XEXP (addr, 0);
1508 }
1509
1510 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1511 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1512 }
1513
1514 /* On the Alpha, all (non-symbolic) constants except zero go into
1515 a floating-point register via memory. Note that we cannot
1516 return anything that is not a subset of RCLASS, and that some
1517 symbolic constants cannot be dropped to memory. */
1518
1519 enum reg_class
1520 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1521 {
1522 /* Zero is present in any register class. */
1523 if (x == CONST0_RTX (GET_MODE (x)))
1524 return rclass;
1525
1526 /* These sorts of constants we can easily drop to memory. */
1527 if (CONST_INT_P (x)
1528 || GET_CODE (x) == CONST_DOUBLE
1529 || GET_CODE (x) == CONST_VECTOR)
1530 {
1531 if (rclass == FLOAT_REGS)
1532 return NO_REGS;
1533 if (rclass == ALL_REGS)
1534 return GENERAL_REGS;
1535 return rclass;
1536 }
1537
1538 /* All other kinds of constants should not (and in the case of HIGH
1539 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1540 secondary reload. */
1541 if (CONSTANT_P (x))
1542 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1543
1544 return rclass;
1545 }
1546
1547 /* Inform reload about cases where moving X with a mode MODE to a register in
1548 RCLASS requires an extra scratch or immediate register. Return the class
1549 needed for the immediate register. */
1550
1551 static enum reg_class
1552 alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
1553 enum machine_mode mode, secondary_reload_info *sri)
1554 {
1555 /* Loading and storing HImode or QImode values to and from memory
1556 usually requires a scratch register. */
1557 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1558 {
1559 if (any_memory_operand (x, mode))
1560 {
1561 if (in_p)
1562 {
1563 if (!aligned_memory_operand (x, mode))
1564 sri->icode = reload_in_optab[mode];
1565 }
1566 else
1567 sri->icode = reload_out_optab[mode];
1568 return NO_REGS;
1569 }
1570 }
1571
1572 /* We also cannot do integral arithmetic into FP regs, as might result
1573 from register elimination into a DImode fp register. */
1574 if (rclass == FLOAT_REGS)
1575 {
1576 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1577 return GENERAL_REGS;
1578 if (in_p && INTEGRAL_MODE_P (mode)
1579 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1580 return GENERAL_REGS;
1581 }
1582
1583 return NO_REGS;
1584 }
1585 \f
1586 /* Subfunction of the following function. Update the flags of any MEM
1587 found in part of X. */
1588
1589 static int
1590 alpha_set_memflags_1 (rtx *xp, void *data)
1591 {
1592 rtx x = *xp, orig = (rtx) data;
1593
1594 if (!MEM_P (x))
1595 return 0;
1596
1597 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1598 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1599 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1600 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1601 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1602
1603 /* Sadly, we cannot use alias sets because the extra aliasing
1604 produced by the AND interferes. Given that two-byte quantities
1605 are the only thing we would be able to differentiate anyway,
1606 there does not seem to be any point in convoluting the early
1607 out of the alias check. */
1608
1609 return -1;
1610 }
1611
1612 /* Given SEQ, which is an INSN list, look for any MEMs in either
1613 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1614 volatile flags from REF into each of the MEMs found. If REF is not
1615 a MEM, don't do anything. */
1616
1617 void
1618 alpha_set_memflags (rtx seq, rtx ref)
1619 {
1620 rtx insn;
1621
1622 if (!MEM_P (ref))
1623 return;
1624
1625 /* This is only called from alpha.md, after having had something
1626 generated from one of the insn patterns. So if everything is
1627 zero, the pattern is already up-to-date. */
1628 if (!MEM_VOLATILE_P (ref)
1629 && !MEM_IN_STRUCT_P (ref)
1630 && !MEM_SCALAR_P (ref)
1631 && !MEM_NOTRAP_P (ref)
1632 && !MEM_READONLY_P (ref))
1633 return;
1634
1635 for (insn = seq; insn; insn = NEXT_INSN (insn))
1636 if (INSN_P (insn))
1637 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1638 else
1639 gcc_unreachable ();
1640 }
1641 \f
1642 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1643 int, bool);
1644
1645 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1646 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1647 and return pc_rtx if successful. */
1648
1649 static rtx
1650 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1651 HOST_WIDE_INT c, int n, bool no_output)
1652 {
1653 HOST_WIDE_INT new_const;
1654 int i, bits;
1655 /* Use a pseudo if highly optimizing and still generating RTL. */
1656 rtx subtarget
1657 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1658 rtx temp, insn;
1659
1660 /* If this is a sign-extended 32-bit constant, we can do this in at most
1661 three insns, so do it if we have enough insns left. We always have
1662 a sign-extended 32-bit constant when compiling on a narrow machine. */
1663
1664 if (HOST_BITS_PER_WIDE_INT != 64
1665 || c >> 31 == -1 || c >> 31 == 0)
1666 {
1667 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1668 HOST_WIDE_INT tmp1 = c - low;
1669 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1670 HOST_WIDE_INT extra = 0;
1671
1672 /* If HIGH will be interpreted as negative but the constant is
1673 positive, we must adjust it to do two ldah insns. */
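/* For example, c = 0x7fff8000 first yields low = -0x8000 and high = -0x8000;
   the adjustment gives extra = 0x4000 and high = 0x4000, so the constant is
   built as (0x4000 << 16) + (0x4000 << 16) - 0x8000 in three insns. */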
1674
1675 if ((high & 0x8000) != 0 && c >= 0)
1676 {
1677 extra = 0x4000;
1678 tmp1 -= 0x40000000;
1679 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1680 }
1681
1682 if (c == low || (low == 0 && extra == 0))
1683 {
1684 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1685 but that meant that we can't handle INT_MIN on 32-bit machines
1686 (like NT/Alpha), because we recurse indefinitely through
1687 emit_move_insn to gen_movdi. So instead, since we know exactly
1688 what we want, create it explicitly. */
1689
1690 if (no_output)
1691 return pc_rtx;
1692 if (target == NULL)
1693 target = gen_reg_rtx (mode);
1694 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1695 return target;
1696 }
1697 else if (n >= 2 + (extra != 0))
1698 {
1699 if (no_output)
1700 return pc_rtx;
1701 if (!can_create_pseudo_p ())
1702 {
1703 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1704 temp = target;
1705 }
1706 else
1707 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1708 subtarget, mode);
1709
1710 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1711 This means that if we go through expand_binop, we'll try to
1712 generate extensions, etc, which will require new pseudos, which
1713 will fail during some split phases. The SImode add patterns
1714 still exist, but are not named. So build the insns by hand. */
1715
1716 if (extra != 0)
1717 {
1718 if (! subtarget)
1719 subtarget = gen_reg_rtx (mode);
1720 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1721 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1722 emit_insn (insn);
1723 temp = subtarget;
1724 }
1725
1726 if (target == NULL)
1727 target = gen_reg_rtx (mode);
1728 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1729 insn = gen_rtx_SET (VOIDmode, target, insn);
1730 emit_insn (insn);
1731 return target;
1732 }
1733 }
1734
1735 /* If we couldn't do it that way, try some other methods. But if we have
1736 no instructions left, don't bother. Likewise, if this is SImode and
1737 we can't make pseudos, we can't do anything since the expand_binop
1738 and expand_unop calls will widen and try to make pseudos. */
1739
1740 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1741 return 0;
1742
1743 /* Next, see if we can load a related constant and then shift and possibly
1744 negate it to get the constant we want. Try this once each increasing
1745 numbers of insns. */
1746
1747 for (i = 1; i < n; i++)
1748 {
1749 /* First, see if minus some low bits, we've an easy load of
1750 high bits. */
1751
1752 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1753 if (new_const != 0)
1754 {
1755 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1756 if (temp)
1757 {
1758 if (no_output)
1759 return temp;
1760 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1761 target, 0, OPTAB_WIDEN);
1762 }
1763 }
1764
1765 /* Next try complementing. */
1766 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1767 if (temp)
1768 {
1769 if (no_output)
1770 return temp;
1771 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1772 }
1773
1774 /* Next try to form a constant and do a left shift. We can do this
1775 if some low-order bits are zero; the exact_log2 call below tells
1776 us that information. The bits we are shifting out could be any
1777 value, but here we'll just try the 0- and sign-extended forms of
1778 the constant. To try to increase the chance of having the same
1779 constant in more than one insn, start at the highest number of
1780 bits to shift, but try all possibilities in case a ZAPNOT will
1781 be useful. */
1782
1783 bits = exact_log2 (c & -c);
1784 if (bits > 0)
1785 for (; bits > 0; bits--)
1786 {
1787 new_const = c >> bits;
1788 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1789 if (!temp && c < 0)
1790 {
1791 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1792 temp = alpha_emit_set_const (subtarget, mode, new_const,
1793 i, no_output);
1794 }
1795 if (temp)
1796 {
1797 if (no_output)
1798 return temp;
1799 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1800 target, 0, OPTAB_WIDEN);
1801 }
1802 }
1803
1804 /* Now try high-order zero bits. Here we try the shifted-in bits as
1805 all zero and all ones. Be careful to avoid shifting outside the
1806 mode and to avoid shifting outside the host wide int size. */
1807 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1808 confuse the recursive call and set all of the high 32 bits. */
1809
1810 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1811 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1812 if (bits > 0)
1813 for (; bits > 0; bits--)
1814 {
1815 new_const = c << bits;
1816 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1817 if (!temp)
1818 {
1819 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1820 temp = alpha_emit_set_const (subtarget, mode, new_const,
1821 i, no_output);
1822 }
1823 if (temp)
1824 {
1825 if (no_output)
1826 return temp;
1827 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1828 target, 1, OPTAB_WIDEN);
1829 }
1830 }
1831
1832 /* Now try high-order 1 bits. We get that with a sign-extension.
1833 But one bit isn't enough here. Be careful to avoid shifting outside
1834 the mode and to avoid shifting outside the host wide int size. */
1835
1836 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1837 - floor_log2 (~ c) - 2);
1838 if (bits > 0)
1839 for (; bits > 0; bits--)
1840 {
1841 new_const = c << bits;
1842 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1843 if (!temp)
1844 {
1845 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1846 temp = alpha_emit_set_const (subtarget, mode, new_const,
1847 i, no_output);
1848 }
1849 if (temp)
1850 {
1851 if (no_output)
1852 return temp;
1853 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1854 target, 0, OPTAB_WIDEN);
1855 }
1856 }
1857 }
1858
1859 #if HOST_BITS_PER_WIDE_INT == 64
1860 /* Finally, see if we can load a value into the target that is the same as the
1861 constant except that all bytes that are 0 are changed to be 0xff. If we
1862 can, then we can do a ZAPNOT to obtain the desired constant. */
1863
1864 new_const = c;
1865 for (i = 0; i < 64; i += 8)
1866 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1867 new_const |= (HOST_WIDE_INT) 0xff << i;
1868
1869 /* We are only called for SImode and DImode. If this is SImode, ensure that
1870 we are sign extended to a full word. */
1871
1872 if (mode == SImode)
1873 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1874
1875 if (new_const != c)
1876 {
1877 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1878 if (temp)
1879 {
1880 if (no_output)
1881 return temp;
1882 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1883 target, 0, OPTAB_WIDEN);
1884 }
1885 }
1886 #endif
1887
1888 return 0;
1889 }
1890
1891 /* Try to output insns to set TARGET equal to the constant C if it can be
1892 done in less than N insns. Do all computations in MODE. Returns the place
1893 where the output has been placed if it can be done and the insns have been
1894 emitted. If it would take more than N insns, zero is returned and no
1895 insns are emitted. */
1896
1897 static rtx
1898 alpha_emit_set_const (rtx target, enum machine_mode mode,
1899 HOST_WIDE_INT c, int n, bool no_output)
1900 {
1901 enum machine_mode orig_mode = mode;
1902 rtx orig_target = target;
1903 rtx result = 0;
1904 int i;
1905
1906 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1907 can't load this constant in one insn, do this in DImode. */
1908 if (!can_create_pseudo_p () && mode == SImode
1909 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1910 {
1911 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1912 if (result)
1913 return result;
1914
1915 target = no_output ? NULL : gen_lowpart (DImode, target);
1916 mode = DImode;
1917 }
1918 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1919 {
1920 target = no_output ? NULL : gen_lowpart (DImode, target);
1921 mode = DImode;
1922 }
1923
1924 /* Try 1 insn, then 2, then up to N. */
1925 for (i = 1; i <= n; i++)
1926 {
1927 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1928 if (result)
1929 {
1930 rtx insn, set;
1931
1932 if (no_output)
1933 return result;
1934
1935 insn = get_last_insn ();
1936 set = single_set (insn);
1937 if (! CONSTANT_P (SET_SRC (set)))
1938 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1939 break;
1940 }
1941 }
1942
1943 /* Allow for the case where we changed the mode of TARGET. */
1944 if (result)
1945 {
1946 if (result == target)
1947 result = orig_target;
1948 else if (mode != orig_mode)
1949 result = gen_lowpart (orig_mode, result);
1950 }
1951
1952 return result;
1953 }
1954
1955 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1956 fall back to a straightforward decomposition. We do this to avoid
1957 exponential run times encountered when looking for longer sequences
1958 with alpha_emit_set_const. */
1959
1960 static rtx
1961 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1962 {
1963 HOST_WIDE_INT d1, d2, d3, d4;
1964
1965 /* Decompose the entire word */
1966 #if HOST_BITS_PER_WIDE_INT >= 64
1967 gcc_assert (c2 == -(c1 < 0));
1968 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1969 c1 -= d1;
1970 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1971 c1 = (c1 - d2) >> 32;
1972 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1973 c1 -= d3;
1974 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1975 gcc_assert (c1 == d4);
1976 #else
1977 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1978 c1 -= d1;
1979 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1980 gcc_assert (c1 == d2);
1981 c2 += (d2 < 0);
1982 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1983 c2 -= d3;
1984 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1985 gcc_assert (c2 == d4);
1986 #endif
1987
1988 /* Construct the high word */
1989 if (d4)
1990 {
1991 emit_move_insn (target, GEN_INT (d4));
1992 if (d3)
1993 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1994 }
1995 else
1996 emit_move_insn (target, GEN_INT (d3));
1997
1998 /* Shift it into place */
1999 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2000
2001 /* Add in the low bits. */
2002 if (d2)
2003 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2004 if (d1)
2005 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2006
2007 return target;
2008 }
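
/* As a worked example with an arbitrary value, 0x123456789abcdef0
   decomposes into d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679 and
   d4 = 0x12340000, and is rebuilt as ((d4 + d3) << 32) + d2 + d1,
   i.e. roughly

     ldah $r,0x1234($31)
     lda  $r,0x5679($r)
     sll  $r,32,$r
     ldah $r,-0x6543($r)
     lda  $r,-0x2110($r)

   where the exact insns come from the move patterns used above.  */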
2009
2010 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2011 the low 64 bits. */
2012
2013 static void
2014 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2015 {
2016 HOST_WIDE_INT i0, i1;
2017
2018 if (GET_CODE (x) == CONST_VECTOR)
2019 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2020
2021
2022 if (CONST_INT_P (x))
2023 {
2024 i0 = INTVAL (x);
2025 i1 = -(i0 < 0);
2026 }
2027 else if (HOST_BITS_PER_WIDE_INT >= 64)
2028 {
2029 i0 = CONST_DOUBLE_LOW (x);
2030 i1 = -(i0 < 0);
2031 }
2032 else
2033 {
2034 i0 = CONST_DOUBLE_LOW (x);
2035 i1 = CONST_DOUBLE_HIGH (x);
2036 }
2037
2038 *p0 = i0;
2039 *p1 = i1;
2040 }
2041
2042 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2043 are willing to load the value into a register via a move pattern.
2044 Normally this is all symbolic constants, integral constants that
2045 take three or fewer instructions, and floating-point zero. */
2046
2047 bool
2048 alpha_legitimate_constant_p (rtx x)
2049 {
2050 enum machine_mode mode = GET_MODE (x);
2051 HOST_WIDE_INT i0, i1;
2052
2053 switch (GET_CODE (x))
2054 {
2055 case CONST:
2056 case LABEL_REF:
2057 case HIGH:
2058 return true;
2059
2060 case SYMBOL_REF:
2061 /* TLS symbols are never valid. */
2062 return SYMBOL_REF_TLS_MODEL (x) == 0;
2063
2064 case CONST_DOUBLE:
2065 if (x == CONST0_RTX (mode))
2066 return true;
2067 if (FLOAT_MODE_P (mode))
2068 return false;
2069 goto do_integer;
2070
2071 case CONST_VECTOR:
2072 if (x == CONST0_RTX (mode))
2073 return true;
2074 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2075 return false;
2076 if (GET_MODE_SIZE (mode) != 8)
2077 return false;
2078 goto do_integer;
2079
2080 case CONST_INT:
2081 do_integer:
2082 if (TARGET_BUILD_CONSTANTS)
2083 return true;
2084 alpha_extract_integer (x, &i0, &i1);
2085 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2086 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2087 return false;
2088
2089 default:
2090 return false;
2091 }
2092 }
2093
2094 /* Operand 1 is known to be a constant, and should require more than one
2095 instruction to load. Emit that multi-part load. */
2096
2097 bool
2098 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2099 {
2100 HOST_WIDE_INT i0, i1;
2101 rtx temp = NULL_RTX;
2102
2103 alpha_extract_integer (operands[1], &i0, &i1);
2104
2105 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2106 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2107
2108 if (!temp && TARGET_BUILD_CONSTANTS)
2109 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2110
2111 if (temp)
2112 {
2113 if (!rtx_equal_p (operands[0], temp))
2114 emit_move_insn (operands[0], temp);
2115 return true;
2116 }
2117
2118 return false;
2119 }
2120
2121 /* Expand a move instruction; return true if all work is done.
2122 We don't handle non-bwx subword loads here. */
2123
2124 bool
2125 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2126 {
2127 rtx tmp;
2128
2129 /* If the output is not a register, the input must be. */
2130 if (MEM_P (operands[0])
2131 && ! reg_or_0_operand (operands[1], mode))
2132 operands[1] = force_reg (mode, operands[1]);
2133
2134 /* Allow legitimize_address to perform some simplifications. */
2135 if (mode == Pmode && symbolic_operand (operands[1], mode))
2136 {
2137 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2138 if (tmp)
2139 {
2140 if (tmp == operands[0])
2141 return true;
2142 operands[1] = tmp;
2143 return false;
2144 }
2145 }
2146
2147 /* Early out for non-constants and valid constants. */
2148 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2149 return false;
2150
2151 /* Split large integers. */
2152 if (CONST_INT_P (operands[1])
2153 || GET_CODE (operands[1]) == CONST_DOUBLE
2154 || GET_CODE (operands[1]) == CONST_VECTOR)
2155 {
2156 if (alpha_split_const_mov (mode, operands))
2157 return true;
2158 }
2159
2160 /* Otherwise we've nothing left but to drop the thing to memory. */
2161 tmp = force_const_mem (mode, operands[1]);
2162
2163 if (tmp == NULL_RTX)
2164 return false;
2165
2166 if (reload_in_progress)
2167 {
2168 emit_move_insn (operands[0], XEXP (tmp, 0));
2169 operands[1] = replace_equiv_address (tmp, operands[0]);
2170 }
2171 else
2172 operands[1] = validize_mem (tmp);
2173 return false;
2174 }
2175
2176 /* Expand a non-bwx QImode or HImode move instruction;
2177 return true if all work is done. */
2178
2179 bool
2180 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2181 {
2182 rtx seq;
2183
2184 /* If the output is not a register, the input must be. */
2185 if (MEM_P (operands[0]))
2186 operands[1] = force_reg (mode, operands[1]);
2187
2188 /* Handle four memory cases, unaligned and aligned for either the input
2189 or the output. The only case where we can be called during reload is
2190 for aligned loads; all other cases require temporaries. */
2191
2192 if (any_memory_operand (operands[1], mode))
2193 {
2194 if (aligned_memory_operand (operands[1], mode))
2195 {
2196 if (reload_in_progress)
2197 {
2198 if (mode == QImode)
2199 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2200 else
2201 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2202 emit_insn (seq);
2203 }
2204 else
2205 {
2206 rtx aligned_mem, bitnum;
2207 rtx scratch = gen_reg_rtx (SImode);
2208 rtx subtarget;
2209 bool copyout;
2210
2211 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2212
2213 subtarget = operands[0];
2214 if (REG_P (subtarget))
2215 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2216 else
2217 subtarget = gen_reg_rtx (DImode), copyout = true;
2218
2219 if (mode == QImode)
2220 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2221 bitnum, scratch);
2222 else
2223 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2224 bitnum, scratch);
2225 emit_insn (seq);
2226
2227 if (copyout)
2228 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2229 }
2230 }
2231 else
2232 {
2233 /* Don't pass these as parameters since that makes the generated
2234 code depend on parameter evaluation order which will cause
2235 bootstrap failures. */
2236
2237 rtx temp1, temp2, subtarget, ua;
2238 bool copyout;
2239
2240 temp1 = gen_reg_rtx (DImode);
2241 temp2 = gen_reg_rtx (DImode);
2242
2243 subtarget = operands[0];
2244 if (REG_P (subtarget))
2245 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2246 else
2247 subtarget = gen_reg_rtx (DImode), copyout = true;
2248
2249 ua = get_unaligned_address (operands[1]);
2250 if (mode == QImode)
2251 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2252 else
2253 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2254
2255 alpha_set_memflags (seq, operands[1]);
2256 emit_insn (seq);
2257
2258 if (copyout)
2259 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2260 }
2261 return true;
2262 }
2263
2264 if (any_memory_operand (operands[0], mode))
2265 {
2266 if (aligned_memory_operand (operands[0], mode))
2267 {
2268 rtx aligned_mem, bitnum;
2269 rtx temp1 = gen_reg_rtx (SImode);
2270 rtx temp2 = gen_reg_rtx (SImode);
2271
2272 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2273
2274 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2275 temp1, temp2));
2276 }
2277 else
2278 {
2279 rtx temp1 = gen_reg_rtx (DImode);
2280 rtx temp2 = gen_reg_rtx (DImode);
2281 rtx temp3 = gen_reg_rtx (DImode);
2282 rtx ua = get_unaligned_address (operands[0]);
2283
2284 if (mode == QImode)
2285 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2286 else
2287 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2288
2289 alpha_set_memflags (seq, operands[0]);
2290 emit_insn (seq);
2291 }
2292 return true;
2293 }
2294
2295 return false;
2296 }
2297
2298 /* Implement the movmisalign patterns. One of the operands is a memory
2299 that is not naturally aligned. Emit instructions to load it. */
2300
2301 void
2302 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2303 {
2304 /* Honor misaligned loads; we promised to handle these. */
2305 if (MEM_P (operands[1]))
2306 {
2307 rtx tmp;
2308
2309 if (register_operand (operands[0], mode))
2310 tmp = operands[0];
2311 else
2312 tmp = gen_reg_rtx (mode);
2313
2314 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2315 if (tmp != operands[0])
2316 emit_move_insn (operands[0], tmp);
2317 }
2318 else if (MEM_P (operands[0]))
2319 {
2320 if (!reg_or_0_operand (operands[1], mode))
2321 operands[1] = force_reg (mode, operands[1]);
2322 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2323 }
2324 else
2325 gcc_unreachable ();
2326 }
2327
2328 /* Generate an unsigned DImode to FP conversion. This is the same code
2329 optabs would emit if we didn't have TFmode patterns.
2330
2331 For SFmode, this is the only construction I've found that can pass
2332 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2333 intermediates will work, because you'll get intermediate rounding
2334 that ruins the end result. Some of this could be fixed by turning
2335 on round-to-positive-infinity, but that requires diddling the fpsr,
2336 which kills performance. I tried turning this around and converting
2337 to a negative number, so that I could turn on /m, but either I did
2338 it wrong or there's something else, because I wound up with the exact
2339 same single-bit error. There is a branch-less form of this same code:
2340
2341 srl $16,1,$1
2342 and $16,1,$2
2343 cmplt $16,0,$3
2344 or $1,$2,$2
2345 cmovge $16,$16,$2
2346 itoft $3,$f10
2347 itoft $2,$f11
2348 cvtqs $f11,$f11
2349 adds $f11,$f11,$f0
2350 fcmoveq $f10,$f11,$f0
2351
2352 I'm not using it because it's the same number of instructions as
2353 this branch-full form, and it has more serialized long latency
2354 instructions on the critical path.
2355
2356 For DFmode, we can avoid rounding errors by breaking up the word
2357 into two pieces, converting them separately, and adding them back:
2358
2359 LC0: .long 0,0x5f800000
2360
2361 itoft $16,$f11
2362 lda $2,LC0
2363 cmplt $16,0,$1
2364 cpyse $f11,$f31,$f10
2365 cpyse $f31,$f11,$f11
2366 s4addq $1,$2,$1
2367 lds $f12,0($1)
2368 cvtqt $f10,$f10
2369 cvtqt $f11,$f11
2370 addt $f12,$f10,$f0
2371 addt $f0,$f11,$f0
2372
2373 This doesn't seem to be a clear-cut win over the optabs form.
2374 It probably all depends on the distribution of numbers being
2375 converted -- in the optabs form, all but high-bit-set has a
2376 much lower minimum execution time. */
2377
2378 void
2379 alpha_emit_floatuns (rtx operands[2])
2380 {
2381 rtx neglab, donelab, i0, i1, f0, in, out;
2382 enum machine_mode mode;
2383
2384 out = operands[0];
2385 in = force_reg (DImode, operands[1]);
2386 mode = GET_MODE (out);
2387 neglab = gen_label_rtx ();
2388 donelab = gen_label_rtx ();
2389 i0 = gen_reg_rtx (DImode);
2390 i1 = gen_reg_rtx (DImode);
2391 f0 = gen_reg_rtx (mode);
2392
2393 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2394
2395 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2396 emit_jump_insn (gen_jump (donelab));
2397 emit_barrier ();
2398
2399 emit_label (neglab);
2400
2401 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2402 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2403 emit_insn (gen_iordi3 (i0, i0, i1));
2404 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2405 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2406
2407 emit_label (donelab);
2408 }
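
/* For reference only, a host-side C sketch of the same computation
   (not used anywhere; it assumes a 64-bit long long and the default
   round-to-nearest mode, and the names are purely illustrative).  */
#if 0
static double
u64_to_fp_sketch (unsigned long long x)
{
  unsigned long long h;

  if ((long long) x >= 0)
    return (double) (long long) x;	/* Fits a signed conversion.  */

  /* Halve the value, folding the discarded bit back in as a sticky
     bit so that converting and then doubling still rounds correctly,
     exactly as the insns emitted above do.  */
  h = (x >> 1) | (x & 1);
  return (double) (long long) h * 2.0;
}
#endif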
2409
2410 /* Generate the comparison for a conditional branch. */
2411
2412 rtx
2413 alpha_emit_conditional_branch (enum rtx_code code)
2414 {
2415 enum rtx_code cmp_code, branch_code;
2416 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2417 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2418 rtx tem;
2419
2420 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2421 {
2422 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2423 op1 = const0_rtx;
2424 alpha_compare.fp_p = 0;
2425 }
2426
2427 /* The general case: fold the comparison code to the types of compares
2428 that we have, choosing the branch as necessary. */
2429 switch (code)
2430 {
2431 case EQ: case LE: case LT: case LEU: case LTU:
2432 case UNORDERED:
2433 /* We have these compares: */
2434 cmp_code = code, branch_code = NE;
2435 break;
2436
2437 case NE:
2438 case ORDERED:
2439 /* These must be reversed. */
2440 cmp_code = reverse_condition (code), branch_code = EQ;
2441 break;
2442
2443 case GE: case GT: case GEU: case GTU:
2444 /* For FP, we swap them, for INT, we reverse them. */
2445 if (alpha_compare.fp_p)
2446 {
2447 cmp_code = swap_condition (code);
2448 branch_code = NE;
2449 tem = op0, op0 = op1, op1 = tem;
2450 }
2451 else
2452 {
2453 cmp_code = reverse_condition (code);
2454 branch_code = EQ;
2455 }
2456 break;
2457
2458 default:
2459 gcc_unreachable ();
2460 }
2461
2462 if (alpha_compare.fp_p)
2463 {
2464 cmp_mode = DFmode;
2465 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2466 {
2467 /* When we are not as concerned about non-finite values, and we
2468 are comparing against zero, we can branch directly. */
2469 if (op1 == CONST0_RTX (DFmode))
2470 cmp_code = UNKNOWN, branch_code = code;
2471 else if (op0 == CONST0_RTX (DFmode))
2472 {
2473 /* Undo the swap we probably did just above. */
2474 tem = op0, op0 = op1, op1 = tem;
2475 branch_code = swap_condition (cmp_code);
2476 cmp_code = UNKNOWN;
2477 }
2478 }
2479 else
2480 {
2481 /* ??? We mark the branch mode to be CCmode to prevent the
2482 compare and branch from being combined, since the compare
2483 insn follows IEEE rules that the branch does not. */
2484 branch_mode = CCmode;
2485 }
2486 }
2487 else
2488 {
2489 cmp_mode = DImode;
2490
2491 /* The following optimizations are only for signed compares. */
2492 if (code != LEU && code != LTU && code != GEU && code != GTU)
2493 {
2494 /* Whee. Compare and branch against 0 directly. */
2495 if (op1 == const0_rtx)
2496 cmp_code = UNKNOWN, branch_code = code;
2497
2498 /* If the constant doesn't fit into an immediate, but can
2499 be generated by lda/ldah, we adjust the argument and
2500 compare against zero, so we can use beq/bne directly. */
2501 /* ??? Don't do this when comparing against symbols, otherwise
2502 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2503 be declared false out of hand (at least for non-weak). */
2504 else if (CONST_INT_P (op1)
2505 && (code == EQ || code == NE)
2506 && !(symbolic_operand (op0, VOIDmode)
2507 || (REG_P (op0) && REG_POINTER (op0))))
2508 {
2509 rtx n_op1 = GEN_INT (-INTVAL (op1));
2510
2511 if (! satisfies_constraint_I (op1)
2512 && (satisfies_constraint_K (n_op1)
2513 || satisfies_constraint_L (n_op1)))
2514 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2515 }
2516 }
2517
2518 if (!reg_or_0_operand (op0, DImode))
2519 op0 = force_reg (DImode, op0);
2520 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2521 op1 = force_reg (DImode, op1);
2522 }
2523
2524 /* Emit an initial compare instruction, if necessary. */
2525 tem = op0;
2526 if (cmp_code != UNKNOWN)
2527 {
2528 tem = gen_reg_rtx (cmp_mode);
2529 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2530 }
2531
2532 /* Zero the operands. */
2533 memset (&alpha_compare, 0, sizeof (alpha_compare));
2534
2535 /* Return the branch comparison. */
2536 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2537 }
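
/* For example, an integer "a > b" branch is folded into roughly
   "cmple a,b,t ; beq t,L" (reversed compare, branch on EQ), while a
   DFmode "a > b" branch becomes roughly "cmptlt b,a,f ; fbne f,L"
   (swapped operands, branch on NE), modulo the CCmode trickery above
   for IEEE-conforming FP.  */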
2538
2539 /* Certain simplifications can be done to make invalid setcc operations
2540 valid. Return the final comparison, or NULL if we can't make it work. */
2541
2542 rtx
2543 alpha_emit_setcc (enum rtx_code code)
2544 {
2545 enum rtx_code cmp_code;
2546 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2547 int fp_p = alpha_compare.fp_p;
2548 rtx tmp;
2549
2550 /* Zero the operands. */
2551 memset (&alpha_compare, 0, sizeof (alpha_compare));
2552
2553 if (fp_p && GET_MODE (op0) == TFmode)
2554 {
2555 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2556 op1 = const0_rtx;
2557 fp_p = 0;
2558 }
2559
2560 if (fp_p && !TARGET_FIX)
2561 return NULL_RTX;
2562
2563 /* The general case: fold the comparison code to the types of compares
2564 that we have, choosing the branch as necessary. */
2565
2566 cmp_code = UNKNOWN;
2567 switch (code)
2568 {
2569 case EQ: case LE: case LT: case LEU: case LTU:
2570 case UNORDERED:
2571 /* We have these compares. */
2572 if (fp_p)
2573 cmp_code = code, code = NE;
2574 break;
2575
2576 case NE:
2577 if (!fp_p && op1 == const0_rtx)
2578 break;
2579 /* FALLTHRU */
2580
2581 case ORDERED:
2582 cmp_code = reverse_condition (code);
2583 code = EQ;
2584 break;
2585
2586 case GE: case GT: case GEU: case GTU:
2587 /* These normally need swapping, but for integer zero we have
2588 special patterns that recognize swapped operands. */
2589 if (!fp_p && op1 == const0_rtx)
2590 break;
2591 code = swap_condition (code);
2592 if (fp_p)
2593 cmp_code = code, code = NE;
2594 tmp = op0, op0 = op1, op1 = tmp;
2595 break;
2596
2597 default:
2598 gcc_unreachable ();
2599 }
2600
2601 if (!fp_p)
2602 {
2603 if (!register_operand (op0, DImode))
2604 op0 = force_reg (DImode, op0);
2605 if (!reg_or_8bit_operand (op1, DImode))
2606 op1 = force_reg (DImode, op1);
2607 }
2608
2609 /* Emit an initial compare instruction, if necessary. */
2610 if (cmp_code != UNKNOWN)
2611 {
2612 enum machine_mode mode = fp_p ? DFmode : DImode;
2613
2614 tmp = gen_reg_rtx (mode);
2615 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2616 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2617
2618 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2619 op1 = const0_rtx;
2620 }
2621
2622 /* Return the setcc comparison. */
2623 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2624 }
2625
2626
2627 /* Rewrite a comparison against zero CMP of the form
2628 (CODE (cc0) (const_int 0)) so it can be written validly in
2629 a conditional move (if_then_else CMP ...).
2630 If both of the operands that set cc0 are nonzero we must emit
2631 an insn to perform the compare (it can't be done within
2632 the conditional move). */
2633
2634 rtx
2635 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2636 {
2637 enum rtx_code code = GET_CODE (cmp);
2638 enum rtx_code cmov_code = NE;
2639 rtx op0 = alpha_compare.op0;
2640 rtx op1 = alpha_compare.op1;
2641 int fp_p = alpha_compare.fp_p;
2642 enum machine_mode cmp_mode
2643 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2644 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2645 enum machine_mode cmov_mode = VOIDmode;
2646 int local_fast_math = flag_unsafe_math_optimizations;
2647 rtx tem;
2648
2649 /* Zero the operands. */
2650 memset (&alpha_compare, 0, sizeof (alpha_compare));
2651
2652 if (fp_p != FLOAT_MODE_P (mode))
2653 {
2654 enum rtx_code cmp_code;
2655
2656 if (! TARGET_FIX)
2657 return 0;
2658
2659 /* If we have fp<->int register move instructions, do a cmov by
2660 performing the comparison in fp registers, and move the
2661 zero/nonzero value to integer registers, where we can then
2662 use a normal cmov, or vice-versa. */
2663
2664 switch (code)
2665 {
2666 case EQ: case LE: case LT: case LEU: case LTU:
2667 /* We have these compares. */
2668 cmp_code = code, code = NE;
2669 break;
2670
2671 case NE:
2672 /* This must be reversed. */
2673 cmp_code = EQ, code = EQ;
2674 break;
2675
2676 case GE: case GT: case GEU: case GTU:
2677 /* These normally need swapping, but for integer zero we have
2678 special patterns that recognize swapped operands. */
2679 if (!fp_p && op1 == const0_rtx)
2680 cmp_code = code, code = NE;
2681 else
2682 {
2683 cmp_code = swap_condition (code);
2684 code = NE;
2685 tem = op0, op0 = op1, op1 = tem;
2686 }
2687 break;
2688
2689 default:
2690 gcc_unreachable ();
2691 }
2692
2693 tem = gen_reg_rtx (cmp_op_mode);
2694 emit_insn (gen_rtx_SET (VOIDmode, tem,
2695 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2696 op0, op1)));
2697
2698 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2699 op0 = gen_lowpart (cmp_op_mode, tem);
2700 op1 = CONST0_RTX (cmp_op_mode);
2701 fp_p = !fp_p;
2702 local_fast_math = 1;
2703 }
2704
2705 /* We may be able to use a conditional move directly.
2706 This avoids emitting spurious compares. */
2707 if (signed_comparison_operator (cmp, VOIDmode)
2708 && (!fp_p || local_fast_math)
2709 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2710 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2711
2712 /* We can't put the comparison inside the conditional move;
2713 emit a compare instruction and put that inside the
2714 conditional move. Make sure we emit only comparisons we have;
2715 swap or reverse as necessary. */
2716
2717 if (!can_create_pseudo_p ())
2718 return NULL_RTX;
2719
2720 switch (code)
2721 {
2722 case EQ: case LE: case LT: case LEU: case LTU:
2723 /* We have these compares: */
2724 break;
2725
2726 case NE:
2727 /* This must be reversed. */
2728 code = reverse_condition (code);
2729 cmov_code = EQ;
2730 break;
2731
2732 case GE: case GT: case GEU: case GTU:
2733 /* These must be swapped. */
2734 if (op1 != CONST0_RTX (cmp_mode))
2735 {
2736 code = swap_condition (code);
2737 tem = op0, op0 = op1, op1 = tem;
2738 }
2739 break;
2740
2741 default:
2742 gcc_unreachable ();
2743 }
2744
2745 if (!fp_p)
2746 {
2747 if (!reg_or_0_operand (op0, DImode))
2748 op0 = force_reg (DImode, op0);
2749 if (!reg_or_8bit_operand (op1, DImode))
2750 op1 = force_reg (DImode, op1);
2751 }
2752
2753 /* ??? We mark the branch mode to be CCmode to prevent the compare
2754 and cmov from being combined, since the compare insn follows IEEE
2755 rules that the cmov does not. */
2756 if (fp_p && !local_fast_math)
2757 cmov_mode = CCmode;
2758
2759 tem = gen_reg_rtx (cmp_op_mode);
2760 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2761 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2762 }
2763
2764 /* Simplify a conditional move of two constants into a setcc with
2765 arithmetic. This is done with a splitter since combine would
2766 just undo the work if done during code generation. It also catches
2767 cases we wouldn't have before cse. */
2768
2769 int
2770 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2771 rtx t_rtx, rtx f_rtx)
2772 {
2773 HOST_WIDE_INT t, f, diff;
2774 enum machine_mode mode;
2775 rtx target, subtarget, tmp;
2776
2777 mode = GET_MODE (dest);
2778 t = INTVAL (t_rtx);
2779 f = INTVAL (f_rtx);
2780 diff = t - f;
2781
2782 if (((code == NE || code == EQ) && diff < 0)
2783 || (code == GE || code == GT))
2784 {
2785 code = reverse_condition (code);
2786 diff = t, t = f, f = diff;
2787 diff = t - f;
2788 }
2789
2790 subtarget = target = dest;
2791 if (mode != DImode)
2792 {
2793 target = gen_lowpart (DImode, dest);
2794 if (can_create_pseudo_p ())
2795 subtarget = gen_reg_rtx (DImode);
2796 else
2797 subtarget = target;
2798 }
2799 /* Below, we must be careful to use copy_rtx on target and subtarget
2800 in intermediate insns, as they may be a subreg rtx, which may not
2801 be shared. */
2802
2803 if (f == 0 && exact_log2 (diff) > 0
2804 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2805 viable over a longer latency cmove. On EV5, the E0 slot is a
2806 scarce resource, and on EV4 shift has the same latency as a cmove. */
2807 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2808 {
2809 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2810 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2811
2812 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2813 GEN_INT (exact_log2 (t)));
2814 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2815 }
2816 else if (f == 0 && t == -1)
2817 {
2818 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2819 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2820
2821 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2822 }
2823 else if (diff == 1 || diff == 4 || diff == 8)
2824 {
2825 rtx add_op;
2826
2827 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2828 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2829
2830 if (diff == 1)
2831 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2832 else
2833 {
2834 add_op = GEN_INT (f);
2835 if (sext_add_operand (add_op, mode))
2836 {
2837 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2838 GEN_INT (diff));
2839 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2840 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2841 }
2842 else
2843 return 0;
2844 }
2845 }
2846 else
2847 return 0;
2848
2849 return 1;
2850 }
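
/* For example, with COND a comparison result in a register:

     x = COND ? 8 : 0    becomes   t = setcc(COND);  x = t << 3
     x = COND ? -1 : 0   becomes   t = setcc(COND);  x = -t
     x = COND ? 6 : 2    becomes   t = setcc(COND);  x = t*4 + 2  (s4addq)

   (illustrative pseudo-code; anything not matching the cases above is
   left to a real conditional move).  */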
2851 \f
2852 /* Look up the function X_floating library function name for the
2853 given operation. */
2854
2855 struct GTY(()) xfloating_op
2856 {
2857 const enum rtx_code code;
2858 const char *const GTY((skip)) osf_func;
2859 const char *const GTY((skip)) vms_func;
2860 rtx libcall;
2861 };
2862
2863 static GTY(()) struct xfloating_op xfloating_ops[] =
2864 {
2865 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2866 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2867 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2868 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2869 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2870 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2871 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2872 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2873 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2874 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2875 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2876 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2877 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2878 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2879 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2880 };
2881
2882 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2883 {
2884 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2885 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2886 };
2887
2888 static rtx
2889 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2890 {
2891 struct xfloating_op *ops = xfloating_ops;
2892 long n = ARRAY_SIZE (xfloating_ops);
2893 long i;
2894
2895 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2896
2897 /* How irritating. Nothing to key off for the main table. */
2898 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2899 {
2900 ops = vax_cvt_ops;
2901 n = ARRAY_SIZE (vax_cvt_ops);
2902 }
2903
2904 for (i = 0; i < n; ++i, ++ops)
2905 if (ops->code == code)
2906 {
2907 rtx func = ops->libcall;
2908 if (!func)
2909 {
2910 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2911 ? ops->vms_func : ops->osf_func);
2912 ops->libcall = func;
2913 }
2914 return func;
2915 }
2916
2917 gcc_unreachable ();
2918 }
2919
2920 /* Most X_floating operations take the rounding mode as an argument.
2921 Compute that here. */
2922
2923 static int
2924 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2925 enum alpha_fp_rounding_mode round)
2926 {
2927 int mode;
2928
2929 switch (round)
2930 {
2931 case ALPHA_FPRM_NORM:
2932 mode = 2;
2933 break;
2934 case ALPHA_FPRM_MINF:
2935 mode = 1;
2936 break;
2937 case ALPHA_FPRM_CHOP:
2938 mode = 0;
2939 break;
2940 case ALPHA_FPRM_DYN:
2941 mode = 4;
2942 break;
2943 default:
2944 gcc_unreachable ();
2945
2946 /* XXX For reference, round to +inf is mode = 3. */
2947 }
2948
2949 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2950 mode |= 0x10000;
2951
2952 return mode;
2953 }
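
/* So, for example, a FLOAT_TRUNCATE performed under the default
   round-to-nearest setting with ALPHA_FPTM_N yields 2 | 0x10000,
   i.e. 0x10002.  */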
2954
2955 /* Emit an X_floating library function call.
2956
2957 Note that these functions do not follow normal calling conventions:
2958 TFmode arguments are passed in two integer registers (as opposed to
2959 indirect); TFmode return values appear in R16+R17.
2960
2961 FUNC is the function to call.
2962 TARGET is where the output belongs.
2963 OPERANDS are the inputs.
2964 NOPERANDS is the count of inputs.
2965 EQUIV is the expression equivalent for the function.
2966 */
2967
2968 static void
2969 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2970 int noperands, rtx equiv)
2971 {
2972 rtx usage = NULL_RTX, tmp, reg;
2973 int regno = 16, i;
2974
2975 start_sequence ();
2976
2977 for (i = 0; i < noperands; ++i)
2978 {
2979 switch (GET_MODE (operands[i]))
2980 {
2981 case TFmode:
2982 reg = gen_rtx_REG (TFmode, regno);
2983 regno += 2;
2984 break;
2985
2986 case DFmode:
2987 reg = gen_rtx_REG (DFmode, regno + 32);
2988 regno += 1;
2989 break;
2990
2991 case VOIDmode:
2992 gcc_assert (CONST_INT_P (operands[i]));
2993 /* FALLTHRU */
2994 case DImode:
2995 reg = gen_rtx_REG (DImode, regno);
2996 regno += 1;
2997 break;
2998
2999 default:
3000 gcc_unreachable ();
3001 }
3002
3003 emit_move_insn (reg, operands[i]);
3004 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3005 }
3006
3007 switch (GET_MODE (target))
3008 {
3009 case TFmode:
3010 reg = gen_rtx_REG (TFmode, 16);
3011 break;
3012 case DFmode:
3013 reg = gen_rtx_REG (DFmode, 32);
3014 break;
3015 case DImode:
3016 reg = gen_rtx_REG (DImode, 0);
3017 break;
3018 default:
3019 gcc_unreachable ();
3020 }
3021
3022 tmp = gen_rtx_MEM (QImode, func);
3023 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3024 const0_rtx, const0_rtx));
3025 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3026 RTL_CONST_CALL_P (tmp) = 1;
3027
3028 tmp = get_insns ();
3029 end_sequence ();
3030
3031 emit_libcall_block (tmp, target, reg, equiv);
3032 }
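
/* For example, the _OtsAddX / OTS$ADD_X call generated by
   alpha_emit_xfloating_arith below passes its first TFmode operand in
   $16/$17, its second in $18/$19 and the rounding-mode argument in
   $20, and the TFmode result comes back in $16/$17.  */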
3033
3034 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3035
3036 void
3037 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3038 {
3039 rtx func;
3040 int mode;
3041 rtx out_operands[3];
3042
3043 func = alpha_lookup_xfloating_lib_func (code);
3044 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3045
3046 out_operands[0] = operands[1];
3047 out_operands[1] = operands[2];
3048 out_operands[2] = GEN_INT (mode);
3049 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3050 gen_rtx_fmt_ee (code, TFmode, operands[1],
3051 operands[2]));
3052 }
3053
3054 /* Emit an X_floating library function call for a comparison. */
3055
3056 static rtx
3057 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3058 {
3059 enum rtx_code cmp_code, res_code;
3060 rtx func, out, operands[2], note;
3061
3062 /* X_floating library comparison functions return
3063 -1 unordered
3064 0 false
3065 1 true
3066 Convert the compare against the raw return value. */
3067
3068 cmp_code = *pcode;
3069 switch (cmp_code)
3070 {
3071 case UNORDERED:
3072 cmp_code = EQ;
3073 res_code = LT;
3074 break;
3075 case ORDERED:
3076 cmp_code = EQ;
3077 res_code = GE;
3078 break;
3079 case NE:
3080 res_code = NE;
3081 break;
3082 case EQ:
3083 case LT:
3084 case GT:
3085 case LE:
3086 case GE:
3087 res_code = GT;
3088 break;
3089 default:
3090 gcc_unreachable ();
3091 }
3092 *pcode = res_code;
3093
3094 func = alpha_lookup_xfloating_lib_func (cmp_code);
3095
3096 operands[0] = op0;
3097 operands[1] = op1;
3098 out = gen_reg_rtx (DImode);
3099
3100 /* What's actually returned is -1,0,1, not a proper boolean value,
3101 so use an EXPR_LIST as with a generic libcall instead of a
3102 comparison type expression. */
3103 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3104 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3105 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3106 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3107
3108 return out;
3109 }
3110
3111 /* Emit an X_floating library function call for a conversion. */
3112
3113 void
3114 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3115 {
3116 int noperands = 1, mode;
3117 rtx out_operands[2];
3118 rtx func;
3119 enum rtx_code code = orig_code;
3120
3121 if (code == UNSIGNED_FIX)
3122 code = FIX;
3123
3124 func = alpha_lookup_xfloating_lib_func (code);
3125
3126 out_operands[0] = operands[1];
3127
3128 switch (code)
3129 {
3130 case FIX:
3131 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3132 out_operands[1] = GEN_INT (mode);
3133 noperands = 2;
3134 break;
3135 case FLOAT_TRUNCATE:
3136 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3137 out_operands[1] = GEN_INT (mode);
3138 noperands = 2;
3139 break;
3140 default:
3141 break;
3142 }
3143
3144 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3145 gen_rtx_fmt_e (orig_code,
3146 GET_MODE (operands[0]),
3147 operands[1]));
3148 }
3149
3150 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3151 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3152 guarantee that the sequence
3153 set (OP[0] OP[2])
3154 set (OP[1] OP[3])
3155 is valid. Naturally, output operand ordering is little-endian.
3156 This is used by *movtf_internal and *movti_internal. */
3157
3158 void
3159 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3160 bool fixup_overlap)
3161 {
3162 switch (GET_CODE (operands[1]))
3163 {
3164 case REG:
3165 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3166 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3167 break;
3168
3169 case MEM:
3170 operands[3] = adjust_address (operands[1], DImode, 8);
3171 operands[2] = adjust_address (operands[1], DImode, 0);
3172 break;
3173
3174 case CONST_INT:
3175 case CONST_DOUBLE:
3176 gcc_assert (operands[1] == CONST0_RTX (mode));
3177 operands[2] = operands[3] = const0_rtx;
3178 break;
3179
3180 default:
3181 gcc_unreachable ();
3182 }
3183
3184 switch (GET_CODE (operands[0]))
3185 {
3186 case REG:
3187 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3188 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3189 break;
3190
3191 case MEM:
3192 operands[1] = adjust_address (operands[0], DImode, 8);
3193 operands[0] = adjust_address (operands[0], DImode, 0);
3194 break;
3195
3196 default:
3197 gcc_unreachable ();
3198 }
3199
3200 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3201 {
3202 rtx tmp;
3203 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3204 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3205 }
3206 }
3207
3208 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3209 op2 is a register containing the sign bit, operation is the
3210 logical operation to be performed. */
3211
3212 void
3213 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3214 {
3215 rtx high_bit = operands[2];
3216 rtx scratch;
3217 int move;
3218
3219 alpha_split_tmode_pair (operands, TFmode, false);
3220
3221 /* Detect three flavors of operand overlap. */
3222 move = 1;
3223 if (rtx_equal_p (operands[0], operands[2]))
3224 move = 0;
3225 else if (rtx_equal_p (operands[1], operands[2]))
3226 {
3227 if (rtx_equal_p (operands[0], high_bit))
3228 move = 2;
3229 else
3230 move = -1;
3231 }
3232
3233 if (move < 0)
3234 emit_move_insn (operands[0], operands[2]);
3235
3236 /* ??? If the destination overlaps both source tf and high_bit, then
3237 assume source tf is dead in its entirety and use the other half
3238 for a scratch register. Otherwise "scratch" is just the proper
3239 destination register. */
3240 scratch = operands[move < 2 ? 1 : 3];
3241
3242 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3243
3244 if (move > 0)
3245 {
3246 emit_move_insn (operands[0], operands[2]);
3247 if (move > 1)
3248 emit_move_insn (operands[1], scratch);
3249 }
3250 }
3251 \f
3252 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3253 unaligned data:
3254
3255 unsigned: signed:
3256 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3257 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3258 lda r3,X(r11) lda r3,X+2(r11)
3259 extwl r1,r3,r1 extql r1,r3,r1
3260 extwh r2,r3,r2 extqh r2,r3,r2
3261 or r1,r2,r1 or r1,r2,r1
3262 sra r1,48,r1
3263
3264 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3265 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3266 lda r3,X(r11) lda r3,X(r11)
3267 extll r1,r3,r1 extll r1,r3,r1
3268 extlh r2,r3,r2 extlh r2,r3,r2
3269 or r1,r2,r1 addl r1,r2,r1
3270
3271 quad: ldq_u r1,X(r11)
3272 ldq_u r2,X+7(r11)
3273 lda r3,X(r11)
3274 extql r1,r3,r1
3275 extqh r2,r3,r2
3276 or r1,r2,r1
3277 */
3278
3279 void
3280 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3281 HOST_WIDE_INT ofs, int sign)
3282 {
3283 rtx meml, memh, addr, extl, exth, tmp, mema;
3284 enum machine_mode mode;
3285
3286 if (TARGET_BWX && size == 2)
3287 {
3288 meml = adjust_address (mem, QImode, ofs);
3289 memh = adjust_address (mem, QImode, ofs+1);
3290 if (BYTES_BIG_ENDIAN)
3291 tmp = meml, meml = memh, memh = tmp;
3292 extl = gen_reg_rtx (DImode);
3293 exth = gen_reg_rtx (DImode);
3294 emit_insn (gen_zero_extendqidi2 (extl, meml));
3295 emit_insn (gen_zero_extendqidi2 (exth, memh));
3296 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3297 NULL, 1, OPTAB_LIB_WIDEN);
3298 addr = expand_simple_binop (DImode, IOR, extl, exth,
3299 NULL, 1, OPTAB_LIB_WIDEN);
3300
3301 if (sign && GET_MODE (tgt) != HImode)
3302 {
3303 addr = gen_lowpart (HImode, addr);
3304 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3305 }
3306 else
3307 {
3308 if (GET_MODE (tgt) != DImode)
3309 addr = gen_lowpart (GET_MODE (tgt), addr);
3310 emit_move_insn (tgt, addr);
3311 }
3312 return;
3313 }
3314
3315 meml = gen_reg_rtx (DImode);
3316 memh = gen_reg_rtx (DImode);
3317 addr = gen_reg_rtx (DImode);
3318 extl = gen_reg_rtx (DImode);
3319 exth = gen_reg_rtx (DImode);
3320
3321 mema = XEXP (mem, 0);
3322 if (GET_CODE (mema) == LO_SUM)
3323 mema = force_reg (Pmode, mema);
3324
3325 /* AND addresses cannot be in any alias set, since they may implicitly
3326 alias surrounding code. Ideally we'd have some alias set that
3327 covered all types except those with alignment 8 or higher. */
3328
3329 tmp = change_address (mem, DImode,
3330 gen_rtx_AND (DImode,
3331 plus_constant (mema, ofs),
3332 GEN_INT (-8)));
3333 set_mem_alias_set (tmp, 0);
3334 emit_move_insn (meml, tmp);
3335
3336 tmp = change_address (mem, DImode,
3337 gen_rtx_AND (DImode,
3338 plus_constant (mema, ofs + size - 1),
3339 GEN_INT (-8)));
3340 set_mem_alias_set (tmp, 0);
3341 emit_move_insn (memh, tmp);
3342
3343 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3344 {
3345 emit_move_insn (addr, plus_constant (mema, -1));
3346
3347 emit_insn (gen_extqh_be (extl, meml, addr));
3348 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3349
3350 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3351 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3352 addr, 1, OPTAB_WIDEN);
3353 }
3354 else if (sign && size == 2)
3355 {
3356 emit_move_insn (addr, plus_constant (mema, ofs+2));
3357
3358 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3359 emit_insn (gen_extqh_le (exth, memh, addr));
3360
3361 /* We must use tgt here for the target. Alpha-vms port fails if we use
3362 addr for the target, because addr is marked as a pointer and combine
3363 knows that pointers are always sign-extended 32-bit values. */
3364 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3365 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3366 addr, 1, OPTAB_WIDEN);
3367 }
3368 else
3369 {
3370 if (WORDS_BIG_ENDIAN)
3371 {
3372 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3373 switch ((int) size)
3374 {
3375 case 2:
3376 emit_insn (gen_extwh_be (extl, meml, addr));
3377 mode = HImode;
3378 break;
3379
3380 case 4:
3381 emit_insn (gen_extlh_be (extl, meml, addr));
3382 mode = SImode;
3383 break;
3384
3385 case 8:
3386 emit_insn (gen_extqh_be (extl, meml, addr));
3387 mode = DImode;
3388 break;
3389
3390 default:
3391 gcc_unreachable ();
3392 }
3393 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3394 }
3395 else
3396 {
3397 emit_move_insn (addr, plus_constant (mema, ofs));
3398 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3399 switch ((int) size)
3400 {
3401 case 2:
3402 emit_insn (gen_extwh_le (exth, memh, addr));
3403 mode = HImode;
3404 break;
3405
3406 case 4:
3407 emit_insn (gen_extlh_le (exth, memh, addr));
3408 mode = SImode;
3409 break;
3410
3411 case 8:
3412 emit_insn (gen_extqh_le (exth, memh, addr));
3413 mode = DImode;
3414 break;
3415
3416 default:
3417 gcc_unreachable ();
3418 }
3419 }
3420
3421 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3422 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3423 sign, OPTAB_WIDEN);
3424 }
3425
3426 if (addr != tgt)
3427 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3428 }
3429
3430 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3431
3432 void
3433 alpha_expand_unaligned_store (rtx dst, rtx src,
3434 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3435 {
3436 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3437
3438 if (TARGET_BWX && size == 2)
3439 {
3440 if (src != const0_rtx)
3441 {
3442 dstl = gen_lowpart (QImode, src);
3443 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3444 NULL, 1, OPTAB_LIB_WIDEN);
3445 dsth = gen_lowpart (QImode, dsth);
3446 }
3447 else
3448 dstl = dsth = const0_rtx;
3449
3450 meml = adjust_address (dst, QImode, ofs);
3451 memh = adjust_address (dst, QImode, ofs+1);
3452 if (BYTES_BIG_ENDIAN)
3453 addr = meml, meml = memh, memh = addr;
3454
3455 emit_move_insn (meml, dstl);
3456 emit_move_insn (memh, dsth);
3457 return;
3458 }
3459
3460 dstl = gen_reg_rtx (DImode);
3461 dsth = gen_reg_rtx (DImode);
3462 insl = gen_reg_rtx (DImode);
3463 insh = gen_reg_rtx (DImode);
3464
3465 dsta = XEXP (dst, 0);
3466 if (GET_CODE (dsta) == LO_SUM)
3467 dsta = force_reg (Pmode, dsta);
3468
3469 /* AND addresses cannot be in any alias set, since they may implicitly
3470 alias surrounding code. Ideally we'd have some alias set that
3471 covered all types except those with alignment 8 or higher. */
3472
3473 meml = change_address (dst, DImode,
3474 gen_rtx_AND (DImode,
3475 plus_constant (dsta, ofs),
3476 GEN_INT (-8)));
3477 set_mem_alias_set (meml, 0);
3478
3479 memh = change_address (dst, DImode,
3480 gen_rtx_AND (DImode,
3481 plus_constant (dsta, ofs + size - 1),
3482 GEN_INT (-8)));
3483 set_mem_alias_set (memh, 0);
3484
3485 emit_move_insn (dsth, memh);
3486 emit_move_insn (dstl, meml);
3487 if (WORDS_BIG_ENDIAN)
3488 {
3489 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3490
3491 if (src != const0_rtx)
3492 {
3493 switch ((int) size)
3494 {
3495 case 2:
3496 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3497 break;
3498 case 4:
3499 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3500 break;
3501 case 8:
3502 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3503 break;
3504 }
3505 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3506 GEN_INT (size*8), addr));
3507 }
3508
3509 switch ((int) size)
3510 {
3511 case 2:
3512 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3513 break;
3514 case 4:
3515 {
3516 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3517 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3518 break;
3519 }
3520 case 8:
3521 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3522 break;
3523 }
3524
3525 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3526 }
3527 else
3528 {
3529 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3530
3531 if (src != CONST0_RTX (GET_MODE (src)))
3532 {
3533 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3534 GEN_INT (size*8), addr));
3535
3536 switch ((int) size)
3537 {
3538 case 2:
3539 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3540 break;
3541 case 4:
3542 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3543 break;
3544 case 8:
3545 emit_insn (gen_insql_le (insl, src, addr));
3546 break;
3547 }
3548 }
3549
3550 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3551
3552 switch ((int) size)
3553 {
3554 case 2:
3555 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3556 break;
3557 case 4:
3558 {
3559 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3560 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3561 break;
3562 }
3563 case 8:
3564 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3565 break;
3566 }
3567 }
3568
3569 if (src != CONST0_RTX (GET_MODE (src)))
3570 {
3571 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3572 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3573 }
3574
3575 if (WORDS_BIG_ENDIAN)
3576 {
3577 emit_move_insn (meml, dstl);
3578 emit_move_insn (memh, dsth);
3579 }
3580 else
3581 {
3582 /* Must store high before low for the degenerate case of an aligned address. */
3583 emit_move_insn (memh, dsth);
3584 emit_move_insn (meml, dstl);
3585 }
3586 }
3587
3588 /* The block move code tries to maximize speed by separating loads and
3589 stores at the expense of register pressure: we load all of the data
3590 before we store it back out. There are two secondary effects worth
3591 mentioning: this speeds copying to/from aligned and unaligned
3592 buffers, and it makes the code significantly easier to write. */
3593
3594 #define MAX_MOVE_WORDS 8
3595
3596 /* Load an integral number of consecutive unaligned quadwords. */
3597
3598 static void
3599 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3600 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3601 {
3602 rtx const im8 = GEN_INT (-8);
3603 rtx const i64 = GEN_INT (64);
3604 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3605 rtx sreg, areg, tmp, smema;
3606 HOST_WIDE_INT i;
3607
3608 smema = XEXP (smem, 0);
3609 if (GET_CODE (smema) == LO_SUM)
3610 smema = force_reg (Pmode, smema);
3611
3612 /* Generate all the tmp registers we need. */
3613 for (i = 0; i < words; ++i)
3614 {
3615 data_regs[i] = out_regs[i];
3616 ext_tmps[i] = gen_reg_rtx (DImode);
3617 }
3618 data_regs[words] = gen_reg_rtx (DImode);
3619
3620 if (ofs != 0)
3621 smem = adjust_address (smem, GET_MODE (smem), ofs);
3622
3623 /* Load up all of the source data. */
3624 for (i = 0; i < words; ++i)
3625 {
3626 tmp = change_address (smem, DImode,
3627 gen_rtx_AND (DImode,
3628 plus_constant (smema, 8*i),
3629 im8));
3630 set_mem_alias_set (tmp, 0);
3631 emit_move_insn (data_regs[i], tmp);
3632 }
3633
3634 tmp = change_address (smem, DImode,
3635 gen_rtx_AND (DImode,
3636 plus_constant (smema, 8*words - 1),
3637 im8));
3638 set_mem_alias_set (tmp, 0);
3639 emit_move_insn (data_regs[words], tmp);
3640
3641 /* Extract the half-word fragments. Unfortunately DEC decided to make
3642 extxh with offset zero a noop instead of zeroing the register, so
3643 we must take care of that edge condition ourselves with cmov. */
3644
3645 sreg = copy_addr_to_reg (smema);
3646 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3647 1, OPTAB_WIDEN);
3648 if (WORDS_BIG_ENDIAN)
3649 emit_move_insn (sreg, plus_constant (sreg, 7));
3650 for (i = 0; i < words; ++i)
3651 {
3652 if (WORDS_BIG_ENDIAN)
3653 {
3654 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3655 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3656 }
3657 else
3658 {
3659 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3660 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3661 }
3662 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3663 gen_rtx_IF_THEN_ELSE (DImode,
3664 gen_rtx_EQ (DImode, areg,
3665 const0_rtx),
3666 const0_rtx, ext_tmps[i])));
3667 }
3668
3669 /* Merge the half-words into whole words. */
3670 for (i = 0; i < words; ++i)
3671 {
3672 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3673 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3674 }
3675 }
3676
3677 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3678 may be NULL to store zeros. */
3679
3680 static void
3681 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3682 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3683 {
3684 rtx const im8 = GEN_INT (-8);
3685 rtx const i64 = GEN_INT (64);
3686 rtx ins_tmps[MAX_MOVE_WORDS];
3687 rtx st_tmp_1, st_tmp_2, dreg;
3688 rtx st_addr_1, st_addr_2, dmema;
3689 HOST_WIDE_INT i;
3690
3691 dmema = XEXP (dmem, 0);
3692 if (GET_CODE (dmema) == LO_SUM)
3693 dmema = force_reg (Pmode, dmema);
3694
3695 /* Generate all the tmp registers we need. */
3696 if (data_regs != NULL)
3697 for (i = 0; i < words; ++i)
3698 ins_tmps[i] = gen_reg_rtx(DImode);
3699 st_tmp_1 = gen_reg_rtx(DImode);
3700 st_tmp_2 = gen_reg_rtx(DImode);
3701
3702 if (ofs != 0)
3703 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3704
3705 st_addr_2 = change_address (dmem, DImode,
3706 gen_rtx_AND (DImode,
3707 plus_constant (dmema, words*8 - 1),
3708 im8));
3709 set_mem_alias_set (st_addr_2, 0);
3710
3711 st_addr_1 = change_address (dmem, DImode,
3712 gen_rtx_AND (DImode, dmema, im8));
3713 set_mem_alias_set (st_addr_1, 0);
3714
3715 /* Load up the destination end bits. */
3716 emit_move_insn (st_tmp_2, st_addr_2);
3717 emit_move_insn (st_tmp_1, st_addr_1);
3718
3719 /* Shift the input data into place. */
3720 dreg = copy_addr_to_reg (dmema);
3721 if (WORDS_BIG_ENDIAN)
3722 emit_move_insn (dreg, plus_constant (dreg, 7));
3723 if (data_regs != NULL)
3724 {
3725 for (i = words-1; i >= 0; --i)
3726 {
3727 if (WORDS_BIG_ENDIAN)
3728 {
3729 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3730 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3731 }
3732 else
3733 {
3734 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3735 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3736 }
3737 }
3738 for (i = words-1; i > 0; --i)
3739 {
3740 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3741 ins_tmps[i-1], ins_tmps[i-1], 1,
3742 OPTAB_WIDEN);
3743 }
3744 }
3745
3746 /* Split and merge the ends with the destination data. */
3747 if (WORDS_BIG_ENDIAN)
3748 {
3749 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3750 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3751 }
3752 else
3753 {
3754 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3755 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3756 }
3757
3758 if (data_regs != NULL)
3759 {
3760 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3761 st_tmp_2, 1, OPTAB_WIDEN);
3762 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3763 st_tmp_1, 1, OPTAB_WIDEN);
3764 }
3765
3766 /* Store it all. */
3767 if (WORDS_BIG_ENDIAN)
3768 emit_move_insn (st_addr_1, st_tmp_1);
3769 else
3770 emit_move_insn (st_addr_2, st_tmp_2);
3771 for (i = words-1; i > 0; --i)
3772 {
3773 rtx tmp = change_address (dmem, DImode,
3774 gen_rtx_AND (DImode,
3775 plus_constant(dmema,
3776 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3777 im8));
3778 set_mem_alias_set (tmp, 0);
3779 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3780 }
3781 if (WORDS_BIG_ENDIAN)
3782 emit_move_insn (st_addr_2, st_tmp_2);
3783 else
3784 emit_move_insn (st_addr_1, st_tmp_1);
3785 }
3786
3787
3788 /* Expand string/block move operations.
3789
3790 operands[0] is the pointer to the destination.
3791 operands[1] is the pointer to the source.
3792 operands[2] is the number of bytes to move.
3793 operands[3] is the alignment. */
3794
3795 int
3796 alpha_expand_block_move (rtx operands[])
3797 {
3798 rtx bytes_rtx = operands[2];
3799 rtx align_rtx = operands[3];
3800 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3801 HOST_WIDE_INT bytes = orig_bytes;
3802 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3803 HOST_WIDE_INT dst_align = src_align;
3804 rtx orig_src = operands[1];
3805 rtx orig_dst = operands[0];
3806 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3807 rtx tmp;
3808 unsigned int i, words, ofs, nregs = 0;
3809
3810 if (orig_bytes <= 0)
3811 return 1;
3812 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3813 return 0;
3814
3815 /* Look for additional alignment information from recorded register info. */
3816
3817 tmp = XEXP (orig_src, 0);
3818 if (REG_P (tmp))
3819 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3820 else if (GET_CODE (tmp) == PLUS
3821 && REG_P (XEXP (tmp, 0))
3822 && CONST_INT_P (XEXP (tmp, 1)))
3823 {
3824 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3825 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3826
3827 if (a > src_align)
3828 {
3829 if (a >= 64 && c % 8 == 0)
3830 src_align = 64;
3831 else if (a >= 32 && c % 4 == 0)
3832 src_align = 32;
3833 else if (a >= 16 && c % 2 == 0)
3834 src_align = 16;
3835 }
3836 }
3837
3838 tmp = XEXP (orig_dst, 0);
3839 if (REG_P (tmp))
3840 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3841 else if (GET_CODE (tmp) == PLUS
3842 && REG_P (XEXP (tmp, 0))
3843 && CONST_INT_P (XEXP (tmp, 1)))
3844 {
3845 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3846 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3847
3848 if (a > dst_align)
3849 {
3850 if (a >= 64 && c % 8 == 0)
3851 dst_align = 64;
3852 else if (a >= 32 && c % 4 == 0)
3853 dst_align = 32;
3854 else if (a >= 16 && c % 2 == 0)
3855 dst_align = 16;
3856 }
3857 }
3858
3859 ofs = 0;
3860 if (src_align >= 64 && bytes >= 8)
3861 {
3862 words = bytes / 8;
3863
3864 for (i = 0; i < words; ++i)
3865 data_regs[nregs + i] = gen_reg_rtx (DImode);
3866
3867 for (i = 0; i < words; ++i)
3868 emit_move_insn (data_regs[nregs + i],
3869 adjust_address (orig_src, DImode, ofs + i * 8));
3870
3871 nregs += words;
3872 bytes -= words * 8;
3873 ofs += words * 8;
3874 }
3875
3876 if (src_align >= 32 && bytes >= 4)
3877 {
3878 words = bytes / 4;
3879
3880 for (i = 0; i < words; ++i)
3881 data_regs[nregs + i] = gen_reg_rtx (SImode);
3882
3883 for (i = 0; i < words; ++i)
3884 emit_move_insn (data_regs[nregs + i],
3885 adjust_address (orig_src, SImode, ofs + i * 4));
3886
3887 nregs += words;
3888 bytes -= words * 4;
3889 ofs += words * 4;
3890 }
3891
3892 if (bytes >= 8)
3893 {
3894 words = bytes / 8;
3895
3896 for (i = 0; i < words+1; ++i)
3897 data_regs[nregs + i] = gen_reg_rtx (DImode);
3898
3899 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3900 words, ofs);
3901
3902 nregs += words;
3903 bytes -= words * 8;
3904 ofs += words * 8;
3905 }
3906
3907 if (! TARGET_BWX && bytes >= 4)
3908 {
3909 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3910 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3911 bytes -= 4;
3912 ofs += 4;
3913 }
3914
3915 if (bytes >= 2)
3916 {
3917 if (src_align >= 16)
3918 {
3919 do {
3920 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3921 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3922 bytes -= 2;
3923 ofs += 2;
3924 } while (bytes >= 2);
3925 }
3926 else if (! TARGET_BWX)
3927 {
3928 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3929 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3930 bytes -= 2;
3931 ofs += 2;
3932 }
3933 }
3934
3935 while (bytes > 0)
3936 {
3937 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3938 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3939 bytes -= 1;
3940 ofs += 1;
3941 }
3942
3943 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3944
3945 /* Now save it back out again. */
3946
3947 i = 0, ofs = 0;
3948
3949 /* Write out the data in whatever chunks reading the source allowed. */
3950 if (dst_align >= 64)
3951 {
3952 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3953 {
3954 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3955 data_regs[i]);
3956 ofs += 8;
3957 i++;
3958 }
3959 }
3960
3961 if (dst_align >= 32)
3962 {
3963 /* If the source has remaining DImode regs, write them out in
3964 two pieces. */
3965 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3966 {
3967 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3968 NULL_RTX, 1, OPTAB_WIDEN);
3969
3970 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3971 gen_lowpart (SImode, data_regs[i]));
3972 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3973 gen_lowpart (SImode, tmp));
3974 ofs += 8;
3975 i++;
3976 }
3977
3978 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3979 {
3980 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3981 data_regs[i]);
3982 ofs += 4;
3983 i++;
3984 }
3985 }
3986
3987 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3988 {
3989 /* Write out a remaining block of words using unaligned methods. */
3990
3991 for (words = 1; i + words < nregs; words++)
3992 if (GET_MODE (data_regs[i + words]) != DImode)
3993 break;
3994
3995 if (words == 1)
3996 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3997 else
3998 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3999 words, ofs);
4000
4001 i += words;
4002 ofs += words * 8;
4003 }
4004
4005 /* Due to the above, this won't be aligned. */
4006 /* ??? If we have more than one of these, consider constructing full
4007 words in registers and using alpha_expand_unaligned_store_words. */
4008 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4009 {
4010 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4011 ofs += 4;
4012 i++;
4013 }
4014
4015 if (dst_align >= 16)
4016 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4017 {
4018 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4019 i++;
4020 ofs += 2;
4021 }
4022 else
4023 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4024 {
4025 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4026 i++;
4027 ofs += 2;
4028 }
4029
4030 /* The remainder must be byte copies. */
4031 while (i < nregs)
4032 {
4033 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4034 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4035 i++;
4036 ofs += 1;
4037 }
4038
4039 return 1;
4040 }
4041
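/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */
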
4042 int
4043 alpha_expand_block_clear (rtx operands[])
4044 {
4045 rtx bytes_rtx = operands[1];
4046 rtx align_rtx = operands[3];
4047 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4048 HOST_WIDE_INT bytes = orig_bytes;
4049 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4050 HOST_WIDE_INT alignofs = 0;
4051 rtx orig_dst = operands[0];
4052 rtx tmp;
4053 int i, words, ofs = 0;
4054
4055 if (orig_bytes <= 0)
4056 return 1;
4057 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4058 return 0;
4059
4060 /* Look for stricter alignment. */
4061 tmp = XEXP (orig_dst, 0);
4062 if (REG_P (tmp))
4063 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4064 else if (GET_CODE (tmp) == PLUS
4065 && REG_P (XEXP (tmp, 0))
4066 && CONST_INT_P (XEXP (tmp, 1)))
4067 {
4068 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4069 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4070
4071 if (a > align)
4072 {
4073 if (a >= 64)
4074 align = a, alignofs = 8 - c % 8;
4075 else if (a >= 32)
4076 align = a, alignofs = 4 - c % 4;
4077 else if (a >= 16)
4078 align = a, alignofs = 2 - c % 2;
4079 }
4080 }
4081
4082 /* Handle an unaligned prefix first. */
4083
4084 if (alignofs > 0)
4085 {
4086 #if HOST_BITS_PER_WIDE_INT >= 64
4087 /* Given that alignofs is bounded by align, the only time BWX could
4088 generate three stores is for a 7 byte fill. Prefer two individual
4089 stores over a load/mask/store sequence. */
4090 if ((!TARGET_BWX || alignofs == 7)
4091 && align >= 32
4092 && !(alignofs == 4 && bytes >= 4))
4093 {
4094 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4095 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4096 rtx mem, tmp;
4097 HOST_WIDE_INT mask;
4098
4099 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4100 set_mem_alias_set (mem, 0);
4101
4102 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4103 if (bytes < alignofs)
4104 {
4105 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4106 ofs += bytes;
4107 bytes = 0;
4108 }
4109 else
4110 {
4111 bytes -= alignofs;
4112 ofs += alignofs;
4113 }
4114 alignofs = 0;
4115
4116 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4117 NULL_RTX, 1, OPTAB_WIDEN);
4118
4119 emit_move_insn (mem, tmp);
4120 }
4121 #endif
4122
4123 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4124 {
4125 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4126 bytes -= 1;
4127 ofs += 1;
4128 alignofs -= 1;
4129 }
4130 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4131 {
4132 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4133 bytes -= 2;
4134 ofs += 2;
4135 alignofs -= 2;
4136 }
4137 if (alignofs == 4 && bytes >= 4)
4138 {
4139 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4140 bytes -= 4;
4141 ofs += 4;
4142 alignofs = 0;
4143 }
4144
4145 /* If we've not used the extra lead alignment information by now,
4146 we won't be able to. Downgrade align to match what's left over. */
4147 if (alignofs > 0)
4148 {
4149 alignofs = alignofs & -alignofs;
4150 align = MIN (align, alignofs * BITS_PER_UNIT);
4151 }
4152 }
4153
4154 /* Handle a block of contiguous long-words. */
4155
4156 if (align >= 64 && bytes >= 8)
4157 {
4158 words = bytes / 8;
4159
4160 for (i = 0; i < words; ++i)
4161 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4162 const0_rtx);
4163
4164 bytes -= words * 8;
4165 ofs += words * 8;
4166 }
4167
4168 /* If the block is large and appropriately aligned, emit a single
4169 store followed by a sequence of stq_u insns. */
4170
4171 if (align >= 32 && bytes > 16)
4172 {
4173 rtx orig_dsta;
4174
4175 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4176 bytes -= 4;
4177 ofs += 4;
4178
4179 orig_dsta = XEXP (orig_dst, 0);
4180 if (GET_CODE (orig_dsta) == LO_SUM)
4181 orig_dsta = force_reg (Pmode, orig_dsta);
4182
4183 words = bytes / 8;
4184 for (i = 0; i < words; ++i)
4185 {
4186 rtx mem
4187 = change_address (orig_dst, DImode,
4188 gen_rtx_AND (DImode,
4189 plus_constant (orig_dsta, ofs + i*8),
4190 GEN_INT (-8)));
4191 set_mem_alias_set (mem, 0);
4192 emit_move_insn (mem, const0_rtx);
4193 }
4194
4195 /* Depending on the alignment, the first stq_u may have overlapped
4196 with the initial stl, which means that the last stq_u didn't
4197 write as much as it would appear. Leave those questionable bytes
4198 unaccounted for. */
4199 bytes -= words * 8 - 4;
4200 ofs += words * 8 - 4;
4201 }
4202
4203 /* Handle a smaller block of aligned words. */
4204
4205 if ((align >= 64 && bytes == 4)
4206 || (align == 32 && bytes >= 4))
4207 {
4208 words = bytes / 4;
4209
4210 for (i = 0; i < words; ++i)
4211 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4212 const0_rtx);
4213
4214 bytes -= words * 4;
4215 ofs += words * 4;
4216 }
4217
4218 /* An unaligned block uses stq_u stores for as many as possible. */
4219
4220 if (bytes >= 8)
4221 {
4222 words = bytes / 8;
4223
4224 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4225
4226 bytes -= words * 8;
4227 ofs += words * 8;
4228 }
4229
4230 /* Next clean up any trailing pieces. */
4231
4232 #if HOST_BITS_PER_WIDE_INT >= 64
4233 /* Count the number of bits in BYTES for which aligned stores could
4234 be emitted. */
4235 words = 0;
4236 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4237 if (bytes & i)
4238 words += 1;
4239
4240 /* If we have appropriate alignment (and it wouldn't take too many
4241 instructions otherwise), mask out the bytes we need. */
4242 if (TARGET_BWX ? words > 2 : bytes > 0)
4243 {
4244 if (align >= 64)
4245 {
4246 rtx mem, tmp;
4247 HOST_WIDE_INT mask;
4248
4249 mem = adjust_address (orig_dst, DImode, ofs);
4250 set_mem_alias_set (mem, 0);
4251
4252 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4253
4254 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4255 NULL_RTX, 1, OPTAB_WIDEN);
4256
4257 emit_move_insn (mem, tmp);
4258 return 1;
4259 }
4260 else if (align >= 32 && bytes < 4)
4261 {
4262 rtx mem, tmp;
4263 HOST_WIDE_INT mask;
4264
4265 mem = adjust_address (orig_dst, SImode, ofs);
4266 set_mem_alias_set (mem, 0);
4267
4268 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4269
4270 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4271 NULL_RTX, 1, OPTAB_WIDEN);
4272
4273 emit_move_insn (mem, tmp);
4274 return 1;
4275 }
4276 }
4277 #endif
4278
4279 if (!TARGET_BWX && bytes >= 4)
4280 {
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4282 bytes -= 4;
4283 ofs += 4;
4284 }
4285
4286 if (bytes >= 2)
4287 {
4288 if (align >= 16)
4289 {
4290 do {
4291 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4292 const0_rtx);
4293 bytes -= 2;
4294 ofs += 2;
4295 } while (bytes >= 2);
4296 }
4297 else if (! TARGET_BWX)
4298 {
4299 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4300 bytes -= 2;
4301 ofs += 2;
4302 }
4303 }
4304
4305 while (bytes > 0)
4306 {
4307 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4308 bytes -= 1;
4309 ofs += 1;
4310 }
4311
4312 return 1;
4313 }
4314
4315 /* Returns a mask so that zap(x, value) == x & mask. */
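/* For example, VALUE == 0x0f selects bytes 0-3 for zapping, so the
   returned mask is 0xffffffff00000000: each byte of the mask is 0xff
   exactly when the corresponding bit of VALUE is clear.  */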
4316
4317 rtx
4318 alpha_expand_zap_mask (HOST_WIDE_INT value)
4319 {
4320 rtx result;
4321 int i;
4322
4323 if (HOST_BITS_PER_WIDE_INT >= 64)
4324 {
4325 HOST_WIDE_INT mask = 0;
4326
4327 for (i = 7; i >= 0; --i)
4328 {
4329 mask <<= 8;
4330 if (!((value >> i) & 1))
4331 mask |= 0xff;
4332 }
4333
4334 result = gen_int_mode (mask, DImode);
4335 }
4336 else
4337 {
4338 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4339
4340 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4341
4342 for (i = 7; i >= 4; --i)
4343 {
4344 mask_hi <<= 8;
4345 if (!((value >> i) & 1))
4346 mask_hi |= 0xff;
4347 }
4348
4349 for (i = 3; i >= 0; --i)
4350 {
4351 mask_lo <<= 8;
4352 if (!((value >> i) & 1))
4353 mask_lo |= 0xff;
4354 }
4355
4356 result = immed_double_const (mask_lo, mask_hi, DImode);
4357 }
4358
4359 return result;
4360 }
4361
4362 void
4363 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4364 enum machine_mode mode,
4365 rtx op0, rtx op1, rtx op2)
4366 {
4367 op0 = gen_lowpart (mode, op0);
4368
4369 if (op1 == const0_rtx)
4370 op1 = CONST0_RTX (mode);
4371 else
4372 op1 = gen_lowpart (mode, op1);
4373
4374 if (op2 == const0_rtx)
4375 op2 = CONST0_RTX (mode);
4376 else
4377 op2 = gen_lowpart (mode, op2);
4378
4379 emit_insn ((*gen) (op0, op1, op2));
4380 }
4381
4382 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4383 COND is true. Mark the jump as unlikely to be taken. */
4384
4385 static void
4386 emit_unlikely_jump (rtx cond, rtx label)
4387 {
4388 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4389 rtx x;
4390
4391 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4392 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4393 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4394 }
4395
4396 /* A subroutine of the atomic operation splitters. Emit a load-locked
4397 instruction in MODE. */
4398
4399 static void
4400 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4401 {
4402 rtx (*fn) (rtx, rtx) = NULL;
4403 if (mode == SImode)
4404 fn = gen_load_locked_si;
4405 else if (mode == DImode)
4406 fn = gen_load_locked_di;
4407 emit_insn (fn (reg, mem));
4408 }
4409
4410 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4411 instruction in MODE. */
4412
4413 static void
4414 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4415 {
4416 rtx (*fn) (rtx, rtx, rtx) = NULL;
4417 if (mode == SImode)
4418 fn = gen_store_conditional_si;
4419 else if (mode == DImode)
4420 fn = gen_store_conditional_di;
4421 emit_insn (fn (res, mem, val));
4422 }
4423
4424 /* A subroutine of the atomic operation splitters. Emit an insxl
4425 instruction in MODE. */
4426
4427 static rtx
4428 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4429 {
4430 rtx ret = gen_reg_rtx (DImode);
4431 rtx (*fn) (rtx, rtx, rtx);
4432
4433 if (WORDS_BIG_ENDIAN)
4434 {
4435 if (mode == QImode)
4436 fn = gen_insbl_be;
4437 else
4438 fn = gen_inswl_be;
4439 }
4440 else
4441 {
4442 if (mode == QImode)
4443 fn = gen_insbl_le;
4444 else
4445 fn = gen_inswl_le;
4446 }
4447 /* The insbl and inswl patterns require a register operand. */
4448 op1 = force_reg (mode, op1);
4449 emit_insn (fn (ret, op1, op2));
4450
4451 return ret;
4452 }
4453
4454 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4455 to perform. MEM is the memory on which to operate. VAL is the second
4456 operand of the binary operator. BEFORE and AFTER are optional locations to
4457 return the value of MEM either before or after the operation. SCRATCH is
4458 a scratch register. */
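/* Roughly, for a DImode MEM the emitted sequence is

        mb
     1: ldq_l   scratch, MEM
        <code>  scratch, VAL, scratch
        stq_c   scratch, MEM
        beq     scratch, 1b
        mb

   (ldl_l/stl_c for SImode), with BEFORE/AFTER, when requested, copied
   from the values computed inside the loop.  */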
4459
4460 void
4461 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4462 rtx before, rtx after, rtx scratch)
4463 {
4464 enum machine_mode mode = GET_MODE (mem);
4465 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4466
4467 emit_insn (gen_memory_barrier ());
4468
4469 label = gen_label_rtx ();
4470 emit_label (label);
4471 label = gen_rtx_LABEL_REF (DImode, label);
4472
4473 if (before == NULL)
4474 before = scratch;
4475 emit_load_locked (mode, before, mem);
4476
4477 if (code == NOT)
4478 {
4479 x = gen_rtx_AND (mode, before, val);
4480 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4481
4482 x = gen_rtx_NOT (mode, val);
4483 }
4484 else
4485 x = gen_rtx_fmt_ee (code, mode, before, val);
4486 if (after)
4487 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4488 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4489
4490 emit_store_conditional (mode, cond, mem, scratch);
4491
4492 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4493 emit_unlikely_jump (x, label);
4494
4495 emit_insn (gen_memory_barrier ());
4496 }
4497
4498 /* Expand a compare and swap operation. */
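/* The sequence emitted below is approximately

        mb
     1: ldq_l   retval, MEM
        cmpeq   retval, OLDVAL, cond
        beq     cond, 2f
        mov     NEWVAL, scratch
        stq_c   scratch, MEM
        beq     scratch, 1b
        mb
     2:

   (ldl_l/stl_c for SImode).  */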
4499
4500 void
4501 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4502 rtx scratch)
4503 {
4504 enum machine_mode mode = GET_MODE (mem);
4505 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4506
4507 emit_insn (gen_memory_barrier ());
4508
4509 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4510 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4511 emit_label (XEXP (label1, 0));
4512
4513 emit_load_locked (mode, retval, mem);
4514
4515 x = gen_lowpart (DImode, retval);
4516 if (oldval == const0_rtx)
4517 x = gen_rtx_NE (DImode, x, const0_rtx);
4518 else
4519 {
4520 x = gen_rtx_EQ (DImode, x, oldval);
4521 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4522 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4523 }
4524 emit_unlikely_jump (x, label2);
4525
4526 emit_move_insn (scratch, newval);
4527 emit_store_conditional (mode, cond, mem, scratch);
4528
4529 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4530 emit_unlikely_jump (x, label1);
4531
4532 emit_insn (gen_memory_barrier ());
4533 emit_label (XEXP (label2, 0));
4534 }
4535
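/* Expand a compare and swap of a QImode or HImode value.  The narrow
   datum is byte-positioned within an aligned quadword via ins[bw]l,
   and the actual ll/sc loop is emitted by the _1 pattern, split by
   alpha_split_compare_and_swap_12 below.  */
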
4536 void
4537 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4538 {
4539 enum machine_mode mode = GET_MODE (mem);
4540 rtx addr, align, wdst;
4541 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4542
4543 addr = force_reg (DImode, XEXP (mem, 0));
4544 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4545 NULL_RTX, 1, OPTAB_DIRECT);
4546
4547 oldval = convert_modes (DImode, mode, oldval, 1);
4548 newval = emit_insxl (mode, newval, addr);
4549
4550 wdst = gen_reg_rtx (DImode);
4551 if (mode == QImode)
4552 fn5 = gen_sync_compare_and_swapqi_1;
4553 else
4554 fn5 = gen_sync_compare_and_swaphi_1;
4555 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4556
4557 emit_move_insn (dst, gen_lowpart (mode, wdst));
4558 }
4559
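/* Split the QImode/HImode compare and swap above: the old value is
   extracted from the aligned quadword with ext[bw]l, and on a match
   NEWVAL is merged back in with msk[bw]l and an or before the
   store-conditional.  */
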
4560 void
4561 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4562 rtx oldval, rtx newval, rtx align,
4563 rtx scratch, rtx cond)
4564 {
4565 rtx label1, label2, mem, width, mask, x;
4566
4567 mem = gen_rtx_MEM (DImode, align);
4568 MEM_VOLATILE_P (mem) = 1;
4569
4570 emit_insn (gen_memory_barrier ());
4571 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4572 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4573 emit_label (XEXP (label1, 0));
4574
4575 emit_load_locked (DImode, scratch, mem);
4576
4577 width = GEN_INT (GET_MODE_BITSIZE (mode));
4578 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4579 if (WORDS_BIG_ENDIAN)
4580 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4581 else
4582 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4583
4584 if (oldval == const0_rtx)
4585 x = gen_rtx_NE (DImode, dest, const0_rtx);
4586 else
4587 {
4588 x = gen_rtx_EQ (DImode, dest, oldval);
4589 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4590 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4591 }
4592 emit_unlikely_jump (x, label2);
4593
4594 if (WORDS_BIG_ENDIAN)
4595 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4596 else
4597 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4598 emit_insn (gen_iordi3 (scratch, scratch, newval));
4599
4600 emit_store_conditional (DImode, scratch, mem, scratch);
4601
4602 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4603 emit_unlikely_jump (x, label1);
4604
4605 emit_insn (gen_memory_barrier ());
4606 emit_label (XEXP (label2, 0));
4607 }
4608
4609 /* Expand an atomic exchange operation. */
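/* Unlike the other atomic splitters, no leading barrier is emitted;
   roughly:

     1: ldq_l   retval, MEM
        mov     VAL, scratch
        stq_c   scratch, MEM
        beq     scratch, 1b
        mb  */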
4610
4611 void
4612 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4613 {
4614 enum machine_mode mode = GET_MODE (mem);
4615 rtx label, x, cond = gen_lowpart (DImode, scratch);
4616
4617 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4618 emit_label (XEXP (label, 0));
4619
4620 emit_load_locked (mode, retval, mem);
4621 emit_move_insn (scratch, val);
4622 emit_store_conditional (mode, cond, mem, scratch);
4623
4624 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4625 emit_unlikely_jump (x, label);
4626
4627 emit_insn (gen_memory_barrier ());
4628 }
4629
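/* Expand an atomic exchange of a QImode or HImode value, again by
   operating on the containing aligned quadword.  */
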
4630 void
4631 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4632 {
4633 enum machine_mode mode = GET_MODE (mem);
4634 rtx addr, align, wdst;
4635 rtx (*fn4) (rtx, rtx, rtx, rtx);
4636
4637 /* Force the address into a register. */
4638 addr = force_reg (DImode, XEXP (mem, 0));
4639
4640 /* Align it to a multiple of 8. */
4641 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4642 NULL_RTX, 1, OPTAB_DIRECT);
4643
4644 /* Insert val into the correct byte location within the word. */
4645 val = emit_insxl (mode, val, addr);
4646
4647 wdst = gen_reg_rtx (DImode);
4648 if (mode == QImode)
4649 fn4 = gen_sync_lock_test_and_setqi_1;
4650 else
4651 fn4 = gen_sync_lock_test_and_sethi_1;
4652 emit_insn (fn4 (wdst, addr, val, align));
4653
4654 emit_move_insn (dst, gen_lowpart (mode, wdst));
4655 }
4656
4657 void
4658 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4659 rtx val, rtx align, rtx scratch)
4660 {
4661 rtx label, mem, width, mask, x;
4662
4663 mem = gen_rtx_MEM (DImode, align);
4664 MEM_VOLATILE_P (mem) = 1;
4665
4666 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4667 emit_label (XEXP (label, 0));
4668
4669 emit_load_locked (DImode, scratch, mem);
4670
4671 width = GEN_INT (GET_MODE_BITSIZE (mode));
4672 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4673 if (WORDS_BIG_ENDIAN)
4674 {
4675 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4676 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4677 }
4678 else
4679 {
4680 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4681 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4682 }
4683 emit_insn (gen_iordi3 (scratch, scratch, val));
4684
4685 emit_store_conditional (DImode, scratch, mem, scratch);
4686
4687 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4688 emit_unlikely_jump (x, label);
4689
4690 emit_insn (gen_memory_barrier ());
4691 }
4692 \f
4693 /* Adjust the cost of a scheduling dependency. Return the new cost of
4694 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4695
4696 static int
4697 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4698 {
4699 enum attr_type insn_type, dep_insn_type;
4700
4701 /* If the dependence is an anti-dependence, there is no cost. For an
4702 output dependence, there is sometimes a cost, but it doesn't seem
4703 worth handling those few cases. */
4704 if (REG_NOTE_KIND (link) != 0)
4705 return cost;
4706
4707 /* If we can't recognize the insns, we can't really do anything. */
4708 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4709 return cost;
4710
4711 insn_type = get_attr_type (insn);
4712 dep_insn_type = get_attr_type (dep_insn);
4713
4714 /* Bring in the user-defined memory latency. */
4715 if (dep_insn_type == TYPE_ILD
4716 || dep_insn_type == TYPE_FLD
4717 || dep_insn_type == TYPE_LDSYM)
4718 cost += alpha_memory_latency-1;
4719
4720 /* Everything else handled in DFA bypasses now. */
4721
4722 return cost;
4723 }
4724
4725 /* The number of instructions that can be issued per cycle. */
4726
4727 static int
4728 alpha_issue_rate (void)
4729 {
4730 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4731 }
4732
4733 /* How many alternative schedules to try. This should be as wide as the
4734 scheduling freedom in the DFA, but no wider. Making this value too
4735 large results in extra work for the scheduler.
4736
4737 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4738 alternative schedules. For EV5, we can choose between E0/E1 and
4739 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4740
4741 static int
4742 alpha_multipass_dfa_lookahead (void)
4743 {
4744 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4745 }
4746 \f
4747 /* Machine-specific function data. */
4748
4749 struct GTY(()) machine_function
4750 {
4751 /* For unicosmk. */
4752 /* List of call information words for calls from this function. */
4753 struct rtx_def *first_ciw;
4754 struct rtx_def *last_ciw;
4755 int ciw_count;
4756
4757 /* List of deferred case vectors. */
4758 struct rtx_def *addr_list;
4759
4760 /* For OSF. */
4761 const char *some_ld_name;
4762
4763 /* For TARGET_LD_BUGGY_LDGP. */
4764 struct rtx_def *gp_save_rtx;
4765 };
4766
4767 /* How to allocate a 'struct machine_function'. */
4768
4769 static struct machine_function *
4770 alpha_init_machine_status (void)
4771 {
4772 return ((struct machine_function *)
4773 ggc_alloc_cleared (sizeof (struct machine_function)));
4774 }
4775
4776 /* Functions to save and restore alpha_return_addr_rtx. */
4777
4778 /* Start the ball rolling with RETURN_ADDR_RTX. */
4779
4780 rtx
4781 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4782 {
4783 if (count != 0)
4784 return const0_rtx;
4785
4786 return get_hard_reg_initial_val (Pmode, REG_RA);
4787 }
4788
4789 /* Return or create a memory slot containing the gp value for the current
4790 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4791
4792 rtx
4793 alpha_gp_save_rtx (void)
4794 {
4795 rtx seq, m = cfun->machine->gp_save_rtx;
4796
4797 if (m == NULL)
4798 {
4799 start_sequence ();
4800
4801 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4802 m = validize_mem (m);
4803 emit_move_insn (m, pic_offset_table_rtx);
4804
4805 seq = get_insns ();
4806 end_sequence ();
4807
4808 /* We used to simply emit the sequence after entry_of_function.
4809 However this breaks the CFG if the first instruction in the
4810 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4811 label. Emit the sequence properly on the edge. We are only
4812 invoked from dw2_build_landing_pads and finish_eh_generation
4813 will call commit_edge_insertions thanks to a kludge. */
4814 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4815
4816 cfun->machine->gp_save_rtx = m;
4817 }
4818
4819 return m;
4820 }
4821
4822 static int
4823 alpha_ra_ever_killed (void)
4824 {
4825 rtx top;
4826
4827 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4828 return (int)df_regs_ever_live_p (REG_RA);
4829
4830 push_topmost_sequence ();
4831 top = get_insns ();
4832 pop_topmost_sequence ();
4833
4834 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4835 }
4836
4837 \f
4838 /* Return the trap mode suffix applicable to the current
4839 instruction, or NULL. */
4840
4841 static const char *
4842 get_trap_mode_suffix (void)
4843 {
4844 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4845
4846 switch (s)
4847 {
4848 case TRAP_SUFFIX_NONE:
4849 return NULL;
4850
4851 case TRAP_SUFFIX_SU:
4852 if (alpha_fptm >= ALPHA_FPTM_SU)
4853 return "su";
4854 return NULL;
4855
4856 case TRAP_SUFFIX_SUI:
4857 if (alpha_fptm >= ALPHA_FPTM_SUI)
4858 return "sui";
4859 return NULL;
4860
4861 case TRAP_SUFFIX_V_SV:
4862 switch (alpha_fptm)
4863 {
4864 case ALPHA_FPTM_N:
4865 return NULL;
4866 case ALPHA_FPTM_U:
4867 return "v";
4868 case ALPHA_FPTM_SU:
4869 case ALPHA_FPTM_SUI:
4870 return "sv";
4871 default:
4872 gcc_unreachable ();
4873 }
4874
4875 case TRAP_SUFFIX_V_SV_SVI:
4876 switch (alpha_fptm)
4877 {
4878 case ALPHA_FPTM_N:
4879 return NULL;
4880 case ALPHA_FPTM_U:
4881 return "v";
4882 case ALPHA_FPTM_SU:
4883 return "sv";
4884 case ALPHA_FPTM_SUI:
4885 return "svi";
4886 default:
4887 gcc_unreachable ();
4888 }
4889 break;
4890
4891 case TRAP_SUFFIX_U_SU_SUI:
4892 switch (alpha_fptm)
4893 {
4894 case ALPHA_FPTM_N:
4895 return NULL;
4896 case ALPHA_FPTM_U:
4897 return "u";
4898 case ALPHA_FPTM_SU:
4899 return "su";
4900 case ALPHA_FPTM_SUI:
4901 return "sui";
4902 default:
4903 gcc_unreachable ();
4904 }
4905 break;
4906
4907 default:
4908 gcc_unreachable ();
4909 }
4910 gcc_unreachable ();
4911 }
4912
4913 /* Return the rounding mode suffix applicable to the current
4914 instruction, or NULL. */
4915
4916 static const char *
4917 get_round_mode_suffix (void)
4918 {
4919 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4920
4921 switch (s)
4922 {
4923 case ROUND_SUFFIX_NONE:
4924 return NULL;
4925 case ROUND_SUFFIX_NORMAL:
4926 switch (alpha_fprm)
4927 {
4928 case ALPHA_FPRM_NORM:
4929 return NULL;
4930 case ALPHA_FPRM_MINF:
4931 return "m";
4932 case ALPHA_FPRM_CHOP:
4933 return "c";
4934 case ALPHA_FPRM_DYN:
4935 return "d";
4936 default:
4937 gcc_unreachable ();
4938 }
4939 break;
4940
4941 case ROUND_SUFFIX_C:
4942 return "c";
4943
4944 default:
4945 gcc_unreachable ();
4946 }
4947 gcc_unreachable ();
4948 }
4949
4950 /* Locate some local-dynamic symbol still in use by this function
4951 so that we can print its name in some movdi_er_tlsldm pattern. */
4952
4953 static int
4954 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4955 {
4956 rtx x = *px;
4957
4958 if (GET_CODE (x) == SYMBOL_REF
4959 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4960 {
4961 cfun->machine->some_ld_name = XSTR (x, 0);
4962 return 1;
4963 }
4964
4965 return 0;
4966 }
4967
4968 static const char *
4969 get_some_local_dynamic_name (void)
4970 {
4971 rtx insn;
4972
4973 if (cfun->machine->some_ld_name)
4974 return cfun->machine->some_ld_name;
4975
4976 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4977 if (INSN_P (insn)
4978 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4979 return cfun->machine->some_ld_name;
4980
4981 gcc_unreachable ();
4982 }
4983
4984 /* Print an operand. Recognize special options, documented below. */
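/* Among the codes handled below: %# and %* print the literal and
   gpdisp sequence numbers, %J/%j emit !lituse relocation annotations,
   %r/%R print a register or $31/$f31 for zero, %N/%P/%h/%L/%m/%M/%U/%s/%S
   print transformed constants, %C/%D/%c/%d print comparison names,
   %E prints a divide/modulus mnemonic, and %A appends "_u" for
   unaligned accesses.  */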
4985
4986 void
4987 print_operand (FILE *file, rtx x, int code)
4988 {
4989 int i;
4990
4991 switch (code)
4992 {
4993 case '~':
4994 /* Print the assembler name of the current function. */
4995 assemble_name (file, alpha_fnname);
4996 break;
4997
4998 case '&':
4999 assemble_name (file, get_some_local_dynamic_name ());
5000 break;
5001
5002 case '/':
5003 {
5004 const char *trap = get_trap_mode_suffix ();
5005 const char *round = get_round_mode_suffix ();
5006
5007 if (trap || round)
5008 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5009 (trap ? trap : ""), (round ? round : ""));
5010 break;
5011 }
5012
5013 case ',':
5014 /* Generates single precision instruction suffix. */
5015 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5016 break;
5017
5018 case '-':
5019 /* Generates double precision instruction suffix. */
5020 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5021 break;
5022
5023 case '#':
5024 if (alpha_this_literal_sequence_number == 0)
5025 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5026 fprintf (file, "%d", alpha_this_literal_sequence_number);
5027 break;
5028
5029 case '*':
5030 if (alpha_this_gpdisp_sequence_number == 0)
5031 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5032 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5033 break;
5034
5035 case 'H':
5036 if (GET_CODE (x) == HIGH)
5037 output_addr_const (file, XEXP (x, 0));
5038 else
5039 output_operand_lossage ("invalid %%H value");
5040 break;
5041
5042 case 'J':
5043 {
5044 const char *lituse;
5045
5046 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5047 {
5048 x = XVECEXP (x, 0, 0);
5049 lituse = "lituse_tlsgd";
5050 }
5051 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5052 {
5053 x = XVECEXP (x, 0, 0);
5054 lituse = "lituse_tlsldm";
5055 }
5056 else if (CONST_INT_P (x))
5057 lituse = "lituse_jsr";
5058 else
5059 {
5060 output_operand_lossage ("invalid %%J value");
5061 break;
5062 }
5063
5064 if (x != const0_rtx)
5065 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5066 }
5067 break;
5068
5069 case 'j':
5070 {
5071 const char *lituse;
5072
5073 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5074 lituse = "lituse_jsrdirect";
5075 #else
5076 lituse = "lituse_jsr";
5077 #endif
5078
5079 gcc_assert (INTVAL (x) != 0);
5080 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5081 }
5082 break;
5083 case 'r':
5084 /* If this operand is the constant zero, write it as "$31". */
5085 if (REG_P (x))
5086 fprintf (file, "%s", reg_names[REGNO (x)]);
5087 else if (x == CONST0_RTX (GET_MODE (x)))
5088 fprintf (file, "$31");
5089 else
5090 output_operand_lossage ("invalid %%r value");
5091 break;
5092
5093 case 'R':
5094 /* Similar, but for floating-point. */
5095 if (REG_P (x))
5096 fprintf (file, "%s", reg_names[REGNO (x)]);
5097 else if (x == CONST0_RTX (GET_MODE (x)))
5098 fprintf (file, "$f31");
5099 else
5100 output_operand_lossage ("invalid %%R value");
5101 break;
5102
5103 case 'N':
5104 /* Write the 1's complement of a constant. */
5105 if (!CONST_INT_P (x))
5106 output_operand_lossage ("invalid %%N value");
5107
5108 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5109 break;
5110
5111 case 'P':
5112 /* Write 1 << C, for a constant C. */
5113 if (!CONST_INT_P (x))
5114 output_operand_lossage ("invalid %%P value");
5115
5116 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5117 break;
5118
5119 case 'h':
5120 /* Write the high-order 16 bits of a constant, sign-extended. */
5121 if (!CONST_INT_P (x))
5122 output_operand_lossage ("invalid %%h value");
5123
5124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5125 break;
5126
5127 case 'L':
5128 /* Write the low-order 16 bits of a constant, sign-extended. */
5129 if (!CONST_INT_P (x))
5130 output_operand_lossage ("invalid %%L value");
5131
5132 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5133 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5134 break;
5135
5136 case 'm':
5137 /* Write mask for ZAP insn. */
5138 if (GET_CODE (x) == CONST_DOUBLE)
5139 {
5140 HOST_WIDE_INT mask = 0;
5141 HOST_WIDE_INT value;
5142
5143 value = CONST_DOUBLE_LOW (x);
5144 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5145 i++, value >>= 8)
5146 if (value & 0xff)
5147 mask |= (1 << i);
5148
5149 value = CONST_DOUBLE_HIGH (x);
5150 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5151 i++, value >>= 8)
5152 if (value & 0xff)
5153 mask |= (1 << (i + sizeof (int)));
5154
5155 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5156 }
5157
5158 else if (CONST_INT_P (x))
5159 {
5160 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5161
5162 for (i = 0; i < 8; i++, value >>= 8)
5163 if (value & 0xff)
5164 mask |= (1 << i);
5165
5166 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5167 }
5168 else
5169 output_operand_lossage ("invalid %%m value");
5170 break;
5171
5172 case 'M':
5173 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5174 if (!CONST_INT_P (x)
5175 || (INTVAL (x) != 8 && INTVAL (x) != 16
5176 && INTVAL (x) != 32 && INTVAL (x) != 64))
5177 output_operand_lossage ("invalid %%M value");
5178
5179 fprintf (file, "%s",
5180 (INTVAL (x) == 8 ? "b"
5181 : INTVAL (x) == 16 ? "w"
5182 : INTVAL (x) == 32 ? "l"
5183 : "q"));
5184 break;
5185
5186 case 'U':
5187 /* Similar, except do it from the mask. */
5188 if (CONST_INT_P (x))
5189 {
5190 HOST_WIDE_INT value = INTVAL (x);
5191
5192 if (value == 0xff)
5193 {
5194 fputc ('b', file);
5195 break;
5196 }
5197 if (value == 0xffff)
5198 {
5199 fputc ('w', file);
5200 break;
5201 }
5202 if (value == 0xffffffff)
5203 {
5204 fputc ('l', file);
5205 break;
5206 }
5207 if (value == -1)
5208 {
5209 fputc ('q', file);
5210 break;
5211 }
5212 }
5213 else if (HOST_BITS_PER_WIDE_INT == 32
5214 && GET_CODE (x) == CONST_DOUBLE
5215 && CONST_DOUBLE_LOW (x) == 0xffffffff
5216 && CONST_DOUBLE_HIGH (x) == 0)
5217 {
5218 fputc ('l', file);
5219 break;
5220 }
5221 output_operand_lossage ("invalid %%U value");
5222 break;
5223
5224 case 's':
5225 /* Write the constant value divided by 8 for little-endian mode or
5226 (56 - value) / 8 for big-endian mode. */
5227
5228 if (!CONST_INT_P (x)
5229 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5230 ? 56
5231 : 64)
5232 || (INTVAL (x) & 7) != 0)
5233 output_operand_lossage ("invalid %%s value");
5234
5235 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5236 WORDS_BIG_ENDIAN
5237 ? (56 - INTVAL (x)) / 8
5238 : INTVAL (x) / 8);
5239 break;
5240
5241 case 'S':
5242 /* Same, except compute (64 - c) / 8.  */
5243
5244 if (!CONST_INT_P (x)
5245 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5246 || (INTVAL (x) & 7) != 0)
5247 output_operand_lossage ("invalid %%S value");
5248
5249 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5250 break;
5251
5252 case 't':
5253 {
5254 /* On Unicos/Mk systems: use a DEX expression if the symbol
5255 clashes with a register name. */
5256 int dex = unicosmk_need_dex (x);
5257 if (dex)
5258 fprintf (file, "DEX(%d)", dex);
5259 else
5260 output_addr_const (file, x);
5261 }
5262 break;
5263
5264 case 'C': case 'D': case 'c': case 'd':
5265 /* Write out comparison name. */
5266 {
5267 enum rtx_code c = GET_CODE (x);
5268
5269 if (!COMPARISON_P (x))
5270 output_operand_lossage ("invalid %%C value");
5271
5272 else if (code == 'D')
5273 c = reverse_condition (c);
5274 else if (code == 'c')
5275 c = swap_condition (c);
5276 else if (code == 'd')
5277 c = swap_condition (reverse_condition (c));
5278
5279 if (c == LEU)
5280 fprintf (file, "ule");
5281 else if (c == LTU)
5282 fprintf (file, "ult");
5283 else if (c == UNORDERED)
5284 fprintf (file, "un");
5285 else
5286 fprintf (file, "%s", GET_RTX_NAME (c));
5287 }
5288 break;
5289
5290 case 'E':
5291 /* Write the divide or modulus operator. */
5292 switch (GET_CODE (x))
5293 {
5294 case DIV:
5295 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5296 break;
5297 case UDIV:
5298 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5299 break;
5300 case MOD:
5301 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5302 break;
5303 case UMOD:
5304 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5305 break;
5306 default:
5307 output_operand_lossage ("invalid %%E value");
5308 break;
5309 }
5310 break;
5311
5312 case 'A':
5313 /* Write "_u" for unaligned access. */
5314 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5315 fprintf (file, "_u");
5316 break;
5317
5318 case 0:
5319 if (REG_P (x))
5320 fprintf (file, "%s", reg_names[REGNO (x)]);
5321 else if (MEM_P (x))
5322 output_address (XEXP (x, 0));
5323 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5324 {
5325 switch (XINT (XEXP (x, 0), 1))
5326 {
5327 case UNSPEC_DTPREL:
5328 case UNSPEC_TPREL:
5329 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5330 break;
5331 default:
5332 output_operand_lossage ("unknown relocation unspec");
5333 break;
5334 }
5335 }
5336 else
5337 output_addr_const (file, x);
5338 break;
5339
5340 default:
5341 output_operand_lossage ("invalid %%xn code");
5342 }
5343 }
5344
5345 void
5346 print_operand_address (FILE *file, rtx addr)
5347 {
5348 int basereg = 31;
5349 HOST_WIDE_INT offset = 0;
5350
5351 if (GET_CODE (addr) == AND)
5352 addr = XEXP (addr, 0);
5353
5354 if (GET_CODE (addr) == PLUS
5355 && CONST_INT_P (XEXP (addr, 1)))
5356 {
5357 offset = INTVAL (XEXP (addr, 1));
5358 addr = XEXP (addr, 0);
5359 }
5360
5361 if (GET_CODE (addr) == LO_SUM)
5362 {
5363 const char *reloc16, *reloclo;
5364 rtx op1 = XEXP (addr, 1);
5365
5366 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5367 {
5368 op1 = XEXP (op1, 0);
5369 switch (XINT (op1, 1))
5370 {
5371 case UNSPEC_DTPREL:
5372 reloc16 = NULL;
5373 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5374 break;
5375 case UNSPEC_TPREL:
5376 reloc16 = NULL;
5377 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5378 break;
5379 default:
5380 output_operand_lossage ("unknown relocation unspec");
5381 return;
5382 }
5383
5384 output_addr_const (file, XVECEXP (op1, 0, 0));
5385 }
5386 else
5387 {
5388 reloc16 = "gprel";
5389 reloclo = "gprellow";
5390 output_addr_const (file, op1);
5391 }
5392
5393 if (offset)
5394 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5395
5396 addr = XEXP (addr, 0);
5397 switch (GET_CODE (addr))
5398 {
5399 case REG:
5400 basereg = REGNO (addr);
5401 break;
5402
5403 case SUBREG:
5404 basereg = subreg_regno (addr);
5405 break;
5406
5407 default:
5408 gcc_unreachable ();
5409 }
5410
5411 fprintf (file, "($%d)\t\t!%s", basereg,
5412 (basereg == 29 ? reloc16 : reloclo));
5413 return;
5414 }
5415
5416 switch (GET_CODE (addr))
5417 {
5418 case REG:
5419 basereg = REGNO (addr);
5420 break;
5421
5422 case SUBREG:
5423 basereg = subreg_regno (addr);
5424 break;
5425
5426 case CONST_INT:
5427 offset = INTVAL (addr);
5428 break;
5429
5430 #if TARGET_ABI_OPEN_VMS
5431 case SYMBOL_REF:
5432 fprintf (file, "%s", XSTR (addr, 0));
5433 return;
5434
5435 case CONST:
5436 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5437 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5438 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5439 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5440 INTVAL (XEXP (XEXP (addr, 0), 1)));
5441 return;
5442
5443 #endif
5444 default:
5445 gcc_unreachable ();
5446 }
5447
5448 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5449 }
5450 \f
5451 /* Emit RTL insns to initialize the variable parts of a trampoline at
5452 TRAMP. FNADDR is an RTX for the address of the function's pure
5453 code. CXT is an RTX for the static chain value for the function.
5454
5455 The three offset parameters are for the individual template's
5456 layout. A JMPOFS < 0 indicates that the trampoline does not
5457 contain instructions at all.
5458
5459 We assume here that a function will be called many more times than
5460 its address is taken (e.g., it might be passed to qsort), so we
5461 take the trouble to initialize the "hint" field in the JMP insn.
5462 Note that the hint field is PC (new) + 4 * bits 13:0. */
5463
5464 void
5465 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5466 int fnofs, int cxtofs, int jmpofs)
5467 {
5468 rtx addr;
5469 /* VMS really uses DImode pointers in memory at this point. */
5470 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5471
5472 #ifdef POINTERS_EXTEND_UNSIGNED
5473 fnaddr = convert_memory_address (mode, fnaddr);
5474 cxt = convert_memory_address (mode, cxt);
5475 #endif
5476
5477 /* Store function address and CXT. */
5478 addr = memory_address (mode, plus_constant (tramp, fnofs));
5479 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5480 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5481 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5482
5483 #ifdef ENABLE_EXECUTE_STACK
5484 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5485 0, VOIDmode, 1, tramp, Pmode);
5486 #endif
5487
5488 if (jmpofs >= 0)
5489 emit_insn (gen_imb ());
5490 }
5491 \f
5492 /* Determine where to put an argument to a function.
5493 Value is zero to push the argument on the stack,
5494 or a hard register in which to store the argument.
5495
5496 MODE is the argument's machine mode.
5497 TYPE is the data type of the argument (as a tree).
5498 This is null for libcalls where that information may
5499 not be available.
5500 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5501 the preceding args and about the function being called.
5502 NAMED is nonzero if this argument is a named parameter
5503 (otherwise it is an extra parameter matching an ellipsis).
5504
5505 On Alpha the first 6 words of args are normally in registers
5506 and the rest are pushed. */
5507
5508 rtx
5509 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5510 int named ATTRIBUTE_UNUSED)
5511 {
5512 int basereg;
5513 int num_args;
5514
5515 /* Don't get confused and pass small structures in FP registers. */
5516 if (type && AGGREGATE_TYPE_P (type))
5517 basereg = 16;
5518 else
5519 {
5520 #ifdef ENABLE_CHECKING
5521 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5522 values here. */
5523 gcc_assert (!COMPLEX_MODE_P (mode));
5524 #endif
5525
5526 /* Set up defaults for FP operands passed in FP registers, and
5527 integral operands passed in integer registers. */
5528 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5529 basereg = 32 + 16;
5530 else
5531 basereg = 16;
5532 }
5533
5534 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5535 the three platforms, so we can't avoid conditional compilation. */
5536 #if TARGET_ABI_OPEN_VMS
5537 {
5538 if (mode == VOIDmode)
5539 return alpha_arg_info_reg_val (cum);
5540
5541 num_args = cum.num_args;
5542 if (num_args >= 6
5543 || targetm.calls.must_pass_in_stack (mode, type))
5544 return NULL_RTX;
5545 }
5546 #elif TARGET_ABI_UNICOSMK
5547 {
5548 int size;
5549
5550 /* If this is the last argument, generate the call info word (CIW). */
5551 /* ??? We don't include the caller's line number in the CIW because
5552 I don't know how to determine it if debug info is turned off. */
5553 if (mode == VOIDmode)
5554 {
5555 int i;
5556 HOST_WIDE_INT lo;
5557 HOST_WIDE_INT hi;
5558 rtx ciw;
5559
5560 lo = 0;
5561
5562 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5563 if (cum.reg_args_type[i])
5564 lo |= (1 << (7 - i));
5565
5566 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5567 lo |= 7;
5568 else
5569 lo |= cum.num_reg_words;
5570
5571 #if HOST_BITS_PER_WIDE_INT == 32
5572 hi = (cum.num_args << 20) | cum.num_arg_words;
5573 #else
5574 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5575 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5576 hi = 0;
5577 #endif
5578 ciw = immed_double_const (lo, hi, DImode);
5579
5580 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5581 UNSPEC_UMK_LOAD_CIW);
5582 }
5583
5584 size = ALPHA_ARG_SIZE (mode, type, named);
5585 num_args = cum.num_reg_words;
5586 if (cum.force_stack
5587 || cum.num_reg_words + size > 6
5588 || targetm.calls.must_pass_in_stack (mode, type))
5589 return NULL_RTX;
5590 else if (type && TYPE_MODE (type) == BLKmode)
5591 {
5592 rtx reg1, reg2;
5593
5594 reg1 = gen_rtx_REG (DImode, num_args + 16);
5595 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5596
5597 /* The argument fits in two registers. Note that we still need to
5598 reserve a register for empty structures. */
5599 if (size == 0)
5600 return NULL_RTX;
5601 else if (size == 1)
5602 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5603 else
5604 {
5605 reg2 = gen_rtx_REG (DImode, num_args + 17);
5606 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5607 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5608 }
5609 }
5610 }
5611 #elif TARGET_ABI_OSF
5612 {
5613 if (cum >= 6)
5614 return NULL_RTX;
5615 num_args = cum;
5616
5617 /* VOID is passed as a special flag for "last argument". */
5618 if (type == void_type_node)
5619 basereg = 16;
5620 else if (targetm.calls.must_pass_in_stack (mode, type))
5621 return NULL_RTX;
5622 }
5623 #else
5624 #error Unhandled ABI
5625 #endif
5626
5627 return gen_rtx_REG (mode, num_args + basereg);
5628 }
5629
5630 static int
5631 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5632 enum machine_mode mode ATTRIBUTE_UNUSED,
5633 tree type ATTRIBUTE_UNUSED,
5634 bool named ATTRIBUTE_UNUSED)
5635 {
5636 int words = 0;
5637
5638 #if TARGET_ABI_OPEN_VMS
5639 if (cum->num_args < 6
5640 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5641 words = 6 - cum->num_args;
5642 #elif TARGET_ABI_UNICOSMK
5643 /* Never any split arguments. */
5644 #elif TARGET_ABI_OSF
5645 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5646 words = 6 - *cum;
5647 #else
5648 #error Unhandled ABI
5649 #endif
5650
5651 return words * UNITS_PER_WORD;
5652 }
5653
5654
5655 /* Return true if TYPE must be returned in memory, instead of in registers. */
5656
5657 static bool
5658 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5659 {
5660 enum machine_mode mode = VOIDmode;
5661 int size;
5662
5663 if (type)
5664 {
5665 mode = TYPE_MODE (type);
5666
5667 /* All aggregates are returned in memory. */
5668 if (AGGREGATE_TYPE_P (type))
5669 return true;
5670 }
5671
5672 size = GET_MODE_SIZE (mode);
5673 switch (GET_MODE_CLASS (mode))
5674 {
5675 case MODE_VECTOR_FLOAT:
5676 /* Pass all float vectors in memory, like an aggregate. */
5677 return true;
5678
5679 case MODE_COMPLEX_FLOAT:
5680 /* We judge complex floats on the size of their element,
5681 not the size of the whole type. */
5682 size = GET_MODE_UNIT_SIZE (mode);
5683 break;
5684
5685 case MODE_INT:
5686 case MODE_FLOAT:
5687 case MODE_COMPLEX_INT:
5688 case MODE_VECTOR_INT:
5689 break;
5690
5691 default:
5692 /* ??? We get called on all sorts of random stuff from
5693 aggregate_value_p. We must return something, but it's not
5694 clear what's safe to return. Pretend it's a struct I
5695 guess. */
5696 return true;
5697 }
5698
5699 /* Otherwise types must fit in one register. */
5700 return size > UNITS_PER_WORD;
5701 }
5702
5703 /* Return true if TYPE should be passed by invisible reference. */
5704
5705 static bool
5706 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5707 enum machine_mode mode,
5708 const_tree type ATTRIBUTE_UNUSED,
5709 bool named ATTRIBUTE_UNUSED)
5710 {
5711 return mode == TFmode || mode == TCmode;
5712 }
5713
5714 /* Define how to find the value returned by a function. VALTYPE is the
5715 data type of the value (as a tree). If the precise function being
5716 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5717 MODE is set instead of VALTYPE for libcalls.
5718
5719 On Alpha the value is found in $0 for integer functions and
5720 $f0 for floating-point functions. */
5721
5722 rtx
5723 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5724 enum machine_mode mode)
5725 {
5726 unsigned int regnum, dummy;
5727 enum mode_class mclass;
5728
5729 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5730
5731 if (valtype)
5732 mode = TYPE_MODE (valtype);
5733
5734 mclass = GET_MODE_CLASS (mode);
5735 switch (mclass)
5736 {
5737 case MODE_INT:
5738 PROMOTE_MODE (mode, dummy, valtype);
5739 /* FALLTHRU */
5740
5741 case MODE_COMPLEX_INT:
5742 case MODE_VECTOR_INT:
5743 regnum = 0;
5744 break;
5745
5746 case MODE_FLOAT:
5747 regnum = 32;
5748 break;
5749
5750 case MODE_COMPLEX_FLOAT:
5751 {
5752 enum machine_mode cmode = GET_MODE_INNER (mode);
5753
5754 return gen_rtx_PARALLEL
5755 (VOIDmode,
5756 gen_rtvec (2,
5757 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5758 const0_rtx),
5759 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5760 GEN_INT (GET_MODE_SIZE (cmode)))));
5761 }
5762
5763 default:
5764 gcc_unreachable ();
5765 }
5766
5767 return gen_rtx_REG (mode, regnum);
5768 }
5769
5770 /* TCmode complex values are passed by invisible reference. We
5771 should not split these values. */
5772
5773 static bool
5774 alpha_split_complex_arg (const_tree type)
5775 {
5776 return TYPE_MODE (type) != TCmode;
5777 }
5778
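/* The va_list type built below is, roughly,

     struct __va_list_tag {
       void *__base;    -- pointer into the argument save area
       int __offset;    -- byte offset of the next argument
     };

   plus a dummy trailing int field that exists only to silence
   alignment warnings.  */
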
5779 static tree
5780 alpha_build_builtin_va_list (void)
5781 {
5782 tree base, ofs, space, record, type_decl;
5783
5784 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5785 return ptr_type_node;
5786
5787 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5788 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5789 TREE_CHAIN (record) = type_decl;
5790 TYPE_NAME (record) = type_decl;
5791
5792 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5793
5794 /* Dummy field to prevent alignment warnings. */
5795 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5796 DECL_FIELD_CONTEXT (space) = record;
5797 DECL_ARTIFICIAL (space) = 1;
5798 DECL_IGNORED_P (space) = 1;
5799
5800 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5801 integer_type_node);
5802 DECL_FIELD_CONTEXT (ofs) = record;
5803 TREE_CHAIN (ofs) = space;
5804
5805 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5806 ptr_type_node);
5807 DECL_FIELD_CONTEXT (base) = record;
5808 TREE_CHAIN (base) = ofs;
5809
5810 TYPE_FIELDS (record) = base;
5811 layout_type (record);
5812
5813 va_list_gpr_counter_field = ofs;
5814 return record;
5815 }
5816
5817 #if TARGET_ABI_OSF
5818 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5819 and constant additions. */
5820
5821 static gimple
5822 va_list_skip_additions (tree lhs)
5823 {
5824 gimple stmt;
5825
5826 for (;;)
5827 {
5828 enum tree_code code;
5829
5830 stmt = SSA_NAME_DEF_STMT (lhs);
5831
5832 if (gimple_code (stmt) == GIMPLE_PHI)
5833 return stmt;
5834
5835 if (!is_gimple_assign (stmt)
5836 || gimple_assign_lhs (stmt) != lhs)
5837 return NULL;
5838
5839 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5840 return stmt;
5841 code = gimple_assign_rhs_code (stmt);
5842 if (!CONVERT_EXPR_CODE_P (code)
5843 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5844 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5845 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5846 return stmt;
5847
5848 lhs = gimple_assign_rhs1 (stmt);
5849 }
5850 }
5851
5852 /* Check if LHS = RHS statement is
5853 LHS = *(ap.__base + ap.__offset + cst)
5854 or
5855 LHS = *(ap.__base
5856 + ((ap.__offset + cst <= 47)
5857 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5858 If the former, indicate that GPR registers are needed,
5859 if the latter, indicate that FPR registers are needed.
5860
5861 Also look for LHS = (*ptr).field, where ptr is one of the forms
5862 listed above.
5863
5864 On alpha, cfun->va_list_gpr_size is used as size of the needed
5865 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5866 registers are needed and bit 1 set if FPR registers are needed.
5867 Return true if va_list references should not be scanned for the
5868 current statement. */
5869
5870 static bool
5871 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5872 {
5873 tree base, offset, rhs;
5874 int offset_arg = 1;
5875 gimple base_stmt;
5876
5877 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5878 != GIMPLE_SINGLE_RHS)
5879 return false;
5880
5881 rhs = gimple_assign_rhs1 (stmt);
5882 while (handled_component_p (rhs))
5883 rhs = TREE_OPERAND (rhs, 0);
5884 if (TREE_CODE (rhs) != INDIRECT_REF
5885 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5886 return false;
5887
5888 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5889 if (stmt == NULL
5890 || !is_gimple_assign (stmt)
5891 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5892 return false;
5893
5894 base = gimple_assign_rhs1 (stmt);
5895 if (TREE_CODE (base) == SSA_NAME)
5896 {
5897 base_stmt = va_list_skip_additions (base);
5898 if (base_stmt
5899 && is_gimple_assign (base_stmt)
5900 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5901 base = gimple_assign_rhs1 (base_stmt);
5902 }
5903
5904 if (TREE_CODE (base) != COMPONENT_REF
5905 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5906 {
5907 base = gimple_assign_rhs2 (stmt);
5908 if (TREE_CODE (base) == SSA_NAME)
5909 {
5910 base_stmt = va_list_skip_additions (base);
5911 if (base_stmt
5912 && is_gimple_assign (base_stmt)
5913 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5914 base = gimple_assign_rhs1 (base_stmt);
5915 }
5916
5917 if (TREE_CODE (base) != COMPONENT_REF
5918 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5919 return false;
5920
5921 offset_arg = 0;
5922 }
5923
5924 base = get_base_address (base);
5925 if (TREE_CODE (base) != VAR_DECL
5926 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5927 return false;
5928
5929 offset = gimple_op (stmt, 1 + offset_arg);
5930 if (TREE_CODE (offset) == SSA_NAME)
5931 {
5932 gimple offset_stmt = va_list_skip_additions (offset);
5933
5934 if (offset_stmt
5935 && gimple_code (offset_stmt) == GIMPLE_PHI)
5936 {
5937 HOST_WIDE_INT sub;
5938 gimple arg1_stmt, arg2_stmt;
5939 tree arg1, arg2;
5940 enum tree_code code1, code2;
5941
5942 if (gimple_phi_num_args (offset_stmt) != 2)
5943 goto escapes;
5944
5945 arg1_stmt
5946 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5947 arg2_stmt
5948 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5949 if (arg1_stmt == NULL
5950 || !is_gimple_assign (arg1_stmt)
5951 || arg2_stmt == NULL
5952 || !is_gimple_assign (arg2_stmt))
5953 goto escapes;
5954
5955 code1 = gimple_assign_rhs_code (arg1_stmt);
5956 code2 = gimple_assign_rhs_code (arg2_stmt);
5957 if (code1 == COMPONENT_REF
5958 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5959 /* Do nothing. */;
5960 else if (code2 == COMPONENT_REF
5961 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5962 {
5963 gimple tem = arg1_stmt;
5964 code2 = code1;
5965 arg1_stmt = arg2_stmt;
5966 arg2_stmt = tem;
5967 }
5968 else
5969 goto escapes;
5970
5971 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5972 goto escapes;
5973
5974 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5975 if (code2 == MINUS_EXPR)
5976 sub = -sub;
5977 if (sub < -48 || sub > -32)
5978 goto escapes;
5979
5980 arg1 = gimple_assign_rhs1 (arg1_stmt);
5981 arg2 = gimple_assign_rhs1 (arg2_stmt);
5982 if (TREE_CODE (arg2) == SSA_NAME)
5983 {
5984 arg2_stmt = va_list_skip_additions (arg2);
5985 if (arg2_stmt == NULL
5986 || !is_gimple_assign (arg2_stmt)
5987 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5988 goto escapes;
5989 arg2 = gimple_assign_rhs1 (arg2_stmt);
5990 }
5991 if (arg1 != arg2)
5992 goto escapes;
5993
5994 if (TREE_CODE (arg1) != COMPONENT_REF
5995 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5996 || get_base_address (arg1) != base)
5997 goto escapes;
5998
5999 /* Need floating point regs. */
6000 cfun->va_list_fpr_size |= 2;
6001 return false;
6002 }
6003 if (offset_stmt
6004 && is_gimple_assign (offset_stmt)
6005 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6006 offset = gimple_assign_rhs1 (offset_stmt);
6007 }
6008 if (TREE_CODE (offset) != COMPONENT_REF
6009 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6010 || get_base_address (offset) != base)
6011 goto escapes;
6012 else
6013 /* Need general regs. */
6014 cfun->va_list_fpr_size |= 1;
6015 return false;
6016
6017 escapes:
6018 si->va_list_escapes = true;
6019 return false;
6020 }
6021 #endif
6022
6023 /* Perform any actions needed for a function that is receiving a

6024 variable number of arguments. */
6025
6026 static void
6027 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6028 tree type, int *pretend_size, int no_rtl)
6029 {
6030 CUMULATIVE_ARGS cum = *pcum;
6031
6032 /* Skip the current argument. */
6033 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6034
6035 #if TARGET_ABI_UNICOSMK
6036 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6037 arguments on the stack. Unfortunately, it doesn't always store the first
6038 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6039 with stdargs as we always have at least one named argument there. */
6040 if (cum.num_reg_words < 6)
6041 {
6042 if (!no_rtl)
6043 {
6044 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6045 emit_insn (gen_arg_home_umk ());
6046 }
6047 *pretend_size = 0;
6048 }
6049 #elif TARGET_ABI_OPEN_VMS
6050 /* For VMS, we allocate space for all 6 arg registers plus a count.
6051
6052 However, if NO registers need to be saved, don't allocate any space.
6053 This is not only because we won't need the space, but because AP
6054 includes the current_pretend_args_size and we don't want to mess up
6055 any ap-relative addresses already made. */
6056 if (cum.num_args < 6)
6057 {
6058 if (!no_rtl)
6059 {
6060 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6061 emit_insn (gen_arg_home ());
6062 }
6063 *pretend_size = 7 * UNITS_PER_WORD;
6064 }
6065 #else
6066 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6067 only push those that are remaining. However, if NO registers need to
6068 be saved, don't allocate any space. This is not only because we won't
6069 need the space, but because AP includes the current_pretend_args_size
6070 and we don't want to mess up any ap-relative addresses already made.
6071
6072 If we are not to use the floating-point registers, save the integer
6073 registers where we would put the floating-point registers. This is
6074 not the most efficient way to implement varargs with just one register
6075 class, but it isn't worth doing anything more efficient in this rare
6076 case. */
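/* As a concrete sketch (illustrative numbers only): if the last named
   argument consumed the first two argument registers (cum == 2), all
   remaining registers are needed and FP registers are enabled, the FP
   argument registers $f18-$f21 are stored at offsets 16..47 from the
   incoming-args pointer and the integer argument registers $18-$21 at
   offsets 64..95, matching the 48+48 byte layout that alpha_va_start
   describes below.  */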
6077 if (cum >= 6)
6078 return;
6079
6080 if (!no_rtl)
6081 {
6082 int count;
6083 alias_set_type set = get_varargs_alias_set ();
6084 rtx tmp;
6085
6086 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6087 if (count > 6 - cum)
6088 count = 6 - cum;
6089
6090 /* Detect whether integer registers or floating-point registers
6091 are needed by the detected va_arg statements. See above for
6092 how these values are computed. Note that the "escape" value
6093 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6094 these bits set. */
6095 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6096
6097 if (cfun->va_list_fpr_size & 1)
6098 {
6099 tmp = gen_rtx_MEM (BLKmode,
6100 plus_constant (virtual_incoming_args_rtx,
6101 (cum + 6) * UNITS_PER_WORD));
6102 MEM_NOTRAP_P (tmp) = 1;
6103 set_mem_alias_set (tmp, set);
6104 move_block_from_reg (16 + cum, tmp, count);
6105 }
6106
6107 if (cfun->va_list_fpr_size & 2)
6108 {
6109 tmp = gen_rtx_MEM (BLKmode,
6110 plus_constant (virtual_incoming_args_rtx,
6111 cum * UNITS_PER_WORD));
6112 MEM_NOTRAP_P (tmp) = 1;
6113 set_mem_alias_set (tmp, set);
6114 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6115 }
6116 }
6117 *pretend_size = 12 * UNITS_PER_WORD;
6118 #endif
6119 }
6120
6121 static void
6122 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6123 {
6124 HOST_WIDE_INT offset;
6125 tree t, offset_field, base_field;
6126
6127 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6128 return;
6129
6130 if (TARGET_ABI_UNICOSMK)
6131 std_expand_builtin_va_start (valist, nextarg);
6132
6133 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6134 up by 48, storing fp arg registers in the first 48 bytes, and the
6135 integer arg registers in the next 48 bytes. This is only done,
6136 however, if any integer registers need to be stored.
6137
6138 If no integer registers need be stored, then we must subtract 48
6139 in order to account for the integer arg registers which are counted
6140 in argsize above, but which are not actually stored on the stack.
6141 Must further be careful here about structures straddling the last
6142 integer argument register; that futzes with pretend_args_size,
6143 which changes the meaning of AP. */
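/* A rough worked example: with two named arguments, NUM_ARGS == 2, so
   the base field is set to the incoming argument pointer plus 48 and
   the offset field to 2*8 == 16.  The first va_arg then finds integer
   arguments at base + offset and FP arguments at base + offset - 48,
   i.e. the two blocks saved by TARGET_SETUP_INCOMING_VARARGS above.  */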
6144
6145 if (NUM_ARGS < 6)
6146 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6147 else
6148 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6149
6150 if (TARGET_ABI_OPEN_VMS)
6151 {
6152 nextarg = plus_constant (nextarg, offset);
6153 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6154 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6155 make_tree (ptr_type_node, nextarg));
6156 TREE_SIDE_EFFECTS (t) = 1;
6157
6158 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6159 }
6160 else
6161 {
6162 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6163 offset_field = TREE_CHAIN (base_field);
6164
6165 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6166 valist, base_field, NULL_TREE);
6167 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6168 valist, offset_field, NULL_TREE);
6169
6170 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6171 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6172 size_int (offset));
6173 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6174 TREE_SIDE_EFFECTS (t) = 1;
6175 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6176
6177 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6178 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6179 TREE_SIDE_EFFECTS (t) = 1;
6180 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6181 }
6182 }
6183
6184 static tree
6185 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6186 gimple_seq *pre_p)
6187 {
6188 tree type_size, ptr_type, addend, t, addr;
6189 gimple_seq internal_post;
6190
6191 /* If the type could not be passed in registers, skip the block
6192 reserved for the registers. */
6193 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6194 {
6195 t = build_int_cst (TREE_TYPE (offset), 6*8);
6196 gimplify_assign (offset,
6197 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6198 pre_p);
6199 }
6200
6201 addend = offset;
6202 ptr_type = build_pointer_type (type);
6203
6204 if (TREE_CODE (type) == COMPLEX_TYPE)
6205 {
6206 tree real_part, imag_part, real_temp;
6207
6208 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6209 offset, pre_p);
6210
6211 /* Copy the value into a new temporary, lest the formal temporary
6212 be reused out from under us. */
6213 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6214
6215 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6216 offset, pre_p);
6217
6218 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6219 }
6220 else if (TREE_CODE (type) == REAL_TYPE)
6221 {
6222 tree fpaddend, cond, fourtyeight;
6223
6224 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6225 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6226 addend, fourtyeight);
6227 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6228 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6229 fpaddend, addend);
6230 }
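
/* Concretely: while the offset is still inside the 48-byte register
   block (say 16), a double is fetched from base + 16 - 48, i.e. from
   the FP save area located 48 bytes below the integer block; once the
   offset reaches 48, FP values come from base + offset just like
   everything else.  */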
6231
6232 /* Build the final address and force that value into a temporary. */
6233 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6234 fold_convert (sizetype, addend));
6235 internal_post = NULL;
6236 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6237 gimple_seq_add_seq (pre_p, internal_post);
6238
6239 /* Update the offset field. */
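/* The size is rounded up to a multiple of 8 bytes: e.g. a 4-byte int
   advances the offset by 8 and a 12-byte struct by 16.  */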
6240 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6241 if (type_size == NULL || TREE_OVERFLOW (type_size))
6242 t = size_zero_node;
6243 else
6244 {
6245 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6246 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6247 t = size_binop (MULT_EXPR, t, size_int (8));
6248 }
6249 t = fold_convert (TREE_TYPE (offset), t);
6250 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6251 pre_p);
6252
6253 return build_va_arg_indirect_ref (addr);
6254 }
6255
6256 static tree
6257 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6258 gimple_seq *post_p)
6259 {
6260 tree offset_field, base_field, offset, base, t, r;
6261 bool indirect;
6262
6263 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6264 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6265
6266 base_field = TYPE_FIELDS (va_list_type_node);
6267 offset_field = TREE_CHAIN (base_field);
6268 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6269 valist, base_field, NULL_TREE);
6270 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6271 valist, offset_field, NULL_TREE);
6272
6273 /* Pull the fields of the structure out into temporaries. Since we never
6274 modify the base field, we can use a formal temporary. Sign-extend the
6275 offset field so that it's the proper width for pointer arithmetic. */
6276 base = get_formal_tmp_var (base_field, pre_p);
6277
6278 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6279 offset = get_initialized_tmp_var (t, pre_p, NULL);
6280
6281 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6282 if (indirect)
6283 type = build_pointer_type (type);
6284
6285 /* Find the value. Note that this will be a stable indirection, or
6286 a composite of stable indirections in the case of complex. */
6287 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6288
6289 /* Stuff the offset temporary back into its field. */
6290 gimplify_assign (unshare_expr (offset_field),
6291 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6292
6293 if (indirect)
6294 r = build_va_arg_indirect_ref (r);
6295
6296 return r;
6297 }
6298 \f
6299 /* Builtins. */
6300
6301 enum alpha_builtin
6302 {
6303 ALPHA_BUILTIN_CMPBGE,
6304 ALPHA_BUILTIN_EXTBL,
6305 ALPHA_BUILTIN_EXTWL,
6306 ALPHA_BUILTIN_EXTLL,
6307 ALPHA_BUILTIN_EXTQL,
6308 ALPHA_BUILTIN_EXTWH,
6309 ALPHA_BUILTIN_EXTLH,
6310 ALPHA_BUILTIN_EXTQH,
6311 ALPHA_BUILTIN_INSBL,
6312 ALPHA_BUILTIN_INSWL,
6313 ALPHA_BUILTIN_INSLL,
6314 ALPHA_BUILTIN_INSQL,
6315 ALPHA_BUILTIN_INSWH,
6316 ALPHA_BUILTIN_INSLH,
6317 ALPHA_BUILTIN_INSQH,
6318 ALPHA_BUILTIN_MSKBL,
6319 ALPHA_BUILTIN_MSKWL,
6320 ALPHA_BUILTIN_MSKLL,
6321 ALPHA_BUILTIN_MSKQL,
6322 ALPHA_BUILTIN_MSKWH,
6323 ALPHA_BUILTIN_MSKLH,
6324 ALPHA_BUILTIN_MSKQH,
6325 ALPHA_BUILTIN_UMULH,
6326 ALPHA_BUILTIN_ZAP,
6327 ALPHA_BUILTIN_ZAPNOT,
6328 ALPHA_BUILTIN_AMASK,
6329 ALPHA_BUILTIN_IMPLVER,
6330 ALPHA_BUILTIN_RPCC,
6331 ALPHA_BUILTIN_THREAD_POINTER,
6332 ALPHA_BUILTIN_SET_THREAD_POINTER,
6333
6334 /* TARGET_MAX */
6335 ALPHA_BUILTIN_MINUB8,
6336 ALPHA_BUILTIN_MINSB8,
6337 ALPHA_BUILTIN_MINUW4,
6338 ALPHA_BUILTIN_MINSW4,
6339 ALPHA_BUILTIN_MAXUB8,
6340 ALPHA_BUILTIN_MAXSB8,
6341 ALPHA_BUILTIN_MAXUW4,
6342 ALPHA_BUILTIN_MAXSW4,
6343 ALPHA_BUILTIN_PERR,
6344 ALPHA_BUILTIN_PKLB,
6345 ALPHA_BUILTIN_PKWB,
6346 ALPHA_BUILTIN_UNPKBL,
6347 ALPHA_BUILTIN_UNPKBW,
6348
6349 /* TARGET_CIX */
6350 ALPHA_BUILTIN_CTTZ,
6351 ALPHA_BUILTIN_CTLZ,
6352 ALPHA_BUILTIN_CTPOP,
6353
6354 ALPHA_BUILTIN_max
6355 };
6356
6357 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6358 CODE_FOR_builtin_cmpbge,
6359 CODE_FOR_builtin_extbl,
6360 CODE_FOR_builtin_extwl,
6361 CODE_FOR_builtin_extll,
6362 CODE_FOR_builtin_extql,
6363 CODE_FOR_builtin_extwh,
6364 CODE_FOR_builtin_extlh,
6365 CODE_FOR_builtin_extqh,
6366 CODE_FOR_builtin_insbl,
6367 CODE_FOR_builtin_inswl,
6368 CODE_FOR_builtin_insll,
6369 CODE_FOR_builtin_insql,
6370 CODE_FOR_builtin_inswh,
6371 CODE_FOR_builtin_inslh,
6372 CODE_FOR_builtin_insqh,
6373 CODE_FOR_builtin_mskbl,
6374 CODE_FOR_builtin_mskwl,
6375 CODE_FOR_builtin_mskll,
6376 CODE_FOR_builtin_mskql,
6377 CODE_FOR_builtin_mskwh,
6378 CODE_FOR_builtin_msklh,
6379 CODE_FOR_builtin_mskqh,
6380 CODE_FOR_umuldi3_highpart,
6381 CODE_FOR_builtin_zap,
6382 CODE_FOR_builtin_zapnot,
6383 CODE_FOR_builtin_amask,
6384 CODE_FOR_builtin_implver,
6385 CODE_FOR_builtin_rpcc,
6386 CODE_FOR_load_tp,
6387 CODE_FOR_set_tp,
6388
6389 /* TARGET_MAX */
6390 CODE_FOR_builtin_minub8,
6391 CODE_FOR_builtin_minsb8,
6392 CODE_FOR_builtin_minuw4,
6393 CODE_FOR_builtin_minsw4,
6394 CODE_FOR_builtin_maxub8,
6395 CODE_FOR_builtin_maxsb8,
6396 CODE_FOR_builtin_maxuw4,
6397 CODE_FOR_builtin_maxsw4,
6398 CODE_FOR_builtin_perr,
6399 CODE_FOR_builtin_pklb,
6400 CODE_FOR_builtin_pkwb,
6401 CODE_FOR_builtin_unpkbl,
6402 CODE_FOR_builtin_unpkbw,
6403
6404 /* TARGET_CIX */
6405 CODE_FOR_ctzdi2,
6406 CODE_FOR_clzdi2,
6407 CODE_FOR_popcountdi2
6408 };
6409
6410 struct alpha_builtin_def
6411 {
6412 const char *name;
6413 enum alpha_builtin code;
6414 unsigned int target_mask;
6415 bool is_const;
6416 };
6417
6418 static struct alpha_builtin_def const zero_arg_builtins[] = {
6419 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6420 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6421 };
6422
6423 static struct alpha_builtin_def const one_arg_builtins[] = {
6424 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6425 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6426 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6427 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6428 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6429 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6430 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6431 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6432 };
6433
6434 static struct alpha_builtin_def const two_arg_builtins[] = {
6435 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6436 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6437 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6438 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6439 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6440 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6441 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6442 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6443 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6444 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6445 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6446 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6447 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6448 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6449 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6450 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6451 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6452 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6453 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6454 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6455 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6456 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6457 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6458 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6459 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6460 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6461 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6462 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6463 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6464 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6465 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6466 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6467 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6468 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6469 };
6470
6471 static GTY(()) tree alpha_v8qi_u;
6472 static GTY(()) tree alpha_v8qi_s;
6473 static GTY(()) tree alpha_v4hi_u;
6474 static GTY(()) tree alpha_v4hi_s;
6475
6476 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6477 functions pointed to by P, with function type FTYPE. */
6478
6479 static void
6480 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6481 tree ftype)
6482 {
6483 tree decl;
6484 size_t i;
6485
6486 for (i = 0; i < count; ++i, ++p)
6487 if ((target_flags & p->target_mask) == p->target_mask)
6488 {
6489 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6490 NULL, NULL);
6491 if (p->is_const)
6492 TREE_READONLY (decl) = 1;
6493 TREE_NOTHROW (decl) = 1;
6494 }
6495 }
6496
6497
6498 static void
6499 alpha_init_builtins (void)
6500 {
6501 tree dimode_integer_type_node;
6502 tree ftype, decl;
6503
6504 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6505
6506 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6507 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6508 ftype);
6509
6510 ftype = build_function_type_list (dimode_integer_type_node,
6511 dimode_integer_type_node, NULL_TREE);
6512 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6513 ftype);
6514
6515 ftype = build_function_type_list (dimode_integer_type_node,
6516 dimode_integer_type_node,
6517 dimode_integer_type_node, NULL_TREE);
6518 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6519 ftype);
6520
6521 ftype = build_function_type (ptr_type_node, void_list_node);
6522 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6523 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6524 NULL, NULL);
6525 TREE_NOTHROW (decl) = 1;
6526
6527 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6528 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6529 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6530 NULL, NULL);
6531 TREE_NOTHROW (decl) = 1;
6532
6533 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6534 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6535 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6536 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6537 }
6538
6539 /* Expand an expression EXP that calls a built-in function,
6540 with result going to TARGET if that's convenient
6541 (and in mode MODE if that's convenient).
6542 SUBTARGET may be used as the target for computing one of EXP's operands.
6543 IGNORE is nonzero if the value is to be ignored. */
6544
6545 static rtx
6546 alpha_expand_builtin (tree exp, rtx target,
6547 rtx subtarget ATTRIBUTE_UNUSED,
6548 enum machine_mode mode ATTRIBUTE_UNUSED,
6549 int ignore ATTRIBUTE_UNUSED)
6550 {
6551 #define MAX_ARGS 2
6552
6553 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6554 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6555 tree arg;
6556 call_expr_arg_iterator iter;
6557 enum insn_code icode;
6558 rtx op[MAX_ARGS], pat;
6559 int arity;
6560 bool nonvoid;
6561
6562 if (fcode >= ALPHA_BUILTIN_max)
6563 internal_error ("bad builtin fcode");
6564 icode = code_for_builtin[fcode];
6565 if (icode == 0)
6566 internal_error ("bad builtin fcode");
6567
6568 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6569
6570 arity = 0;
6571 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6572 {
6573 const struct insn_operand_data *insn_op;
6574
6575 if (arg == error_mark_node)
6576 return NULL_RTX;
6577 if (arity > MAX_ARGS)
6578 return NULL_RTX;
6579
6580 insn_op = &insn_data[icode].operand[arity + nonvoid];
6581
6582 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6583
6584 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6585 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6586 arity++;
6587 }
6588
6589 if (nonvoid)
6590 {
6591 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6592 if (!target
6593 || GET_MODE (target) != tmode
6594 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6595 target = gen_reg_rtx (tmode);
6596 }
6597
6598 switch (arity)
6599 {
6600 case 0:
6601 pat = GEN_FCN (icode) (target);
6602 break;
6603 case 1:
6604 if (nonvoid)
6605 pat = GEN_FCN (icode) (target, op[0]);
6606 else
6607 pat = GEN_FCN (icode) (op[0]);
6608 break;
6609 case 2:
6610 pat = GEN_FCN (icode) (target, op[0], op[1]);
6611 break;
6612 default:
6613 gcc_unreachable ();
6614 }
6615 if (!pat)
6616 return NULL_RTX;
6617 emit_insn (pat);
6618
6619 if (nonvoid)
6620 return target;
6621 else
6622 return const0_rtx;
6623 }
6624
6625
6626 /* Several bits below assume HWI >= 64 bits. This should be enforced
6627 by config.gcc. */
6628 #if HOST_BITS_PER_WIDE_INT < 64
6629 # error "HOST_WIDE_INT too small"
6630 #endif
6631
6632 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6633 with an 8-bit output vector. OPINT contains the integer operands; bit N
6634 of OP_CONST is set if OPINT[N] is valid. */
6635
6636 static tree
6637 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6638 {
6639 if (op_const == 3)
6640 {
6641 int i, val;
6642 for (i = 0, val = 0; i < 8; ++i)
6643 {
6644 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6645 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6646 if (c0 >= c1)
6647 val |= 1 << i;
6648 }
6649 return build_int_cst (long_integer_type_node, val);
6650 }
6651 else if (op_const == 2 && opint[1] == 0)
6652 return build_int_cst (long_integer_type_node, 0xff);
6653 return NULL;
6654 }
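
/* For instance, __builtin_alpha_cmpbge (x, 0) always folds to 0xff,
   since every unsigned byte is >= 0.  With both operands constant,
   e.g. cmpbge (0x00ff, 0x0100), the result is 0xfd: bit 1 is clear
   because byte 1 of the first operand (0x00) is less than byte 1 of
   the second (0x01).  A host-side reference of the per-byte loop
   above would be, roughly:

     static unsigned long
     ref_cmpbge (unsigned long a, unsigned long b)
     {
       unsigned long val = 0;
       int i;
       for (i = 0; i < 8; ++i)
         if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
           val |= 1UL << i;
       return val;
     }
*/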
6655
6656 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6657 specialized form of an AND operation. Other byte manipulation instructions
6658 are defined in terms of this instruction, so this is also used as a
6659 subroutine for other builtins.
6660
6661 OP contains the tree operands; OPINT contains the extracted integer values.
6662 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6663 OPINT may be considered. */
6664
6665 static tree
6666 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6667 long op_const)
6668 {
6669 if (op_const & 2)
6670 {
6671 unsigned HOST_WIDE_INT mask = 0;
6672 int i;
6673
6674 for (i = 0; i < 8; ++i)
6675 if ((opint[1] >> i) & 1)
6676 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6677
6678 if (op_const & 1)
6679 return build_int_cst (long_integer_type_node, opint[0] & mask);
6680
6681 if (op)
6682 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6683 build_int_cst (long_integer_type_node, mask));
6684 }
6685 else if ((op_const & 1) && opint[0] == 0)
6686 return build_int_cst (long_integer_type_node, 0);
6687 return NULL;
6688 }
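
/* For example, zapnot (x, 0x0f) keeps the low four bytes and folds to
   x & 0xffffffff, while zap (x, 0x0f) (handled via the XOR with 0xff
   in alpha_fold_builtin below) keeps the high four bytes instead,
   i.e. x & 0xffffffff00000000.  */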
6689
6690 /* Fold the builtins for the EXT family of instructions. */
6691
6692 static tree
6693 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6694 long op_const, unsigned HOST_WIDE_INT bytemask,
6695 bool is_high)
6696 {
6697 long zap_const = 2;
6698 tree *zap_op = NULL;
6699
6700 if (op_const & 2)
6701 {
6702 unsigned HOST_WIDE_INT loc;
6703
6704 loc = opint[1] & 7;
6705 if (BYTES_BIG_ENDIAN)
6706 loc ^= 7;
6707 loc *= 8;
6708
6709 if (loc != 0)
6710 {
6711 if (op_const & 1)
6712 {
6713 unsigned HOST_WIDE_INT temp = opint[0];
6714 if (is_high)
6715 temp <<= loc;
6716 else
6717 temp >>= loc;
6718 opint[0] = temp;
6719 zap_const = 3;
6720 }
6721 }
6722 else
6723 zap_op = op;
6724 }
6725
6726 opint[1] = bytemask;
6727 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6728 }
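
/* E.g. with both operands constant, extbl (x, 2) becomes
   (x >> 16) & 0xff and extwl (x, 2) becomes (x >> 16) & 0xffff; when
   only the position operand is constant and its low three bits are
   zero, the fold degenerates to a plain zapnot of the first operand.  */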
6729
6730 /* Fold the builtins for the INS family of instructions. */
6731
6732 static tree
6733 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6734 long op_const, unsigned HOST_WIDE_INT bytemask,
6735 bool is_high)
6736 {
6737 if ((op_const & 1) && opint[0] == 0)
6738 return build_int_cst (long_integer_type_node, 0);
6739
6740 if (op_const & 2)
6741 {
6742 unsigned HOST_WIDE_INT temp, loc, byteloc;
6743 tree *zap_op = NULL;
6744
6745 loc = opint[1] & 7;
6746 if (BYTES_BIG_ENDIAN)
6747 loc ^= 7;
6748 bytemask <<= loc;
6749
6750 temp = opint[0];
6751 if (is_high)
6752 {
6753 byteloc = (64 - (loc * 8)) & 0x3f;
6754 if (byteloc == 0)
6755 zap_op = op;
6756 else
6757 temp >>= byteloc;
6758 bytemask >>= 8;
6759 }
6760 else
6761 {
6762 byteloc = loc * 8;
6763 if (byteloc == 0)
6764 zap_op = op;
6765 else
6766 temp <<= byteloc;
6767 }
6768
6769 opint[0] = temp;
6770 opint[1] = bytemask;
6771 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6772 }
6773
6774 return NULL;
6775 }
6776
6777 static tree
6778 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6779 long op_const, unsigned HOST_WIDE_INT bytemask,
6780 bool is_high)
6781 {
6782 if (op_const & 2)
6783 {
6784 unsigned HOST_WIDE_INT loc;
6785
6786 loc = opint[1] & 7;
6787 if (BYTES_BIG_ENDIAN)
6788 loc ^= 7;
6789 bytemask <<= loc;
6790
6791 if (is_high)
6792 bytemask >>= 8;
6793
6794 opint[1] = bytemask ^ 0xff;
6795 }
6796
6797 return alpha_fold_builtin_zapnot (op, opint, op_const);
6798 }
6799
6800 static tree
6801 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6802 {
6803 switch (op_const)
6804 {
6805 case 3:
6806 {
6807 unsigned HOST_WIDE_INT l;
6808 HOST_WIDE_INT h;
6809
6810 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6811
6812 #if HOST_BITS_PER_WIDE_INT > 64
6813 # error fixme
6814 #endif
6815
6816 return build_int_cst (long_integer_type_node, h);
6817 }
6818
6819 case 1:
6820 opint[1] = opint[0];
6821 /* FALLTHRU */
6822 case 2:
6823 /* Note that (X*1) >> 64 == 0. */
6824 if (opint[1] == 0 || opint[1] == 1)
6825 return build_int_cst (long_integer_type_node, 0);
6826 break;
6827 }
6828 return NULL;
6829 }
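
/* umulh yields bits 64..127 of the full 128-bit product; on a host
   with a 128-bit type this is (unsigned __int128) a * b >> 64.  E.g.
   umulh (1UL << 32, 1UL << 32) folds to 1, and a multiplication by 0
   or 1 folds to 0 even when the other operand is not constant.  */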
6830
6831 static tree
6832 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6833 {
6834 tree op0 = fold_convert (vtype, op[0]);
6835 tree op1 = fold_convert (vtype, op[1]);
6836 tree val = fold_build2 (code, vtype, op0, op1);
6837 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6838 }
6839
6840 static tree
6841 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6842 {
6843 unsigned HOST_WIDE_INT temp = 0;
6844 int i;
6845
6846 if (op_const != 3)
6847 return NULL;
6848
6849 for (i = 0; i < 8; ++i)
6850 {
6851 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6852 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6853 if (a >= b)
6854 temp += a - b;
6855 else
6856 temp += b - a;
6857 }
6858
6859 return build_int_cst (long_integer_type_node, temp);
6860 }
6861
6862 static tree
6863 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6864 {
6865 unsigned HOST_WIDE_INT temp;
6866
6867 if (op_const == 0)
6868 return NULL;
6869
6870 temp = opint[0] & 0xff;
6871 temp |= (opint[0] >> 24) & 0xff00;
6872
6873 return build_int_cst (long_integer_type_node, temp);
6874 }
6875
6876 static tree
6877 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6878 {
6879 unsigned HOST_WIDE_INT temp;
6880
6881 if (op_const == 0)
6882 return NULL;
6883
6884 temp = opint[0] & 0xff;
6885 temp |= (opint[0] >> 8) & 0xff00;
6886 temp |= (opint[0] >> 16) & 0xff0000;
6887 temp |= (opint[0] >> 24) & 0xff000000;
6888
6889 return build_int_cst (long_integer_type_node, temp);
6890 }
6891
6892 static tree
6893 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6894 {
6895 unsigned HOST_WIDE_INT temp;
6896
6897 if (op_const == 0)
6898 return NULL;
6899
6900 temp = opint[0] & 0xff;
6901 temp |= (opint[0] & 0xff00) << 24;
6902
6903 return build_int_cst (long_integer_type_node, temp);
6904 }
6905
6906 static tree
6907 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6908 {
6909 unsigned HOST_WIDE_INT temp;
6910
6911 if (op_const == 0)
6912 return NULL;
6913
6914 temp = opint[0] & 0xff;
6915 temp |= (opint[0] & 0x0000ff00) << 8;
6916 temp |= (opint[0] & 0x00ff0000) << 16;
6917 temp |= (opint[0] & 0xff000000) << 24;
6918
6919 return build_int_cst (long_integer_type_node, temp);
6920 }
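
/* For example, unpkbl (0x0201) folds to 0x0000000200000001 and
   unpkbw (0x04030201) to 0x0004000300020001: each low byte is spread
   into its own longword or word respectively.  */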
6921
6922 static tree
6923 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6924 {
6925 unsigned HOST_WIDE_INT temp;
6926
6927 if (op_const == 0)
6928 return NULL;
6929
6930 if (opint[0] == 0)
6931 temp = 64;
6932 else
6933 temp = exact_log2 (opint[0] & -opint[0]);
6934
6935 return build_int_cst (long_integer_type_node, temp);
6936 }
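
/* x & -x isolates the lowest set bit, so its exact_log2 is the number
   of trailing zeros: e.g. for 0x28 (binary 101000) it gives 8, and the
   builtin folds to 3.  A zero input folds to 64 by convention.  */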
6937
6938 static tree
6939 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6940 {
6941 unsigned HOST_WIDE_INT temp;
6942
6943 if (op_const == 0)
6944 return NULL;
6945
6946 if (opint[0] == 0)
6947 temp = 64;
6948 else
6949 temp = 64 - floor_log2 (opint[0]) - 1;
6950
6951 return build_int_cst (long_integer_type_node, temp);
6952 }
6953
6954 static tree
6955 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6956 {
6957 unsigned HOST_WIDE_INT temp, op;
6958
6959 if (op_const == 0)
6960 return NULL;
6961
6962 op = opint[0];
6963 temp = 0;
6964 while (op)
6965 temp++, op &= op - 1;
6966
6967 return build_int_cst (long_integer_type_node, temp);
6968 }
6969
6970 /* Fold one of our builtin functions. */
6971
6972 static tree
6973 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6974 {
6975 tree op[MAX_ARGS], t;
6976 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6977 long op_const = 0, arity = 0;
6978
6979 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6980 {
6981 tree arg = TREE_VALUE (t);
6982 if (arg == error_mark_node)
6983 return NULL;
6984 if (arity >= MAX_ARGS)
6985 return NULL;
6986
6987 op[arity] = arg;
6988 opint[arity] = 0;
6989 if (TREE_CODE (arg) == INTEGER_CST)
6990 {
6991 op_const |= 1L << arity;
6992 opint[arity] = int_cst_value (arg);
6993 }
6994 }
6995
6996 switch (DECL_FUNCTION_CODE (fndecl))
6997 {
6998 case ALPHA_BUILTIN_CMPBGE:
6999 return alpha_fold_builtin_cmpbge (opint, op_const);
7000
7001 case ALPHA_BUILTIN_EXTBL:
7002 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7003 case ALPHA_BUILTIN_EXTWL:
7004 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7005 case ALPHA_BUILTIN_EXTLL:
7006 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7007 case ALPHA_BUILTIN_EXTQL:
7008 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7009 case ALPHA_BUILTIN_EXTWH:
7010 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7011 case ALPHA_BUILTIN_EXTLH:
7012 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7013 case ALPHA_BUILTIN_EXTQH:
7014 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7015
7016 case ALPHA_BUILTIN_INSBL:
7017 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7018 case ALPHA_BUILTIN_INSWL:
7019 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7020 case ALPHA_BUILTIN_INSLL:
7021 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7022 case ALPHA_BUILTIN_INSQL:
7023 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7024 case ALPHA_BUILTIN_INSWH:
7025 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7026 case ALPHA_BUILTIN_INSLH:
7027 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7028 case ALPHA_BUILTIN_INSQH:
7029 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7030
7031 case ALPHA_BUILTIN_MSKBL:
7032 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7033 case ALPHA_BUILTIN_MSKWL:
7034 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7035 case ALPHA_BUILTIN_MSKLL:
7036 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7037 case ALPHA_BUILTIN_MSKQL:
7038 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7039 case ALPHA_BUILTIN_MSKWH:
7040 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7041 case ALPHA_BUILTIN_MSKLH:
7042 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7043 case ALPHA_BUILTIN_MSKQH:
7044 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7045
7046 case ALPHA_BUILTIN_UMULH:
7047 return alpha_fold_builtin_umulh (opint, op_const);
7048
7049 case ALPHA_BUILTIN_ZAP:
7050 opint[1] ^= 0xff;
7051 /* FALLTHRU */
7052 case ALPHA_BUILTIN_ZAPNOT:
7053 return alpha_fold_builtin_zapnot (op, opint, op_const);
7054
7055 case ALPHA_BUILTIN_MINUB8:
7056 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7057 case ALPHA_BUILTIN_MINSB8:
7058 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7059 case ALPHA_BUILTIN_MINUW4:
7060 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7061 case ALPHA_BUILTIN_MINSW4:
7062 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7063 case ALPHA_BUILTIN_MAXUB8:
7064 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7065 case ALPHA_BUILTIN_MAXSB8:
7066 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7067 case ALPHA_BUILTIN_MAXUW4:
7068 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7069 case ALPHA_BUILTIN_MAXSW4:
7070 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7071
7072 case ALPHA_BUILTIN_PERR:
7073 return alpha_fold_builtin_perr (opint, op_const);
7074 case ALPHA_BUILTIN_PKLB:
7075 return alpha_fold_builtin_pklb (opint, op_const);
7076 case ALPHA_BUILTIN_PKWB:
7077 return alpha_fold_builtin_pkwb (opint, op_const);
7078 case ALPHA_BUILTIN_UNPKBL:
7079 return alpha_fold_builtin_unpkbl (opint, op_const);
7080 case ALPHA_BUILTIN_UNPKBW:
7081 return alpha_fold_builtin_unpkbw (opint, op_const);
7082
7083 case ALPHA_BUILTIN_CTTZ:
7084 return alpha_fold_builtin_cttz (opint, op_const);
7085 case ALPHA_BUILTIN_CTLZ:
7086 return alpha_fold_builtin_ctlz (opint, op_const);
7087 case ALPHA_BUILTIN_CTPOP:
7088 return alpha_fold_builtin_ctpop (opint, op_const);
7089
7090 case ALPHA_BUILTIN_AMASK:
7091 case ALPHA_BUILTIN_IMPLVER:
7092 case ALPHA_BUILTIN_RPCC:
7093 case ALPHA_BUILTIN_THREAD_POINTER:
7094 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7095 /* None of these are foldable at compile-time. */
7096 default:
7097 return NULL;
7098 }
7099 }
7100 \f
7101 /* This page contains routines that are used to determine what the function
7102 prologue and epilogue code will do and write them out. */
7103
7104 /* Compute the size of the save area in the stack. */
7105
7106 /* These variables are used for communication between the following functions.
7107 They indicate various things about the current function being compiled
7108 that are used to tell what kind of prologue, epilogue and procedure
7109 descriptor to generate. */
7110
7111 /* Nonzero if we need a stack procedure. */
7112 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7113 static enum alpha_procedure_types alpha_procedure_type;
7114
7115 /* Register number (either FP or SP) that is used to unwind the frame. */
7116 static int vms_unwind_regno;
7117
7118 /* Register number used to save FP. We need not have one for RA since
7119 we don't modify it for register procedures. This is only defined
7120 for register frame procedures. */
7121 static int vms_save_fp_regno;
7122
7123 /* Register number used to reference objects off our PV. */
7124 static int vms_base_regno;
7125
7126 /* Compute register masks for saved registers. */
7127
7128 static void
7129 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7130 {
7131 unsigned long imask = 0;
7132 unsigned long fmask = 0;
7133 unsigned int i;
7134
7135 /* When outputting a thunk, we don't have valid register life info,
7136 but assemble_start_function wants to output .frame and .mask
7137 directives. */
7138 if (cfun->is_thunk)
7139 {
7140 *imaskP = 0;
7141 *fmaskP = 0;
7142 return;
7143 }
7144
7145 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7146 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7147
7148 /* One for every register we have to save. */
7149 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7150 if (! fixed_regs[i] && ! call_used_regs[i]
7151 && df_regs_ever_live_p (i) && i != REG_RA
7152 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7153 {
7154 if (i < 32)
7155 imask |= (1UL << i);
7156 else
7157 fmask |= (1UL << (i - 32));
7158 }
7159
7160 /* We need to restore these for the handler. */
7161 if (crtl->calls_eh_return)
7162 {
7163 for (i = 0; ; ++i)
7164 {
7165 unsigned regno = EH_RETURN_DATA_REGNO (i);
7166 if (regno == INVALID_REGNUM)
7167 break;
7168 imask |= 1UL << regno;
7169 }
7170 }
7171
7172 /* If any register spilled, then spill the return address also. */
7173 /* ??? This is required by the Digital stack unwind specification
7174 and isn't needed if we're doing Dwarf2 unwinding. */
7175 if (imask || fmask || alpha_ra_ever_killed ())
7176 imask |= (1UL << REG_RA);
7177
7178 *imaskP = imask;
7179 *fmaskP = fmask;
7180 }
7181
7182 int
7183 alpha_sa_size (void)
7184 {
7185 unsigned long mask[2];
7186 int sa_size = 0;
7187 int i, j;
7188
7189 alpha_sa_mask (&mask[0], &mask[1]);
7190
7191 if (TARGET_ABI_UNICOSMK)
7192 {
7193 if (mask[0] || mask[1])
7194 sa_size = 14;
7195 }
7196 else
7197 {
7198 for (j = 0; j < 2; ++j)
7199 for (i = 0; i < 32; ++i)
7200 if ((mask[j] >> i) & 1)
7201 sa_size++;
7202 }
7203
7204 if (TARGET_ABI_UNICOSMK)
7205 {
7206 /* We might not need to generate a frame if we don't make any calls
7207 (including calls to __T3E_MISMATCH if this is a vararg function),
7208 don't have any local variables which require stack slots, don't
7209 use alloca and have not determined that we need a frame for other
7210 reasons. */
7211
7212 alpha_procedure_type
7213 = (sa_size || get_frame_size() != 0
7214 || crtl->outgoing_args_size
7215 || cfun->stdarg || cfun->calls_alloca
7216 || frame_pointer_needed)
7217 ? PT_STACK : PT_REGISTER;
7218
7219 /* Always reserve space for saving callee-saved registers if we
7220 need a frame as required by the calling convention. */
7221 if (alpha_procedure_type == PT_STACK)
7222 sa_size = 14;
7223 }
7224 else if (TARGET_ABI_OPEN_VMS)
7225 {
7226 /* Start by assuming we can use a register procedure if we don't
7227 make any calls (REG_RA not used) or need to save any
7228 registers and a stack procedure if we do. */
7229 if ((mask[0] >> REG_RA) & 1)
7230 alpha_procedure_type = PT_STACK;
7231 else if (get_frame_size() != 0)
7232 alpha_procedure_type = PT_REGISTER;
7233 else
7234 alpha_procedure_type = PT_NULL;
7235
7236 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7237 made the final decision on stack procedure vs register procedure. */
7238 if (alpha_procedure_type == PT_STACK)
7239 sa_size -= 2;
7240
7241 /* Decide whether to refer to objects off our PV via FP or PV.
7242 If we need FP for something else or if we receive a nonlocal
7243 goto (which expects PV to contain the value), we must use PV.
7244 Otherwise, start by assuming we can use FP. */
7245
7246 vms_base_regno
7247 = (frame_pointer_needed
7248 || cfun->has_nonlocal_label
7249 || alpha_procedure_type == PT_STACK
7250 || crtl->outgoing_args_size)
7251 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7252
7253 /* If we want to copy PV into FP, we need to find some register
7254 in which to save FP. */
7255
7256 vms_save_fp_regno = -1;
7257 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7258 for (i = 0; i < 32; i++)
7259 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7260 vms_save_fp_regno = i;
7261
7262 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7263 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7264 else if (alpha_procedure_type == PT_NULL)
7265 vms_base_regno = REG_PV;
7266
7267 /* Stack unwinding should be done via FP unless we use it for PV. */
7268 vms_unwind_regno = (vms_base_regno == REG_PV
7269 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7270
7271 /* If this is a stack procedure, allow space for saving FP and RA. */
7272 if (alpha_procedure_type == PT_STACK)
7273 sa_size += 2;
7274 }
7275 else
7276 {
7277 /* Our size must be even (multiple of 16 bytes). */
7278 if (sa_size & 1)
7279 sa_size++;
7280 }
7281
7282 return sa_size * 8;
7283 }
7284
7285 /* Define the offset between two registers, one to be eliminated,
7286 and the other its replacement, at the start of a routine. */
7287
7288 HOST_WIDE_INT
7289 alpha_initial_elimination_offset (unsigned int from,
7290 unsigned int to ATTRIBUTE_UNUSED)
7291 {
7292 HOST_WIDE_INT ret;
7293
7294 ret = alpha_sa_size ();
7295 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7296
7297 switch (from)
7298 {
7299 case FRAME_POINTER_REGNUM:
7300 break;
7301
7302 case ARG_POINTER_REGNUM:
7303 ret += (ALPHA_ROUND (get_frame_size ()
7304 + crtl->args.pretend_args_size)
7305 - crtl->args.pretend_args_size);
7306 break;
7307
7308 default:
7309 gcc_unreachable ();
7310 }
7311
7312 return ret;
7313 }
7314
7315 int
7316 alpha_pv_save_size (void)
7317 {
7318 alpha_sa_size ();
7319 return alpha_procedure_type == PT_STACK ? 8 : 0;
7320 }
7321
7322 int
7323 alpha_using_fp (void)
7324 {
7325 alpha_sa_size ();
7326 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7327 }
7328
7329 #if TARGET_ABI_OPEN_VMS
7330
7331 const struct attribute_spec vms_attribute_table[] =
7332 {
7333 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7334 { "overlaid", 0, 0, true, false, false, NULL },
7335 { "global", 0, 0, true, false, false, NULL },
7336 { "initialize", 0, 0, true, false, false, NULL },
7337 { NULL, 0, 0, false, false, false, NULL }
7338 };
7339
7340 #endif
7341
7342 static int
7343 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7344 {
7345 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7346 }
7347
7348 int
7349 alpha_find_lo_sum_using_gp (rtx insn)
7350 {
7351 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7352 }
7353
7354 static int
7355 alpha_does_function_need_gp (void)
7356 {
7357 rtx insn;
7358
7359 /* The GP being variable is an OSF abi thing. */
7360 if (! TARGET_ABI_OSF)
7361 return 0;
7362
7363 /* We need the gp to load the address of __mcount. */
7364 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7365 return 1;
7366
7367 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7368 if (cfun->is_thunk)
7369 return 1;
7370
7371 /* The nonlocal receiver pattern assumes that the gp is valid for
7372 the nested function. Reasonable because it's almost always set
7373 correctly already. For the cases where that's wrong, make sure
7374 the nested function loads its gp on entry. */
7375 if (crtl->has_nonlocal_goto)
7376 return 1;
7377
7378 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7379 Even if we are a static function, we still need to do this in case
7380 our address is taken and passed to something like qsort. */
7381
7382 push_topmost_sequence ();
7383 insn = get_insns ();
7384 pop_topmost_sequence ();
7385
7386 for (; insn; insn = NEXT_INSN (insn))
7387 if (INSN_P (insn)
7388 && ! JUMP_TABLE_DATA_P (insn)
7389 && GET_CODE (PATTERN (insn)) != USE
7390 && GET_CODE (PATTERN (insn)) != CLOBBER
7391 && get_attr_usegp (insn))
7392 return 1;
7393
7394 return 0;
7395 }
7396
7397 \f
7398 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7399 sequences. */
7400
7401 static rtx
7402 set_frame_related_p (void)
7403 {
7404 rtx seq = get_insns ();
7405 rtx insn;
7406
7407 end_sequence ();
7408
7409 if (!seq)
7410 return NULL_RTX;
7411
7412 if (INSN_P (seq))
7413 {
7414 insn = seq;
7415 while (insn != NULL_RTX)
7416 {
7417 RTX_FRAME_RELATED_P (insn) = 1;
7418 insn = NEXT_INSN (insn);
7419 }
7420 seq = emit_insn (seq);
7421 }
7422 else
7423 {
7424 seq = emit_insn (seq);
7425 RTX_FRAME_RELATED_P (seq) = 1;
7426 }
7427 return seq;
7428 }
7429
7430 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
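/* E.g. FRP (emit_move_insn (dst, src)) wraps the emission in a
   sequence and marks every insn generated for it as frame-related, so
   the prologue unwind info covers it.  */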
7431
7432 /* Generates a store with the proper unwind info attached. VALUE is
7433 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7434 contains SP+FRAME_BIAS, and that is the unwind info that should be
7435 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7436 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7437
7438 static void
7439 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7440 HOST_WIDE_INT base_ofs, rtx frame_reg)
7441 {
7442 rtx addr, mem, insn;
7443
7444 addr = plus_constant (base_reg, base_ofs);
7445 mem = gen_rtx_MEM (DImode, addr);
7446 set_mem_alias_set (mem, alpha_sr_alias_set);
7447
7448 insn = emit_move_insn (mem, value);
7449 RTX_FRAME_RELATED_P (insn) = 1;
7450
7451 if (frame_bias || value != frame_reg)
7452 {
7453 if (frame_bias)
7454 {
7455 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7456 mem = gen_rtx_MEM (DImode, addr);
7457 }
7458
7459 REG_NOTES (insn)
7460 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7461 gen_rtx_SET (VOIDmode, mem, frame_reg),
7462 REG_NOTES (insn));
7463 }
7464 }
7465
7466 static void
7467 emit_frame_store (unsigned int regno, rtx base_reg,
7468 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7469 {
7470 rtx reg = gen_rtx_REG (DImode, regno);
7471 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7472 }
7473
7474 /* Write function prologue. */
7475
7476 /* On VMS we have two kinds of functions:
7477
7478 - stack frame (PROC_STACK)
7479 these are 'normal' functions with local variables which
7480 call other functions
7481 - register frame (PROC_REGISTER)
7482 keeps all data in registers, needs no stack
7483
7484 We must pass this to the assembler so it can generate the
7485 proper pdsc (procedure descriptor).
7486 This is done with the '.pdesc' command.
7487
7488 On non-VMS targets, we don't really differentiate between the two,
7489 as we can simply allocate stack without saving registers. */
7490
7491 void
7492 alpha_expand_prologue (void)
7493 {
7494 /* Registers to save. */
7495 unsigned long imask = 0;
7496 unsigned long fmask = 0;
7497 /* Stack space needed for pushing registers clobbered by us. */
7498 HOST_WIDE_INT sa_size;
7499 /* Complete stack size needed. */
7500 HOST_WIDE_INT frame_size;
7501 /* Offset from base reg to register save area. */
7502 HOST_WIDE_INT reg_offset;
7503 rtx sa_reg;
7504 int i;
7505
7506 sa_size = alpha_sa_size ();
7507
7508 frame_size = get_frame_size ();
7509 if (TARGET_ABI_OPEN_VMS)
7510 frame_size = ALPHA_ROUND (sa_size
7511 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7512 + frame_size
7513 + crtl->args.pretend_args_size);
7514 else if (TARGET_ABI_UNICOSMK)
7515 /* We have to allocate space for the DSIB if we generate a frame. */
7516 frame_size = ALPHA_ROUND (sa_size
7517 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7518 + ALPHA_ROUND (frame_size
7519 + crtl->outgoing_args_size);
7520 else
7521 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7522 + sa_size
7523 + ALPHA_ROUND (frame_size
7524 + crtl->args.pretend_args_size));
7525
7526 if (TARGET_ABI_OPEN_VMS)
7527 reg_offset = 8;
7528 else
7529 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7530
7531 alpha_sa_mask (&imask, &fmask);
7532
7533 /* Emit an insn to reload GP, if needed. */
7534 if (TARGET_ABI_OSF)
7535 {
7536 alpha_function_needs_gp = alpha_does_function_need_gp ();
7537 if (alpha_function_needs_gp)
7538 emit_insn (gen_prologue_ldgp ());
7539 }
7540
7541 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7542 the call to mcount ourselves, rather than having the linker do it
7543 magically in response to -pg. Since _mcount has special linkage,
7544 don't represent the call as a call. */
7545 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7546 emit_insn (gen_prologue_mcount ());
7547
7548 if (TARGET_ABI_UNICOSMK)
7549 unicosmk_gen_dsib (&imask);
7550
7551 /* Adjust the stack by the frame size. If the frame size is > 4096
7552 bytes, we need to be sure we probe somewhere in the first and last
7553 4096 bytes (we can probably get away without the latter test) and
7554 every 8192 bytes in between. If the frame size is > 32768, we
7555 do this in a loop. Otherwise, we generate the explicit probe
7556 instructions.
7557
7558 Note that we are only allowed to adjust sp once in the prologue. */
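/* An illustrative case (OSF, no saved registers): a 20000-byte frame
   gets probes at sp-4096, sp-12288 and sp-20000, followed by the
   single sp adjustment of -20000.  */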
7559
7560 if (frame_size <= 32768)
7561 {
7562 if (frame_size > 4096)
7563 {
7564 int probed;
7565
7566 for (probed = 4096; probed < frame_size; probed += 8192)
7567 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7568 ? -probed + 64
7569 : -probed)));
7570
7571 /* We only have to do this probe if we aren't saving registers. */
7572 if (sa_size == 0 && frame_size > probed - 4096)
7573 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7574 }
7575
7576 if (frame_size != 0)
7577 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7578 GEN_INT (TARGET_ABI_UNICOSMK
7579 ? -frame_size + 64
7580 : -frame_size))));
7581 }
7582 else
7583 {
7584 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7585 number of 8192 byte blocks to probe. We then probe each block
7586 in the loop and then set SP to the proper location. If the
7587 amount remaining is > 4096, we have to do one more probe if we
7588 are not saving any registers. */
7589
7590 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7591 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7592 rtx ptr = gen_rtx_REG (DImode, 22);
7593 rtx count = gen_rtx_REG (DImode, 23);
7594 rtx seq;
7595
7596 emit_move_insn (count, GEN_INT (blocks));
7597 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7598 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7599
7600 /* Because of the difficulty in emitting a new basic block this
7601 late in the compilation, generate the loop as a single insn. */
7602 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7603
7604 if (leftover > 4096 && sa_size == 0)
7605 {
7606 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7607 MEM_VOLATILE_P (last) = 1;
7608 emit_move_insn (last, const0_rtx);
7609 }
7610
7611 if (TARGET_ABI_WINDOWS_NT)
7612 {
7613 /* For NT stack unwind (done by 'reverse execution'), it's
7614 not OK to take the result of a loop, even though the value
7615 is already in ptr, so we reload it via a single operation
7616 and subtract it from sp.
7617
7618 Yes, that's correct -- we have to reload the whole constant
7619 into a temporary via ldah+lda then subtract from sp. */
7620
7621 HOST_WIDE_INT lo, hi;
7622 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7623 hi = frame_size - lo;
7624
7625 emit_move_insn (ptr, GEN_INT (hi));
7626 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7627 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7628 ptr));
7629 }
7630 else
7631 {
7632 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7633 GEN_INT (-leftover)));
7634 }
7635
7636 /* This alternative is special, because the DWARF code cannot
7637 possibly intuit through the loop above. So we invent this
7638 note for it to look at instead. */
7639 RTX_FRAME_RELATED_P (seq) = 1;
7640 REG_NOTES (seq)
7641 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7642 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7643 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7644 GEN_INT (TARGET_ABI_UNICOSMK
7645 ? -frame_size + 64
7646 : -frame_size))),
7647 REG_NOTES (seq));
7648 }
7649
7650 if (!TARGET_ABI_UNICOSMK)
7651 {
7652 HOST_WIDE_INT sa_bias = 0;
7653
7654 /* Cope with very large offsets to the register save area. */
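/* The expression ((reg_offset & 0xffff) ^ 0x8000) - 0x8000 below is
   the usual sign-extension of the low 16 bits: e.g. a reg_offset of
   0x12345 splits into low == 0x2345 and sa_bias == 0x10000 (when the
   save area still fits below 32 KB), keeping the save-area addresses
   reachable with 16-bit displacements.  */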
7655 sa_reg = stack_pointer_rtx;
7656 if (reg_offset + sa_size > 0x8000)
7657 {
7658 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7659 rtx sa_bias_rtx;
7660
7661 if (low + sa_size <= 0x8000)
7662 sa_bias = reg_offset - low, reg_offset = low;
7663 else
7664 sa_bias = reg_offset, reg_offset = 0;
7665
7666 sa_reg = gen_rtx_REG (DImode, 24);
7667 sa_bias_rtx = GEN_INT (sa_bias);
7668
7669 if (add_operand (sa_bias_rtx, DImode))
7670 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7671 else
7672 {
7673 emit_move_insn (sa_reg, sa_bias_rtx);
7674 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7675 }
7676 }
7677
7678 /* Save regs in stack order. Beginning with VMS PV. */
7679 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7680 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7681
7682 /* Save register RA next. */
7683 if (imask & (1UL << REG_RA))
7684 {
7685 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7686 imask &= ~(1UL << REG_RA);
7687 reg_offset += 8;
7688 }
7689
7690 /* Now save any other registers required to be saved. */
7691 for (i = 0; i < 31; i++)
7692 if (imask & (1UL << i))
7693 {
7694 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7695 reg_offset += 8;
7696 }
7697
7698 for (i = 0; i < 31; i++)
7699 if (fmask & (1UL << i))
7700 {
7701 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7702 reg_offset += 8;
7703 }
7704 }
7705 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7706 {
7707 /* The standard frame on the T3E includes space for saving registers.
7708 We just have to use it. We don't have to save the return address and
7709 the old frame pointer here - they are saved in the DSIB. */
7710
7711 reg_offset = -56;
7712 for (i = 9; i < 15; i++)
7713 if (imask & (1UL << i))
7714 {
7715 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7716 reg_offset -= 8;
7717 }
7718 for (i = 2; i < 10; i++)
7719 if (fmask & (1UL << i))
7720 {
7721 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7722 reg_offset -= 8;
7723 }
7724 }
7725
7726 if (TARGET_ABI_OPEN_VMS)
7727 {
7728 if (alpha_procedure_type == PT_REGISTER)
7729 /* Register frame procedures save the fp.
7730 ?? Ought to have a dwarf2 save for this. */
7731 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7732 hard_frame_pointer_rtx);
7733
7734 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7735 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7736 gen_rtx_REG (DImode, REG_PV)));
7737
7738 if (alpha_procedure_type != PT_NULL
7739 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7740 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7741
7742 /* If we have to allocate space for outgoing args, do it now. */
7743 if (crtl->outgoing_args_size != 0)
7744 {
7745 rtx seq
7746 = emit_move_insn (stack_pointer_rtx,
7747 plus_constant
7748 (hard_frame_pointer_rtx,
7749 - (ALPHA_ROUND
7750 (crtl->outgoing_args_size))));
7751
7752 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7753 if ! frame_pointer_needed. Setting the bit will change the CFA
7754 computation rule to use sp again, which would be wrong if we had
7755 frame_pointer_needed, as this means sp might move unpredictably
7756 later on.
7757
7758 Also, note that
7759 frame_pointer_needed
7760 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7761 and
7762 crtl->outgoing_args_size != 0
7763 => alpha_procedure_type != PT_NULL,
7764
7765 so when we are not setting the bit here, we are guaranteed to
7766 have emitted an FRP frame pointer update just before. */
7767 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7768 }
7769 }
7770 else if (!TARGET_ABI_UNICOSMK)
7771 {
7772 /* If we need a frame pointer, set it from the stack pointer. */
7773 if (frame_pointer_needed)
7774 {
7775 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7776 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7777 else
7778 /* This must always be the last instruction in the
7779 prologue, thus we emit a special move + clobber. */
7780 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7781 stack_pointer_rtx, sa_reg)));
7782 }
7783 }
7784
7785 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7786 the prologue, for exception handling reasons, we cannot do this for
7787 any insn that might fault. We could prevent this for mems with a
7788 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7789 have to prevent all such scheduling with a blockage.
7790
7791 Linux, on the other hand, never bothered to implement OSF/1's
7792 exception handling, and so doesn't care about such things. Anyone
7793 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7794
7795 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7796 emit_insn (gen_blockage ());
7797 }
7798
7799 /* Count the number of .file directives, so that .loc is up to date. */
7800 int num_source_filenames = 0;
7801
7802 /* Output the textual info surrounding the prologue. */
7803
7804 void
7805 alpha_start_function (FILE *file, const char *fnname,
7806 tree decl ATTRIBUTE_UNUSED)
7807 {
7808 unsigned long imask = 0;
7809 unsigned long fmask = 0;
7810 /* Stack space needed for pushing registers clobbered by us. */
7811 HOST_WIDE_INT sa_size;
7812 /* Complete stack size needed. */
7813 unsigned HOST_WIDE_INT frame_size;
7814 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7815 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7816 ? 524288
7817 : 1UL << 31;
7818 /* Offset from base reg to register save area. */
7819 HOST_WIDE_INT reg_offset;
7820 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7821 int i;
7822
7823 /* Don't emit an extern directive for functions defined in the same file. */
7824 if (TARGET_ABI_UNICOSMK)
7825 {
7826 tree name_tree;
7827 name_tree = get_identifier (fnname);
7828 TREE_ASM_WRITTEN (name_tree) = 1;
7829 }
7830
7831 alpha_fnname = fnname;
7832 sa_size = alpha_sa_size ();
7833
7834 frame_size = get_frame_size ();
7835 if (TARGET_ABI_OPEN_VMS)
7836 frame_size = ALPHA_ROUND (sa_size
7837 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7838 + frame_size
7839 + crtl->args.pretend_args_size);
7840 else if (TARGET_ABI_UNICOSMK)
7841 frame_size = ALPHA_ROUND (sa_size
7842 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7843 + ALPHA_ROUND (frame_size
7844 + crtl->outgoing_args_size);
7845 else
7846 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7847 + sa_size
7848 + ALPHA_ROUND (frame_size
7849 + crtl->args.pretend_args_size));
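 /* As an illustration of the OSF/1 layout computed above (numbers are
 arbitrary and assume ALPHA_ROUND rounds up to the 16-byte stack
 alignment): with 40 bytes of outgoing arguments, four saved registers
 (sa_size == 32) and 20 bytes of locals,

 frame_size = ALPHA_ROUND (40) + 32 + ALPHA_ROUND (20 + 0)
 = 48 + 32 + 32 = 112

 and reg_offset below becomes ALPHA_ROUND (40) == 48, i.e. the register
 save area sits immediately above the outgoing argument area. */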
7850
7851 if (TARGET_ABI_OPEN_VMS)
7852 reg_offset = 8;
7853 else
7854 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7855
7856 alpha_sa_mask (&imask, &fmask);
7857
7858 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7859 We have to do that before the .ent directive as we cannot switch
7860 files within procedures with native ecoff because line numbers are
7861 linked to procedure descriptors.
7862 Outputting the lineno helps debugging of one line functions as they
7863 would otherwise get no line number at all. Please note that we would
7864 like to put out last_linenum from final.c, but it is not accessible. */
7865
7866 if (write_symbols == SDB_DEBUG)
7867 {
7868 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7869 ASM_OUTPUT_SOURCE_FILENAME (file,
7870 DECL_SOURCE_FILE (current_function_decl));
7871 #endif
7872 #ifdef SDB_OUTPUT_SOURCE_LINE
7873 if (debug_info_level != DINFO_LEVEL_TERSE)
7874 SDB_OUTPUT_SOURCE_LINE (file,
7875 DECL_SOURCE_LINE (current_function_decl));
7876 #endif
7877 }
7878
7879 /* Issue function start and label. */
7880 if (TARGET_ABI_OPEN_VMS
7881 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7882 {
7883 fputs ("\t.ent ", file);
7884 assemble_name (file, fnname);
7885 putc ('\n', file);
7886
7887 /* If the function needs GP, we'll write the "..ng" label there.
7888 Otherwise, do it here. */
7889 if (TARGET_ABI_OSF
7890 && ! alpha_function_needs_gp
7891 && ! cfun->is_thunk)
7892 {
7893 putc ('$', file);
7894 assemble_name (file, fnname);
7895 fputs ("..ng:\n", file);
7896 }
7897 }
7898
7899 strcpy (entry_label, fnname);
7900 if (TARGET_ABI_OPEN_VMS)
7901 strcat (entry_label, "..en");
7902
7903 /* For public functions, the label must be globalized by appending an
7904 additional colon. */
7905 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7906 strcat (entry_label, ":");
7907
7908 ASM_OUTPUT_LABEL (file, entry_label);
7909 inside_function = TRUE;
7910
7911 if (TARGET_ABI_OPEN_VMS)
7912 fprintf (file, "\t.base $%d\n", vms_base_regno);
7913
7914 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7915 && !flag_inhibit_size_directive)
7916 {
7917 /* Set flags in procedure descriptor to request IEEE-conformant
7918 math-library routines. The value we set it to is PDSC_EXC_IEEE
7919 (/usr/include/pdsc.h). */
7920 fputs ("\t.eflag 48\n", file);
7921 }
7922
7923 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7924 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7925 alpha_arg_offset = -frame_size + 48;
7926
7927 /* Describe our frame. If the frame size is larger than an integer,
7928 print it as zero to avoid an assembler error. We won't be
7929 properly describing such a frame, but that's the best we can do. */
7930 if (TARGET_ABI_UNICOSMK)
7931 ;
7932 else if (TARGET_ABI_OPEN_VMS)
7933 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7934 HOST_WIDE_INT_PRINT_DEC "\n",
7935 vms_unwind_regno,
7936 frame_size >= (1UL << 31) ? 0 : frame_size,
7937 reg_offset);
7938 else if (!flag_inhibit_size_directive)
7939 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7940 (frame_pointer_needed
7941 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7942 frame_size >= max_frame_size ? 0 : frame_size,
7943 crtl->args.pretend_args_size);
7944
7945 /* Describe which registers were spilled. */
7946 if (TARGET_ABI_UNICOSMK)
7947 ;
7948 else if (TARGET_ABI_OPEN_VMS)
7949 {
7950 if (imask)
7951 /* ??? Does VMS care if mask contains ra? The old code didn't
7952 set it, so I don't set it here. */
7953 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7954 if (fmask)
7955 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7956 if (alpha_procedure_type == PT_REGISTER)
7957 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7958 }
7959 else if (!flag_inhibit_size_directive)
7960 {
7961 if (imask)
7962 {
7963 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7964 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7965
7966 for (i = 0; i < 32; ++i)
7967 if (imask & (1UL << i))
7968 reg_offset += 8;
7969 }
7970
7971 if (fmask)
7972 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7973 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7974 }
7975
7976 #if TARGET_ABI_OPEN_VMS
7977 /* Ifdef'ed because link_section is only available then. */
7978 switch_to_section (readonly_data_section);
7979 fprintf (file, "\t.align 3\n");
7980 assemble_name (file, fnname); fputs ("..na:\n", file);
7981 fputs ("\t.ascii \"", file);
7982 assemble_name (file, fnname);
7983 fputs ("\\0\"\n", file);
7984 alpha_need_linkage (fnname, 1);
7985 switch_to_section (text_section);
7986 #endif
7987 }
7988
7989 /* Emit the .prologue note at the scheduled end of the prologue. */
7990
7991 static void
7992 alpha_output_function_end_prologue (FILE *file)
7993 {
7994 if (TARGET_ABI_UNICOSMK)
7995 ;
7996 else if (TARGET_ABI_OPEN_VMS)
7997 fputs ("\t.prologue\n", file);
7998 else if (TARGET_ABI_WINDOWS_NT)
7999 fputs ("\t.prologue 0\n", file);
8000 else if (!flag_inhibit_size_directive)
8001 fprintf (file, "\t.prologue %d\n",
8002 alpha_function_needs_gp || cfun->is_thunk);
8003 }
8004
8005 /* Write function epilogue. */
8006
8007 /* ??? At some point we will want to support full unwind, and so will
8008 need to mark the epilogue as well. At the moment, we just confuse
8009 dwarf2out. */
8010 #undef FRP
8011 #define FRP(exp) exp
8012
8013 void
8014 alpha_expand_epilogue (void)
8015 {
8016 /* Registers to save. */
8017 unsigned long imask = 0;
8018 unsigned long fmask = 0;
8019 /* Stack space needed for pushing registers clobbered by us. */
8020 HOST_WIDE_INT sa_size;
8021 /* Complete stack size needed. */
8022 HOST_WIDE_INT frame_size;
8023 /* Offset from base reg to register save area. */
8024 HOST_WIDE_INT reg_offset;
8025 int fp_is_frame_pointer, fp_offset;
8026 rtx sa_reg, sa_reg_exp = NULL;
8027 rtx sp_adj1, sp_adj2, mem;
8028 rtx eh_ofs;
8029 int i;
8030
8031 sa_size = alpha_sa_size ();
8032
8033 frame_size = get_frame_size ();
8034 if (TARGET_ABI_OPEN_VMS)
8035 frame_size = ALPHA_ROUND (sa_size
8036 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8037 + frame_size
8038 + crtl->args.pretend_args_size);
8039 else if (TARGET_ABI_UNICOSMK)
8040 frame_size = ALPHA_ROUND (sa_size
8041 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8042 + ALPHA_ROUND (frame_size
8043 + crtl->outgoing_args_size);
8044 else
8045 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8046 + sa_size
8047 + ALPHA_ROUND (frame_size
8048 + crtl->args.pretend_args_size));
8049
8050 if (TARGET_ABI_OPEN_VMS)
8051 {
8052 if (alpha_procedure_type == PT_STACK)
8053 reg_offset = 8;
8054 else
8055 reg_offset = 0;
8056 }
8057 else
8058 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8059
8060 alpha_sa_mask (&imask, &fmask);
8061
8062 fp_is_frame_pointer
8063 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8064 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8065 fp_offset = 0;
8066 sa_reg = stack_pointer_rtx;
8067
8068 if (crtl->calls_eh_return)
8069 eh_ofs = EH_RETURN_STACKADJ_RTX;
8070 else
8071 eh_ofs = NULL_RTX;
8072
8073 if (!TARGET_ABI_UNICOSMK && sa_size)
8074 {
8075 /* If we have a frame pointer, restore SP from it. */
8076 if ((TARGET_ABI_OPEN_VMS
8077 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8078 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8079 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8080
8081 /* Cope with very large offsets to the register save area. */
8082 if (reg_offset + sa_size > 0x8000)
8083 {
8084 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8085 HOST_WIDE_INT bias;
8086
8087 if (low + sa_size <= 0x8000)
8088 bias = reg_offset - low, reg_offset = low;
8089 else
8090 bias = reg_offset, reg_offset = 0;
8091
8092 sa_reg = gen_rtx_REG (DImode, 22);
8093 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8094
8095 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8096 }
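 /* Illustrative example of the biasing above (values chosen arbitrarily):
 with reg_offset == 0x9010 and sa_size == 0x40, the 16-bit displacement
 field of a load cannot reach the save area directly, so

 low = ((0x9010 & 0xffff) ^ 0x8000) - 0x8000 = -0x6ff0
 bias = 0x9010 - (-0x6ff0) = 0x10000

 and $22 is set to $sp + 0x10000; every restore below then uses a small
 signed offset starting at -0x6ff0, which fits in 16 bits. */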
8097
8098 /* Restore registers in order, excepting a true frame pointer. */
8099
8100 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8101 if (! eh_ofs)
8102 set_mem_alias_set (mem, alpha_sr_alias_set);
8103 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8104
8105 reg_offset += 8;
8106 imask &= ~(1UL << REG_RA);
8107
8108 for (i = 0; i < 31; ++i)
8109 if (imask & (1UL << i))
8110 {
8111 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8112 fp_offset = reg_offset;
8113 else
8114 {
8115 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8116 set_mem_alias_set (mem, alpha_sr_alias_set);
8117 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8118 }
8119 reg_offset += 8;
8120 }
8121
8122 for (i = 0; i < 31; ++i)
8123 if (fmask & (1UL << i))
8124 {
8125 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8126 set_mem_alias_set (mem, alpha_sr_alias_set);
8127 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8128 reg_offset += 8;
8129 }
8130 }
8131 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8132 {
8133 /* Restore callee-saved general-purpose registers. */
8134
8135 reg_offset = -56;
8136
8137 for (i = 9; i < 15; i++)
8138 if (imask & (1UL << i))
8139 {
8140 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8141 reg_offset));
8142 set_mem_alias_set (mem, alpha_sr_alias_set);
8143 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8144 reg_offset -= 8;
8145 }
8146
8147 for (i = 2; i < 10; i++)
8148 if (fmask & (1UL << i))
8149 {
8150 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8151 reg_offset));
8152 set_mem_alias_set (mem, alpha_sr_alias_set);
8153 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8154 reg_offset -= 8;
8155 }
8156
8157 /* Restore the return address from the DSIB. */
8158
8159 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8160 set_mem_alias_set (mem, alpha_sr_alias_set);
8161 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8162 }
8163
8164 if (frame_size || eh_ofs)
8165 {
8166 sp_adj1 = stack_pointer_rtx;
8167
8168 if (eh_ofs)
8169 {
8170 sp_adj1 = gen_rtx_REG (DImode, 23);
8171 emit_move_insn (sp_adj1,
8172 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8173 }
8174
8175 /* If the stack size is large, begin computation into a temporary
8176 register so as not to interfere with a potential fp restore,
8177 which must be consecutive with an SP restore. */
8178 if (frame_size < 32768
8179 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8180 sp_adj2 = GEN_INT (frame_size);
8181 else if (TARGET_ABI_UNICOSMK)
8182 {
8183 sp_adj1 = gen_rtx_REG (DImode, 23);
8184 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8185 sp_adj2 = const0_rtx;
8186 }
8187 else if (frame_size < 0x40007fffL)
8188 {
8189 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8190
8191 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8192 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8193 sp_adj1 = sa_reg;
8194 else
8195 {
8196 sp_adj1 = gen_rtx_REG (DImode, 23);
8197 FRP (emit_move_insn (sp_adj1, sp_adj2));
8198 }
8199 sp_adj2 = GEN_INT (low);
8200 }
8201 else
8202 {
8203 rtx tmp = gen_rtx_REG (DImode, 23);
8204 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8205 3, false));
8206 if (!sp_adj2)
8207 {
8208 /* We can't drop new things to memory this late, afaik,
8209 so build it up by pieces. */
8210 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8211 -(frame_size < 0)));
8212 gcc_assert (sp_adj2);
8213 }
8214 }
8215
8216 /* From now on, things must be in order. So emit blockages. */
8217
8218 /* Restore the frame pointer. */
8219 if (TARGET_ABI_UNICOSMK)
8220 {
8221 emit_insn (gen_blockage ());
8222 mem = gen_rtx_MEM (DImode,
8223 plus_constant (hard_frame_pointer_rtx, -16));
8224 set_mem_alias_set (mem, alpha_sr_alias_set);
8225 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8226 }
8227 else if (fp_is_frame_pointer)
8228 {
8229 emit_insn (gen_blockage ());
8230 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8231 set_mem_alias_set (mem, alpha_sr_alias_set);
8232 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8233 }
8234 else if (TARGET_ABI_OPEN_VMS)
8235 {
8236 emit_insn (gen_blockage ());
8237 FRP (emit_move_insn (hard_frame_pointer_rtx,
8238 gen_rtx_REG (DImode, vms_save_fp_regno)));
8239 }
8240
8241 /* Restore the stack pointer. */
8242 emit_insn (gen_blockage ());
8243 if (sp_adj2 == const0_rtx)
8244 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8245 else
8246 FRP (emit_move_insn (stack_pointer_rtx,
8247 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8248 }
8249 else
8250 {
8251 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8252 {
8253 emit_insn (gen_blockage ());
8254 FRP (emit_move_insn (hard_frame_pointer_rtx,
8255 gen_rtx_REG (DImode, vms_save_fp_regno)));
8256 }
8257 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8258 {
8259 /* Decrement the frame pointer if the function does not have a
8260 frame. */
8261
8262 emit_insn (gen_blockage ());
8263 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8264 hard_frame_pointer_rtx, constm1_rtx)));
8265 }
8266 }
8267 }
8268 \f
8269 /* Output the rest of the textual info surrounding the epilogue. */
8270
8271 void
8272 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8273 {
8274 rtx insn;
8275
8276 /* We output a nop after noreturn calls at the very end of the function to
8277 ensure that the return address always remains in the caller's code range,
8278 as not doing so might confuse unwinding engines. */
8279 insn = get_last_insn ();
8280 if (!INSN_P (insn))
8281 insn = prev_active_insn (insn);
8282 if (CALL_P (insn))
8283 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8284
8285 #if TARGET_ABI_OSF
8286 if (cfun->is_thunk)
8287 free_after_compilation (cfun);
8288 #endif
8289
8290 #if TARGET_ABI_OPEN_VMS
8291 alpha_write_linkage (file, fnname, decl);
8292 #endif
8293
8294 /* End the function. */
8295 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8296 {
8297 fputs ("\t.end ", file);
8298 assemble_name (file, fnname);
8299 putc ('\n', file);
8300 }
8301 inside_function = FALSE;
8302
8303 /* Output jump tables and the static subroutine information block. */
8304 if (TARGET_ABI_UNICOSMK)
8305 {
8306 unicosmk_output_ssib (file, fnname);
8307 unicosmk_output_deferred_case_vectors (file);
8308 }
8309 }
8310
8311 #if TARGET_ABI_OSF
8312 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8313
8314 In order to avoid the hordes of differences between generated code
8315 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8316 lots of code loading up large constants, generate rtl and emit it
8317 instead of going straight to text.
8318
8319 Not sure why this idea hasn't been explored before... */
8320
8321 static void
8322 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8323 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8324 tree function)
8325 {
8326 HOST_WIDE_INT hi, lo;
8327 rtx this_rtx, insn, funexp;
8328
8329 gcc_assert (cfun->is_thunk);
8330
8331 /* We always require a valid GP. */
8332 emit_insn (gen_prologue_ldgp ());
8333 emit_note (NOTE_INSN_PROLOGUE_END);
8334
8335 /* Find the "this" pointer. If the function returns a structure,
8336 the structure return pointer is in $16. */
8337 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8338 this_rtx = gen_rtx_REG (Pmode, 17);
8339 else
8340 this_rtx = gen_rtx_REG (Pmode, 16);
8341
8342 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8343 entire constant for the add. */
8344 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8345 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8346 if (hi + lo == delta)
8347 {
8348 if (hi)
8349 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8350 if (lo)
8351 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8352 }
8353 else
8354 {
8355 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8356 delta, -(delta < 0));
8357 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8358 }
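 /* Worked example of the decomposition above (delta chosen arbitrarily):
 for delta == 0x12349000,

 lo = ((0x9000 ^ 0x8000) - 0x8000) = -0x7000
 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000 = 0x12350000

 hi + lo == delta, so the adjustment is done with a single ldah/lda pair
 (add 0x1235 << 16, then add -0x7000). Only when delta does not fit in
 a signed 32-bit value does the else branch load the full constant. */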
8359
8360 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8361 if (vcall_offset)
8362 {
8363 rtx tmp, tmp2;
8364
8365 tmp = gen_rtx_REG (Pmode, 0);
8366 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8367
8368 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8369 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8370 if (hi + lo == vcall_offset)
8371 {
8372 if (hi)
8373 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8374 }
8375 else
8376 {
8377 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8378 vcall_offset, -(vcall_offset < 0));
8379 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8380 lo = 0;
8381 }
8382 if (lo)
8383 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8384 else
8385 tmp2 = tmp;
8386 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8387
8388 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8389 }
8390
8391 /* Generate a tail call to the target function. */
8392 if (! TREE_USED (function))
8393 {
8394 assemble_external (function);
8395 TREE_USED (function) = 1;
8396 }
8397 funexp = XEXP (DECL_RTL (function), 0);
8398 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8399 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8400 SIBLING_CALL_P (insn) = 1;
8401
8402 /* Run just enough of rest_of_compilation to get the insns emitted.
8403 There's not really enough bulk here to make other passes such as
8404 instruction scheduling worth while. Note that use_thunk calls
8405 assemble_start_function and assemble_end_function. */
8406 insn = get_insns ();
8407 insn_locators_alloc ();
8408 shorten_branches (insn);
8409 final_start_function (insn, file, 1);
8410 final (insn, file, 1);
8411 final_end_function ();
8412 }
8413 #endif /* TARGET_ABI_OSF */
8414 \f
8415 /* Debugging support. */
8416
8417 #include "gstab.h"
8418
8419 /* Count the number of sdb-related labels generated (to find block
8420 start and end boundaries). */
8421
8422 int sdb_label_count = 0;
8423
8424 /* Name of the file containing the current function. */
8425
8426 static const char *current_function_file = "";
8427
8428 /* Offsets to alpha virtual arg/local debugging pointers. */
8429
8430 long alpha_arg_offset;
8431 long alpha_auto_offset;
8432 \f
8433 /* Emit a new filename to a stream. */
8434
8435 void
8436 alpha_output_filename (FILE *stream, const char *name)
8437 {
8438 static int first_time = TRUE;
8439
8440 if (first_time)
8441 {
8442 first_time = FALSE;
8443 ++num_source_filenames;
8444 current_function_file = name;
8445 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8446 output_quoted_string (stream, name);
8447 fprintf (stream, "\n");
8448 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8449 fprintf (stream, "\t#@stabs\n");
8450 }
8451
8452 else if (write_symbols == DBX_DEBUG)
8453 /* dbxout.c will emit an appropriate .stabs directive. */
8454 return;
8455
8456 else if (name != current_function_file
8457 && strcmp (name, current_function_file) != 0)
8458 {
8459 if (inside_function && ! TARGET_GAS)
8460 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8461 else
8462 {
8463 ++num_source_filenames;
8464 current_function_file = name;
8465 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8466 }
8467
8468 output_quoted_string (stream, name);
8469 fprintf (stream, "\n");
8470 }
8471 }
8472 \f
8473 /* Structure to show the current status of registers and memory. */
8474
8475 struct shadow_summary
8476 {
8477 struct {
8478 unsigned int i : 31; /* Mask of int regs */
8479 unsigned int fp : 31; /* Mask of fp regs */
8480 unsigned int mem : 1; /* mem == imem | fpmem */
8481 } used, defd;
8482 };
8483
8484 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8485 to the summary structure. SET is nonzero if the insn is setting the
8486 object, otherwise zero. */
8487
8488 static void
8489 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8490 {
8491 const char *format_ptr;
8492 int i, j;
8493
8494 if (x == 0)
8495 return;
8496
8497 switch (GET_CODE (x))
8498 {
8499 /* ??? Note that this case would be incorrect if the Alpha had a
8500 ZERO_EXTRACT in SET_DEST. */
8501 case SET:
8502 summarize_insn (SET_SRC (x), sum, 0);
8503 summarize_insn (SET_DEST (x), sum, 1);
8504 break;
8505
8506 case CLOBBER:
8507 summarize_insn (XEXP (x, 0), sum, 1);
8508 break;
8509
8510 case USE:
8511 summarize_insn (XEXP (x, 0), sum, 0);
8512 break;
8513
8514 case ASM_OPERANDS:
8515 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8516 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8517 break;
8518
8519 case PARALLEL:
8520 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8521 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8522 break;
8523
8524 case SUBREG:
8525 summarize_insn (SUBREG_REG (x), sum, 0);
8526 break;
8527
8528 case REG:
8529 {
8530 int regno = REGNO (x);
8531 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8532
8533 if (regno == 31 || regno == 63)
8534 break;
8535
8536 if (set)
8537 {
8538 if (regno < 32)
8539 sum->defd.i |= mask;
8540 else
8541 sum->defd.fp |= mask;
8542 }
8543 else
8544 {
8545 if (regno < 32)
8546 sum->used.i |= mask;
8547 else
8548 sum->used.fp |= mask;
8549 }
8550 }
8551 break;
8552
8553 case MEM:
8554 if (set)
8555 sum->defd.mem = 1;
8556 else
8557 sum->used.mem = 1;
8558
8559 /* Find the regs used in memory address computation: */
8560 summarize_insn (XEXP (x, 0), sum, 0);
8561 break;
8562
8563 case CONST_INT: case CONST_DOUBLE:
8564 case SYMBOL_REF: case LABEL_REF: case CONST:
8565 case SCRATCH: case ASM_INPUT:
8566 break;
8567
8568 /* Handle common unary and binary ops for efficiency. */
8569 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8570 case MOD: case UDIV: case UMOD: case AND: case IOR:
8571 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8572 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8573 case NE: case EQ: case GE: case GT: case LE:
8574 case LT: case GEU: case GTU: case LEU: case LTU:
8575 summarize_insn (XEXP (x, 0), sum, 0);
8576 summarize_insn (XEXP (x, 1), sum, 0);
8577 break;
8578
8579 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8580 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8581 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8582 case SQRT: case FFS:
8583 summarize_insn (XEXP (x, 0), sum, 0);
8584 break;
8585
8586 default:
8587 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8588 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8589 switch (format_ptr[i])
8590 {
8591 case 'e':
8592 summarize_insn (XEXP (x, i), sum, 0);
8593 break;
8594
8595 case 'E':
8596 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8597 summarize_insn (XVECEXP (x, i, j), sum, 0);
8598 break;
8599
8600 case 'i':
8601 break;
8602
8603 default:
8604 gcc_unreachable ();
8605 }
8606 }
8607 }
8608
8609 /* Ensure a sufficient number of `trapb' insns are in the code when
8610 the user requests code with a trap precision of functions or
8611 instructions.
8612
8613 In naive mode, when the user requests a trap-precision of
8614 "instruction", a trapb is needed after every instruction that may
8615 generate a trap. This ensures that the code is resumption safe but
8616 it is also slow.
8617
8618 When optimizations are turned on, we delay issuing a trapb as long
8619 as possible. In this context, a trap shadow is the sequence of
8620 instructions that starts with a (potentially) trap generating
8621 instruction and extends to the next trapb or call_pal instruction
8622 (but GCC never generates call_pal by itself). We can delay (and
8623 therefore sometimes omit) a trapb subject to the following
8624 conditions:
8625
8626 (a) On entry to the trap shadow, if any Alpha register or memory
8627 location contains a value that is used as an operand value by some
8628 instruction in the trap shadow (live on entry), then no instruction
8629 in the trap shadow may modify the register or memory location.
8630
8631 (b) Within the trap shadow, the computation of the base register
8632 for a memory load or store instruction may not involve using the
8633 result of an instruction that might generate an UNPREDICTABLE
8634 result.
8635
8636 (c) Within the trap shadow, no register may be used more than once
8637 as a destination register. (This is to make life easier for the
8638 trap-handler.)
8639
8640 (d) The trap shadow may not include any branch instructions. */
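 For example (illustrative only), with -mtrap-precision=i and
 optimization the pass below can delay the trapb after

 addt $f1,$f2,$f3 # may trap; opens the shadow
 subt $f4,$f5,$f6 # independent, shadow stays open

 but a subsequent

 addt $f1,$f7,$f3 # $f3 written twice in the shadow

 would violate (c), so a trapb is inserted just before it and a new
 shadow begins. */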
8641
8642 static void
8643 alpha_handle_trap_shadows (void)
8644 {
8645 struct shadow_summary shadow;
8646 int trap_pending, exception_nesting;
8647 rtx i, n;
8648
8649 trap_pending = 0;
8650 exception_nesting = 0;
8651 shadow.used.i = 0;
8652 shadow.used.fp = 0;
8653 shadow.used.mem = 0;
8654 shadow.defd = shadow.used;
8655
8656 for (i = get_insns (); i ; i = NEXT_INSN (i))
8657 {
8658 if (NOTE_P (i))
8659 {
8660 switch (NOTE_KIND (i))
8661 {
8662 case NOTE_INSN_EH_REGION_BEG:
8663 exception_nesting++;
8664 if (trap_pending)
8665 goto close_shadow;
8666 break;
8667
8668 case NOTE_INSN_EH_REGION_END:
8669 exception_nesting--;
8670 if (trap_pending)
8671 goto close_shadow;
8672 break;
8673
8674 case NOTE_INSN_EPILOGUE_BEG:
8675 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8676 goto close_shadow;
8677 break;
8678 }
8679 }
8680 else if (trap_pending)
8681 {
8682 if (alpha_tp == ALPHA_TP_FUNC)
8683 {
8684 if (JUMP_P (i)
8685 && GET_CODE (PATTERN (i)) == RETURN)
8686 goto close_shadow;
8687 }
8688 else if (alpha_tp == ALPHA_TP_INSN)
8689 {
8690 if (optimize > 0)
8691 {
8692 struct shadow_summary sum;
8693
8694 sum.used.i = 0;
8695 sum.used.fp = 0;
8696 sum.used.mem = 0;
8697 sum.defd = sum.used;
8698
8699 switch (GET_CODE (i))
8700 {
8701 case INSN:
8702 /* Annoyingly, get_attr_trap will die on these. */
8703 if (GET_CODE (PATTERN (i)) == USE
8704 || GET_CODE (PATTERN (i)) == CLOBBER)
8705 break;
8706
8707 summarize_insn (PATTERN (i), &sum, 0);
8708
8709 if ((sum.defd.i & shadow.defd.i)
8710 || (sum.defd.fp & shadow.defd.fp))
8711 {
8712 /* (c) would be violated */
8713 goto close_shadow;
8714 }
8715
8716 /* Combine shadow with summary of current insn: */
8717 shadow.used.i |= sum.used.i;
8718 shadow.used.fp |= sum.used.fp;
8719 shadow.used.mem |= sum.used.mem;
8720 shadow.defd.i |= sum.defd.i;
8721 shadow.defd.fp |= sum.defd.fp;
8722 shadow.defd.mem |= sum.defd.mem;
8723
8724 if ((sum.defd.i & shadow.used.i)
8725 || (sum.defd.fp & shadow.used.fp)
8726 || (sum.defd.mem & shadow.used.mem))
8727 {
8728 /* (a) would be violated (also takes care of (b)) */
8729 gcc_assert (get_attr_trap (i) != TRAP_YES
8730 || (!(sum.defd.i & sum.used.i)
8731 && !(sum.defd.fp & sum.used.fp)));
8732
8733 goto close_shadow;
8734 }
8735 break;
8736
8737 case JUMP_INSN:
8738 case CALL_INSN:
8739 case CODE_LABEL:
8740 goto close_shadow;
8741
8742 default:
8743 gcc_unreachable ();
8744 }
8745 }
8746 else
8747 {
8748 close_shadow:
8749 n = emit_insn_before (gen_trapb (), i);
8750 PUT_MODE (n, TImode);
8751 PUT_MODE (i, TImode);
8752 trap_pending = 0;
8753 shadow.used.i = 0;
8754 shadow.used.fp = 0;
8755 shadow.used.mem = 0;
8756 shadow.defd = shadow.used;
8757 }
8758 }
8759 }
8760
8761 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8762 && NONJUMP_INSN_P (i)
8763 && GET_CODE (PATTERN (i)) != USE
8764 && GET_CODE (PATTERN (i)) != CLOBBER
8765 && get_attr_trap (i) == TRAP_YES)
8766 {
8767 if (optimize && !trap_pending)
8768 summarize_insn (PATTERN (i), &shadow, 0);
8769 trap_pending = 1;
8770 }
8771 }
8772 }
8773 \f
8774 /* Alpha can only issue instruction groups simultaneously if they are
8775 suitably aligned. This is very processor-specific. */
8776 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8777 that are marked "fake". These instructions do not exist on that target,
8778 but it is possible to see these insns with deranged combinations of
8779 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8780 choose a result at random. */
8781
8782 enum alphaev4_pipe {
8783 EV4_STOP = 0,
8784 EV4_IB0 = 1,
8785 EV4_IB1 = 2,
8786 EV4_IBX = 4
8787 };
8788
8789 enum alphaev5_pipe {
8790 EV5_STOP = 0,
8791 EV5_NONE = 1,
8792 EV5_E01 = 2,
8793 EV5_E0 = 4,
8794 EV5_E1 = 8,
8795 EV5_FAM = 16,
8796 EV5_FA = 32,
8797 EV5_FM = 64
8798 };
8799
8800 static enum alphaev4_pipe
8801 alphaev4_insn_pipe (rtx insn)
8802 {
8803 if (recog_memoized (insn) < 0)
8804 return EV4_STOP;
8805 if (get_attr_length (insn) != 4)
8806 return EV4_STOP;
8807
8808 switch (get_attr_type (insn))
8809 {
8810 case TYPE_ILD:
8811 case TYPE_LDSYM:
8812 case TYPE_FLD:
8813 case TYPE_LD_L:
8814 return EV4_IBX;
8815
8816 case TYPE_IADD:
8817 case TYPE_ILOG:
8818 case TYPE_ICMOV:
8819 case TYPE_ICMP:
8820 case TYPE_FST:
8821 case TYPE_SHIFT:
8822 case TYPE_IMUL:
8823 case TYPE_FBR:
8824 case TYPE_MVI: /* fake */
8825 return EV4_IB0;
8826
8827 case TYPE_IST:
8828 case TYPE_MISC:
8829 case TYPE_IBR:
8830 case TYPE_JSR:
8831 case TYPE_CALLPAL:
8832 case TYPE_FCPYS:
8833 case TYPE_FCMOV:
8834 case TYPE_FADD:
8835 case TYPE_FDIV:
8836 case TYPE_FMUL:
8837 case TYPE_ST_C:
8838 case TYPE_MB:
8839 case TYPE_FSQRT: /* fake */
8840 case TYPE_FTOI: /* fake */
8841 case TYPE_ITOF: /* fake */
8842 return EV4_IB1;
8843
8844 default:
8845 gcc_unreachable ();
8846 }
8847 }
8848
8849 static enum alphaev5_pipe
8850 alphaev5_insn_pipe (rtx insn)
8851 {
8852 if (recog_memoized (insn) < 0)
8853 return EV5_STOP;
8854 if (get_attr_length (insn) != 4)
8855 return EV5_STOP;
8856
8857 switch (get_attr_type (insn))
8858 {
8859 case TYPE_ILD:
8860 case TYPE_FLD:
8861 case TYPE_LDSYM:
8862 case TYPE_IADD:
8863 case TYPE_ILOG:
8864 case TYPE_ICMOV:
8865 case TYPE_ICMP:
8866 return EV5_E01;
8867
8868 case TYPE_IST:
8869 case TYPE_FST:
8870 case TYPE_SHIFT:
8871 case TYPE_IMUL:
8872 case TYPE_MISC:
8873 case TYPE_MVI:
8874 case TYPE_LD_L:
8875 case TYPE_ST_C:
8876 case TYPE_MB:
8877 case TYPE_FTOI: /* fake */
8878 case TYPE_ITOF: /* fake */
8879 return EV5_E0;
8880
8881 case TYPE_IBR:
8882 case TYPE_JSR:
8883 case TYPE_CALLPAL:
8884 return EV5_E1;
8885
8886 case TYPE_FCPYS:
8887 return EV5_FAM;
8888
8889 case TYPE_FBR:
8890 case TYPE_FCMOV:
8891 case TYPE_FADD:
8892 case TYPE_FDIV:
8893 case TYPE_FSQRT: /* fake */
8894 return EV5_FA;
8895
8896 case TYPE_FMUL:
8897 return EV5_FM;
8898
8899 default:
8900 gcc_unreachable ();
8901 }
8902 }
8903
8904 /* IN_USE is a mask of the slots currently filled within the insn group.
8905 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8906 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8907
8908 LEN is, of course, the length of the group in bytes. */
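 /* A short example of the tentative IBX slotting (illustrative): a load
 (EV4_IBX) is first accounted to IB0 with the IBX bit kept set; if the
 next insn in the group is an add (EV4_IB0), the load is assumed to be
 swapped into IB1 by the hardware, the add takes IB0, and the group is
 full after 8 bytes. A second load instead simply takes IB1. */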
8909
8910 static rtx
8911 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8912 {
8913 int len, in_use;
8914
8915 len = in_use = 0;
8916
8917 if (! INSN_P (insn)
8918 || GET_CODE (PATTERN (insn)) == CLOBBER
8919 || GET_CODE (PATTERN (insn)) == USE)
8920 goto next_and_done;
8921
8922 while (1)
8923 {
8924 enum alphaev4_pipe pipe;
8925
8926 pipe = alphaev4_insn_pipe (insn);
8927 switch (pipe)
8928 {
8929 case EV4_STOP:
8930 /* Force complex instructions to start new groups. */
8931 if (in_use)
8932 goto done;
8933
8934 /* If this is a completely unrecognized insn, it's an asm.
8935 We don't know how long it is, so record length as -1 to
8936 signal a needed realignment. */
8937 if (recog_memoized (insn) < 0)
8938 len = -1;
8939 else
8940 len = get_attr_length (insn);
8941 goto next_and_done;
8942
8943 case EV4_IBX:
8944 if (in_use & EV4_IB0)
8945 {
8946 if (in_use & EV4_IB1)
8947 goto done;
8948 in_use |= EV4_IB1;
8949 }
8950 else
8951 in_use |= EV4_IB0 | EV4_IBX;
8952 break;
8953
8954 case EV4_IB0:
8955 if (in_use & EV4_IB0)
8956 {
8957 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8958 goto done;
8959 in_use |= EV4_IB1;
8960 }
8961 in_use |= EV4_IB0;
8962 break;
8963
8964 case EV4_IB1:
8965 if (in_use & EV4_IB1)
8966 goto done;
8967 in_use |= EV4_IB1;
8968 break;
8969
8970 default:
8971 gcc_unreachable ();
8972 }
8973 len += 4;
8974
8975 /* Haifa doesn't do well scheduling branches. */
8976 if (JUMP_P (insn))
8977 goto next_and_done;
8978
8979 next:
8980 insn = next_nonnote_insn (insn);
8981
8982 if (!insn || ! INSN_P (insn))
8983 goto done;
8984
8985 /* Let Haifa tell us where it thinks insn group boundaries are. */
8986 if (GET_MODE (insn) == TImode)
8987 goto done;
8988
8989 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8990 goto next;
8991 }
8992
8993 next_and_done:
8994 insn = next_nonnote_insn (insn);
8995
8996 done:
8997 *plen = len;
8998 *pin_use = in_use;
8999 return insn;
9000 }
9001
9002 /* IN_USE is a mask of the slots currently filled within the insn group.
9003 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9004 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9005
9006 LEN is, of course, the length of the group in bytes. */
9007
9008 static rtx
9009 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9010 {
9011 int len, in_use;
9012
9013 len = in_use = 0;
9014
9015 if (! INSN_P (insn)
9016 || GET_CODE (PATTERN (insn)) == CLOBBER
9017 || GET_CODE (PATTERN (insn)) == USE)
9018 goto next_and_done;
9019
9020 while (1)
9021 {
9022 enum alphaev5_pipe pipe;
9023
9024 pipe = alphaev5_insn_pipe (insn);
9025 switch (pipe)
9026 {
9027 case EV5_STOP:
9028 /* Force complex instructions to start new groups. */
9029 if (in_use)
9030 goto done;
9031
9032 /* If this is a completely unrecognized insn, it's an asm.
9033 We don't know how long it is, so record length as -1 to
9034 signal a needed realignment. */
9035 if (recog_memoized (insn) < 0)
9036 len = -1;
9037 else
9038 len = get_attr_length (insn);
9039 goto next_and_done;
9040
9041 /* ??? Most of the places below, we would like to assert can never
9042 happen, as it would indicate an error either in Haifa, or
9043 in the scheduling description. Unfortunately, Haifa never
9044 schedules the last instruction of the BB, so we don't have
9045 an accurate TI bit to go off. */
9046 case EV5_E01:
9047 if (in_use & EV5_E0)
9048 {
9049 if (in_use & EV5_E1)
9050 goto done;
9051 in_use |= EV5_E1;
9052 }
9053 else
9054 in_use |= EV5_E0 | EV5_E01;
9055 break;
9056
9057 case EV5_E0:
9058 if (in_use & EV5_E0)
9059 {
9060 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9061 goto done;
9062 in_use |= EV5_E1;
9063 }
9064 in_use |= EV5_E0;
9065 break;
9066
9067 case EV5_E1:
9068 if (in_use & EV5_E1)
9069 goto done;
9070 in_use |= EV5_E1;
9071 break;
9072
9073 case EV5_FAM:
9074 if (in_use & EV5_FA)
9075 {
9076 if (in_use & EV5_FM)
9077 goto done;
9078 in_use |= EV5_FM;
9079 }
9080 else
9081 in_use |= EV5_FA | EV5_FAM;
9082 break;
9083
9084 case EV5_FA:
9085 if (in_use & EV5_FA)
9086 goto done;
9087 in_use |= EV5_FA;
9088 break;
9089
9090 case EV5_FM:
9091 if (in_use & EV5_FM)
9092 goto done;
9093 in_use |= EV5_FM;
9094 break;
9095
9096 case EV5_NONE:
9097 break;
9098
9099 default:
9100 gcc_unreachable ();
9101 }
9102 len += 4;
9103
9104 /* Haifa doesn't do well scheduling branches. */
9105 /* ??? If this is predicted not-taken, slotting continues, except
9106 that no more IBR, FBR, or JSR insns may be slotted. */
9107 if (JUMP_P (insn))
9108 goto next_and_done;
9109
9110 next:
9111 insn = next_nonnote_insn (insn);
9112
9113 if (!insn || ! INSN_P (insn))
9114 goto done;
9115
9116 /* Let Haifa tell us where it thinks insn group boundaries are. */
9117 if (GET_MODE (insn) == TImode)
9118 goto done;
9119
9120 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9121 goto next;
9122 }
9123
9124 next_and_done:
9125 insn = next_nonnote_insn (insn);
9126
9127 done:
9128 *plen = len;
9129 *pin_use = in_use;
9130 return insn;
9131 }
9132
9133 static rtx
9134 alphaev4_next_nop (int *pin_use)
9135 {
9136 int in_use = *pin_use;
9137 rtx nop;
9138
9139 if (!(in_use & EV4_IB0))
9140 {
9141 in_use |= EV4_IB0;
9142 nop = gen_nop ();
9143 }
9144 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9145 {
9146 in_use |= EV4_IB1;
9147 nop = gen_nop ();
9148 }
9149 else if (TARGET_FP && !(in_use & EV4_IB1))
9150 {
9151 in_use |= EV4_IB1;
9152 nop = gen_fnop ();
9153 }
9154 else
9155 nop = gen_unop ();
9156
9157 *pin_use = in_use;
9158 return nop;
9159 }
9160
9161 static rtx
9162 alphaev5_next_nop (int *pin_use)
9163 {
9164 int in_use = *pin_use;
9165 rtx nop;
9166
9167 if (!(in_use & EV5_E1))
9168 {
9169 in_use |= EV5_E1;
9170 nop = gen_nop ();
9171 }
9172 else if (TARGET_FP && !(in_use & EV5_FA))
9173 {
9174 in_use |= EV5_FA;
9175 nop = gen_fnop ();
9176 }
9177 else if (TARGET_FP && !(in_use & EV5_FM))
9178 {
9179 in_use |= EV5_FM;
9180 nop = gen_fnop ();
9181 }
9182 else
9183 nop = gen_unop ();
9184
9185 *pin_use = in_use;
9186 return nop;
9187 }
9188
9189 /* The instruction group alignment main loop. */
9190
9191 static void
9192 alpha_align_insns (unsigned int max_align,
9193 rtx (*next_group) (rtx, int *, int *),
9194 rtx (*next_nop) (int *))
9195 {
9196 /* ALIGN is the known alignment for the insn group. */
9197 unsigned int align;
9198 /* OFS is the offset of the current insn in the insn group. */
9199 int ofs;
9200 int prev_in_use, in_use, len, ldgp;
9201 rtx i, next;
9202
9203 /* Let shorten_branches take care of assigning alignments to code labels. */
9204 shorten_branches (get_insns ());
9205
9206 if (align_functions < 4)
9207 align = 4;
9208 else if ((unsigned int) align_functions < max_align)
9209 align = align_functions;
9210 else
9211 align = max_align;
9212
9213 ofs = prev_in_use = 0;
9214 i = get_insns ();
9215 if (NOTE_P (i))
9216 i = next_nonnote_insn (i);
9217
9218 ldgp = alpha_function_needs_gp ? 8 : 0;
9219
9220 while (i)
9221 {
9222 next = (*next_group) (i, &in_use, &len);
9223
9224 /* When we see a label, resync alignment etc. */
9225 if (LABEL_P (i))
9226 {
9227 unsigned int new_align = 1 << label_to_alignment (i);
9228
9229 if (new_align >= align)
9230 {
9231 align = new_align < max_align ? new_align : max_align;
9232 ofs = 0;
9233 }
9234
9235 else if (ofs & (new_align-1))
9236 ofs = (ofs | (new_align-1)) + 1;
9237 gcc_assert (!len);
9238 }
9239
9240 /* Handle complex instructions specially. */
9241 else if (in_use == 0)
9242 {
9243 /* Asms will have length < 0. This is a signal that we have
9244 lost alignment knowledge. Assume, however, that the asm
9245 will not mis-align instructions. */
9246 if (len < 0)
9247 {
9248 ofs = 0;
9249 align = 4;
9250 len = 0;
9251 }
9252 }
9253
9254 /* If the known alignment is smaller than the recognized insn group,
9255 realign the output. */
9256 else if ((int) align < len)
9257 {
9258 unsigned int new_log_align = len > 8 ? 4 : 3;
9259 rtx prev, where;
9260
9261 where = prev = prev_nonnote_insn (i);
9262 if (!where || !LABEL_P (where))
9263 where = i;
9264
9265 /* Can't realign between a call and its gp reload. */
9266 if (! (TARGET_EXPLICIT_RELOCS
9267 && prev && CALL_P (prev)))
9268 {
9269 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9270 align = 1 << new_log_align;
9271 ofs = 0;
9272 }
9273 }
9274
9275 /* We may not insert padding inside the initial ldgp sequence. */
9276 else if (ldgp > 0)
9277 ldgp -= len;
9278
9279 /* If the group won't fit in the same INT16 as the previous,
9280 we need to add padding to keep the group together. Rather
9281 than simply leaving the insn filling to the assembler, we
9282 can make use of the knowledge of what sorts of instructions
9283 were issued in the previous group to make sure that all of
9284 the added nops are really free. */
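 For instance (illustrative), with align == 16 and the previous group
 ending at ofs == 8, a new 12-byte group would straddle the 16-byte
 boundary; (16 - 8) / 4 == 2 nops are emitted, and next_nop picks them
 from whichever issue slots the previous group left unused so that they
 cost no extra issue cycles. */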
9285 else if (ofs + len > (int) align)
9286 {
9287 int nop_count = (align - ofs) / 4;
9288 rtx where;
9289
9290 /* Insert nops before labels, branches, and calls to truly merge
9291 the execution of the nops with the previous instruction group. */
9292 where = prev_nonnote_insn (i);
9293 if (where)
9294 {
9295 if (LABEL_P (where))
9296 {
9297 rtx where2 = prev_nonnote_insn (where);
9298 if (where2 && JUMP_P (where2))
9299 where = where2;
9300 }
9301 else if (NONJUMP_INSN_P (where))
9302 where = i;
9303 }
9304 else
9305 where = i;
9306
9307 do
9308 emit_insn_before ((*next_nop)(&prev_in_use), where);
9309 while (--nop_count);
9310 ofs = 0;
9311 }
9312
9313 ofs = (ofs + len) & (align - 1);
9314 prev_in_use = in_use;
9315 i = next;
9316 }
9317 }
9318
9319 /* Insert an unop between a noreturn function call and GP load. */
9320
9321 static void
9322 alpha_pad_noreturn (void)
9323 {
9324 rtx insn, next;
9325
9326 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9327 {
9328 if (!CALL_P (insn)
9329 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9330 continue;
9331
9332 next = next_active_insn (insn);
9333
9334 if (next)
9335 {
9336 rtx pat = PATTERN (next);
9337
9338 if (GET_CODE (pat) == SET
9339 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9340 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9341 emit_insn_after (gen_unop (), insn);
9342 }
9343 }
9344 }
9345 \f
9346 /* Machine dependent reorg pass. */
9347
9348 static void
9349 alpha_reorg (void)
9350 {
9351 /* Workaround for a linker error that triggers when an
9352 exception handler immediately follows a noreturn function.
9353
9354 The instruction stream from an object file:
9355
9356 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9357 58: 00 00 ba 27 ldah gp,0(ra)
9358 5c: 00 00 bd 23 lda gp,0(gp)
9359 60: 00 00 7d a7 ldq t12,0(gp)
9360 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9361
9362 was converted in the final link pass to:
9363
9364 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9365 fdb28: 00 00 fe 2f unop
9366 fdb2c: 00 00 fe 2f unop
9367 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9368 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9369
9370 GP load instructions were wrongly cleared by the linker relaxation
9371 pass. This workaround prevents removal of GP loads by inserting
9372 an unop instruction between a noreturn function call and
9373 exception handler prologue. */
9374
9375 if (current_function_has_exception_handlers ())
9376 alpha_pad_noreturn ();
9377
9378 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9379 alpha_handle_trap_shadows ();
9380
9381 /* Due to the number of extra trapb insns, don't bother fixing up
9382 alignment when trap precision is instruction. Moreover, we can
9383 only do our job when sched2 is run. */
9384 if (optimize && !optimize_size
9385 && alpha_tp != ALPHA_TP_INSN
9386 && flag_schedule_insns_after_reload)
9387 {
9388 if (alpha_tune == PROCESSOR_EV4)
9389 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9390 else if (alpha_tune == PROCESSOR_EV5)
9391 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9392 }
9393 }
9394 \f
9395 #if !TARGET_ABI_UNICOSMK
9396
9397 #ifdef HAVE_STAMP_H
9398 #include <stamp.h>
9399 #endif
9400
9401 static void
9402 alpha_file_start (void)
9403 {
9404 #ifdef OBJECT_FORMAT_ELF
9405 /* If emitting dwarf2 debug information, we cannot generate a .file
9406 directive to start the file, as it will conflict with dwarf2out
9407 file numbers. So it's only useful when emitting mdebug output. */
9408 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9409 #endif
9410
9411 default_file_start ();
9412 #ifdef MS_STAMP
9413 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9414 #endif
9415
9416 fputs ("\t.set noreorder\n", asm_out_file);
9417 fputs ("\t.set volatile\n", asm_out_file);
9418 if (!TARGET_ABI_OPEN_VMS)
9419 fputs ("\t.set noat\n", asm_out_file);
9420 if (TARGET_EXPLICIT_RELOCS)
9421 fputs ("\t.set nomacro\n", asm_out_file);
9422 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9423 {
9424 const char *arch;
9425
9426 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9427 arch = "ev6";
9428 else if (TARGET_MAX)
9429 arch = "pca56";
9430 else if (TARGET_BWX)
9431 arch = "ev56";
9432 else if (alpha_cpu == PROCESSOR_EV5)
9433 arch = "ev5";
9434 else
9435 arch = "ev4";
9436
9437 fprintf (asm_out_file, "\t.arch %s\n", arch);
9438 }
9439 }
9440 #endif
9441
9442 #ifdef OBJECT_FORMAT_ELF
9443 /* Since we don't have a .dynbss section, we should not allow global
9444 relocations in the .rodata section. */
9445
9446 static int
9447 alpha_elf_reloc_rw_mask (void)
9448 {
9449 return flag_pic ? 3 : 2;
9450 }
9451
9452 /* Return a section for X. The only special thing we do here is to
9453 honor small data. */
9454
9455 static section *
9456 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9457 unsigned HOST_WIDE_INT align)
9458 {
9459 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9460 /* ??? Consider using mergeable sdata sections. */
9461 return sdata_section;
9462 else
9463 return default_elf_select_rtx_section (mode, x, align);
9464 }
9465
9466 static unsigned int
9467 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9468 {
9469 unsigned int flags = 0;
9470
9471 if (strcmp (name, ".sdata") == 0
9472 || strncmp (name, ".sdata.", 7) == 0
9473 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9474 || strcmp (name, ".sbss") == 0
9475 || strncmp (name, ".sbss.", 6) == 0
9476 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9477 flags = SECTION_SMALL;
9478
9479 flags |= default_section_type_flags (decl, name, reloc);
9480 return flags;
9481 }
9482 #endif /* OBJECT_FORMAT_ELF */
9483 \f
9484 /* Structure to collect function names for final output in link section. */
9485 /* Note that items marked with GTY can't be ifdef'ed out. */
9486
9487 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9488 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9489
9490 struct GTY(()) alpha_links
9491 {
9492 int num;
9493 rtx linkage;
9494 enum links_kind lkind;
9495 enum reloc_kind rkind;
9496 };
9497
9498 struct GTY(()) alpha_funcs
9499 {
9500 int num;
9501 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9502 links;
9503 };
9504
9505 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9506 splay_tree alpha_links_tree;
9507 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9508 splay_tree alpha_funcs_tree;
9509
9510 static GTY(()) int alpha_funcs_num;
9511
9512 #if TARGET_ABI_OPEN_VMS
9513
9514 /* Return the VMS argument type corresponding to MODE. */
9515
9516 enum avms_arg_type
9517 alpha_arg_type (enum machine_mode mode)
9518 {
9519 switch (mode)
9520 {
9521 case SFmode:
9522 return TARGET_FLOAT_VAX ? FF : FS;
9523 case DFmode:
9524 return TARGET_FLOAT_VAX ? FD : FT;
9525 default:
9526 return I64;
9527 }
9528 }
9529
9530 /* Return an rtx for an integer representing the VMS Argument Information
9531 register value. */
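 /* Concretely, the value assembled below is

 num_args | atypes[0] << 8 | atypes[1] << 11 | ... | atypes[5] << 23

 i.e. three bits of type information per argument starting at bit 8.
 For example (illustrative), a call f(long, double) gives num_args == 2,
 atypes[0] == I64 and atypes[1] == (TARGET_FLOAT_VAX ? FD : FT). */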
9532
9533 rtx
9534 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9535 {
9536 unsigned HOST_WIDE_INT regval = cum.num_args;
9537 int i;
9538
9539 for (i = 0; i < 6; i++)
9540 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9541
9542 return GEN_INT (regval);
9543 }
9544 \f
9545 /* Make (or fake) .linkage entry for function call.
9546
9547 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9548
9549 Return a SYMBOL_REF rtx for the linkage. */
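 /* For example (illustrative), a first reference to "foo" returns a
 SYMBOL_REF for "$foo..lk"; the per-function variants built by
 alpha_use_linkage below are additionally prefixed with the function
 number (e.g. "$2..foo..lk"), and alpha_write_linkage later emits the
 corresponding .link section entries -- a .quad pair for locally
 defined names or a .linkage/.code_address request for externals. */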
9550
9551 rtx
9552 alpha_need_linkage (const char *name, int is_local)
9553 {
9554 splay_tree_node node;
9555 struct alpha_links *al;
9556
9557 if (name[0] == '*')
9558 name++;
9559
9560 if (is_local)
9561 {
9562 struct alpha_funcs *cfaf;
9563
9564 if (!alpha_funcs_tree)
9565 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9566 splay_tree_compare_pointers);
9567
9568 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9569
9570 cfaf->links = 0;
9571 cfaf->num = ++alpha_funcs_num;
9572
9573 splay_tree_insert (alpha_funcs_tree,
9574 (splay_tree_key) current_function_decl,
9575 (splay_tree_value) cfaf);
9576 }
9577
9578 if (alpha_links_tree)
9579 {
9580 /* Is this name already defined? */
9581
9582 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9583 if (node)
9584 {
9585 al = (struct alpha_links *) node->value;
9586 if (is_local)
9587 {
9588 /* Defined here but external assumed. */
9589 if (al->lkind == KIND_EXTERN)
9590 al->lkind = KIND_LOCAL;
9591 }
9592 else
9593 {
9594 /* Used here but unused assumed. */
9595 if (al->lkind == KIND_UNUSED)
9596 al->lkind = KIND_LOCAL;
9597 }
9598 return al->linkage;
9599 }
9600 }
9601 else
9602 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9603
9604 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9605 name = ggc_strdup (name);
9606
9607 /* Assume external if no definition. */
9608 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9609
9610 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9611 get_identifier (name);
9612
9613 /* Construct a SYMBOL_REF for us to call. */
9614 {
9615 size_t name_len = strlen (name);
9616 char *linksym = XALLOCAVEC (char, name_len + 6);
9617 linksym[0] = '$';
9618 memcpy (linksym + 1, name, name_len);
9619 memcpy (linksym + 1 + name_len, "..lk", 5);
9620 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9621 ggc_alloc_string (linksym, name_len + 5));
9622 }
9623
9624 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9625 (splay_tree_value) al);
9626
9627 return al->linkage;
9628 }
9629
9630 rtx
9631 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9632 {
9633 splay_tree_node cfunnode;
9634 struct alpha_funcs *cfaf;
9635 struct alpha_links *al;
9636 const char *name = XSTR (linkage, 0);
9637
9638 cfaf = (struct alpha_funcs *) 0;
9639 al = (struct alpha_links *) 0;
9640
9641 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9642 cfaf = (struct alpha_funcs *) cfunnode->value;
9643
9644 if (cfaf->links)
9645 {
9646 splay_tree_node lnode;
9647
9648 /* Is this name already defined? */
9649
9650 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9651 if (lnode)
9652 al = (struct alpha_links *) lnode->value;
9653 }
9654 else
9655 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9656
9657 if (!al)
9658 {
9659 size_t name_len;
9660 size_t buflen;
9661 char buf [512];
9662 char *linksym;
9663 splay_tree_node node = 0;
9664 struct alpha_links *anl;
9665
9666 if (name[0] == '*')
9667 name++;
9668
9669 name_len = strlen (name);
9670
9671 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9672 al->num = cfaf->num;
9673
9674 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9675 if (node)
9676 {
9677 anl = (struct alpha_links *) node->value;
9678 al->lkind = anl->lkind;
9679 }
9680
9681 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9682 buflen = strlen (buf);
9683 linksym = XALLOCAVEC (char, buflen + 1);
9684 memcpy (linksym, buf, buflen + 1);
9685
9686 al->linkage = gen_rtx_SYMBOL_REF
9687 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9688
9689 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9690 (splay_tree_value) al);
9691 }
9692
9693 if (rflag)
9694 al->rkind = KIND_CODEADDR;
9695 else
9696 al->rkind = KIND_LINKAGE;
9697
9698 if (lflag)
9699 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9700 else
9701 return al->linkage;
9702 }
9703
9704 static int
9705 alpha_write_one_linkage (splay_tree_node node, void *data)
9706 {
9707 const char *const name = (const char *) node->key;
9708 struct alpha_links *link = (struct alpha_links *) node->value;
9709 FILE *stream = (FILE *) data;
9710
9711 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9712 if (link->rkind == KIND_CODEADDR)
9713 {
9714 if (link->lkind == KIND_LOCAL)
9715 {
9716 /* Local and used */
9717 fprintf (stream, "\t.quad %s..en\n", name);
9718 }
9719 else
9720 {
9721 /* External and used, request code address. */
9722 fprintf (stream, "\t.code_address %s\n", name);
9723 }
9724 }
9725 else
9726 {
9727 if (link->lkind == KIND_LOCAL)
9728 {
9729 /* Local and used, build linkage pair. */
9730 fprintf (stream, "\t.quad %s..en\n", name);
9731 fprintf (stream, "\t.quad %s\n", name);
9732 }
9733 else
9734 {
9735 /* External and used, request linkage pair. */
9736 fprintf (stream, "\t.linkage %s\n", name);
9737 }
9738 }
9739
9740 return 0;
9741 }
9742
9743 static void
9744 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9745 {
9746 splay_tree_node node;
9747 struct alpha_funcs *func;
9748
9749 fprintf (stream, "\t.link\n");
9750 fprintf (stream, "\t.align 3\n");
9751 in_section = NULL;
9752
9753 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9754 func = (struct alpha_funcs *) node->value;
9755
9756 fputs ("\t.name ", stream);
9757 assemble_name (stream, funname);
9758 fputs ("..na\n", stream);
9759 ASM_OUTPUT_LABEL (stream, funname);
9760 fprintf (stream, "\t.pdesc ");
9761 assemble_name (stream, funname);
9762 fprintf (stream, "..en,%s\n",
9763 alpha_procedure_type == PT_STACK ? "stack"
9764 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9765
9766 if (func->links)
9767 {
9768 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9769 /* splay_tree_delete (func->links); */
9770 }
9771 }
9772
9773 /* Given a decl, a section name, and whether the decl initializer
9774 has relocs, choose attributes for the section. */
9775
9776 #define SECTION_VMS_OVERLAY SECTION_FORGET
9777 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9778 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9779
9780 static unsigned int
9781 vms_section_type_flags (tree decl, const char *name, int reloc)
9782 {
9783 unsigned int flags = default_section_type_flags (decl, name, reloc);
9784
9785 if (decl && DECL_ATTRIBUTES (decl)
9786 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9787 flags |= SECTION_VMS_OVERLAY;
9788 if (decl && DECL_ATTRIBUTES (decl)
9789 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9790 flags |= SECTION_VMS_GLOBAL;
9791 if (decl && DECL_ATTRIBUTES (decl)
9792 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9793 flags |= SECTION_VMS_INITIALIZE;
9794
9795 return flags;
9796 }
9797
9798 /* Switch to an arbitrary section NAME with attributes as specified
9799 by FLAGS. ALIGN specifies any known alignment requirements for
9800 the section; 0 if the default should be used. */
9801
9802 static void
9803 vms_asm_named_section (const char *name, unsigned int flags,
9804 tree decl ATTRIBUTE_UNUSED)
9805 {
9806 fputc ('\n', asm_out_file);
9807 fprintf (asm_out_file, ".section\t%s", name);
9808
9809 if (flags & SECTION_VMS_OVERLAY)
9810 fprintf (asm_out_file, ",OVR");
9811 if (flags & SECTION_VMS_GLOBAL)
9812 fprintf (asm_out_file, ",GBL");
9813 if (flags & SECTION_VMS_INITIALIZE)
9814 fprintf (asm_out_file, ",NOMOD");
9815 if (flags & SECTION_DEBUG)
9816 fprintf (asm_out_file, ",NOWRT");
9817
9818 fputc ('\n', asm_out_file);
9819 }
9820
9821 /* Record an element in the table of global constructors. SYMBOL is
9822 a SYMBOL_REF of the function to be called; PRIORITY is a number
9823 between 0 and MAX_INIT_PRIORITY.
9824
9825 Differs from default_ctors_section_asm_out_constructor in that the
9826 width of the .ctors entry is always 64 bits, rather than the 32 bits
9827 used by a normal pointer. */
9828
9829 static void
9830 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9831 {
9832 switch_to_section (ctors_section);
9833 assemble_align (BITS_PER_WORD);
9834 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9835 }
9836
9837 static void
9838 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9839 {
9840 switch_to_section (dtors_section);
9841 assemble_align (BITS_PER_WORD);
9842 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9843 }
9844 #else
9845
9846 rtx
9847 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9848 int is_local ATTRIBUTE_UNUSED)
9849 {
9850 return NULL_RTX;
9851 }
9852
9853 rtx
9854 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9855 tree cfundecl ATTRIBUTE_UNUSED,
9856 int lflag ATTRIBUTE_UNUSED,
9857 int rflag ATTRIBUTE_UNUSED)
9858 {
9859 return NULL_RTX;
9860 }
9861
9862 #endif /* TARGET_ABI_OPEN_VMS */
9863 \f
9864 #if TARGET_ABI_UNICOSMK
9865
9866 /* This evaluates to true if we do not know how to pass TYPE solely in
9867 registers. This is the case for all arguments that do not fit in two
9868 registers. */
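/* In practice that means aggregates larger than 16 bytes (two 64-bit
   argument registers), types whose size is not a compile-time constant,
   and addressable types.  */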
9869
9870 static bool
9871 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9872 {
9873 if (type == NULL)
9874 return false;
9875
9876 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9877 return true;
9878 if (TREE_ADDRESSABLE (type))
9879 return true;
9880
9881 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9882 }
9883
9884 /* Define the offset between two registers, one to be eliminated, and the
9885 other its replacement, at the start of a routine. */
9886
9887 int
9888 unicosmk_initial_elimination_offset (int from, int to)
9889 {
9890 int fixed_size;
9891
9892 fixed_size = alpha_sa_size();
9893 if (fixed_size != 0)
9894 fixed_size += 48;
9895
9896 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9897 return -fixed_size;
9898 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9899 return 0;
9900 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9901 return (ALPHA_ROUND (crtl->outgoing_args_size)
9902 + ALPHA_ROUND (get_frame_size()));
9903 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9904 return (ALPHA_ROUND (fixed_size)
9905 + ALPHA_ROUND (get_frame_size()
9906 + crtl->outgoing_args_size));
9907 else
9908 gcc_unreachable ();
9909 }
9910
9911 /* Output the module name for .ident and .end directives. We have to strip
9912    directories and make sure that the module name starts with a letter
9913 or '$'. */
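/* For example, an input file named "0main.c" is emitted with a leading
   '$' because its basename does not start with a letter.  */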
9914
9915 static void
9916 unicosmk_output_module_name (FILE *file)
9917 {
9918 const char *name = lbasename (main_input_filename);
9919 unsigned len = strlen (name);
9920 char *clean_name = alloca (len + 2);
9921 char *ptr = clean_name;
9922
9923 /* CAM only accepts module names that start with a letter or '$'. We
9924 prefix the module name with a '$' if necessary. */
9925
9926 if (!ISALPHA (*name))
9927 *ptr++ = '$';
9928 memcpy (ptr, name, len + 1);
9929 clean_symbol_name (clean_name);
9930 fputs (clean_name, file);
9931 }
9932
9933 /* Output the definition of a common variable. */
9934
9935 void
9936 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9937 {
9938 tree name_tree;
9939   /* ??? Leftover debugging output: printf ("T3E__: common %s\n", name);  */
9940
9941 in_section = NULL;
9942   fputs ("\t.endp\n\n\t.psect ", file);
9943   assemble_name (file, name);
9944   fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9945   fprintf (file, "\t.byte\t0:%d\n", size);
9946
9947 /* Mark the symbol as defined in this module. */
9948 name_tree = get_identifier (name);
9949 TREE_ASM_WRITTEN (name_tree) = 1;
9950 }
9951
9952 #define SECTION_PUBLIC SECTION_MACH_DEP
9953 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9954 static int current_section_align;
9955
9956 /* A get_unnamed_section callback for switching to the text section. */
9957
9958 static void
9959 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9960 {
9961 static int count = 0;
9962 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9963 }
9964
9965 /* A get_unnamed_section callback for switching to the data section. */
9966
9967 static void
9968 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9969 {
9970 static int count = 1;
9971 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9972 }
9973
9974 /* Implement TARGET_ASM_INIT_SECTIONS.
9975
9976 The Cray assembler is really weird with respect to sections. It has only
9977 named sections and you can't reopen a section once it has been closed.
9978 This means that we have to generate unique names whenever we want to
9979 reenter the text or the data section. */
9980
9981 static void
9982 unicosmk_init_sections (void)
9983 {
9984 text_section = get_unnamed_section (SECTION_CODE,
9985 unicosmk_output_text_section_asm_op,
9986 NULL);
9987 data_section = get_unnamed_section (SECTION_WRITE,
9988 unicosmk_output_data_section_asm_op,
9989 NULL);
9990 readonly_data_section = data_section;
9991 }
9992
9993 static unsigned int
9994 unicosmk_section_type_flags (tree decl, const char *name,
9995                              int reloc)
9996 {
9997 unsigned int flags = default_section_type_flags (decl, name, reloc);
9998
9999 if (!decl)
10000 return flags;
10001
10002 if (TREE_CODE (decl) == FUNCTION_DECL)
10003 {
10004 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10005 if (align_functions_log > current_section_align)
10006 current_section_align = align_functions_log;
10007
10008 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10009 flags |= SECTION_MAIN;
10010 }
10011 else
10012 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10013
10014 if (TREE_PUBLIC (decl))
10015 flags |= SECTION_PUBLIC;
10016
10017 return flags;
10018 }
10019
10020 /* Generate a section name for decl and associate it with the
10021 declaration. */
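/* For example, a function "foo" ends up in section "code@foo" and a
   non-public variable "bar" in "data@bar", while public variables simply
   use their own name as the section name.  */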
10022
10023 static void
10024 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10025 {
10026 const char *name;
10027 int len;
10028
10029 gcc_assert (decl);
10030
10031 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10032 name = default_strip_name_encoding (name);
10033 len = strlen (name);
10034
10035 if (TREE_CODE (decl) == FUNCTION_DECL)
10036 {
10037 char *string;
10038
10039 /* It is essential that we prefix the section name here because
10040 otherwise the section names generated for constructors and
10041 destructors confuse collect2. */
10042
10043 string = alloca (len + 6);
10044 sprintf (string, "code@%s", name);
10045 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10046 }
10047 else if (TREE_PUBLIC (decl))
10048 DECL_SECTION_NAME (decl) = build_string (len, name);
10049 else
10050 {
10051 char *string;
10052
10053 string = alloca (len + 6);
10054 sprintf (string, "data@%s", name);
10055 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10056 }
10057 }
10058
10059 /* Switch to an arbitrary section NAME with attributes as specified
10060 by FLAGS. ALIGN specifies any known alignment requirements for
10061 the section; 0 if the default should be used. */
10062
10063 static void
10064 unicosmk_asm_named_section (const char *name, unsigned int flags,
10065 tree decl ATTRIBUTE_UNUSED)
10066 {
10067 const char *kind;
10068
10069 /* Close the previous section. */
10070
10071 fputs ("\t.endp\n\n", asm_out_file);
10072
10073 /* Find out what kind of section we are opening. */
10074
10075 if (flags & SECTION_MAIN)
10076 fputs ("\t.start\tmain\n", asm_out_file);
10077
10078 if (flags & SECTION_CODE)
10079 kind = "code";
10080 else if (flags & SECTION_PUBLIC)
10081 kind = "common";
10082 else
10083 kind = "data";
10084
10085 if (current_section_align != 0)
10086 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10087 current_section_align, kind);
10088 else
10089 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10090 }
10091
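/* Implement TARGET_INSERT_ATTRIBUTES.  Assign every function and every
   public variable its unique section as soon as the declaration is seen,
   so that it lands in the psect chosen by unicosmk_unique_section.  */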
10092 static void
10093 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10094 {
10095 if (DECL_P (decl)
10096 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10097 unicosmk_unique_section (decl, 0);
10098 }
10099
10100 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10101    in code sections because .align fills unused space with zeroes.  */
10102
10103 void
10104 unicosmk_output_align (FILE *file, int align)
10105 {
10106 if (inside_function)
10107 fprintf (file, "\tgcc@code@align\t%d\n", align);
10108 else
10109 fprintf (file, "\t.align\t%d\n", align);
10110 }
10111
10112 /* Add a case vector to the current function's list of deferred case
10113 vectors. Case vectors have to be put into a separate section because CAM
10114 does not allow data definitions in code sections. */
10115
10116 void
10117 unicosmk_defer_case_vector (rtx lab, rtx vec)
10118 {
10119 struct machine_function *machine = cfun->machine;
10120
10121 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10122 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10123 machine->addr_list);
10124 }
10125
10126 /* Output a case vector. */
10127
10128 static void
10129 unicosmk_output_addr_vec (FILE *file, rtx vec)
10130 {
10131 rtx lab = XEXP (vec, 0);
10132 rtx body = XEXP (vec, 1);
10133 int vlen = XVECLEN (body, 0);
10134 int idx;
10135
10136 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10137
10138 for (idx = 0; idx < vlen; idx++)
10139 {
10140 ASM_OUTPUT_ADDR_VEC_ELT
10141 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10142 }
10143 }
10144
10145 /* Output current function's deferred case vectors. */
10146
10147 static void
10148 unicosmk_output_deferred_case_vectors (FILE *file)
10149 {
10150 struct machine_function *machine = cfun->machine;
10151 rtx t;
10152
10153 if (machine->addr_list == NULL_RTX)
10154 return;
10155
10156 switch_to_section (data_section);
10157 for (t = machine->addr_list; t; t = XEXP (t, 1))
10158 unicosmk_output_addr_vec (file, XEXP (t, 0));
10159 }
10160
10161 /* Generate the name of the SSIB section for the current function. */
10162
10163 #define SSIB_PREFIX "__SSIB_"
10164 #define SSIB_PREFIX_LEN 7
10165
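/* For a function named "foo", for example, the SSIB section is named
   "__SSIB_foo"; longer names are truncated so that the result still fits
   into the 255-character limit.  */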
10166 static const char *
10167 unicosmk_ssib_name (void)
10168 {
10169   /* A fixed-size buffer is fine here since CAM cannot handle names longer
10170      than 255 characters anyway.  */
10171
10172 static char name[256];
10173
10174 rtx x;
10175 const char *fnname;
10176 int len;
10177
10178 x = DECL_RTL (cfun->decl);
10179 gcc_assert (MEM_P (x));
10180 x = XEXP (x, 0);
10181 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10182 fnname = XSTR (x, 0);
10183
10184 len = strlen (fnname);
10185 if (len + SSIB_PREFIX_LEN > 255)
10186 len = 255 - SSIB_PREFIX_LEN;
10187
10188 strcpy (name, SSIB_PREFIX);
10189 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10190 name[len + SSIB_PREFIX_LEN] = 0;
10191
10192 return name;
10193 }
10194
10195 /* Set up the dynamic subprogram information block (DSIB) and update the
10196 frame pointer register ($15) for subroutines which have a frame. If the
10197 subroutine doesn't have a frame, simply increment $15. */
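/* The DSIB occupies the 64 bytes just below the incoming stack pointer:
   the return address is stored at offset 56 from the new $sp, the old
   frame pointer at 48, the SSIB address at 32 and the CIW index (taken
   from $25) at 24; $15 is then left pointing just above the block.  */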
10198
10199 static void
10200 unicosmk_gen_dsib (unsigned long *imaskP)
10201 {
10202 if (alpha_procedure_type == PT_STACK)
10203 {
10204 const char *ssib_name;
10205 rtx mem;
10206
10207 /* Allocate 64 bytes for the DSIB. */
10208
10209 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10210 GEN_INT (-64))));
10211 emit_insn (gen_blockage ());
10212
10213 /* Save the return address. */
10214
10215 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10216 set_mem_alias_set (mem, alpha_sr_alias_set);
10217 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10218 (*imaskP) &= ~(1UL << REG_RA);
10219
10220 /* Save the old frame pointer. */
10221
10222 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10223 set_mem_alias_set (mem, alpha_sr_alias_set);
10224 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10225 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10226
10227 emit_insn (gen_blockage ());
10228
10229 /* Store the SSIB pointer. */
10230
10231 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10232 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10233 set_mem_alias_set (mem, alpha_sr_alias_set);
10234
10235 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10236 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10237 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10238
10239 /* Save the CIW index. */
10240
10241 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10242 set_mem_alias_set (mem, alpha_sr_alias_set);
10243 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10244
10245 emit_insn (gen_blockage ());
10246
10247 /* Set the new frame pointer. */
10248
10249 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10250 stack_pointer_rtx, GEN_INT (64))));
10251
10252 }
10253 else
10254 {
10255 /* Increment the frame pointer register to indicate that we do not
10256 have a frame. */
10257
10258 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10259 hard_frame_pointer_rtx, const1_rtx)));
10260 }
10261 }
10262
10263 /* Output the static subroutine information block for the current
10264 function. */
10265
10266 static void
10267 unicosmk_output_ssib (FILE *file, const char *fnname)
10268 {
10269 int len;
10270 int i;
10271 rtx x;
10272 rtx ciw;
10273 struct machine_function *machine = cfun->machine;
10274
10275 in_section = NULL;
10276 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10277 unicosmk_ssib_name ());
10278
10279 /* Some required stuff and the function name length. */
10280
10281 len = strlen (fnname);
10282 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10283
10284 /* Saved registers
10285 ??? We don't do that yet. */
10286
10287 fputs ("\t.quad\t0\n", file);
10288
10289 /* Function address. */
10290
10291 fputs ("\t.quad\t", file);
10292 assemble_name (file, fnname);
10293 putc ('\n', file);
10294
10295 fputs ("\t.quad\t0\n", file);
10296 fputs ("\t.quad\t0\n", file);
10297
10298 /* Function name.
10299 ??? We do it the same way Cray CC does it but this could be
10300 simplified. */
10301
10302   for (i = 0; i < len; i++)
10303 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10304   if ((len % 8) == 0)
10305 fputs ("\t.quad\t0\n", file);
10306 else
10307 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10308
10309 /* All call information words used in the function. */
10310
10311 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10312 {
10313 ciw = XEXP (x, 0);
10314 #if HOST_BITS_PER_WIDE_INT == 32
10315 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10316 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10317 #else
10318 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10319 #endif
10320 }
10321 }
10322
10323 /* Add a call information word (CIW) to the list of the current function's
10324 CIWs and return its index.
10325
10326 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
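/* The value returned is the quad-word offset of the CIW within the SSIB
   emitted by unicosmk_output_ssib: five fixed header quads, plus the
   quads occupied by the zero-padded function name, plus the number of
   CIWs recorded before this one.  */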
10327
10328 rtx
10329 unicosmk_add_call_info_word (rtx x)
10330 {
10331 rtx node;
10332 struct machine_function *machine = cfun->machine;
10333
10334 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10335 if (machine->first_ciw == NULL_RTX)
10336 machine->first_ciw = node;
10337 else
10338 XEXP (machine->last_ciw, 1) = node;
10339
10340 machine->last_ciw = node;
10341 ++machine->ciw_count;
10342
10343 return GEN_INT (machine->ciw_count
10344 + strlen (current_function_name ())/8 + 5);
10345 }
10346
10347 /* The Cray assembler doesn't accept extern declarations for symbols which
10348    are defined in the same file.  We therefore have to keep track of all
10349    global symbols which are referenced and/or defined in a source file and,
10350    at the end of the file, output extern declarations for those which are
10351    referenced but not defined. */
10352
10353 /* List of identifiers for which an extern declaration might have to be
10354 emitted. */
10355 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10356
10357 struct unicosmk_extern_list
10358 {
10359 struct unicosmk_extern_list *next;
10360 const char *name;
10361 };
10362
10363 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10364
10365 /* Output extern declarations which are required for every asm file. */
10366
10367 static void
10368 unicosmk_output_default_externs (FILE *file)
10369 {
10370 static const char *const externs[] =
10371 { "__T3E_MISMATCH" };
10372
10373 int i;
10374 int n;
10375
10376 n = ARRAY_SIZE (externs);
10377
10378 for (i = 0; i < n; i++)
10379 fprintf (file, "\t.extern\t%s\n", externs[i]);
10380 }
10381
10382 /* Output extern declarations for global symbols which have been
10383 referenced but not defined. */
10384
10385 static void
10386 unicosmk_output_externs (FILE *file)
10387 {
10388 struct unicosmk_extern_list *p;
10389 const char *real_name;
10390 int len;
10391 tree name_tree;
10392
10393 len = strlen (user_label_prefix);
10394 for (p = unicosmk_extern_head; p != 0; p = p->next)
10395 {
10396 /* We have to strip the encoding and possibly remove user_label_prefix
10397 from the identifier in order to handle -fleading-underscore and
10398 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10399 real_name = default_strip_name_encoding (p->name);
10400 if (len && p->name[0] == '*'
10401 && !memcmp (real_name, user_label_prefix, len))
10402 real_name += len;
10403
10404 name_tree = get_identifier (real_name);
10405 if (! TREE_ASM_WRITTEN (name_tree))
10406 {
10407 TREE_ASM_WRITTEN (name_tree) = 1;
10408 fputs ("\t.extern\t", file);
10409 assemble_name (file, p->name);
10410 putc ('\n', file);
10411 }
10412 }
10413 }
10414
10415 /* Record an extern. */
10416
10417 void
10418 unicosmk_add_extern (const char *name)
10419 {
10420 struct unicosmk_extern_list *p;
10421
10422 p = (struct unicosmk_extern_list *)
10423 xmalloc (sizeof (struct unicosmk_extern_list));
10424 p->next = unicosmk_extern_head;
10425 p->name = name;
10426 unicosmk_extern_head = p;
10427 }
10428
10429 /* The Cray assembler generates incorrect code if identifiers which
10430 conflict with register names are used as instruction operands. We have
10431 to replace such identifiers with DEX expressions. */
10432
10433 /* Structure to collect identifiers which have been replaced by DEX
10434 expressions. */
10435 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10436
10437 struct unicosmk_dex {
10438 struct unicosmk_dex *next;
10439 const char *name;
10440 };
10441
10442 /* List of identifiers which have been replaced by DEX expressions. The DEX
10443 number is determined by the position in the list. */
10444
10445 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10446
10447 /* The number of elements in the DEX list. */
10448
10449 static int unicosmk_dex_count = 0;
10450
10451 /* Check if NAME must be replaced by a DEX expression. */
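/* That is, names CAM would read as register names: "r0" .. "r31" and
   "f0" .. "f31" (upper or lower case first letter), optionally preceded
   by '$' and/or a leading '*'.  Names such as "r32" or "foo" are left
   alone.  */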
10452
10453 static int
10454 unicosmk_special_name (const char *name)
10455 {
10456 if (name[0] == '*')
10457 ++name;
10458
10459 if (name[0] == '$')
10460 ++name;
10461
10462 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10463 return 0;
10464
10465 switch (name[1])
10466 {
10467 case '1': case '2':
10468 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10469
10470 case '3':
10471 return (name[2] == '\0'
10472 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10473
10474 default:
10475 return (ISDIGIT (name[1]) && name[2] == '\0');
10476 }
10477 }
10478
10479 /* Return the DEX number if X must be replaced by a DEX expression and 0
10480 otherwise. */
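/* DEX numbers are assigned in the order in which the symbols are first
   seen, starting at 1; unicosmk_output_dex emits the matching
   "DEX (n) = name" definitions at the end of the file.  */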
10481
10482 static int
10483 unicosmk_need_dex (rtx x)
10484 {
10485 struct unicosmk_dex *dex;
10486 const char *name;
10487 int i;
10488
10489 if (GET_CODE (x) != SYMBOL_REF)
10490 return 0;
10491
10492   name = XSTR (x, 0);
10493 if (! unicosmk_special_name (name))
10494 return 0;
10495
10496 i = unicosmk_dex_count;
10497 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10498 {
10499 if (! strcmp (name, dex->name))
10500 return i;
10501 --i;
10502 }
10503
10504 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10505 dex->name = name;
10506 dex->next = unicosmk_dex_list;
10507 unicosmk_dex_list = dex;
10508
10509 ++unicosmk_dex_count;
10510 return unicosmk_dex_count;
10511 }
10512
10513 /* Output the DEX definitions for this file. */
10514
10515 static void
10516 unicosmk_output_dex (FILE *file)
10517 {
10518 struct unicosmk_dex *dex;
10519 int i;
10520
10521 if (unicosmk_dex_list == NULL)
10522 return;
10523
10524 fprintf (file, "\t.dexstart\n");
10525
10526 i = unicosmk_dex_count;
10527 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10528 {
10529 fprintf (file, "\tDEX (%d) = ", i);
10530 assemble_name (file, dex->name);
10531 putc ('\n', file);
10532 --i;
10533 }
10534
10535 fprintf (file, "\t.dexend\n");
10536 }
10537
10538 /* Output text to appear at the beginning of an assembler file.  */
10539
10540 static void
10541 unicosmk_file_start (void)
10542 {
10543 int i;
10544
10545 fputs ("\t.ident\t", asm_out_file);
10546 unicosmk_output_module_name (asm_out_file);
10547 fputs ("\n\n", asm_out_file);
10548
10549 /* The Unicos/Mk assembler (CAM) uses different register names: rN for the
10550    integer register N and fN for the floating-point register N.  Instead of
10551    trying to use those names in alpha.md, we emit micro definitions below
10552    that make the symbols $N and $fN refer to the appropriate registers. */
10556
10557 for (i = 0; i < 32; ++i)
10558 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10559
10560 for (i = 0; i < 32; ++i)
10561 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10562
10563 putc ('\n', asm_out_file);
10564
10565 /* The .align directive fills unused space with zeroes, which does not work
10566 in code sections. We define the macro 'gcc@code@align' which uses nops
10567 instead. Note that it assumes that code sections always have the
10568 biggest possible alignment since . refers to the current offset from
10569 the beginning of the section. */
10570
10571 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10572 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10573 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10574 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10575 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10576 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10577 fputs ("\t.endr\n", asm_out_file);
10578 fputs ("\t.endif\n", asm_out_file);
10579 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
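  /* For example, "gcc@code@align 3" issued at an address that is 4 bytes
     into an 8-byte unit expands to a single "bis r31,r31,r31" nop.  */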
10580
10581 /* Output extern declarations which should always be visible. */
10582 unicosmk_output_default_externs (asm_out_file);
10583
10584 /* Open a dummy section. We always need to be inside a section for the
10585 section-switching code to work correctly.
10586 ??? This should be a module id or something like that. I still have to
10587 figure out what the rules for those are. */
10588 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10589 }
10590
10591 /* Output text to appear at the end of an assembler file. This includes all
10592 pending extern declarations and DEX expressions. */
10593
10594 static void
10595 unicosmk_file_end (void)
10596 {
10597 fputs ("\t.endp\n\n", asm_out_file);
10598
10599 /* Output all pending externs. */
10600
10601 unicosmk_output_externs (asm_out_file);
10602
10603 /* Output dex definitions used for functions whose names conflict with
10604 register names. */
10605
10606 unicosmk_output_dex (asm_out_file);
10607
10608 fputs ("\t.end\t", asm_out_file);
10609 unicosmk_output_module_name (asm_out_file);
10610 putc ('\n', asm_out_file);
10611 }
10612
10613 #else
10614
10615 static void
10616 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10617 {}
10618
10619 static void
10620 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10621 {}
10622
10623 static void
10624 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10625 const char * fnname ATTRIBUTE_UNUSED)
10626 {}
10627
10628 rtx
10629 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10630 {
10631 return NULL_RTX;
10632 }
10633
10634 static int
10635 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10636 {
10637 return 0;
10638 }
10639
10640 #endif /* TARGET_ABI_UNICOSMK */
10641
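/* Implement TARGET_INIT_LIBFUNCS.  Pick ABI-specific library names for
   the integer division and remainder support routines.  */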
10642 static void
10643 alpha_init_libfuncs (void)
10644 {
10645 if (TARGET_ABI_UNICOSMK)
10646 {
10647 /* Prevent gcc from generating calls to __divsi3. */
10648 set_optab_libfunc (sdiv_optab, SImode, 0);
10649 set_optab_libfunc (udiv_optab, SImode, 0);
10650
10651 /* Use the functions provided by the system library
10652 for DImode integer division. */
10653 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10654 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10655 }
10656 else if (TARGET_ABI_OPEN_VMS)
10657 {
10658 /* Use the VMS runtime library functions for division and
10659 remainder. */
10660 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10661 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10662 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10663 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10664 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10665 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10666 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10667 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10668 }
10669 }
10670
10671 \f
10672 /* Initialize the GCC target structure. */
10673 #if TARGET_ABI_OPEN_VMS
10674 # undef TARGET_ATTRIBUTE_TABLE
10675 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10676 # undef TARGET_SECTION_TYPE_FLAGS
10677 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10678 #endif
10679
10680 #undef TARGET_IN_SMALL_DATA_P
10681 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10682
10683 #if TARGET_ABI_UNICOSMK
10684 # undef TARGET_INSERT_ATTRIBUTES
10685 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10686 # undef TARGET_SECTION_TYPE_FLAGS
10687 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10688 # undef TARGET_ASM_UNIQUE_SECTION
10689 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10690 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10691 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10692 # undef TARGET_ASM_GLOBALIZE_LABEL
10693 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10694 # undef TARGET_MUST_PASS_IN_STACK
10695 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10696 #endif
10697
10698 #undef TARGET_ASM_ALIGNED_HI_OP
10699 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10700 #undef TARGET_ASM_ALIGNED_DI_OP
10701 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10702
10703 /* Default unaligned ops are provided for ELF systems. To get unaligned
10704 data for non-ELF systems, we have to turn off auto alignment. */
10705 #ifndef OBJECT_FORMAT_ELF
10706 #undef TARGET_ASM_UNALIGNED_HI_OP
10707 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10708 #undef TARGET_ASM_UNALIGNED_SI_OP
10709 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10710 #undef TARGET_ASM_UNALIGNED_DI_OP
10711 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10712 #endif
10713
10714 #ifdef OBJECT_FORMAT_ELF
10715 #undef TARGET_ASM_RELOC_RW_MASK
10716 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10717 #undef TARGET_ASM_SELECT_RTX_SECTION
10718 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10719 #undef TARGET_SECTION_TYPE_FLAGS
10720 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10721 #endif
10722
10723 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10724 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10725
10726 #undef TARGET_INIT_LIBFUNCS
10727 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10728
10729 #if TARGET_ABI_UNICOSMK
10730 #undef TARGET_ASM_FILE_START
10731 #define TARGET_ASM_FILE_START unicosmk_file_start
10732 #undef TARGET_ASM_FILE_END
10733 #define TARGET_ASM_FILE_END unicosmk_file_end
10734 #else
10735 #undef TARGET_ASM_FILE_START
10736 #define TARGET_ASM_FILE_START alpha_file_start
10737 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10738 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10739 #endif
10740
10741 #undef TARGET_SCHED_ADJUST_COST
10742 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10743 #undef TARGET_SCHED_ISSUE_RATE
10744 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10745 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10746 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10747 alpha_multipass_dfa_lookahead
10748
10749 #undef TARGET_HAVE_TLS
10750 #define TARGET_HAVE_TLS HAVE_AS_TLS
10751
10752 #undef TARGET_INIT_BUILTINS
10753 #define TARGET_INIT_BUILTINS alpha_init_builtins
10754 #undef TARGET_EXPAND_BUILTIN
10755 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10756 #undef TARGET_FOLD_BUILTIN
10757 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10758
10759 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10760 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10761 #undef TARGET_CANNOT_COPY_INSN_P
10762 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10763 #undef TARGET_CANNOT_FORCE_CONST_MEM
10764 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10765
10766 #if TARGET_ABI_OSF
10767 #undef TARGET_ASM_OUTPUT_MI_THUNK
10768 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10769 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10770 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10771 #undef TARGET_STDARG_OPTIMIZE_HOOK
10772 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10773 #endif
10774
10775 #undef TARGET_RTX_COSTS
10776 #define TARGET_RTX_COSTS alpha_rtx_costs
10777 #undef TARGET_ADDRESS_COST
10778 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
10779
10780 #undef TARGET_MACHINE_DEPENDENT_REORG
10781 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10782
10783 #undef TARGET_PROMOTE_FUNCTION_ARGS
10784 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10785 #undef TARGET_PROMOTE_FUNCTION_RETURN
10786 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10787 #undef TARGET_PROMOTE_PROTOTYPES
10788 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10789 #undef TARGET_RETURN_IN_MEMORY
10790 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10791 #undef TARGET_PASS_BY_REFERENCE
10792 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10793 #undef TARGET_SETUP_INCOMING_VARARGS
10794 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10795 #undef TARGET_STRICT_ARGUMENT_NAMING
10796 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10797 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10798 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10799 #undef TARGET_SPLIT_COMPLEX_ARG
10800 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10801 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10802 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10803 #undef TARGET_ARG_PARTIAL_BYTES
10804 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10805
10806 #undef TARGET_SECONDARY_RELOAD
10807 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10808
10809 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10810 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10811 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10812 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10813
10814 #undef TARGET_BUILD_BUILTIN_VA_LIST
10815 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10816
10817 #undef TARGET_EXPAND_BUILTIN_VA_START
10818 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10819
10820 /* The Alpha architecture does not require sequential consistency. See
10821 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10822 for an example of how it can be violated in practice. */
10823 #undef TARGET_RELAXED_ORDERING
10824 #define TARGET_RELAXED_ORDERING true
10825
10826 #undef TARGET_DEFAULT_TARGET_FLAGS
10827 #define TARGET_DEFAULT_TARGET_FLAGS \
10828 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10829 #undef TARGET_HANDLE_OPTION
10830 #define TARGET_HANDLE_OPTION alpha_handle_option
10831
10832 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10833 #undef TARGET_MANGLE_TYPE
10834 #define TARGET_MANGLE_TYPE alpha_mangle_type
10835 #endif
10836
10837 struct gcc_target targetm = TARGET_INITIALIZER;
10838
10839 \f
10840 #include "gt-alpha.h"