1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
59
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
62
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
65
66 static const char * const alpha_cpu_name[] =
67 {
68 "ev4", "ev5", "ev6"
69 };
70
71 /* Specify how accurate floating-point traps need to be. */
72
73 enum alpha_trap_precision alpha_tp;
74
75 /* Specify the floating-point rounding mode. */
76
77 enum alpha_fp_rounding_mode alpha_fprm;
78
79 /* Specify which things cause traps. */
80
81 enum alpha_fp_trap_mode alpha_fptm;
82
83 /* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
85
86 struct alpha_compare alpha_compare;
87
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
90
91 static int inside_function = FALSE;
92
93 /* The number of cycles of latency we should assume on memory reads. */
94
95 int alpha_memory_latency = 3;
96
97 /* Whether the function needs the GP. */
98
99 static int alpha_function_needs_gp;
100
101 /* The alias set for prologue/epilogue register save/restore. */
102
103 static GTY(()) alias_set_type alpha_sr_alias_set;
104
105 /* The assembler name of the current function. */
106
107 static const char *alpha_fnname;
108
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
112
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
119
120 /* Costs of various operations on the different architectures. */
121
122 struct alpha_rtx_cost_data
123 {
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
133 };
134
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 {
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
147 },
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
158 },
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
169 },
170 };
171
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
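/* Illustrative reading of the fractions below (not part of the original
   comment): with GCC's COSTS_N_INSNS (1) == 4, int_mult_di costs 6 while a
   shift costs 4, so a single shift is preferred over a single multiply, but
   the multiply still beats any replacement needing two or more insns.  */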
176
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 {
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
188 };
189
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS current_function_args_info.num_args
193 #else
194 #define NUM_ARGS current_function_args_info
195 #endif
196
197 #define REG_PV 27
198 #define REG_RA 26
199
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
206 #endif
207
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
212 \f
213 /* Implement TARGET_HANDLE_OPTION. */
214
215 static bool
216 alpha_handle_option (size_t code, const char *arg, int value)
217 {
218 switch (code)
219 {
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
224
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
229
230 case OPT_mtls_size_:
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
234 }
235
236 return true;
237 }
238
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
241
242 static const char *
243 alpha_mangle_type (tree type)
244 {
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
248
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
251 }
252 #endif
253
254 /* Parse target option strings. */
255
256 void
257 override_options (void)
258 {
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
263 } cpu_table[] = {
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { 0, 0, 0 }
279 };
280
281 int i;
282
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
285 {
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
289 }
290
 291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
297
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
300
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
306
307 if (TARGET_IEEE)
308 {
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
311 else
312 {
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
315 }
316 }
317
318 if (TARGET_IEEE_WITH_INEXACT)
319 {
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
322 else
323 {
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
326 }
327 }
328
329 if (alpha_tp_string)
330 {
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
337 else
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
339 }
340
341 if (alpha_fprm_string)
342 {
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string,"d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
351 else
352 error ("bad value %qs for -mfp-rounding-mode switch",
353 alpha_fprm_string);
354 }
355
356 if (alpha_fptm_string)
357 {
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
368 }
369
370 if (alpha_cpu_string)
371 {
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
374 {
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
378 break;
379 }
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
382 }
383
384 if (alpha_tune_string)
385 {
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
388 {
389 alpha_tune = cpu_table [i].processor;
390 break;
391 }
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
394 }
395
396 /* Do some sanity checks on the above options. */
397
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
399 {
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
402 }
403
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
406 {
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
409 }
410
411 if (alpha_cpu == PROCESSOR_EV6)
412 {
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps. Which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
417 }
418
419 if (TARGET_FLOAT_VAX)
420 {
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
422 {
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
425 }
426 if (alpha_fptm == ALPHA_FPTM_SUI)
427 {
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
430 }
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
434 }
435
436 {
437 char *end;
438 int lat;
439
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
442
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
445 ;
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
449 {
450 static int const cache_latency[][4] =
451 {
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
455 };
456
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
459 {
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
462 lat = 3;
463 }
464 else
465 lat = cache_latency[alpha_tune][lat-1];
466 }
467 else if (! strcmp (alpha_mlat_string, "main"))
468 {
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
472 }
473 else
474 {
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
476 lat = 3;
477 }
478
479 alpha_memory_latency = lat;
480 }
481
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
485
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
491
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
496 {
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
501 }
502 if (align_functions <= 0)
503 align_functions = 16;
504
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
507
508 /* Register variables and functions with the garbage collector. */
509
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
512
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
515 {
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
519 }
520
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524 #endif
525 }
526 \f
527 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
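/* For example (illustrative, not from the original comment): the value
   0xffff00000000ff00 qualifies, since every byte is 0x00 or 0xff, while
   0x00000000ffff0f00 does not (its second-lowest byte is 0x0f).  These are
   exactly the masks realizable with a single zap/zapnot instruction.  */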
528
529 int
530 zap_mask (HOST_WIDE_INT value)
531 {
532 int i;
533
534 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
535 i++, value >>= 8)
536 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
537 return 0;
538
539 return 1;
540 }
541
542 /* Return true if OP is valid for a particular TLS relocation.
543 We are already guaranteed that OP is a CONST. */
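/* Illustrative shape of such an operand (not spelled out in the original
   comment): (const (unspec [(symbol_ref "foo")] UNSPEC_TPREL)).  The unspec
   number together with the symbol's TLS model decides which relocation
   sizes are acceptable below.  */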
544
545 int
546 tls_symbolic_operand_1 (rtx op, int size, int unspec)
547 {
548 op = XEXP (op, 0);
549
550 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
551 return 0;
552 op = XVECEXP (op, 0, 0);
553
554 if (GET_CODE (op) != SYMBOL_REF)
555 return 0;
556
557 switch (SYMBOL_REF_TLS_MODEL (op))
558 {
559 case TLS_MODEL_LOCAL_DYNAMIC:
560 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
561 case TLS_MODEL_INITIAL_EXEC:
562 return unspec == UNSPEC_TPREL && size == 64;
563 case TLS_MODEL_LOCAL_EXEC:
564 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
565 default:
566 gcc_unreachable ();
567 }
568 }
569
570 /* Used by aligned_memory_operand and unaligned_memory_operand to
571 resolve what reload is going to do with OP if it's a register. */
572
573 rtx
574 resolve_reload_operand (rtx op)
575 {
576 if (reload_in_progress)
577 {
578 rtx tmp = op;
579 if (GET_CODE (tmp) == SUBREG)
580 tmp = SUBREG_REG (tmp);
581 if (GET_CODE (tmp) == REG
582 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
583 {
584 op = reg_equiv_memory_loc[REGNO (tmp)];
585 if (op == 0)
586 return 0;
587 }
588 }
589 return op;
590 }
591
592 /* The scalar modes supported differs from the default check-what-c-supports
593 version in that sometimes TFmode is available even when long double
594 indicates only DFmode. On unicosmk, we have the situation that HImode
595 doesn't map to any C type, but of course we still support that. */
596
597 static bool
598 alpha_scalar_mode_supported_p (enum machine_mode mode)
599 {
600 switch (mode)
601 {
602 case QImode:
603 case HImode:
604 case SImode:
605 case DImode:
606 case TImode: /* via optabs.c */
607 return true;
608
609 case SFmode:
610 case DFmode:
611 return true;
612
613 case TFmode:
614 return TARGET_HAS_XFLOATING_LIBS;
615
616 default:
617 return false;
618 }
619 }
620
621 /* Alpha implements a couple of integer vector mode operations when
622 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
623 which allows the vectorizer to operate on e.g. move instructions,
624 or when expand_vector_operations can do something useful. */
625
626 static bool
627 alpha_vector_mode_supported_p (enum machine_mode mode)
628 {
629 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
630 }
631
632 /* Return 1 if this function can directly return via $26. */
633
634 int
635 direct_return (void)
636 {
637 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
638 && reload_completed
639 && alpha_sa_size () == 0
640 && get_frame_size () == 0
641 && current_function_outgoing_args_size == 0
642 && current_function_pretend_args_size == 0);
643 }
644
645 /* Return the ADDR_VEC associated with a tablejump insn. */
646
647 rtx
648 alpha_tablejump_addr_vec (rtx insn)
649 {
650 rtx tmp;
651
652 tmp = JUMP_LABEL (insn);
653 if (!tmp)
654 return NULL_RTX;
655 tmp = NEXT_INSN (tmp);
656 if (!tmp)
657 return NULL_RTX;
658 if (GET_CODE (tmp) == JUMP_INSN
659 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
660 return PATTERN (tmp);
661 return NULL_RTX;
662 }
663
664 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
665
666 rtx
667 alpha_tablejump_best_label (rtx insn)
668 {
669 rtx jump_table = alpha_tablejump_addr_vec (insn);
670 rtx best_label = NULL_RTX;
671
672 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
673 there for edge frequency counts from profile data. */
674
675 if (jump_table)
676 {
677 int n_labels = XVECLEN (jump_table, 1);
678 int best_count = -1;
679 int i, j;
680
681 for (i = 0; i < n_labels; i++)
682 {
683 int count = 1;
684
685 for (j = i + 1; j < n_labels; j++)
686 if (XEXP (XVECEXP (jump_table, 1, i), 0)
687 == XEXP (XVECEXP (jump_table, 1, j), 0))
688 count++;
689
690 if (count > best_count)
691 best_count = count, best_label = XVECEXP (jump_table, 1, i);
692 }
693 }
694
695 return best_label ? best_label : const0_rtx;
696 }
697
698 /* Return the TLS model to use for SYMBOL. */
699
700 static enum tls_model
701 tls_symbolic_operand_type (rtx symbol)
702 {
703 enum tls_model model;
704
705 if (GET_CODE (symbol) != SYMBOL_REF)
706 return 0;
707 model = SYMBOL_REF_TLS_MODEL (symbol);
708
709 /* Local-exec with a 64-bit size is the same code as initial-exec. */
710 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
711 model = TLS_MODEL_INITIAL_EXEC;
712
713 return model;
714 }
715 \f
716 /* Return true if the function DECL will share the same GP as any
717 function in the current unit of translation. */
718
719 static bool
720 decl_has_samegp (tree decl)
721 {
722 /* Functions that are not local can be overridden, and thus may
723 not share the same gp. */
724 if (!(*targetm.binds_local_p) (decl))
725 return false;
726
727 /* If -msmall-data is in effect, assume that there is only one GP
728 for the module, and so any local symbol has this property. We
729 need explicit relocations to be able to enforce this for symbols
730 not defined in this unit of translation, however. */
731 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
732 return true;
733
734 /* Functions that are not external are defined in this UoT. */
735 /* ??? Irritatingly, static functions not yet emitted are still
736 marked "external". Apply this to non-static functions only. */
737 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
738 }
739
740 /* Return true if EXP should be placed in the small data section. */
741
742 static bool
743 alpha_in_small_data_p (tree exp)
744 {
745 /* We want to merge strings, so we never consider them small data. */
746 if (TREE_CODE (exp) == STRING_CST)
747 return false;
748
749 /* Functions are never in the small data area. Duh. */
750 if (TREE_CODE (exp) == FUNCTION_DECL)
751 return false;
752
753 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
754 {
755 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
756 if (strcmp (section, ".sdata") == 0
757 || strcmp (section, ".sbss") == 0)
758 return true;
759 }
760 else
761 {
762 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
763
764 /* If this is an incomplete type with size 0, then we can't put it
765 in sdata because it might be too big when completed. */
766 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
767 return true;
768 }
769
770 return false;
771 }
772
773 #if TARGET_ABI_OPEN_VMS
774 static bool
775 alpha_linkage_symbol_p (const char *symname)
776 {
777 int symlen = strlen (symname);
778
779 if (symlen > 4)
780 return strcmp (&symname [symlen - 4], "..lk") == 0;
781
782 return false;
783 }
784
785 #define LINKAGE_SYMBOL_REF_P(X) \
786 ((GET_CODE (X) == SYMBOL_REF \
787 && alpha_linkage_symbol_p (XSTR (X, 0))) \
788 || (GET_CODE (X) == CONST \
789 && GET_CODE (XEXP (X, 0)) == PLUS \
790 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
791 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
792 #endif
793
794 /* legitimate_address_p recognizes an RTL expression that is a valid
795 memory address for an instruction. The MODE argument is the
796 machine mode for the MEM expression that wants to use this address.
797
798 For Alpha, we have either a constant address or the sum of a
799 register and a constant address, or just a register. For DImode,
 800    any of those forms can be surrounded with an AND that clears the
801 low-order three bits; this is an "unaligned" access. */
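/* Some concrete shapes accepted below (illustrative, not from the original
   comment): (reg $16); (plus (reg $16) (const_int 8)); for DImode,
   (and (plus (reg $16) (const_int 5)) (const_int -8)) as used by ldq_u;
   and, with explicit relocations, (lo_sum (reg $29) sym) for a local
   symbol SYM.  */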
802
803 bool
804 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
805 {
806 /* If this is an ldq_u type address, discard the outer AND. */
807 if (mode == DImode
808 && GET_CODE (x) == AND
809 && GET_CODE (XEXP (x, 1)) == CONST_INT
810 && INTVAL (XEXP (x, 1)) == -8)
811 x = XEXP (x, 0);
812
813 /* Discard non-paradoxical subregs. */
814 if (GET_CODE (x) == SUBREG
815 && (GET_MODE_SIZE (GET_MODE (x))
816 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
817 x = SUBREG_REG (x);
818
819 /* Unadorned general registers are valid. */
820 if (REG_P (x)
821 && (strict
822 ? STRICT_REG_OK_FOR_BASE_P (x)
823 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
824 return true;
825
826 /* Constant addresses (i.e. +/- 32k) are valid. */
827 if (CONSTANT_ADDRESS_P (x))
828 return true;
829
830 #if TARGET_ABI_OPEN_VMS
831 if (LINKAGE_SYMBOL_REF_P (x))
832 return true;
833 #endif
834
835 /* Register plus a small constant offset is valid. */
836 if (GET_CODE (x) == PLUS)
837 {
838 rtx ofs = XEXP (x, 1);
839 x = XEXP (x, 0);
840
841 /* Discard non-paradoxical subregs. */
842 if (GET_CODE (x) == SUBREG
843 && (GET_MODE_SIZE (GET_MODE (x))
844 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
845 x = SUBREG_REG (x);
846
847 if (REG_P (x))
848 {
849 if (! strict
850 && NONSTRICT_REG_OK_FP_BASE_P (x)
851 && GET_CODE (ofs) == CONST_INT)
852 return true;
853 if ((strict
854 ? STRICT_REG_OK_FOR_BASE_P (x)
855 : NONSTRICT_REG_OK_FOR_BASE_P (x))
856 && CONSTANT_ADDRESS_P (ofs))
857 return true;
858 }
859 }
860
861 /* If we're managing explicit relocations, LO_SUM is valid, as
862 are small data symbols. */
863 else if (TARGET_EXPLICIT_RELOCS)
864 {
865 if (small_symbolic_operand (x, Pmode))
866 return true;
867
868 if (GET_CODE (x) == LO_SUM)
869 {
870 rtx ofs = XEXP (x, 1);
871 x = XEXP (x, 0);
872
873 /* Discard non-paradoxical subregs. */
874 if (GET_CODE (x) == SUBREG
875 && (GET_MODE_SIZE (GET_MODE (x))
876 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
877 x = SUBREG_REG (x);
878
879 /* Must have a valid base register. */
880 if (! (REG_P (x)
881 && (strict
882 ? STRICT_REG_OK_FOR_BASE_P (x)
883 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
884 return false;
885
886 /* The symbol must be local. */
887 if (local_symbolic_operand (ofs, Pmode)
888 || dtp32_symbolic_operand (ofs, Pmode)
889 || tp32_symbolic_operand (ofs, Pmode))
890 return true;
891 }
892 }
893
894 return false;
895 }
896
897 /* Build the SYMBOL_REF for __tls_get_addr. */
898
899 static GTY(()) rtx tls_get_addr_libfunc;
900
901 static rtx
902 get_tls_get_addr (void)
903 {
904 if (!tls_get_addr_libfunc)
905 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
906 return tls_get_addr_libfunc;
907 }
908
909 /* Try machine-dependent ways of modifying an illegitimate address
910 to be legitimate. If we find one, return the new, valid address. */
911
912 rtx
913 alpha_legitimize_address (rtx x, rtx scratch,
914 enum machine_mode mode ATTRIBUTE_UNUSED)
915 {
916 HOST_WIDE_INT addend;
917
918 /* If the address is (plus reg const_int) and the CONST_INT is not a
919 valid offset, compute the high part of the constant and add it to
920 the register. Then our address is (plus temp low-part-const). */
921 if (GET_CODE (x) == PLUS
922 && GET_CODE (XEXP (x, 0)) == REG
923 && GET_CODE (XEXP (x, 1)) == CONST_INT
924 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
925 {
926 addend = INTVAL (XEXP (x, 1));
927 x = XEXP (x, 0);
928 goto split_addend;
929 }
930
931 /* If the address is (const (plus FOO const_int)), find the low-order
932 part of the CONST_INT. Then load FOO plus any high-order part of the
933 CONST_INT into a register. Our address is (plus reg low-part-const).
934 This is done to reduce the number of GOT entries. */
935 if (can_create_pseudo_p ()
936 && GET_CODE (x) == CONST
937 && GET_CODE (XEXP (x, 0)) == PLUS
938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
939 {
940 addend = INTVAL (XEXP (XEXP (x, 0), 1));
941 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
942 goto split_addend;
943 }
944
945 /* If we have a (plus reg const), emit the load as in (2), then add
946 the two registers, and finally generate (plus reg low-part-const) as
947 our address. */
948 if (can_create_pseudo_p ()
949 && GET_CODE (x) == PLUS
950 && GET_CODE (XEXP (x, 0)) == REG
951 && GET_CODE (XEXP (x, 1)) == CONST
952 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
953 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
954 {
955 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
956 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
957 XEXP (XEXP (XEXP (x, 1), 0), 0),
958 NULL_RTX, 1, OPTAB_LIB_WIDEN);
959 goto split_addend;
960 }
961
962 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
963 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
964 {
965 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
966
967 switch (tls_symbolic_operand_type (x))
968 {
969 case TLS_MODEL_NONE:
970 break;
971
972 case TLS_MODEL_GLOBAL_DYNAMIC:
973 start_sequence ();
974
975 r0 = gen_rtx_REG (Pmode, 0);
976 r16 = gen_rtx_REG (Pmode, 16);
977 tga = get_tls_get_addr ();
978 dest = gen_reg_rtx (Pmode);
979 seq = GEN_INT (alpha_next_sequence_number++);
980
981 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
982 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
983 insn = emit_call_insn (insn);
984 CONST_OR_PURE_CALL_P (insn) = 1;
985 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
986
987 insn = get_insns ();
988 end_sequence ();
989
990 emit_libcall_block (insn, dest, r0, x);
991 return dest;
992
993 case TLS_MODEL_LOCAL_DYNAMIC:
994 start_sequence ();
995
996 r0 = gen_rtx_REG (Pmode, 0);
997 r16 = gen_rtx_REG (Pmode, 16);
998 tga = get_tls_get_addr ();
999 scratch = gen_reg_rtx (Pmode);
1000 seq = GEN_INT (alpha_next_sequence_number++);
1001
1002 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1003 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1004 insn = emit_call_insn (insn);
1005 CONST_OR_PURE_CALL_P (insn) = 1;
1006 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1007
1008 insn = get_insns ();
1009 end_sequence ();
1010
1011 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1012 UNSPEC_TLSLDM_CALL);
1013 emit_libcall_block (insn, scratch, r0, eqv);
1014
1015 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1016 eqv = gen_rtx_CONST (Pmode, eqv);
1017
1018 if (alpha_tls_size == 64)
1019 {
1020 dest = gen_reg_rtx (Pmode);
1021 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1022 emit_insn (gen_adddi3 (dest, dest, scratch));
1023 return dest;
1024 }
1025 if (alpha_tls_size == 32)
1026 {
1027 insn = gen_rtx_HIGH (Pmode, eqv);
1028 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1029 scratch = gen_reg_rtx (Pmode);
1030 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1031 }
1032 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1033
1034 case TLS_MODEL_INITIAL_EXEC:
1035 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1036 eqv = gen_rtx_CONST (Pmode, eqv);
1037 tp = gen_reg_rtx (Pmode);
1038 scratch = gen_reg_rtx (Pmode);
1039 dest = gen_reg_rtx (Pmode);
1040
1041 emit_insn (gen_load_tp (tp));
1042 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1043 emit_insn (gen_adddi3 (dest, tp, scratch));
1044 return dest;
1045
1046 case TLS_MODEL_LOCAL_EXEC:
1047 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1048 eqv = gen_rtx_CONST (Pmode, eqv);
1049 tp = gen_reg_rtx (Pmode);
1050
1051 emit_insn (gen_load_tp (tp));
1052 if (alpha_tls_size == 32)
1053 {
1054 insn = gen_rtx_HIGH (Pmode, eqv);
1055 insn = gen_rtx_PLUS (Pmode, tp, insn);
1056 tp = gen_reg_rtx (Pmode);
1057 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1058 }
1059 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1060
1061 default:
1062 gcc_unreachable ();
1063 }
1064
1065 if (local_symbolic_operand (x, Pmode))
1066 {
1067 if (small_symbolic_operand (x, Pmode))
1068 return x;
1069 else
1070 {
1071 if (can_create_pseudo_p ())
1072 scratch = gen_reg_rtx (Pmode);
1073 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1074 gen_rtx_HIGH (Pmode, x)));
1075 return gen_rtx_LO_SUM (Pmode, scratch, x);
1076 }
1077 }
1078 }
1079
1080 return NULL;
1081
1082 split_addend:
1083 {
1084 HOST_WIDE_INT low, high;
1085
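     /* Worked example (illustrative, not in the original source): for
        addend = 0x18000, LOW below becomes -0x8000 and HIGH 0x20000, so the
        address is rebuilt as ((x + 0x20000) + -0x8000); each half fits the
        signed 16-bit displacement field of lda/ldah.  */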
1086 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1087 addend -= low;
1088 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1089 addend -= high;
1090
1091 if (addend)
1092 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1093 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1094 1, OPTAB_LIB_WIDEN);
1095 if (high)
1096 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1097 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1098 1, OPTAB_LIB_WIDEN);
1099
1100 return plus_constant (x, low);
1101 }
1102 }
1103
1104 /* Primarily this is required for TLS symbols, but given that our move
1105 patterns *ought* to be able to handle any symbol at any time, we
1106 should never be spilling symbolic operands to the constant pool, ever. */
1107
1108 static bool
1109 alpha_cannot_force_const_mem (rtx x)
1110 {
1111 enum rtx_code code = GET_CODE (x);
1112 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1113 }
1114
1115 /* We do not allow indirect calls to be optimized into sibling calls, nor
1116 can we allow a call to a function with a different GP to be optimized
1117 into a sibcall. */
1118
1119 static bool
1120 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1121 {
1122 /* Can't do indirect tail calls, since we don't know if the target
1123 uses the same GP. */
1124 if (!decl)
1125 return false;
1126
1127 /* Otherwise, we can make a tail call if the target function shares
1128 the same GP. */
1129 return decl_has_samegp (decl);
1130 }
1131
1132 int
1133 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1134 {
1135 rtx x = *px;
1136
1137 /* Don't re-split. */
1138 if (GET_CODE (x) == LO_SUM)
1139 return -1;
1140
1141 return small_symbolic_operand (x, Pmode) != 0;
1142 }
1143
1144 static int
1145 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1146 {
1147 rtx x = *px;
1148
1149 /* Don't re-split. */
1150 if (GET_CODE (x) == LO_SUM)
1151 return -1;
1152
1153 if (small_symbolic_operand (x, Pmode))
1154 {
1155 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1156 *px = x;
1157 return -1;
1158 }
1159
1160 return 0;
1161 }
1162
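/* Rewrite every small symbolic operand inside a copy of X as
   (lo_sum $29 symbol), making gp-relative addressing explicit; LO_SUMs
   already present are left alone so nothing is split twice.  */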
1163 rtx
1164 split_small_symbolic_operand (rtx x)
1165 {
1166 x = copy_insn (x);
1167 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1168 return x;
1169 }
1170
1171 /* Indicate that INSN cannot be duplicated. This is true for any insn
1172 that we've marked with gpdisp relocs, since those have to stay in
1173 1-1 correspondence with one another.
1174
1175 Technically we could copy them if we could set up a mapping from one
1176 sequence number to another, across the set of insns to be duplicated.
1177 This seems overly complicated and error-prone since interblock motion
1178 from sched-ebb could move one of the pair of insns to a different block.
1179
1180 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1181 then they'll be in a different block from their ldgp. Which could lead
1182 the bb reorder code to think that it would be ok to copy just the block
1183 containing the call and branch to the block containing the ldgp. */
1184
1185 static bool
1186 alpha_cannot_copy_insn_p (rtx insn)
1187 {
1188 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1189 return false;
1190 if (recog_memoized (insn) >= 0)
1191 return get_attr_cannot_copy (insn);
1192 else
1193 return false;
1194 }
1195
1196
1197 /* Try a machine-dependent way of reloading an illegitimate address
1198 operand. If we find one, push the reload and return the new rtx. */
1199
1200 rtx
1201 alpha_legitimize_reload_address (rtx x,
1202 enum machine_mode mode ATTRIBUTE_UNUSED,
1203 int opnum, int type,
1204 int ind_levels ATTRIBUTE_UNUSED)
1205 {
1206 /* We must recognize output that we have already generated ourselves. */
1207 if (GET_CODE (x) == PLUS
1208 && GET_CODE (XEXP (x, 0)) == PLUS
1209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1210 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1211 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1212 {
1213 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1214 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1215 opnum, type);
1216 return x;
1217 }
1218
1219 /* We wish to handle large displacements off a base register by
1220 splitting the addend across an ldah and the mem insn. This
 1221    cuts the number of extra insns needed from 3 to 1.  */
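  /* Worked example (illustrative, not in the original comment): for an
     access at 0x9000($15), HIGH below becomes 0x10000 and LOW -0x7000, so
     reload builds the base $15 + 0x10000 with one ldah and the memory insn
     itself supplies the -0x7000 displacement.  */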
1222 if (GET_CODE (x) == PLUS
1223 && GET_CODE (XEXP (x, 0)) == REG
1224 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1225 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1226 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1227 {
1228 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1229 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1230 HOST_WIDE_INT high
1231 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1232
1233 /* Check for 32-bit overflow. */
1234 if (high + low != val)
1235 return NULL_RTX;
1236
1237 /* Reload the high part into a base reg; leave the low part
1238 in the mem directly. */
1239 x = gen_rtx_PLUS (GET_MODE (x),
1240 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1241 GEN_INT (high)),
1242 GEN_INT (low));
1243
1244 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1245 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1246 opnum, type);
1247 return x;
1248 }
1249
1250 return NULL_RTX;
1251 }
1252 \f
1253 /* Compute a (partial) cost for rtx X. Return true if the complete
1254 cost has been computed, and false if subexpressions should be
1255 scanned. In either case, *TOTAL contains the cost result. */
1256
1257 static bool
1258 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1259 {
1260 enum machine_mode mode = GET_MODE (x);
1261 bool float_mode_p = FLOAT_MODE_P (mode);
1262 const struct alpha_rtx_cost_data *cost_data;
1263
1264 if (optimize_size)
1265 cost_data = &alpha_rtx_cost_size;
1266 else
1267 cost_data = &alpha_rtx_cost_data[alpha_tune];
1268
1269 switch (code)
1270 {
1271 case CONST_INT:
1272 /* If this is an 8-bit constant, return zero since it can be used
1273 nearly anywhere with no cost. If it is a valid operand for an
1274 ADD or AND, likewise return 0 if we know it will be used in that
1275 context. Otherwise, return 2 since it might be used there later.
1276 All other constants take at least two insns. */
1277 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1278 {
1279 *total = 0;
1280 return true;
1281 }
1282 /* FALLTHRU */
1283
1284 case CONST_DOUBLE:
1285 if (x == CONST0_RTX (mode))
1286 *total = 0;
1287 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1288 || (outer_code == AND && and_operand (x, VOIDmode)))
1289 *total = 0;
1290 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1291 *total = 2;
1292 else
1293 *total = COSTS_N_INSNS (2);
1294 return true;
1295
1296 case CONST:
1297 case SYMBOL_REF:
1298 case LABEL_REF:
1299 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1300 *total = COSTS_N_INSNS (outer_code != MEM);
1301 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1302 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1303 else if (tls_symbolic_operand_type (x))
1304 /* Estimate of cost for call_pal rduniq. */
1305 /* ??? How many insns do we emit here? More than one... */
1306 *total = COSTS_N_INSNS (15);
1307 else
1308 /* Otherwise we do a load from the GOT. */
1309 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1310 return true;
1311
1312 case HIGH:
1313 /* This is effectively an add_operand. */
1314 *total = 2;
1315 return true;
1316
1317 case PLUS:
1318 case MINUS:
1319 if (float_mode_p)
1320 *total = cost_data->fp_add;
1321 else if (GET_CODE (XEXP (x, 0)) == MULT
1322 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1323 {
1324 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1325 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1326 return true;
1327 }
1328 return false;
1329
1330 case MULT:
1331 if (float_mode_p)
1332 *total = cost_data->fp_mult;
1333 else if (mode == DImode)
1334 *total = cost_data->int_mult_di;
1335 else
1336 *total = cost_data->int_mult_si;
1337 return false;
1338
1339 case ASHIFT:
1340 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1341 && INTVAL (XEXP (x, 1)) <= 3)
1342 {
1343 *total = COSTS_N_INSNS (1);
1344 return false;
1345 }
1346 /* FALLTHRU */
1347
1348 case ASHIFTRT:
1349 case LSHIFTRT:
1350 *total = cost_data->int_shift;
1351 return false;
1352
1353 case IF_THEN_ELSE:
1354 if (float_mode_p)
1355 *total = cost_data->fp_add;
1356 else
1357 *total = cost_data->int_cmov;
1358 return false;
1359
1360 case DIV:
1361 case UDIV:
1362 case MOD:
1363 case UMOD:
1364 if (!float_mode_p)
1365 *total = cost_data->int_div;
1366 else if (mode == SFmode)
1367 *total = cost_data->fp_div_sf;
1368 else
1369 *total = cost_data->fp_div_df;
1370 return false;
1371
1372 case MEM:
1373 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1374 return true;
1375
1376 case NEG:
1377 if (! float_mode_p)
1378 {
1379 *total = COSTS_N_INSNS (1);
1380 return false;
1381 }
1382 /* FALLTHRU */
1383
1384 case ABS:
1385 if (! float_mode_p)
1386 {
1387 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1388 return false;
1389 }
1390 /* FALLTHRU */
1391
1392 case FLOAT:
1393 case UNSIGNED_FLOAT:
1394 case FIX:
1395 case UNSIGNED_FIX:
1396 case FLOAT_TRUNCATE:
1397 *total = cost_data->fp_add;
1398 return false;
1399
1400 case FLOAT_EXTEND:
1401 if (GET_CODE (XEXP (x, 0)) == MEM)
1402 *total = 0;
1403 else
1404 *total = cost_data->fp_add;
1405 return false;
1406
1407 default:
1408 return false;
1409 }
1410 }
1411 \f
1412 /* REF is an alignable memory location. Place an aligned SImode
1413 reference into *PALIGNED_MEM and the number of bits to shift into
 1414    *PBITNUM.  If REF's address is being replaced by reload, the
 1415    replacement is picked up here via find_replacement.  */
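/* Example (illustrative, not from the original comment, little-endian
   case): for a QImode REF at 7($30) with no alignment information, the
   byte offset within the word is 3, so *PALIGNED_MEM becomes the SImode
   word at 4($30) and *PBITNUM is 24.  */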
1416
1417 void
1418 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1419 {
1420 rtx base;
1421 HOST_WIDE_INT disp, offset;
1422
1423 gcc_assert (GET_CODE (ref) == MEM);
1424
1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1427 {
1428 base = find_replacement (&XEXP (ref, 0));
1429 gcc_assert (memory_address_p (GET_MODE (ref), base));
1430 }
1431 else
1432 base = XEXP (ref, 0);
1433
1434 if (GET_CODE (base) == PLUS)
1435 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 else
1437 disp = 0;
1438
1439 /* Find the byte offset within an aligned word. If the memory itself is
1440 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1441 will have examined the base register and determined it is aligned, and
1442 thus displacements from it are naturally alignable. */
1443 if (MEM_ALIGN (ref) >= 32)
1444 offset = 0;
1445 else
1446 offset = disp & 3;
1447
1448 /* Access the entire aligned word. */
1449 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1450
1451 /* Convert the byte offset within the word to a bit offset. */
1452 if (WORDS_BIG_ENDIAN)
1453 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1454 else
1455 offset *= 8;
1456 *pbitnum = GEN_INT (offset);
1457 }
1458
 1459 /* Similar, but just get the address.  Handle the two reload cases.  */
1461
1462 rtx
1463 get_unaligned_address (rtx ref)
1464 {
1465 rtx base;
1466 HOST_WIDE_INT offset = 0;
1467
1468 gcc_assert (GET_CODE (ref) == MEM);
1469
1470 if (reload_in_progress
1471 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1472 {
1473 base = find_replacement (&XEXP (ref, 0));
1474
1475 gcc_assert (memory_address_p (GET_MODE (ref), base));
1476 }
1477 else
1478 base = XEXP (ref, 0);
1479
1480 if (GET_CODE (base) == PLUS)
1481 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1482
1483 return plus_constant (base, offset);
1484 }
1485
1486 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1487 X is always returned in a register. */
1488
1489 rtx
1490 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1491 {
1492 if (GET_CODE (addr) == PLUS)
1493 {
1494 ofs += INTVAL (XEXP (addr, 1));
1495 addr = XEXP (addr, 0);
1496 }
1497
1498 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1499 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1500 }
1501
1502 /* On the Alpha, all (non-symbolic) constants except zero go into
1503 a floating-point register via memory. Note that we cannot
1504 return anything that is not a subset of CLASS, and that some
1505 symbolic constants cannot be dropped to memory. */
1506
1507 enum reg_class
1508 alpha_preferred_reload_class(rtx x, enum reg_class class)
1509 {
1510 /* Zero is present in any register class. */
1511 if (x == CONST0_RTX (GET_MODE (x)))
1512 return class;
1513
1514 /* These sorts of constants we can easily drop to memory. */
1515 if (GET_CODE (x) == CONST_INT
1516 || GET_CODE (x) == CONST_DOUBLE
1517 || GET_CODE (x) == CONST_VECTOR)
1518 {
1519 if (class == FLOAT_REGS)
1520 return NO_REGS;
1521 if (class == ALL_REGS)
1522 return GENERAL_REGS;
1523 return class;
1524 }
1525
1526 /* All other kinds of constants should not (and in the case of HIGH
1527 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1528 secondary reload. */
1529 if (CONSTANT_P (x))
1530 return (class == ALL_REGS ? GENERAL_REGS : class);
1531
1532 return class;
1533 }
1534
1535 /* Inform reload about cases where moving X with a mode MODE to a register in
1536 CLASS requires an extra scratch or immediate register. Return the class
1537 needed for the immediate register. */
1538
1539 static enum reg_class
1540 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1541 enum machine_mode mode, secondary_reload_info *sri)
1542 {
1543 /* Loading and storing HImode or QImode values to and from memory
1544 usually requires a scratch register. */
1545 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1546 {
1547 if (any_memory_operand (x, mode))
1548 {
1549 if (in_p)
1550 {
1551 if (!aligned_memory_operand (x, mode))
1552 sri->icode = reload_in_optab[mode];
1553 }
1554 else
1555 sri->icode = reload_out_optab[mode];
1556 return NO_REGS;
1557 }
1558 }
1559
1560 /* We also cannot do integral arithmetic into FP regs, as might result
1561 from register elimination into a DImode fp register. */
1562 if (class == FLOAT_REGS)
1563 {
1564 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1565 return GENERAL_REGS;
1566 if (in_p && INTEGRAL_MODE_P (mode)
1567 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1568 return GENERAL_REGS;
1569 }
1570
1571 return NO_REGS;
1572 }
1573 \f
1574 /* Subfunction of the following function. Update the flags of any MEM
1575 found in part of X. */
1576
1577 static int
1578 alpha_set_memflags_1 (rtx *xp, void *data)
1579 {
1580 rtx x = *xp, orig = (rtx) data;
1581
1582 if (GET_CODE (x) != MEM)
1583 return 0;
1584
1585 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1586 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1587 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1588 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1589 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1590
1591 /* Sadly, we cannot use alias sets because the extra aliasing
1592 produced by the AND interferes. Given that two-byte quantities
1593 are the only thing we would be able to differentiate anyway,
1594 there does not seem to be any point in convoluting the early
1595 out of the alias check. */
1596
1597 return -1;
1598 }
1599
1600 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1601 generated to perform a memory operation, look for any MEMs in either
1602 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1603 volatile flags from REF into each of the MEMs found. If REF is not
1604 a MEM, don't do anything. */
1605
1606 void
1607 alpha_set_memflags (rtx insn, rtx ref)
1608 {
1609 rtx *base_ptr;
1610
1611 if (GET_CODE (ref) != MEM)
1612 return;
1613
1614 /* This is only called from alpha.md, after having had something
1615 generated from one of the insn patterns. So if everything is
1616 zero, the pattern is already up-to-date. */
1617 if (!MEM_VOLATILE_P (ref)
1618 && !MEM_IN_STRUCT_P (ref)
1619 && !MEM_SCALAR_P (ref)
1620 && !MEM_NOTRAP_P (ref)
1621 && !MEM_READONLY_P (ref))
1622 return;
1623
1624 if (INSN_P (insn))
1625 base_ptr = &PATTERN (insn);
1626 else
1627 base_ptr = &insn;
1628 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1629 }
1630 \f
1631 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1632 int, bool);
1633
1634 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1635 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1636 and return pc_rtx if successful. */
1637
1638 static rtx
1639 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1640 HOST_WIDE_INT c, int n, bool no_output)
1641 {
1642 HOST_WIDE_INT new;
1643 int i, bits;
1644 /* Use a pseudo if highly optimizing and still generating RTL. */
1645 rtx subtarget
1646 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1647 rtx temp, insn;
1648
1649 /* If this is a sign-extended 32-bit constant, we can do this in at most
1650 three insns, so do it if we have enough insns left. We always have
1651 a sign-extended 32-bit constant when compiling on a narrow machine. */
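  /* Worked example (illustrative, not in the original comment): for
     c = 0x12348765, LOW is -0x789b and HIGH 0x1235, giving roughly
     "ldah reg,0x1235($31)" followed by "lda reg,-0x789b(reg)".  When HIGH
     would come out negative although C is positive, EXTRA = 0x4000 splits
     the high part across two ldah insns instead.  */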
1652
1653 if (HOST_BITS_PER_WIDE_INT != 64
1654 || c >> 31 == -1 || c >> 31 == 0)
1655 {
1656 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1657 HOST_WIDE_INT tmp1 = c - low;
1658 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1659 HOST_WIDE_INT extra = 0;
1660
1661 /* If HIGH will be interpreted as negative but the constant is
 1662       positive, we must adjust it to do two ldah insns.
1663
1664 if ((high & 0x8000) != 0 && c >= 0)
1665 {
1666 extra = 0x4000;
1667 tmp1 -= 0x40000000;
1668 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1669 }
1670
1671 if (c == low || (low == 0 && extra == 0))
1672 {
1673 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1674 but that meant that we can't handle INT_MIN on 32-bit machines
1675 (like NT/Alpha), because we recurse indefinitely through
1676 emit_move_insn to gen_movdi. So instead, since we know exactly
1677 what we want, create it explicitly. */
1678
1679 if (no_output)
1680 return pc_rtx;
1681 if (target == NULL)
1682 target = gen_reg_rtx (mode);
1683 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1684 return target;
1685 }
1686 else if (n >= 2 + (extra != 0))
1687 {
1688 if (no_output)
1689 return pc_rtx;
1690 if (!can_create_pseudo_p ())
1691 {
1692 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1693 temp = target;
1694 }
1695 else
1696 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1697 subtarget, mode);
1698
1699 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1700 This means that if we go through expand_binop, we'll try to
1701 generate extensions, etc, which will require new pseudos, which
1702 will fail during some split phases. The SImode add patterns
1703 still exist, but are not named. So build the insns by hand. */
1704
1705 if (extra != 0)
1706 {
1707 if (! subtarget)
1708 subtarget = gen_reg_rtx (mode);
1709 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1710 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1711 emit_insn (insn);
1712 temp = subtarget;
1713 }
1714
1715 if (target == NULL)
1716 target = gen_reg_rtx (mode);
1717 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1718 insn = gen_rtx_SET (VOIDmode, target, insn);
1719 emit_insn (insn);
1720 return target;
1721 }
1722 }
1723
1724 /* If we couldn't do it that way, try some other methods. But if we have
1725 no instructions left, don't bother. Likewise, if this is SImode and
1726 we can't make pseudos, we can't do anything since the expand_binop
1727 and expand_unop calls will widen and try to make pseudos. */
1728
1729 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1730 return 0;
1731
1732 /* Next, see if we can load a related constant and then shift and possibly
1733 negate it to get the constant we want. Try this once each increasing
1734 numbers of insns. */
1735
1736 for (i = 1; i < n; i++)
1737 {
 1738       /* First, see if we can subtract some low bits and be left with
 1739 	 an easy load of the high bits.  */
1740
1741 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1742 if (new != 0)
1743 {
1744 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1745 if (temp)
1746 {
1747 if (no_output)
1748 return temp;
1749 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1750 target, 0, OPTAB_WIDEN);
1751 }
1752 }
1753
1754 /* Next try complementing. */
1755 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1756 if (temp)
1757 {
1758 if (no_output)
1759 return temp;
1760 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1761 }
1762
1763 /* Next try to form a constant and do a left shift. We can do this
1764 if some low-order bits are zero; the exact_log2 call below tells
1765 us that information. The bits we are shifting out could be any
1766 value, but here we'll just try the 0- and sign-extended forms of
1767 the constant. To try to increase the chance of having the same
1768 constant in more than one insn, start at the highest number of
1769 bits to shift, but try all possibilities in case a ZAPNOT will
1770 be useful. */
1771
1772 bits = exact_log2 (c & -c);
1773 if (bits > 0)
1774 for (; bits > 0; bits--)
1775 {
1776 new = c >> bits;
1777 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1778 if (!temp && c < 0)
1779 {
1780 new = (unsigned HOST_WIDE_INT)c >> bits;
1781 temp = alpha_emit_set_const (subtarget, mode, new,
1782 i, no_output);
1783 }
1784 if (temp)
1785 {
1786 if (no_output)
1787 return temp;
1788 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1789 target, 0, OPTAB_WIDEN);
1790 }
1791 }
1792
1793 /* Now try high-order zero bits. Here we try the shifted-in bits as
1794 all zero and all ones. Be careful to avoid shifting outside the
1795 mode and to avoid shifting outside the host wide int size. */
1796 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1797 confuse the recursive call and set all of the high 32 bits. */
1798
1799 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1800 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1801 if (bits > 0)
1802 for (; bits > 0; bits--)
1803 {
1804 new = c << bits;
1805 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1806 if (!temp)
1807 {
1808 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1809 temp = alpha_emit_set_const (subtarget, mode, new,
1810 i, no_output);
1811 }
1812 if (temp)
1813 {
1814 if (no_output)
1815 return temp;
1816 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1817 target, 1, OPTAB_WIDEN);
1818 }
1819 }
1820
1821 /* Now try high-order 1 bits. We get that with a sign-extension.
1822 But one bit isn't enough here. Be careful to avoid shifting outside
1823 the mode and to avoid shifting outside the host wide int size. */
1824
1825 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1826 - floor_log2 (~ c) - 2);
1827 if (bits > 0)
1828 for (; bits > 0; bits--)
1829 {
1830 new = c << bits;
1831 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1832 if (!temp)
1833 {
1834 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1835 temp = alpha_emit_set_const (subtarget, mode, new,
1836 i, no_output);
1837 }
1838 if (temp)
1839 {
1840 if (no_output)
1841 return temp;
1842 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1843 target, 0, OPTAB_WIDEN);
1844 }
1845 }
1846 }
1847
1848 #if HOST_BITS_PER_WIDE_INT == 64
 1849   /* Finally, see if we can load a value into the target that is the same as the
1850 constant except that all bytes that are 0 are changed to be 0xff. If we
1851 can, then we can do a ZAPNOT to obtain the desired constant. */
1852
1853 new = c;
1854 for (i = 0; i < 64; i += 8)
1855 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1856 new |= (HOST_WIDE_INT) 0xff << i;
1857
1858 /* We are only called for SImode and DImode. If this is SImode, ensure that
1859 we are sign extended to a full word. */
1860
1861 if (mode == SImode)
1862 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1863
1864 if (new != c)
1865 {
1866 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1867 if (temp)
1868 {
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1872 target, 0, OPTAB_WIDEN);
1873 }
1874 }
1875 #endif
1876
1877 return 0;
1878 }
1879
1880 /* Try to output insns to set TARGET equal to the constant C if it can be
1881 done in less than N insns. Do all computations in MODE. Returns the place
1882 where the output has been placed if it can be done and the insns have been
1883 emitted. If it would take more than N insns, zero is returned and no
 1884    insns are emitted.  */
1885
1886 static rtx
1887 alpha_emit_set_const (rtx target, enum machine_mode mode,
1888 HOST_WIDE_INT c, int n, bool no_output)
1889 {
1890 enum machine_mode orig_mode = mode;
1891 rtx orig_target = target;
1892 rtx result = 0;
1893 int i;
1894
1895 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1896 can't load this constant in one insn, do this in DImode. */
1897 if (!can_create_pseudo_p () && mode == SImode
1898 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1899 {
1900 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1901 if (result)
1902 return result;
1903
1904 target = no_output ? NULL : gen_lowpart (DImode, target);
1905 mode = DImode;
1906 }
1907 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1908 {
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1910 mode = DImode;
1911 }
1912
1913 /* Try 1 insn, then 2, then up to N. */
1914 for (i = 1; i <= n; i++)
1915 {
1916 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1917 if (result)
1918 {
1919 rtx insn, set;
1920
1921 if (no_output)
1922 return result;
1923
1924 insn = get_last_insn ();
1925 set = single_set (insn);
1926 if (! CONSTANT_P (SET_SRC (set)))
1927 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1928 break;
1929 }
1930 }
1931
1932 /* Allow for the case where we changed the mode of TARGET. */
1933 if (result)
1934 {
1935 if (result == target)
1936 result = orig_target;
1937 else if (mode != orig_mode)
1938 result = gen_lowpart (orig_mode, result);
1939 }
1940
1941 return result;
1942 }
1943
1944 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1945 fall back to a straightforward decomposition. We do this to avoid
1946 exponential run times encountered when looking for longer sequences
1947 with alpha_emit_set_const. */
1948
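/* A rough sketch of the decomposition performed below, using the
   illustrative value c = 0x123456789abcdef0:

     d1 = 0xffffffffffffdef0   (sign-extended low 16 bits)
     d2 = 0xffffffff9abd0000   (sign-extended next 16 bits, after d1)
     d3 = 0x0000000000005679   (low 16 bits of the high word)
     d4 = 0x0000000012340000   (remaining high bits)

   which is materialized roughly as (with $r the target register):

     ldah  $r, 0x1234($31)     # d4
     lda   $r, 0x5679($r)      # + d3
     sll   $r, 32, $r
     ldah  $r, 0x9abd($r)      # + d2
     lda   $r, 0xdef0($r)      # + d1  */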
1949 static rtx
1950 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1951 {
1952 HOST_WIDE_INT d1, d2, d3, d4;
1953
1954 /* Decompose the entire word */
1955 #if HOST_BITS_PER_WIDE_INT >= 64
1956 gcc_assert (c2 == -(c1 < 0));
1957 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1958 c1 -= d1;
1959 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1960 c1 = (c1 - d2) >> 32;
1961 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1962 c1 -= d3;
1963 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1964 gcc_assert (c1 == d4);
1965 #else
1966 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d1;
1968 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d2);
1970 c2 += (d2 < 0);
1971 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1972 c2 -= d3;
1973 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c2 == d4);
1975 #endif
1976
1977 /* Construct the high word */
1978 if (d4)
1979 {
1980 emit_move_insn (target, GEN_INT (d4));
1981 if (d3)
1982 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1983 }
1984 else
1985 emit_move_insn (target, GEN_INT (d3));
1986
1987 /* Shift it into place */
1988 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1989
1990 /* Add in the low bits. */
1991 if (d2)
1992 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1993 if (d1)
1994 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1995
1996 return target;
1997 }
1998
1999 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2000 the low 64 bits. */
2001
2002 static void
2003 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2004 {
2005 HOST_WIDE_INT i0, i1;
2006
2007 if (GET_CODE (x) == CONST_VECTOR)
2008 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2009
2010
2011 if (GET_CODE (x) == CONST_INT)
2012 {
2013 i0 = INTVAL (x);
2014 i1 = -(i0 < 0);
2015 }
2016 else if (HOST_BITS_PER_WIDE_INT >= 64)
2017 {
2018 i0 = CONST_DOUBLE_LOW (x);
2019 i1 = -(i0 < 0);
2020 }
2021 else
2022 {
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = CONST_DOUBLE_HIGH (x);
2025 }
2026
2027 *p0 = i0;
2028 *p1 = i1;
2029 }
2030
2031 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2032 are willing to load the value into a register via a move pattern.
2033 Normally this is all symbolic constants, integral constants that
2034 take three or fewer instructions, and floating-point zero. */
2035
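/* For illustration (not exhaustive): symbolic addresses, LABEL_REFs and
   floating-point zero are always accepted below; a CONST_INT such as
   0x12345678 is accepted because it can be built in two insns; an
   arbitrary 64-bit bit pattern that would need more than three insns is
   rejected unless -mbuild-constants (TARGET_BUILD_CONSTANTS) is in
   effect; and TLS SYMBOL_REFs are always rejected.  */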
2036 bool
2037 alpha_legitimate_constant_p (rtx x)
2038 {
2039 enum machine_mode mode = GET_MODE (x);
2040 HOST_WIDE_INT i0, i1;
2041
2042 switch (GET_CODE (x))
2043 {
2044 case CONST:
2045 case LABEL_REF:
2046 case HIGH:
2047 return true;
2048
2049 case SYMBOL_REF:
2050 /* TLS symbols are never valid. */
2051 return SYMBOL_REF_TLS_MODEL (x) == 0;
2052
2053 case CONST_DOUBLE:
2054 if (x == CONST0_RTX (mode))
2055 return true;
2056 if (FLOAT_MODE_P (mode))
2057 return false;
2058 goto do_integer;
2059
2060 case CONST_VECTOR:
2061 if (x == CONST0_RTX (mode))
2062 return true;
2063 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2064 return false;
2065 if (GET_MODE_SIZE (mode) != 8)
2066 return false;
2067 goto do_integer;
2068
2069 case CONST_INT:
2070 do_integer:
2071 if (TARGET_BUILD_CONSTANTS)
2072 return true;
2073 alpha_extract_integer (x, &i0, &i1);
2074 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2075 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2076 return false;
2077
2078 default:
2079 return false;
2080 }
2081 }
2082
2083 /* Operand 1 is known to be a constant, and should require more than one
2084 instruction to load. Emit that multi-part load. */
2085
2086 bool
2087 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2088 {
2089 HOST_WIDE_INT i0, i1;
2090 rtx temp = NULL_RTX;
2091
2092 alpha_extract_integer (operands[1], &i0, &i1);
2093
2094 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2095 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2096
2097 if (!temp && TARGET_BUILD_CONSTANTS)
2098 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2099
2100 if (temp)
2101 {
2102 if (!rtx_equal_p (operands[0], temp))
2103 emit_move_insn (operands[0], temp);
2104 return true;
2105 }
2106
2107 return false;
2108 }
2109
2110 /* Expand a move instruction; return true if all work is done.
2111 We don't handle non-bwx subword loads here. */
2112
2113 bool
2114 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2115 {
2116 /* If the output is not a register, the input must be. */
2117 if (GET_CODE (operands[0]) == MEM
2118 && ! reg_or_0_operand (operands[1], mode))
2119 operands[1] = force_reg (mode, operands[1]);
2120
2121 /* Allow legitimize_address to perform some simplifications. */
2122 if (mode == Pmode && symbolic_operand (operands[1], mode))
2123 {
2124 rtx tmp;
2125
2126 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2127 if (tmp)
2128 {
2129 if (tmp == operands[0])
2130 return true;
2131 operands[1] = tmp;
2132 return false;
2133 }
2134 }
2135
2136 /* Early out for non-constants and valid constants. */
2137 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2138 return false;
2139
2140 /* Split large integers. */
2141 if (GET_CODE (operands[1]) == CONST_INT
2142 || GET_CODE (operands[1]) == CONST_DOUBLE
2143 || GET_CODE (operands[1]) == CONST_VECTOR)
2144 {
2145 if (alpha_split_const_mov (mode, operands))
2146 return true;
2147 }
2148
2149 /* Otherwise we've nothing left but to drop the thing to memory. */
2150 operands[1] = force_const_mem (mode, operands[1]);
2151 if (reload_in_progress)
2152 {
2153 emit_move_insn (operands[0], XEXP (operands[1], 0));
2154 operands[1] = replace_equiv_address (operands[1], operands[0]);
2155 }
2156 else
2157 operands[1] = validize_mem (operands[1]);
2158 return false;
2159 }
2160
2161 /* Expand a non-bwx QImode or HImode move instruction;
2162 return true if all work is done. */
2163
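/* Roughly, the four memory cases handled below map onto these insn
   sequences (an illustrative, little-endian sketch; see the named
   patterns for the exact RTL):

     aligned load      ldl, ext[bw]l                gen_aligned_load[qh]i
     unaligned load    ldq_u x2, ext + exth, or     gen_unaligned_load[qh]i
     aligned store     ldl, ins + msk, or, stl      gen_aligned_store
     unaligned store   ldq_u x2, ins + msk, stq_u x2
                                                    gen_unaligned_store[qh]i  */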
2164 bool
2165 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2166 {
2167 rtx seq;
2168
2169 /* If the output is not a register, the input must be. */
2170 if (MEM_P (operands[0]))
2171 operands[1] = force_reg (mode, operands[1]);
2172
2173 /* Handle four memory cases, unaligned and aligned for either the input
2174 or the output. The only case where we can be called during reload is
2175 for aligned loads; all other cases require temporaries. */
2176
2177 if (any_memory_operand (operands[1], mode))
2178 {
2179 if (aligned_memory_operand (operands[1], mode))
2180 {
2181 if (reload_in_progress)
2182 {
2183 if (mode == QImode)
2184 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2185 else
2186 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2187 emit_insn (seq);
2188 }
2189 else
2190 {
2191 rtx aligned_mem, bitnum;
2192 rtx scratch = gen_reg_rtx (SImode);
2193 rtx subtarget;
2194 bool copyout;
2195
2196 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2197
2198 subtarget = operands[0];
2199 if (GET_CODE (subtarget) == REG)
2200 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 else
2202 subtarget = gen_reg_rtx (DImode), copyout = true;
2203
2204 if (mode == QImode)
2205 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2206 bitnum, scratch);
2207 else
2208 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2209 bitnum, scratch);
2210 emit_insn (seq);
2211
2212 if (copyout)
2213 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2214 }
2215 }
2216 else
2217 {
2218 /* Don't pass these as parameters since that makes the generated
2219 code depend on parameter evaluation order which will cause
2220 bootstrap failures. */
2221
2222 rtx temp1, temp2, subtarget, ua;
2223 bool copyout;
2224
2225 temp1 = gen_reg_rtx (DImode);
2226 temp2 = gen_reg_rtx (DImode);
2227
2228 subtarget = operands[0];
2229 if (GET_CODE (subtarget) == REG)
2230 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2231 else
2232 subtarget = gen_reg_rtx (DImode), copyout = true;
2233
2234 ua = get_unaligned_address (operands[1]);
2235 if (mode == QImode)
2236 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2237 else
2238 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2239
2240 alpha_set_memflags (seq, operands[1]);
2241 emit_insn (seq);
2242
2243 if (copyout)
2244 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2245 }
2246 return true;
2247 }
2248
2249 if (any_memory_operand (operands[0], mode))
2250 {
2251 if (aligned_memory_operand (operands[0], mode))
2252 {
2253 rtx aligned_mem, bitnum;
2254 rtx temp1 = gen_reg_rtx (SImode);
2255 rtx temp2 = gen_reg_rtx (SImode);
2256
2257 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2258
2259 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2260 temp1, temp2));
2261 }
2262 else
2263 {
2264 rtx temp1 = gen_reg_rtx (DImode);
2265 rtx temp2 = gen_reg_rtx (DImode);
2266 rtx temp3 = gen_reg_rtx (DImode);
2267 rtx ua = get_unaligned_address (operands[0]);
2268
2269 if (mode == QImode)
2270 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2271 else
2272 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2273
2274 alpha_set_memflags (seq, operands[0]);
2275 emit_insn (seq);
2276 }
2277 return true;
2278 }
2279
2280 return false;
2281 }
2282
2283 /* Implement the movmisalign patterns. One of the operands is a memory
2284 that is not naturally aligned. Emit instructions to load it. */
2285
2286 void
2287 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2288 {
2289 /* Honor misaligned loads; we promised to handle them. */
2290 if (MEM_P (operands[1]))
2291 {
2292 rtx tmp;
2293
2294 if (register_operand (operands[0], mode))
2295 tmp = operands[0];
2296 else
2297 tmp = gen_reg_rtx (mode);
2298
2299 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2300 if (tmp != operands[0])
2301 emit_move_insn (operands[0], tmp);
2302 }
2303 else if (MEM_P (operands[0]))
2304 {
2305 if (!reg_or_0_operand (operands[1], mode))
2306 operands[1] = force_reg (mode, operands[1]);
2307 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2308 }
2309 else
2310 gcc_unreachable ();
2311 }
2312
2313 /* Generate an unsigned DImode to FP conversion. This is the same code
2314 optabs would emit if we didn't have TFmode patterns.
2315
2316 For SFmode, this is the only construction I've found that can pass
2317 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2318 intermediates will work, because you'll get intermediate rounding
2319 that ruins the end result. Some of this could be fixed by turning
2320 on round-to-positive-infinity, but that requires diddling the fpsr,
2321 which kills performance. I tried turning this around and converting
2322 to a negative number, so that I could turn on /m, but either I did
2323 it wrong or there's something else, because I wound up with the exact
2324 same single-bit error. There is a branch-less form of this same code:
2325
2326 srl $16,1,$1
2327 and $16,1,$2
2328 cmplt $16,0,$3
2329 or $1,$2,$2
2330 cmovge $16,$16,$2
2331 itoft $3,$f10
2332 itoft $2,$f11
2333 cvtqs $f11,$f11
2334 adds $f11,$f11,$f0
2335 fcmoveq $f10,$f11,$f0
2336
2337 I'm not using it because it's the same number of instructions as
2338 this branch-full form, and it has more serialized long latency
2339 instructions on the critical path.
2340
2341 For DFmode, we can avoid rounding errors by breaking up the word
2342 into two pieces, converting them separately, and adding them back:
2343
2344 LC0: .long 0,0x5f800000
2345
2346 itoft $16,$f11
2347 lda $2,LC0
2348 cmplt $16,0,$1
2349 cpyse $f11,$f31,$f10
2350 cpyse $f31,$f11,$f11
2351 s4addq $1,$2,$1
2352 lds $f12,0($1)
2353 cvtqt $f10,$f10
2354 cvtqt $f11,$f11
2355 addt $f12,$f10,$f0
2356 addt $f0,$f11,$f0
2357
2358 This doesn't seem to be a clear-cut win over the optabs form.
2359 It probably all depends on the distribution of numbers being
2360 converted -- in the optabs form, all but high-bit-set has a
2361 much lower minimum execution time. */
2362
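/* In scalar terms, the branch-full sequence emitted below computes roughly
   the following (an illustrative sketch, not generated code): halve the
   value, keep the discarded bit sticky so that rounding still comes out
   right, convert, then double:

     double floatuns (unsigned long x)
     {
       if ((long) x >= 0)
         return (double) (long) x;
       long half = (long) ((x >> 1) | (x & 1));
       return (double) half + (double) half;
     }  */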
2363 void
2364 alpha_emit_floatuns (rtx operands[2])
2365 {
2366 rtx neglab, donelab, i0, i1, f0, in, out;
2367 enum machine_mode mode;
2368
2369 out = operands[0];
2370 in = force_reg (DImode, operands[1]);
2371 mode = GET_MODE (out);
2372 neglab = gen_label_rtx ();
2373 donelab = gen_label_rtx ();
2374 i0 = gen_reg_rtx (DImode);
2375 i1 = gen_reg_rtx (DImode);
2376 f0 = gen_reg_rtx (mode);
2377
2378 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2379
2380 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2381 emit_jump_insn (gen_jump (donelab));
2382 emit_barrier ();
2383
2384 emit_label (neglab);
2385
2386 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2387 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2388 emit_insn (gen_iordi3 (i0, i0, i1));
2389 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2390 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2391
2392 emit_label (donelab);
2393 }
2394
2395 /* Generate the comparison for a conditional branch. */
2396
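/* For example (illustrative only): an integer a > b is folded below into
   the reversed compare with a branch on equality,

     cmple  a, b, t        # t = (a <= b)
     beq    t, L           # branch when a > b

   while a floating-point a > b swaps the operands instead:

     cmptlt b, a, f
     fbne   f, L  */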
2397 rtx
2398 alpha_emit_conditional_branch (enum rtx_code code)
2399 {
2400 enum rtx_code cmp_code, branch_code;
2401 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2402 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2403 rtx tem;
2404
2405 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2406 {
2407 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2408 op1 = const0_rtx;
2409 alpha_compare.fp_p = 0;
2410 }
2411
2412 /* The general case: fold the comparison code to the types of compares
2413 that we have, choosing the branch as necessary. */
2414 switch (code)
2415 {
2416 case EQ: case LE: case LT: case LEU: case LTU:
2417 case UNORDERED:
2418 /* We have these compares: */
2419 cmp_code = code, branch_code = NE;
2420 break;
2421
2422 case NE:
2423 case ORDERED:
2424 /* These must be reversed. */
2425 cmp_code = reverse_condition (code), branch_code = EQ;
2426 break;
2427
2428 case GE: case GT: case GEU: case GTU:
2429 /* For FP, we swap them, for INT, we reverse them. */
2430 if (alpha_compare.fp_p)
2431 {
2432 cmp_code = swap_condition (code);
2433 branch_code = NE;
2434 tem = op0, op0 = op1, op1 = tem;
2435 }
2436 else
2437 {
2438 cmp_code = reverse_condition (code);
2439 branch_code = EQ;
2440 }
2441 break;
2442
2443 default:
2444 gcc_unreachable ();
2445 }
2446
2447 if (alpha_compare.fp_p)
2448 {
2449 cmp_mode = DFmode;
2450 if (flag_unsafe_math_optimizations)
2451 {
2452 /* When we are not as concerned about non-finite values, and we
2453 are comparing against zero, we can branch directly. */
2454 if (op1 == CONST0_RTX (DFmode))
2455 cmp_code = UNKNOWN, branch_code = code;
2456 else if (op0 == CONST0_RTX (DFmode))
2457 {
2458 /* Undo the swap we probably did just above. */
2459 tem = op0, op0 = op1, op1 = tem;
2460 branch_code = swap_condition (cmp_code);
2461 cmp_code = UNKNOWN;
2462 }
2463 }
2464 else
2465 {
2466 /* ??? We mark the branch mode to be CCmode to prevent the
2467 compare and branch from being combined, since the compare
2468 insn follows IEEE rules that the branch does not. */
2469 branch_mode = CCmode;
2470 }
2471 }
2472 else
2473 {
2474 cmp_mode = DImode;
2475
2476 /* The following optimizations are only for signed compares. */
2477 if (code != LEU && code != LTU && code != GEU && code != GTU)
2478 {
2479 /* Whee. Compare and branch against 0 directly. */
2480 if (op1 == const0_rtx)
2481 cmp_code = UNKNOWN, branch_code = code;
2482
2483 /* If the constant doesn't fit into an immediate, but can
2484 be generated by lda/ldah, we adjust the argument and
2485 compare against zero, so we can use beq/bne directly. */
2486 /* ??? Don't do this when comparing against symbols, otherwise
2487 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2488 be declared false out of hand (at least for non-weak). */
2489 else if (GET_CODE (op1) == CONST_INT
2490 && (code == EQ || code == NE)
2491 && !(symbolic_operand (op0, VOIDmode)
2492 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2493 {
2494 rtx n_op1 = GEN_INT (-INTVAL (op1));
2495
2496 if (! satisfies_constraint_I (op1)
2497 && (satisfies_constraint_K (n_op1)
2498 || satisfies_constraint_L (n_op1)))
2499 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2500 }
2501 }
2502
2503 if (!reg_or_0_operand (op0, DImode))
2504 op0 = force_reg (DImode, op0);
2505 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2506 op1 = force_reg (DImode, op1);
2507 }
2508
2509 /* Emit an initial compare instruction, if necessary. */
2510 tem = op0;
2511 if (cmp_code != UNKNOWN)
2512 {
2513 tem = gen_reg_rtx (cmp_mode);
2514 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2515 }
2516
2517 /* Zero the operands. */
2518 memset (&alpha_compare, 0, sizeof (alpha_compare));
2519
2520 /* Return the branch comparison. */
2521 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2522 }
2523
2524 /* Certain simplifications can be done to make invalid setcc operations
2525 valid. Return the final comparison, or NULL if we can't work. */
2526
2527 rtx
2528 alpha_emit_setcc (enum rtx_code code)
2529 {
2530 enum rtx_code cmp_code;
2531 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2532 int fp_p = alpha_compare.fp_p;
2533 rtx tmp;
2534
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2537
2538 if (fp_p && GET_MODE (op0) == TFmode)
2539 {
2540 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2541 op1 = const0_rtx;
2542 fp_p = 0;
2543 }
2544
2545 if (fp_p && !TARGET_FIX)
2546 return NULL_RTX;
2547
2548 /* The general case: fold the comparison code to the types of compares
2549 that we have, choosing the branch as necessary. */
2550
2551 cmp_code = UNKNOWN;
2552 switch (code)
2553 {
2554 case EQ: case LE: case LT: case LEU: case LTU:
2555 case UNORDERED:
2556 /* We have these compares. */
2557 if (fp_p)
2558 cmp_code = code, code = NE;
2559 break;
2560
2561 case NE:
2562 if (!fp_p && op1 == const0_rtx)
2563 break;
2564 /* FALLTHRU */
2565
2566 case ORDERED:
2567 cmp_code = reverse_condition (code);
2568 code = EQ;
2569 break;
2570
2571 case GE: case GT: case GEU: case GTU:
2572 /* These normally need swapping, but for integer zero we have
2573 special patterns that recognize swapped operands. */
2574 if (!fp_p && op1 == const0_rtx)
2575 break;
2576 code = swap_condition (code);
2577 if (fp_p)
2578 cmp_code = code, code = NE;
2579 tmp = op0, op0 = op1, op1 = tmp;
2580 break;
2581
2582 default:
2583 gcc_unreachable ();
2584 }
2585
2586 if (!fp_p)
2587 {
2588 if (!register_operand (op0, DImode))
2589 op0 = force_reg (DImode, op0);
2590 if (!reg_or_8bit_operand (op1, DImode))
2591 op1 = force_reg (DImode, op1);
2592 }
2593
2594 /* Emit an initial compare instruction, if necessary. */
2595 if (cmp_code != UNKNOWN)
2596 {
2597 enum machine_mode mode = fp_p ? DFmode : DImode;
2598
2599 tmp = gen_reg_rtx (mode);
2600 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2601 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2602
2603 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2604 op1 = const0_rtx;
2605 }
2606
2607 /* Return the setcc comparison. */
2608 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2609 }
2610
2611
2612 /* Rewrite a comparison against zero CMP of the form
2613 (CODE (cc0) (const_int 0)) so it can be written validly in
2614 a conditional move (if_then_else CMP ...).
2615 If both of the operands that set cc0 are nonzero we must emit
2616 an insn to perform the compare (it can't be done within
2617 the conditional move). */
2618
2619 rtx
2620 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2621 {
2622 enum rtx_code code = GET_CODE (cmp);
2623 enum rtx_code cmov_code = NE;
2624 rtx op0 = alpha_compare.op0;
2625 rtx op1 = alpha_compare.op1;
2626 int fp_p = alpha_compare.fp_p;
2627 enum machine_mode cmp_mode
2628 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2629 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2630 enum machine_mode cmov_mode = VOIDmode;
2631 int local_fast_math = flag_unsafe_math_optimizations;
2632 rtx tem;
2633
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2636
2637 if (fp_p != FLOAT_MODE_P (mode))
2638 {
2639 enum rtx_code cmp_code;
2640
2641 if (! TARGET_FIX)
2642 return 0;
2643
2644 /* If we have fp<->int register move instructions, do a cmov by
2645 performing the comparison in fp registers, and move the
2646 zero/nonzero value to integer registers, where we can then
2647 use a normal cmov, or vice-versa. */
2648
2649 switch (code)
2650 {
2651 case EQ: case LE: case LT: case LEU: case LTU:
2652 /* We have these compares. */
2653 cmp_code = code, code = NE;
2654 break;
2655
2656 case NE:
2657 /* This must be reversed. */
2658 cmp_code = EQ, code = EQ;
2659 break;
2660
2661 case GE: case GT: case GEU: case GTU:
2662 /* These normally need swapping, but for integer zero we have
2663 special patterns that recognize swapped operands. */
2664 if (!fp_p && op1 == const0_rtx)
2665 cmp_code = code, code = NE;
2666 else
2667 {
2668 cmp_code = swap_condition (code);
2669 code = NE;
2670 tem = op0, op0 = op1, op1 = tem;
2671 }
2672 break;
2673
2674 default:
2675 gcc_unreachable ();
2676 }
2677
2678 tem = gen_reg_rtx (cmp_op_mode);
2679 emit_insn (gen_rtx_SET (VOIDmode, tem,
2680 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2681 op0, op1)));
2682
2683 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2684 op0 = gen_lowpart (cmp_op_mode, tem);
2685 op1 = CONST0_RTX (cmp_op_mode);
2686 fp_p = !fp_p;
2687 local_fast_math = 1;
2688 }
2689
2690 /* We may be able to use a conditional move directly.
2691 This avoids emitting spurious compares. */
2692 if (signed_comparison_operator (cmp, VOIDmode)
2693 && (!fp_p || local_fast_math)
2694 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2695 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2696
2697 /* We can't put the comparison inside the conditional move;
2698 emit a compare instruction and put that inside the
2699 conditional move. Make sure we emit only comparisons we have;
2700 swap or reverse as necessary. */
2701
2702 if (!can_create_pseudo_p ())
2703 return NULL_RTX;
2704
2705 switch (code)
2706 {
2707 case EQ: case LE: case LT: case LEU: case LTU:
2708 /* We have these compares: */
2709 break;
2710
2711 case NE:
2712 /* This must be reversed. */
2713 code = reverse_condition (code);
2714 cmov_code = EQ;
2715 break;
2716
2717 case GE: case GT: case GEU: case GTU:
2718 /* These must be swapped. */
2719 if (op1 != CONST0_RTX (cmp_mode))
2720 {
2721 code = swap_condition (code);
2722 tem = op0, op0 = op1, op1 = tem;
2723 }
2724 break;
2725
2726 default:
2727 gcc_unreachable ();
2728 }
2729
2730 if (!fp_p)
2731 {
2732 if (!reg_or_0_operand (op0, DImode))
2733 op0 = force_reg (DImode, op0);
2734 if (!reg_or_8bit_operand (op1, DImode))
2735 op1 = force_reg (DImode, op1);
2736 }
2737
2738 /* ??? We mark the branch mode to be CCmode to prevent the compare
2739 and cmov from being combined, since the compare insn follows IEEE
2740 rules that the cmov does not. */
2741 if (fp_p && !local_fast_math)
2742 cmov_mode = CCmode;
2743
2744 tem = gen_reg_rtx (cmp_op_mode);
2745 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2746 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2747 }
2748
2749 /* Simplify a conditional move of two constants into a setcc with
2750 arithmetic. This is done with a splitter since combine would
2751 just undo the work if done during code generation. It also catches
2752 cases we wouldn't have before cse. */
2753
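/* Two illustrative cases of the arithmetic used below (example values):

     x = y ? 8 : 0    ->  t = (y != 0);  x = t << 3;
     x = y ? 5 : 1    ->  t = (y != 0);  x = t * 4 + 1;   (s4addq)

   i.e. a setcc followed by a shift or a scaled add instead of a cmov.  */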
2754 int
2755 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2756 rtx t_rtx, rtx f_rtx)
2757 {
2758 HOST_WIDE_INT t, f, diff;
2759 enum machine_mode mode;
2760 rtx target, subtarget, tmp;
2761
2762 mode = GET_MODE (dest);
2763 t = INTVAL (t_rtx);
2764 f = INTVAL (f_rtx);
2765 diff = t - f;
2766
2767 if (((code == NE || code == EQ) && diff < 0)
2768 || (code == GE || code == GT))
2769 {
2770 code = reverse_condition (code);
2771 diff = t, t = f, f = diff;
2772 diff = t - f;
2773 }
2774
2775 subtarget = target = dest;
2776 if (mode != DImode)
2777 {
2778 target = gen_lowpart (DImode, dest);
2779 if (can_create_pseudo_p ())
2780 subtarget = gen_reg_rtx (DImode);
2781 else
2782 subtarget = target;
2783 }
2784 /* Below, we must be careful to use copy_rtx on target and subtarget
2785 in intermediate insns, as they may be a subreg rtx, which may not
2786 be shared. */
2787
2788 if (f == 0 && exact_log2 (diff) > 0
2789 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2790 viable over a longer latency cmove. On EV5, the E0 slot is a
2791 scarce resource, and on EV4 shift has the same latency as a cmove. */
2792 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2793 {
2794 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2795 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2796
2797 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2798 GEN_INT (exact_log2 (t)));
2799 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2800 }
2801 else if (f == 0 && t == -1)
2802 {
2803 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2804 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2805
2806 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2807 }
2808 else if (diff == 1 || diff == 4 || diff == 8)
2809 {
2810 rtx add_op;
2811
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2814
2815 if (diff == 1)
2816 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2817 else
2818 {
2819 add_op = GEN_INT (f);
2820 if (sext_add_operand (add_op, mode))
2821 {
2822 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2823 GEN_INT (diff));
2824 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2825 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2826 }
2827 else
2828 return 0;
2829 }
2830 }
2831 else
2832 return 0;
2833
2834 return 1;
2835 }
2836 \f
2837 /* Look up the function X_floating library function name for the
2838 given operation. */
2839
2840 struct xfloating_op GTY(())
2841 {
2842 const enum rtx_code code;
2843 const char *const GTY((skip)) osf_func;
2844 const char *const GTY((skip)) vms_func;
2845 rtx libcall;
2846 };
2847
2848 static GTY(()) struct xfloating_op xfloating_ops[] =
2849 {
2850 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2851 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2852 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2853 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2854 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2855 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2856 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2857 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2858 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2859 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2860 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2861 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2862 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2863 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2864 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2865 };
2866
2867 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2868 {
2869 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2870 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2871 };
2872
2873 static rtx
2874 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2875 {
2876 struct xfloating_op *ops = xfloating_ops;
2877 long n = ARRAY_SIZE (xfloating_ops);
2878 long i;
2879
2880 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2881
2882 /* How irritating. Nothing to key off for the main table. */
2883 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2884 {
2885 ops = vax_cvt_ops;
2886 n = ARRAY_SIZE (vax_cvt_ops);
2887 }
2888
2889 for (i = 0; i < n; ++i, ++ops)
2890 if (ops->code == code)
2891 {
2892 rtx func = ops->libcall;
2893 if (!func)
2894 {
2895 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2896 ? ops->vms_func : ops->osf_func);
2897 ops->libcall = func;
2898 }
2899 return func;
2900 }
2901
2902 gcc_unreachable ();
2903 }
2904
2905 /* Most X_floating operations take the rounding mode as an argument.
2906 Compute that here. */
2907
2908 static int
2909 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2910 enum alpha_fp_rounding_mode round)
2911 {
2912 int mode;
2913
2914 switch (round)
2915 {
2916 case ALPHA_FPRM_NORM:
2917 mode = 2;
2918 break;
2919 case ALPHA_FPRM_MINF:
2920 mode = 1;
2921 break;
2922 case ALPHA_FPRM_CHOP:
2923 mode = 0;
2924 break;
2925 case ALPHA_FPRM_DYN:
2926 mode = 4;
2927 break;
2928 default:
2929 gcc_unreachable ();
2930
2931 /* XXX For reference, round to +inf is mode = 3. */
2932 }
2933
2934 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2935 mode |= 0x10000;
2936
2937 return mode;
2938 }
2939
2940 /* Emit an X_floating library function call.
2941
2942 Note that these functions do not follow normal calling conventions:
2943 TFmode arguments are passed in two integer registers (as opposed to
2944 indirect); TFmode return values appear in R16+R17.
2945
2946 FUNC is the function to call.
2947 TARGET is where the output belongs.
2948 OPERANDS are the inputs.
2949 NOPERANDS is the count of inputs.
2950 EQUIV is the expression equivalent for the function.
2951 */
2952
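/* As an illustration of the convention described above, a call such as
   _OtsAddX (a, b, round) ends up, per the code below, with:

     a      TFmode   $16/$17
     b      TFmode   $18/$19
     round  DImode   $20
     result TFmode   back in $16/$17  */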
2953 static void
2954 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2955 int noperands, rtx equiv)
2956 {
2957 rtx usage = NULL_RTX, tmp, reg;
2958 int regno = 16, i;
2959
2960 start_sequence ();
2961
2962 for (i = 0; i < noperands; ++i)
2963 {
2964 switch (GET_MODE (operands[i]))
2965 {
2966 case TFmode:
2967 reg = gen_rtx_REG (TFmode, regno);
2968 regno += 2;
2969 break;
2970
2971 case DFmode:
2972 reg = gen_rtx_REG (DFmode, regno + 32);
2973 regno += 1;
2974 break;
2975
2976 case VOIDmode:
2977 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2978 /* FALLTHRU */
2979 case DImode:
2980 reg = gen_rtx_REG (DImode, regno);
2981 regno += 1;
2982 break;
2983
2984 default:
2985 gcc_unreachable ();
2986 }
2987
2988 emit_move_insn (reg, operands[i]);
2989 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2990 }
2991
2992 switch (GET_MODE (target))
2993 {
2994 case TFmode:
2995 reg = gen_rtx_REG (TFmode, 16);
2996 break;
2997 case DFmode:
2998 reg = gen_rtx_REG (DFmode, 32);
2999 break;
3000 case DImode:
3001 reg = gen_rtx_REG (DImode, 0);
3002 break;
3003 default:
3004 gcc_unreachable ();
3005 }
3006
3007 tmp = gen_rtx_MEM (QImode, func);
3008 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3009 const0_rtx, const0_rtx));
3010 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3011 CONST_OR_PURE_CALL_P (tmp) = 1;
3012
3013 tmp = get_insns ();
3014 end_sequence ();
3015
3016 emit_libcall_block (tmp, target, reg, equiv);
3017 }
3018
3019 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3020
3021 void
3022 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3023 {
3024 rtx func;
3025 int mode;
3026 rtx out_operands[3];
3027
3028 func = alpha_lookup_xfloating_lib_func (code);
3029 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3030
3031 out_operands[0] = operands[1];
3032 out_operands[1] = operands[2];
3033 out_operands[2] = GEN_INT (mode);
3034 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3035 gen_rtx_fmt_ee (code, TFmode, operands[1],
3036 operands[2]));
3037 }
3038
3039 /* Emit an X_floating library function call for a comparison. */
3040
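/* For example (illustrative): a TFmode UNORDERED test is implemented below
   by calling the EQ routine (_OtsEqlX / OTS$EQL_X) and then testing the
   raw result for < 0, while an ordinary LT test calls _OtsLssX and tests
   the result for > 0.  */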
3041 static rtx
3042 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3043 {
3044 enum rtx_code cmp_code, res_code;
3045 rtx func, out, operands[2];
3046
3047 /* X_floating library comparison functions return
3048 -1 unordered
3049 0 false
3050 1 true
3051 Convert the compare against the raw return value. */
3052
3053 cmp_code = *pcode;
3054 switch (cmp_code)
3055 {
3056 case UNORDERED:
3057 cmp_code = EQ;
3058 res_code = LT;
3059 break;
3060 case ORDERED:
3061 cmp_code = EQ;
3062 res_code = GE;
3063 break;
3064 case NE:
3065 res_code = NE;
3066 break;
3067 case EQ:
3068 case LT:
3069 case GT:
3070 case LE:
3071 case GE:
3072 res_code = GT;
3073 break;
3074 default:
3075 gcc_unreachable ();
3076 }
3077 *pcode = res_code;
3078
3079 func = alpha_lookup_xfloating_lib_func (cmp_code);
3080
3081 operands[0] = op0;
3082 operands[1] = op1;
3083 out = gen_reg_rtx (DImode);
3084
3085 /* ??? Strange mode for equiv because what's actually returned
3086 is -1,0,1, not a proper boolean value. */
3087 alpha_emit_xfloating_libcall (func, out, operands, 2,
3088 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3089
3090 return out;
3091 }
3092
3093 /* Emit an X_floating library function call for a conversion. */
3094
3095 void
3096 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3097 {
3098 int noperands = 1, mode;
3099 rtx out_operands[2];
3100 rtx func;
3101 enum rtx_code code = orig_code;
3102
3103 if (code == UNSIGNED_FIX)
3104 code = FIX;
3105
3106 func = alpha_lookup_xfloating_lib_func (code);
3107
3108 out_operands[0] = operands[1];
3109
3110 switch (code)
3111 {
3112 case FIX:
3113 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3114 out_operands[1] = GEN_INT (mode);
3115 noperands = 2;
3116 break;
3117 case FLOAT_TRUNCATE:
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3119 out_operands[1] = GEN_INT (mode);
3120 noperands = 2;
3121 break;
3122 default:
3123 break;
3124 }
3125
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3127 gen_rtx_fmt_e (orig_code,
3128 GET_MODE (operands[0]),
3129 operands[1]));
3130 }
3131
3132 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3133 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3134 guarantee that the sequence
3135 set (OP[0] OP[2])
3136 set (OP[1] OP[3])
3137 is valid. Naturally, output operand ordering is little-endian.
3138 This is used by *movtf_internal and *movti_internal. */
3139
3140 void
3141 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3142 bool fixup_overlap)
3143 {
3144 switch (GET_CODE (operands[1]))
3145 {
3146 case REG:
3147 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3148 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3149 break;
3150
3151 case MEM:
3152 operands[3] = adjust_address (operands[1], DImode, 8);
3153 operands[2] = adjust_address (operands[1], DImode, 0);
3154 break;
3155
3156 case CONST_INT:
3157 case CONST_DOUBLE:
3158 gcc_assert (operands[1] == CONST0_RTX (mode));
3159 operands[2] = operands[3] = const0_rtx;
3160 break;
3161
3162 default:
3163 gcc_unreachable ();
3164 }
3165
3166 switch (GET_CODE (operands[0]))
3167 {
3168 case REG:
3169 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3170 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3171 break;
3172
3173 case MEM:
3174 operands[1] = adjust_address (operands[0], DImode, 8);
3175 operands[0] = adjust_address (operands[0], DImode, 0);
3176 break;
3177
3178 default:
3179 gcc_unreachable ();
3180 }
3181
3182 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3183 {
3184 rtx tmp;
3185 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3186 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3187 }
3188 }
3189
3190 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3191 op2 is a register containing the sign bit, operation is the
3192 logical operation to be performed. */
3193
3194 void
3195 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3196 {
3197 rtx high_bit = operands[2];
3198 rtx scratch;
3199 int move;
3200
3201 alpha_split_tmode_pair (operands, TFmode, false);
3202
3203 /* Detect three flavors of operand overlap. */
3204 move = 1;
3205 if (rtx_equal_p (operands[0], operands[2]))
3206 move = 0;
3207 else if (rtx_equal_p (operands[1], operands[2]))
3208 {
3209 if (rtx_equal_p (operands[0], high_bit))
3210 move = 2;
3211 else
3212 move = -1;
3213 }
3214
3215 if (move < 0)
3216 emit_move_insn (operands[0], operands[2]);
3217
3218 /* ??? If the destination overlaps both source tf and high_bit, then
3219 assume source tf is dead in its entirety and use the other half
3220 for a scratch register. Otherwise "scratch" is just the proper
3221 destination register. */
3222 scratch = operands[move < 2 ? 1 : 3];
3223
3224 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3225
3226 if (move > 0)
3227 {
3228 emit_move_insn (operands[0], operands[2]);
3229 if (move > 1)
3230 emit_move_insn (operands[1], scratch);
3231 }
3232 }
3233 \f
3234 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3235 unaligned data:
3236
3237 unsigned: signed:
3238 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3239 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3240 lda r3,X(r11) lda r3,X+2(r11)
3241 extwl r1,r3,r1 extql r1,r3,r1
3242 extwh r2,r3,r2 extqh r2,r3,r2
3243 or r1,r2,r1 or r1,r2,r1
3244 sra r1,48,r1
3245
3246 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3248 lda r3,X(r11) lda r3,X(r11)
3249 extll r1,r3,r1 extll r1,r3,r1
3250 extlh r2,r3,r2 extlh r2,r3,r2
3251 or r1,r2,r1 addl r1,r2,r1
3252
3253 quad: ldq_u r1,X(r11)
3254 ldq_u r2,X+7(r11)
3255 lda r3,X(r11)
3256 extql r1,r3,r1
3257 extqh r2,r3,r2
3258 or r1,r2,r1
3259 */
3260
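/* As a concrete (little-endian, illustrative) example of the sequences
   above: a 2-byte load from address A with A mod 8 == 7 straddles two
   quadwords.  The first ldq_u fetches the quadword holding byte A in its
   top byte, and extwl shifts that byte down to bits 0..7; the second
   ldq_u fetches the quadword holding byte A+1 in its bottom byte, and
   extwh shifts it up to bits 8..15; or-ing the two yields the halfword.  */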
3261 void
3262 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3263 HOST_WIDE_INT ofs, int sign)
3264 {
3265 rtx meml, memh, addr, extl, exth, tmp, mema;
3266 enum machine_mode mode;
3267
3268 if (TARGET_BWX && size == 2)
3269 {
3270 meml = adjust_address (mem, QImode, ofs);
3271 memh = adjust_address (mem, QImode, ofs+1);
3272 if (BYTES_BIG_ENDIAN)
3273 tmp = meml, meml = memh, memh = tmp;
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276 emit_insn (gen_zero_extendqidi2 (extl, meml));
3277 emit_insn (gen_zero_extendqidi2 (exth, memh));
3278 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3279 NULL, 1, OPTAB_LIB_WIDEN);
3280 addr = expand_simple_binop (DImode, IOR, extl, exth,
3281 NULL, 1, OPTAB_LIB_WIDEN);
3282
3283 if (sign && GET_MODE (tgt) != HImode)
3284 {
3285 addr = gen_lowpart (HImode, addr);
3286 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3287 }
3288 else
3289 {
3290 if (GET_MODE (tgt) != DImode)
3291 addr = gen_lowpart (GET_MODE (tgt), addr);
3292 emit_move_insn (tgt, addr);
3293 }
3294 return;
3295 }
3296
3297 meml = gen_reg_rtx (DImode);
3298 memh = gen_reg_rtx (DImode);
3299 addr = gen_reg_rtx (DImode);
3300 extl = gen_reg_rtx (DImode);
3301 exth = gen_reg_rtx (DImode);
3302
3303 mema = XEXP (mem, 0);
3304 if (GET_CODE (mema) == LO_SUM)
3305 mema = force_reg (Pmode, mema);
3306
3307 /* AND addresses cannot be in any alias set, since they may implicitly
3308 alias surrounding code. Ideally we'd have some alias set that
3309 covered all types except those with alignment 8 or higher. */
3310
3311 tmp = change_address (mem, DImode,
3312 gen_rtx_AND (DImode,
3313 plus_constant (mema, ofs),
3314 GEN_INT (-8)));
3315 set_mem_alias_set (tmp, 0);
3316 emit_move_insn (meml, tmp);
3317
3318 tmp = change_address (mem, DImode,
3319 gen_rtx_AND (DImode,
3320 plus_constant (mema, ofs + size - 1),
3321 GEN_INT (-8)));
3322 set_mem_alias_set (tmp, 0);
3323 emit_move_insn (memh, tmp);
3324
3325 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3326 {
3327 emit_move_insn (addr, plus_constant (mema, -1));
3328
3329 emit_insn (gen_extqh_be (extl, meml, addr));
3330 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3331
3332 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3333 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3334 addr, 1, OPTAB_WIDEN);
3335 }
3336 else if (sign && size == 2)
3337 {
3338 emit_move_insn (addr, plus_constant (mema, ofs+2));
3339
3340 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3341 emit_insn (gen_extqh_le (exth, memh, addr));
3342
3343 /* We must use tgt here for the target. Alpha-vms port fails if we use
3344 addr for the target, because addr is marked as a pointer and combine
3345 knows that pointers are always sign-extended 32-bit values. */
3346 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3347 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3348 addr, 1, OPTAB_WIDEN);
3349 }
3350 else
3351 {
3352 if (WORDS_BIG_ENDIAN)
3353 {
3354 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3355 switch ((int) size)
3356 {
3357 case 2:
3358 emit_insn (gen_extwh_be (extl, meml, addr));
3359 mode = HImode;
3360 break;
3361
3362 case 4:
3363 emit_insn (gen_extlh_be (extl, meml, addr));
3364 mode = SImode;
3365 break;
3366
3367 case 8:
3368 emit_insn (gen_extqh_be (extl, meml, addr));
3369 mode = DImode;
3370 break;
3371
3372 default:
3373 gcc_unreachable ();
3374 }
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3376 }
3377 else
3378 {
3379 emit_move_insn (addr, plus_constant (mema, ofs));
3380 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3381 switch ((int) size)
3382 {
3383 case 2:
3384 emit_insn (gen_extwh_le (exth, memh, addr));
3385 mode = HImode;
3386 break;
3387
3388 case 4:
3389 emit_insn (gen_extlh_le (exth, memh, addr));
3390 mode = SImode;
3391 break;
3392
3393 case 8:
3394 emit_insn (gen_extqh_le (exth, memh, addr));
3395 mode = DImode;
3396 break;
3397
3398 default:
3399 gcc_unreachable ();
3400 }
3401 }
3402
3403 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3404 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3405 sign, OPTAB_WIDEN);
3406 }
3407
3408 if (addr != tgt)
3409 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3410 }
3411
3412 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3413
3414 void
3415 alpha_expand_unaligned_store (rtx dst, rtx src,
3416 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3417 {
3418 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3419
3420 if (TARGET_BWX && size == 2)
3421 {
3422 if (src != const0_rtx)
3423 {
3424 dstl = gen_lowpart (QImode, src);
3425 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3426 NULL, 1, OPTAB_LIB_WIDEN);
3427 dsth = gen_lowpart (QImode, dsth);
3428 }
3429 else
3430 dstl = dsth = const0_rtx;
3431
3432 meml = adjust_address (dst, QImode, ofs);
3433 memh = adjust_address (dst, QImode, ofs+1);
3434 if (BYTES_BIG_ENDIAN)
3435 addr = meml, meml = memh, memh = addr;
3436
3437 emit_move_insn (meml, dstl);
3438 emit_move_insn (memh, dsth);
3439 return;
3440 }
3441
3442 dstl = gen_reg_rtx (DImode);
3443 dsth = gen_reg_rtx (DImode);
3444 insl = gen_reg_rtx (DImode);
3445 insh = gen_reg_rtx (DImode);
3446
3447 dsta = XEXP (dst, 0);
3448 if (GET_CODE (dsta) == LO_SUM)
3449 dsta = force_reg (Pmode, dsta);
3450
3451 /* AND addresses cannot be in any alias set, since they may implicitly
3452 alias surrounding code. Ideally we'd have some alias set that
3453 covered all types except those with alignment 8 or higher. */
3454
3455 meml = change_address (dst, DImode,
3456 gen_rtx_AND (DImode,
3457 plus_constant (dsta, ofs),
3458 GEN_INT (-8)));
3459 set_mem_alias_set (meml, 0);
3460
3461 memh = change_address (dst, DImode,
3462 gen_rtx_AND (DImode,
3463 plus_constant (dsta, ofs + size - 1),
3464 GEN_INT (-8)));
3465 set_mem_alias_set (memh, 0);
3466
3467 emit_move_insn (dsth, memh);
3468 emit_move_insn (dstl, meml);
3469 if (WORDS_BIG_ENDIAN)
3470 {
3471 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3472
3473 if (src != const0_rtx)
3474 {
3475 switch ((int) size)
3476 {
3477 case 2:
3478 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3479 break;
3480 case 4:
3481 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3482 break;
3483 case 8:
3484 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3485 break;
3486 }
3487 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3488 GEN_INT (size*8), addr));
3489 }
3490
3491 switch ((int) size)
3492 {
3493 case 2:
3494 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3495 break;
3496 case 4:
3497 {
3498 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3499 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3500 break;
3501 }
3502 case 8:
3503 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3504 break;
3505 }
3506
3507 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3508 }
3509 else
3510 {
3511 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3512
3513 if (src != CONST0_RTX (GET_MODE (src)))
3514 {
3515 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3516 GEN_INT (size*8), addr));
3517
3518 switch ((int) size)
3519 {
3520 case 2:
3521 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3522 break;
3523 case 4:
3524 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3525 break;
3526 case 8:
3527 emit_insn (gen_insql_le (insl, src, addr));
3528 break;
3529 }
3530 }
3531
3532 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3533
3534 switch ((int) size)
3535 {
3536 case 2:
3537 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3538 break;
3539 case 4:
3540 {
3541 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3542 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3543 break;
3544 }
3545 case 8:
3546 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3547 break;
3548 }
3549 }
3550
3551 if (src != CONST0_RTX (GET_MODE (src)))
3552 {
3553 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3554 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3555 }
3556
3557 if (WORDS_BIG_ENDIAN)
3558 {
3559 emit_move_insn (meml, dstl);
3560 emit_move_insn (memh, dsth);
3561 }
3562 else
3563 {
3564 /* Must store high before low for degenerate case of aligned. */
3565 emit_move_insn (memh, dsth);
3566 emit_move_insn (meml, dstl);
3567 }
3568 }
3569
3570 /* The block move code tries to maximize speed by separating loads and
3571 stores at the expense of register pressure: we load all of the data
3572 before we store it back out. There are two secondary effects worth
3573 mentioning: this speeds copying to/from aligned and unaligned
3574 buffers, and it makes the code significantly easier to write. */
3575
3576 #define MAX_MOVE_WORDS 8
3577
3578 /* Load an integral number of consecutive unaligned quadwords. */
3579
3580 static void
3581 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3582 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3583 {
3584 rtx const im8 = GEN_INT (-8);
3585 rtx const i64 = GEN_INT (64);
3586 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3587 rtx sreg, areg, tmp, smema;
3588 HOST_WIDE_INT i;
3589
3590 smema = XEXP (smem, 0);
3591 if (GET_CODE (smema) == LO_SUM)
3592 smema = force_reg (Pmode, smema);
3593
3594 /* Generate all the tmp registers we need. */
3595 for (i = 0; i < words; ++i)
3596 {
3597 data_regs[i] = out_regs[i];
3598 ext_tmps[i] = gen_reg_rtx (DImode);
3599 }
3600 data_regs[words] = gen_reg_rtx (DImode);
3601
3602 if (ofs != 0)
3603 smem = adjust_address (smem, GET_MODE (smem), ofs);
3604
3605 /* Load up all of the source data. */
3606 for (i = 0; i < words; ++i)
3607 {
3608 tmp = change_address (smem, DImode,
3609 gen_rtx_AND (DImode,
3610 plus_constant (smema, 8*i),
3611 im8));
3612 set_mem_alias_set (tmp, 0);
3613 emit_move_insn (data_regs[i], tmp);
3614 }
3615
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*words - 1),
3619 im8));
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[words], tmp);
3622
3623 /* Extract the half-word fragments. Unfortunately DEC decided to make
3624 extxh with offset zero a noop instead of zeroing the register, so
3625 we must take care of that edge condition ourselves with cmov. */
3626
3627 sreg = copy_addr_to_reg (smema);
3628 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3629 1, OPTAB_WIDEN);
3630 if (WORDS_BIG_ENDIAN)
3631 emit_move_insn (sreg, plus_constant (sreg, 7));
3632 for (i = 0; i < words; ++i)
3633 {
3634 if (WORDS_BIG_ENDIAN)
3635 {
3636 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3637 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3638 }
3639 else
3640 {
3641 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3642 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3643 }
3644 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3645 gen_rtx_IF_THEN_ELSE (DImode,
3646 gen_rtx_EQ (DImode, areg,
3647 const0_rtx),
3648 const0_rtx, ext_tmps[i])));
3649 }
3650
3651 /* Merge the half-words into whole words. */
3652 for (i = 0; i < words; ++i)
3653 {
3654 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3655 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3656 }
3657 }
3658
3659 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3660 may be NULL to store zeros. */
3661
3662 static void
3663 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3664 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3665 {
3666 rtx const im8 = GEN_INT (-8);
3667 rtx const i64 = GEN_INT (64);
3668 rtx ins_tmps[MAX_MOVE_WORDS];
3669 rtx st_tmp_1, st_tmp_2, dreg;
3670 rtx st_addr_1, st_addr_2, dmema;
3671 HOST_WIDE_INT i;
3672
3673 dmema = XEXP (dmem, 0);
3674 if (GET_CODE (dmema) == LO_SUM)
3675 dmema = force_reg (Pmode, dmema);
3676
3677 /* Generate all the tmp registers we need. */
3678 if (data_regs != NULL)
3679 for (i = 0; i < words; ++i)
3680 ins_tmps[i] = gen_reg_rtx(DImode);
3681 st_tmp_1 = gen_reg_rtx(DImode);
3682 st_tmp_2 = gen_reg_rtx(DImode);
3683
3684 if (ofs != 0)
3685 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3686
3687 st_addr_2 = change_address (dmem, DImode,
3688 gen_rtx_AND (DImode,
3689 plus_constant (dmema, words*8 - 1),
3690 im8));
3691 set_mem_alias_set (st_addr_2, 0);
3692
3693 st_addr_1 = change_address (dmem, DImode,
3694 gen_rtx_AND (DImode, dmema, im8));
3695 set_mem_alias_set (st_addr_1, 0);
3696
3697 /* Load up the destination end bits. */
3698 emit_move_insn (st_tmp_2, st_addr_2);
3699 emit_move_insn (st_tmp_1, st_addr_1);
3700
3701 /* Shift the input data into place. */
3702 dreg = copy_addr_to_reg (dmema);
3703 if (WORDS_BIG_ENDIAN)
3704 emit_move_insn (dreg, plus_constant (dreg, 7));
3705 if (data_regs != NULL)
3706 {
3707 for (i = words-1; i >= 0; --i)
3708 {
3709 if (WORDS_BIG_ENDIAN)
3710 {
3711 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3712 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3713 }
3714 else
3715 {
3716 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3718 }
3719 }
3720 for (i = words-1; i > 0; --i)
3721 {
3722 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3723 ins_tmps[i-1], ins_tmps[i-1], 1,
3724 OPTAB_WIDEN);
3725 }
3726 }
3727
3728 /* Split and merge the ends with the destination data. */
3729 if (WORDS_BIG_ENDIAN)
3730 {
3731 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3732 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3733 }
3734 else
3735 {
3736 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3737 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3738 }
3739
3740 if (data_regs != NULL)
3741 {
3742 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3743 st_tmp_2, 1, OPTAB_WIDEN);
3744 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3745 st_tmp_1, 1, OPTAB_WIDEN);
3746 }
3747
3748 /* Store it all. */
3749 if (WORDS_BIG_ENDIAN)
3750 emit_move_insn (st_addr_1, st_tmp_1);
3751 else
3752 emit_move_insn (st_addr_2, st_tmp_2);
3753 for (i = words-1; i > 0; --i)
3754 {
3755 rtx tmp = change_address (dmem, DImode,
3756 gen_rtx_AND (DImode,
3757 plus_constant(dmema,
3758 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3759 im8));
3760 set_mem_alias_set (tmp, 0);
3761 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3762 }
3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (st_addr_2, st_tmp_2);
3765 else
3766 emit_move_insn (st_addr_1, st_tmp_1);
3767 }
3768
3769
3770 /* Expand string/block move operations.
3771
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3776
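/* A rough example of the strategy (values illustrative): copying 14 bytes
   from a 32-bit-aligned source to a 64-bit-aligned destination reads three
   SImode words plus one aligned HImode value into registers, then writes
   them back as three SImode stores and one HImode store; all loads are
   issued before any store.  */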
3777 int
3778 alpha_expand_block_move (rtx operands[])
3779 {
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3789 rtx tmp;
3790 unsigned int i, words, ofs, nregs = 0;
3791
3792 if (orig_bytes <= 0)
3793 return 1;
3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3795 return 0;
3796
3797 /* Look for additional alignment information from recorded register info. */
3798
3799 tmp = XEXP (orig_src, 0);
3800 if (GET_CODE (tmp) == REG)
3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3802 else if (GET_CODE (tmp) == PLUS
3803 && GET_CODE (XEXP (tmp, 0)) == REG
3804 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3805 {
3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3808
3809 if (a > src_align)
3810 {
3811 if (a >= 64 && c % 8 == 0)
3812 src_align = 64;
3813 else if (a >= 32 && c % 4 == 0)
3814 src_align = 32;
3815 else if (a >= 16 && c % 2 == 0)
3816 src_align = 16;
3817 }
3818 }
3819
3820 tmp = XEXP (orig_dst, 0);
3821 if (GET_CODE (tmp) == REG)
3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && GET_CODE (XEXP (tmp, 0)) == REG
3825 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3826 {
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3829
3830 if (a > dst_align)
3831 {
3832 if (a >= 64 && c % 8 == 0)
3833 dst_align = 64;
3834 else if (a >= 32 && c % 4 == 0)
3835 dst_align = 32;
3836 else if (a >= 16 && c % 2 == 0)
3837 dst_align = 16;
3838 }
3839 }
3840
3841 ofs = 0;
3842 if (src_align >= 64 && bytes >= 8)
3843 {
3844 words = bytes / 8;
3845
3846 for (i = 0; i < words; ++i)
3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
3848
3849 for (i = 0; i < words; ++i)
3850 emit_move_insn (data_regs[nregs + i],
3851 adjust_address (orig_src, DImode, ofs + i * 8));
3852
3853 nregs += words;
3854 bytes -= words * 8;
3855 ofs += words * 8;
3856 }
3857
3858 if (src_align >= 32 && bytes >= 4)
3859 {
3860 words = bytes / 4;
3861
3862 for (i = 0; i < words; ++i)
3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
3864
3865 for (i = 0; i < words; ++i)
3866 emit_move_insn (data_regs[nregs + i],
3867 adjust_address (orig_src, SImode, ofs + i * 4));
3868
3869 nregs += words;
3870 bytes -= words * 4;
3871 ofs += words * 4;
3872 }
3873
3874 if (bytes >= 8)
3875 {
3876 words = bytes / 8;
3877
3878 for (i = 0; i < words+1; ++i)
3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
3880
3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3882 words, ofs);
3883
3884 nregs += words;
3885 bytes -= words * 8;
3886 ofs += words * 8;
3887 }
3888
3889 if (! TARGET_BWX && bytes >= 4)
3890 {
3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3893 bytes -= 4;
3894 ofs += 4;
3895 }
3896
3897 if (bytes >= 2)
3898 {
3899 if (src_align >= 16)
3900 {
3901 do {
3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3904 bytes -= 2;
3905 ofs += 2;
3906 } while (bytes >= 2);
3907 }
3908 else if (! TARGET_BWX)
3909 {
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3912 bytes -= 2;
3913 ofs += 2;
3914 }
3915 }
3916
3917 while (bytes > 0)
3918 {
3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3921 bytes -= 1;
3922 ofs += 1;
3923 }
3924
3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3926
3927 /* Now save it back out again. */
3928
3929 i = 0, ofs = 0;
3930
3931 /* Write out the data in whatever chunks reading the source allowed. */
3932 if (dst_align >= 64)
3933 {
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3935 {
3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3937 data_regs[i]);
3938 ofs += 8;
3939 i++;
3940 }
3941 }
3942
3943 if (dst_align >= 32)
3944 {
3945 /* If the source has remaining DImode regs, write them out in
3946 two pieces. */
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3948 {
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3951
3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3953 gen_lowpart (SImode, data_regs[i]));
3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3955 gen_lowpart (SImode, tmp));
3956 ofs += 8;
3957 i++;
3958 }
3959
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3961 {
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3963 data_regs[i]);
3964 ofs += 4;
3965 i++;
3966 }
3967 }
3968
3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3970 {
3971 /* Write out a remaining block of words using unaligned methods. */
3972
3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
3975 break;
3976
3977 if (words == 1)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3979 else
3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3981 words, ofs);
3982
3983 i += words;
3984 ofs += words * 8;
3985 }
3986
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3991 {
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3993 ofs += 4;
3994 i++;
3995 }
3996
3997 if (dst_align >= 16)
3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3999 {
4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4001 i++;
4002 ofs += 2;
4003 }
4004 else
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4006 {
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4008 i++;
4009 ofs += 2;
4010 }
4011
4012 /* The remainder must be byte copies. */
4013 while (i < nregs)
4014 {
4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4017 i++;
4018 ofs += 1;
4019 }
4020
4021 return 1;
4022 }
4023
4024 int
4025 alpha_expand_block_clear (rtx operands[])
4026 {
4027 rtx bytes_rtx = operands[1];
4028 rtx align_rtx = operands[3];
4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
4033 rtx orig_dst = operands[0];
4034 rtx tmp;
4035 int i, words, ofs = 0;
4036
4037 if (orig_bytes <= 0)
4038 return 1;
4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4040 return 0;
4041
4042 /* Look for stricter alignment. */
4043 tmp = XEXP (orig_dst, 0);
4044 if (GET_CODE (tmp) == REG)
4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4046 else if (GET_CODE (tmp) == PLUS
4047 && GET_CODE (XEXP (tmp, 0)) == REG
4048 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4049 {
4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4052
4053 if (a > align)
4054 {
4055 if (a >= 64)
4056 align = a, alignofs = 8 - c % 8;
4057 else if (a >= 32)
4058 align = a, alignofs = 4 - c % 4;
4059 else if (a >= 16)
4060 align = a, alignofs = 2 - c % 2;
4061 }
4062 }
4063
4064 /* Handle an unaligned prefix first. */
4065
4066 if (alignofs > 0)
4067 {
4068 #if HOST_BITS_PER_WIDE_INT >= 64
4069 /* Given that alignofs is bounded by align, the only time BWX could
4070 generate three stores is for a 7 byte fill. Prefer two individual
4071 stores over a load/mask/store sequence. */
4072 if ((!TARGET_BWX || alignofs == 7)
4073 && align >= 32
4074 && !(alignofs == 4 && bytes >= 4))
4075 {
4076 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4077 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4078 rtx mem, tmp;
4079 HOST_WIDE_INT mask;
4080
4081 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4082 set_mem_alias_set (mem, 0);
4083
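/* Worked example: inv_alignofs == 2 gives an initial mask whose low two
   bytes are 0xff and whose remaining bytes are zero, so the masking store
   below preserves the bytes in front of the region being cleared (and,
   when BYTES ends inside this word, the bytes past its end) while zeroing
   everything in between.  */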
4084 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4085 if (bytes < alignofs)
4086 {
4087 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4088 ofs += bytes;
4089 bytes = 0;
4090 }
4091 else
4092 {
4093 bytes -= alignofs;
4094 ofs += alignofs;
4095 }
4096 alignofs = 0;
4097
4098 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4099 NULL_RTX, 1, OPTAB_WIDEN);
4100
4101 emit_move_insn (mem, tmp);
4102 }
4103 #endif
4104
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4106 {
4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4108 bytes -= 1;
4109 ofs += 1;
4110 alignofs -= 1;
4111 }
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4113 {
4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4115 bytes -= 2;
4116 ofs += 2;
4117 alignofs -= 2;
4118 }
4119 if (alignofs == 4 && bytes >= 4)
4120 {
4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4122 bytes -= 4;
4123 ofs += 4;
4124 alignofs = 0;
4125 }
4126
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4129 if (alignofs > 0)
4130 {
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4133 }
4134 }
4135
4136 /* Handle a block of contiguous long-words. */
4137
4138 if (align >= 64 && bytes >= 8)
4139 {
4140 words = bytes / 8;
4141
4142 for (i = 0; i < words; ++i)
4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4144 const0_rtx);
4145
4146 bytes -= words * 8;
4147 ofs += words * 8;
4148 }
4149
4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4152
4153 if (align >= 32 && bytes > 16)
4154 {
4155 rtx orig_dsta;
4156
4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4158 bytes -= 4;
4159 ofs += 4;
4160
4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4164
4165 words = bytes / 8;
4166 for (i = 0; i < words; ++i)
4167 {
4168 rtx mem
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
4171 plus_constant (orig_dsta, ofs + i*8),
4172 GEN_INT (-8)));
4173 set_mem_alias_set (mem, 0);
4174 emit_move_insn (mem, const0_rtx);
4175 }
4176
4177 /* Depending on the alignment, the first stq_u may have overlapped
4178 with the initial stl, which means that the last stq_u didn't
4179 write as much as it would appear. Leave those questionable bytes
4180 unaccounted for. */
4181 bytes -= words * 8 - 4;
4182 ofs += words * 8 - 4;
4183 }
4184
4185 /* Handle a smaller block of aligned words. */
4186
4187 if ((align >= 64 && bytes == 4)
4188 || (align == 32 && bytes >= 4))
4189 {
4190 words = bytes / 4;
4191
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4194 const0_rtx);
4195
4196 bytes -= words * 4;
4197 ofs += words * 4;
4198 }
4199
4200 /* An unaligned block uses stq_u stores for as much of it as possible. */
4201
4202 if (bytes >= 8)
4203 {
4204 words = bytes / 8;
4205
4206 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4207
4208 bytes -= words * 8;
4209 ofs += words * 8;
4210 }
4211
4212 /* Next clean up any trailing pieces. */
4213
4214 #if HOST_BITS_PER_WIDE_INT >= 64
4215 /* Count the number of bits in BYTES for which aligned stores could
4216 be emitted. */
4217 words = 0;
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4219 if (bytes & i)
4220 words += 1;
4221
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
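/* Worked example for the masking below: with bytes == 3 and align >= 64
   the DImode mask is 0xffffffffff000000, so the AND clears just the low
   three bytes in place and leaves the rest of the word untouched.  */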
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4225 {
4226 if (align >= 64)
4227 {
4228 rtx mem, tmp;
4229 HOST_WIDE_INT mask;
4230
4231 mem = adjust_address (orig_dst, DImode, ofs);
4232 set_mem_alias_set (mem, 0);
4233
4234 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4235
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4238
4239 emit_move_insn (mem, tmp);
4240 return 1;
4241 }
4242 else if (align >= 32 && bytes < 4)
4243 {
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4246
4247 mem = adjust_address (orig_dst, SImode, ofs);
4248 set_mem_alias_set (mem, 0);
4249
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4251
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4254
4255 emit_move_insn (mem, tmp);
4256 return 1;
4257 }
4258 }
4259 #endif
4260
4261 if (!TARGET_BWX && bytes >= 4)
4262 {
4263 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4264 bytes -= 4;
4265 ofs += 4;
4266 }
4267
4268 if (bytes >= 2)
4269 {
4270 if (align >= 16)
4271 {
4272 do {
4273 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4274 const0_rtx);
4275 bytes -= 2;
4276 ofs += 2;
4277 } while (bytes >= 2);
4278 }
4279 else if (! TARGET_BWX)
4280 {
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4282 bytes -= 2;
4283 ofs += 2;
4284 }
4285 }
4286
4287 while (bytes > 0)
4288 {
4289 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4290 bytes -= 1;
4291 ofs += 1;
4292 }
4293
4294 return 1;
4295 }
4296
4297 /* Returns a mask so that zap(x, value) == x & mask. */
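/* For instance, VALUE == 0x05 asks for bytes 0 and 2 to be zapped, so the
   mask built below is 0xffffffffff00ff00: each byte whose bit in VALUE is
   clear is kept as 0xff, each byte whose bit is set becomes 0x00.  */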
4298
4299 rtx
4300 alpha_expand_zap_mask (HOST_WIDE_INT value)
4301 {
4302 rtx result;
4303 int i;
4304
4305 if (HOST_BITS_PER_WIDE_INT >= 64)
4306 {
4307 HOST_WIDE_INT mask = 0;
4308
4309 for (i = 7; i >= 0; --i)
4310 {
4311 mask <<= 8;
4312 if (!((value >> i) & 1))
4313 mask |= 0xff;
4314 }
4315
4316 result = gen_int_mode (mask, DImode);
4317 }
4318 else
4319 {
4320 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4321
4322 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4323
4324 for (i = 7; i >= 4; --i)
4325 {
4326 mask_hi <<= 8;
4327 if (!((value >> i) & 1))
4328 mask_hi |= 0xff;
4329 }
4330
4331 for (i = 3; i >= 0; --i)
4332 {
4333 mask_lo <<= 8;
4334 if (!((value >> i) & 1))
4335 mask_lo |= 0xff;
4336 }
4337
4338 result = immed_double_const (mask_lo, mask_hi, DImode);
4339 }
4340
4341 return result;
4342 }
4343
4344 void
4345 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4346 enum machine_mode mode,
4347 rtx op0, rtx op1, rtx op2)
4348 {
4349 op0 = gen_lowpart (mode, op0);
4350
4351 if (op1 == const0_rtx)
4352 op1 = CONST0_RTX (mode);
4353 else
4354 op1 = gen_lowpart (mode, op1);
4355
4356 if (op2 == const0_rtx)
4357 op2 = CONST0_RTX (mode);
4358 else
4359 op2 = gen_lowpart (mode, op2);
4360
4361 emit_insn ((*gen) (op0, op1, op2));
4362 }
4363
4364 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4365 COND is true. Mark the jump as unlikely to be taken. */
4366
4367 static void
4368 emit_unlikely_jump (rtx cond, rtx label)
4369 {
4370 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4371 rtx x;
4372
4373 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4374 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4375 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4376 }
4377
4378 /* A subroutine of the atomic operation splitters. Emit a load-locked
4379 instruction in MODE. */
4380
4381 static void
4382 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4383 {
4384 rtx (*fn) (rtx, rtx) = NULL;
4385 if (mode == SImode)
4386 fn = gen_load_locked_si;
4387 else if (mode == DImode)
4388 fn = gen_load_locked_di;
4389 emit_insn (fn (reg, mem));
4390 }
4391
4392 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4393 instruction in MODE. */
4394
4395 static void
4396 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4397 {
4398 rtx (*fn) (rtx, rtx, rtx) = NULL;
4399 if (mode == SImode)
4400 fn = gen_store_conditional_si;
4401 else if (mode == DImode)
4402 fn = gen_store_conditional_di;
4403 emit_insn (fn (res, mem, val));
4404 }
4405
4406 /* A subroutine of the atomic operation splitters. Emit an insxl
4407 instruction in MODE. */
4408
4409 static rtx
4410 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4411 {
4412 rtx ret = gen_reg_rtx (DImode);
4413 rtx (*fn) (rtx, rtx, rtx);
4414
4415 if (WORDS_BIG_ENDIAN)
4416 {
4417 if (mode == QImode)
4418 fn = gen_insbl_be;
4419 else
4420 fn = gen_inswl_be;
4421 }
4422 else
4423 {
4424 if (mode == QImode)
4425 fn = gen_insbl_le;
4426 else
4427 fn = gen_inswl_le;
4428 }
4429 /* The insbl and inswl patterns require a register operand. */
4430 op1 = force_reg (mode, op1);
4431 emit_insn (fn (ret, op1, op2));
4432
4433 return ret;
4434 }
4435
4436 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4437 to perform. MEM is the memory on which to operate. VAL is the second
4438 operand of the binary operator. BEFORE and AFTER are optional locations to
4439 return the value of MEM either before or after the operation. SCRATCH is
4440 a scratch register. */
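/* The emitted RTL corresponds roughly to

        mb
   1:   load-locked        BEFORE <- MEM
        SCRATCH <- BEFORE <code> VAL      (AFTER, if supplied, gets a copy)
        store-conditional  MEM <- SCRATCH, success flag in SCRATCH
        branch (unlikely) back to 1 if the store-conditional failed
        mb

   NOT is special-cased as an and-not of BEFORE with VAL.  */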
4441
4442 void
4443 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4444 rtx before, rtx after, rtx scratch)
4445 {
4446 enum machine_mode mode = GET_MODE (mem);
4447 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4448
4449 emit_insn (gen_memory_barrier ());
4450
4451 label = gen_label_rtx ();
4452 emit_label (label);
4453 label = gen_rtx_LABEL_REF (DImode, label);
4454
4455 if (before == NULL)
4456 before = scratch;
4457 emit_load_locked (mode, before, mem);
4458
4459 if (code == NOT)
4460 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4461 else
4462 x = gen_rtx_fmt_ee (code, mode, before, val);
4463 if (after)
4464 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4465 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4466
4467 emit_store_conditional (mode, cond, mem, scratch);
4468
4469 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4470 emit_unlikely_jump (x, label);
4471
4472 emit_insn (gen_memory_barrier ());
4473 }
4474
4475 /* Expand a compare and swap operation. */
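/* The emitted RTL corresponds roughly to

        mb
   1:   load-locked        RETVAL <- MEM
        branch (unlikely) to 2 if RETVAL != OLDVAL
        SCRATCH <- NEWVAL
        store-conditional  MEM <- SCRATCH, success flag in SCRATCH
        branch (unlikely) back to 1 if the store-conditional failed
        mb
   2:                                                                  */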
4476
4477 void
4478 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4479 rtx scratch)
4480 {
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4483
4484 emit_insn (gen_memory_barrier ());
4485
4486 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4487 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 emit_label (XEXP (label1, 0));
4489
4490 emit_load_locked (mode, retval, mem);
4491
4492 x = gen_lowpart (DImode, retval);
4493 if (oldval == const0_rtx)
4494 x = gen_rtx_NE (DImode, x, const0_rtx);
4495 else
4496 {
4497 x = gen_rtx_EQ (DImode, x, oldval);
4498 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4499 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4500 }
4501 emit_unlikely_jump (x, label2);
4502
4503 emit_move_insn (scratch, newval);
4504 emit_store_conditional (mode, cond, mem, scratch);
4505
4506 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4507 emit_unlikely_jump (x, label1);
4508
4509 emit_insn (gen_memory_barrier ());
4510 emit_label (XEXP (label2, 0));
4511 }
4512
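/* Expand a compare-and-swap on a QImode or HImode quantity.  Alpha has no
   byte or word load-locked, so the operation is carried out on the
   containing aligned quadword: the address is rounded down to a multiple
   of 8, NEWVAL is positioned within the word with insbl/inswl, and the
   helper pattern (split below) performs the extract, compare, mask and
   insert steps.  */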
4513 void
4514 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4515 {
4516 enum machine_mode mode = GET_MODE (mem);
4517 rtx addr, align, wdst;
4518 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4519
4520 addr = force_reg (DImode, XEXP (mem, 0));
4521 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4522 NULL_RTX, 1, OPTAB_DIRECT);
4523
4524 oldval = convert_modes (DImode, mode, oldval, 1);
4525 newval = emit_insxl (mode, newval, addr);
4526
4527 wdst = gen_reg_rtx (DImode);
4528 if (mode == QImode)
4529 fn5 = gen_sync_compare_and_swapqi_1;
4530 else
4531 fn5 = gen_sync_compare_and_swaphi_1;
4532 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4533
4534 emit_move_insn (dst, gen_lowpart (mode, wdst));
4535 }
4536
4537 void
4538 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4539 rtx oldval, rtx newval, rtx align,
4540 rtx scratch, rtx cond)
4541 {
4542 rtx label1, label2, mem, width, mask, x;
4543
4544 mem = gen_rtx_MEM (DImode, align);
4545 MEM_VOLATILE_P (mem) = 1;
4546
4547 emit_insn (gen_memory_barrier ());
4548 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4549 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 emit_label (XEXP (label1, 0));
4551
4552 emit_load_locked (DImode, scratch, mem);
4553
4554 width = GEN_INT (GET_MODE_BITSIZE (mode));
4555 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4556 if (WORDS_BIG_ENDIAN)
4557 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4558 else
4559 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4560
4561 if (oldval == const0_rtx)
4562 x = gen_rtx_NE (DImode, dest, const0_rtx);
4563 else
4564 {
4565 x = gen_rtx_EQ (DImode, dest, oldval);
4566 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4567 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4568 }
4569 emit_unlikely_jump (x, label2);
4570
4571 if (WORDS_BIG_ENDIAN)
4572 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4573 else
4574 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4575 emit_insn (gen_iordi3 (scratch, scratch, newval));
4576
4577 emit_store_conditional (DImode, scratch, mem, scratch);
4578
4579 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4580 emit_unlikely_jump (x, label1);
4581
4582 emit_insn (gen_memory_barrier ());
4583 emit_label (XEXP (label2, 0));
4584 }
4585
4586 /* Expand an atomic exchange operation. */
4587
4588 void
4589 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4590 {
4591 enum machine_mode mode = GET_MODE (mem);
4592 rtx label, x, cond = gen_lowpart (DImode, scratch);
4593
4594 emit_insn (gen_memory_barrier ());
4595
4596 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 emit_label (XEXP (label, 0));
4598
4599 emit_load_locked (mode, retval, mem);
4600 emit_move_insn (scratch, val);
4601 emit_store_conditional (mode, cond, mem, scratch);
4602
4603 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4604 emit_unlikely_jump (x, label);
4605 }
4606
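/* Expand an atomic exchange of a QImode or HImode quantity.  As with the
   narrow compare-and-swap above, the work is done on the enclosing aligned
   quadword: VAL is positioned within the word with insbl/inswl and the
   helper pattern performs the extract/mask/insert/store-conditional loop.  */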
4607 void
4608 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4609 {
4610 enum machine_mode mode = GET_MODE (mem);
4611 rtx addr, align, wdst;
4612 rtx (*fn4) (rtx, rtx, rtx, rtx);
4613
4614 /* Force the address into a register. */
4615 addr = force_reg (DImode, XEXP (mem, 0));
4616
4617 /* Align it to a multiple of 8. */
4618 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4619 NULL_RTX, 1, OPTAB_DIRECT);
4620
4621 /* Insert val into the correct byte location within the word. */
4622 val = emit_insxl (mode, val, addr);
4623
4624 wdst = gen_reg_rtx (DImode);
4625 if (mode == QImode)
4626 fn4 = gen_sync_lock_test_and_setqi_1;
4627 else
4628 fn4 = gen_sync_lock_test_and_sethi_1;
4629 emit_insn (fn4 (wdst, addr, val, align));
4630
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4632 }
4633
4634 void
4635 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4636 rtx val, rtx align, rtx scratch)
4637 {
4638 rtx label, mem, width, mask, x;
4639
4640 mem = gen_rtx_MEM (DImode, align);
4641 MEM_VOLATILE_P (mem) = 1;
4642
4643 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4646
4647 emit_load_locked (DImode, scratch, mem);
4648
4649 width = GEN_INT (GET_MODE_BITSIZE (mode));
4650 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4651 if (WORDS_BIG_ENDIAN)
4652 {
4653 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4655 }
4656 else
4657 {
4658 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4659 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4660 }
4661 emit_insn (gen_iordi3 (scratch, scratch, val));
4662
4663 emit_store_conditional (DImode, scratch, mem, scratch);
4664
4665 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4666 emit_unlikely_jump (x, label);
4667 }
4668 \f
4669 /* Adjust the cost of a scheduling dependency. Return the new cost of
4670 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4671
4672 static int
4673 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4674 {
4675 enum attr_type insn_type, dep_insn_type;
4676
4677 /* If the dependence is an anti-dependence, there is no cost. For an
4678 output dependence, there is sometimes a cost, but it doesn't seem
4679 worth handling those few cases. */
4680 if (REG_NOTE_KIND (link) != 0)
4681 return cost;
4682
4683 /* If we can't recognize the insns, we can't really do anything. */
4684 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4685 return cost;
4686
4687 insn_type = get_attr_type (insn);
4688 dep_insn_type = get_attr_type (dep_insn);
4689
4690 /* Bring in the user-defined memory latency. */
4691 if (dep_insn_type == TYPE_ILD
4692 || dep_insn_type == TYPE_FLD
4693 || dep_insn_type == TYPE_LDSYM)
4694 cost += alpha_memory_latency-1;
4695
4696 /* Everything else handled in DFA bypasses now. */
4697
4698 return cost;
4699 }
4700
4701 /* The number of instructions that can be issued per cycle. */
4702
4703 static int
4704 alpha_issue_rate (void)
4705 {
4706 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4707 }
4708
4709 /* How many alternative schedules to try. This should be as wide as the
4710 scheduling freedom in the DFA, but no wider. Making this value too
4711 large results in extra work for the scheduler.
4712
4713 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4714 alternative schedules. For EV5, we can choose between E0/E1 and
4715 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4716
4717 static int
4718 alpha_multipass_dfa_lookahead (void)
4719 {
4720 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4721 }
4722 \f
4723 /* Machine-specific function data. */
4724
4725 struct machine_function GTY(())
4726 {
4727 /* For unicosmk. */
4728 /* List of call information words for calls from this function. */
4729 struct rtx_def *first_ciw;
4730 struct rtx_def *last_ciw;
4731 int ciw_count;
4732
4733 /* List of deferred case vectors. */
4734 struct rtx_def *addr_list;
4735
4736 /* For OSF. */
4737 const char *some_ld_name;
4738
4739 /* For TARGET_LD_BUGGY_LDGP. */
4740 struct rtx_def *gp_save_rtx;
4741 };
4742
4743 /* How to allocate a 'struct machine_function'. */
4744
4745 static struct machine_function *
4746 alpha_init_machine_status (void)
4747 {
4748 return ((struct machine_function *)
4749 ggc_alloc_cleared (sizeof (struct machine_function)));
4750 }
4751
4752 /* Functions to save and restore alpha_return_addr_rtx. */
4753
4754 /* Start the ball rolling with RETURN_ADDR_RTX. */
4755
4756 rtx
4757 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4758 {
4759 if (count != 0)
4760 return const0_rtx;
4761
4762 return get_hard_reg_initial_val (Pmode, REG_RA);
4763 }
4764
4765 /* Return or create a memory slot containing the gp value for the current
4766 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4767
4768 rtx
4769 alpha_gp_save_rtx (void)
4770 {
4771 rtx seq, m = cfun->machine->gp_save_rtx;
4772
4773 if (m == NULL)
4774 {
4775 start_sequence ();
4776
4777 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778 m = validize_mem (m);
4779 emit_move_insn (m, pic_offset_table_rtx);
4780
4781 seq = get_insns ();
4782 end_sequence ();
4783 emit_insn_at_entry (seq);
4784
4785 cfun->machine->gp_save_rtx = m;
4786 }
4787
4788 return m;
4789 }
4790
4791 static int
4792 alpha_ra_ever_killed (void)
4793 {
4794 rtx top;
4795
4796 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4797 return (int)df_regs_ever_live_p (REG_RA);
4798
4799 push_topmost_sequence ();
4800 top = get_insns ();
4801 pop_topmost_sequence ();
4802
4803 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4804 }
4805
4806 \f
4807 /* Return the trap mode suffix applicable to the current
4808 instruction, or NULL. */
4809
4810 static const char *
4811 get_trap_mode_suffix (void)
4812 {
4813 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4814
4815 switch (s)
4816 {
4817 case TRAP_SUFFIX_NONE:
4818 return NULL;
4819
4820 case TRAP_SUFFIX_SU:
4821 if (alpha_fptm >= ALPHA_FPTM_SU)
4822 return "su";
4823 return NULL;
4824
4825 case TRAP_SUFFIX_SUI:
4826 if (alpha_fptm >= ALPHA_FPTM_SUI)
4827 return "sui";
4828 return NULL;
4829
4830 case TRAP_SUFFIX_V_SV:
4831 switch (alpha_fptm)
4832 {
4833 case ALPHA_FPTM_N:
4834 return NULL;
4835 case ALPHA_FPTM_U:
4836 return "v";
4837 case ALPHA_FPTM_SU:
4838 case ALPHA_FPTM_SUI:
4839 return "sv";
4840 default:
4841 gcc_unreachable ();
4842 }
4843
4844 case TRAP_SUFFIX_V_SV_SVI:
4845 switch (alpha_fptm)
4846 {
4847 case ALPHA_FPTM_N:
4848 return NULL;
4849 case ALPHA_FPTM_U:
4850 return "v";
4851 case ALPHA_FPTM_SU:
4852 return "sv";
4853 case ALPHA_FPTM_SUI:
4854 return "svi";
4855 default:
4856 gcc_unreachable ();
4857 }
4858 break;
4859
4860 case TRAP_SUFFIX_U_SU_SUI:
4861 switch (alpha_fptm)
4862 {
4863 case ALPHA_FPTM_N:
4864 return NULL;
4865 case ALPHA_FPTM_U:
4866 return "u";
4867 case ALPHA_FPTM_SU:
4868 return "su";
4869 case ALPHA_FPTM_SUI:
4870 return "sui";
4871 default:
4872 gcc_unreachable ();
4873 }
4874 break;
4875
4876 default:
4877 gcc_unreachable ();
4878 }
4879 gcc_unreachable ();
4880 }
4881
4882 /* Return the rounding mode suffix applicable to the current
4883 instruction, or NULL. */
4884
4885 static const char *
4886 get_round_mode_suffix (void)
4887 {
4888 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4889
4890 switch (s)
4891 {
4892 case ROUND_SUFFIX_NONE:
4893 return NULL;
4894 case ROUND_SUFFIX_NORMAL:
4895 switch (alpha_fprm)
4896 {
4897 case ALPHA_FPRM_NORM:
4898 return NULL;
4899 case ALPHA_FPRM_MINF:
4900 return "m";
4901 case ALPHA_FPRM_CHOP:
4902 return "c";
4903 case ALPHA_FPRM_DYN:
4904 return "d";
4905 default:
4906 gcc_unreachable ();
4907 }
4908 break;
4909
4910 case ROUND_SUFFIX_C:
4911 return "c";
4912
4913 default:
4914 gcc_unreachable ();
4915 }
4916 gcc_unreachable ();
4917 }
4918
4919 /* Locate some local-dynamic symbol still in use by this function
4920 so that we can print its name in some movdi_er_tlsldm pattern. */
4921
4922 static int
4923 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4924 {
4925 rtx x = *px;
4926
4927 if (GET_CODE (x) == SYMBOL_REF
4928 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4929 {
4930 cfun->machine->some_ld_name = XSTR (x, 0);
4931 return 1;
4932 }
4933
4934 return 0;
4935 }
4936
4937 static const char *
4938 get_some_local_dynamic_name (void)
4939 {
4940 rtx insn;
4941
4942 if (cfun->machine->some_ld_name)
4943 return cfun->machine->some_ld_name;
4944
4945 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4946 if (INSN_P (insn)
4947 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4948 return cfun->machine->some_ld_name;
4949
4950 gcc_unreachable ();
4951 }
4952
4953 /* Print an operand. Recognize special options, documented below. */
4954
4955 void
4956 print_operand (FILE *file, rtx x, int code)
4957 {
4958 int i;
4959
4960 switch (code)
4961 {
4962 case '~':
4963 /* Print the assembler name of the current function. */
4964 assemble_name (file, alpha_fnname);
4965 break;
4966
4967 case '&':
4968 assemble_name (file, get_some_local_dynamic_name ());
4969 break;
4970
4971 case '/':
4972 {
4973 const char *trap = get_trap_mode_suffix ();
4974 const char *round = get_round_mode_suffix ();
4975
4976 if (trap || round)
4977 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4978 (trap ? trap : ""), (round ? round : ""));
4979 break;
4980 }
4981
4982 case ',':
4983 /* Generates single precision instruction suffix. */
4984 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4985 break;
4986
4987 case '-':
4988 /* Generates double precision instruction suffix. */
4989 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4990 break;
4991
4992 case '#':
4993 if (alpha_this_literal_sequence_number == 0)
4994 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4995 fprintf (file, "%d", alpha_this_literal_sequence_number);
4996 break;
4997
4998 case '*':
4999 if (alpha_this_gpdisp_sequence_number == 0)
5000 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5001 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5002 break;
5003
5004 case 'H':
5005 if (GET_CODE (x) == HIGH)
5006 output_addr_const (file, XEXP (x, 0));
5007 else
5008 output_operand_lossage ("invalid %%H value");
5009 break;
5010
5011 case 'J':
5012 {
5013 const char *lituse;
5014
5015 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5016 {
5017 x = XVECEXP (x, 0, 0);
5018 lituse = "lituse_tlsgd";
5019 }
5020 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5021 {
5022 x = XVECEXP (x, 0, 0);
5023 lituse = "lituse_tlsldm";
5024 }
5025 else if (GET_CODE (x) == CONST_INT)
5026 lituse = "lituse_jsr";
5027 else
5028 {
5029 output_operand_lossage ("invalid %%J value");
5030 break;
5031 }
5032
5033 if (x != const0_rtx)
5034 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5035 }
5036 break;
5037
5038 case 'j':
5039 {
5040 const char *lituse;
5041
5042 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5043 lituse = "lituse_jsrdirect";
5044 #else
5045 lituse = "lituse_jsr";
5046 #endif
5047
5048 gcc_assert (INTVAL (x) != 0);
5049 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5050 }
5051 break;
5052 case 'r':
5053 /* If this operand is the constant zero, write it as "$31". */
5054 if (GET_CODE (x) == REG)
5055 fprintf (file, "%s", reg_names[REGNO (x)]);
5056 else if (x == CONST0_RTX (GET_MODE (x)))
5057 fprintf (file, "$31");
5058 else
5059 output_operand_lossage ("invalid %%r value");
5060 break;
5061
5062 case 'R':
5063 /* Similar, but for floating-point. */
5064 if (GET_CODE (x) == REG)
5065 fprintf (file, "%s", reg_names[REGNO (x)]);
5066 else if (x == CONST0_RTX (GET_MODE (x)))
5067 fprintf (file, "$f31");
5068 else
5069 output_operand_lossage ("invalid %%R value");
5070 break;
5071
5072 case 'N':
5073 /* Write the 1's complement of a constant. */
5074 if (GET_CODE (x) != CONST_INT)
5075 output_operand_lossage ("invalid %%N value");
5076
5077 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5078 break;
5079
5080 case 'P':
5081 /* Write 1 << C, for a constant C. */
5082 if (GET_CODE (x) != CONST_INT)
5083 output_operand_lossage ("invalid %%P value");
5084
5085 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5086 break;
5087
5088 case 'h':
5089 /* Write the high-order 16 bits of a constant, sign-extended. */
5090 if (GET_CODE (x) != CONST_INT)
5091 output_operand_lossage ("invalid %%h value");
5092
5093 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5094 break;
5095
5096 case 'L':
5097 /* Write the low-order 16 bits of a constant, sign-extended. */
5098 if (GET_CODE (x) != CONST_INT)
5099 output_operand_lossage ("invalid %%L value");
5100
5101 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5102 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5103 break;
5104
5105 case 'm':
5106 /* Write mask for ZAP insn. */
5107 if (GET_CODE (x) == CONST_DOUBLE)
5108 {
5109 HOST_WIDE_INT mask = 0;
5110 HOST_WIDE_INT value;
5111
5112 value = CONST_DOUBLE_LOW (x);
5113 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5114 i++, value >>= 8)
5115 if (value & 0xff)
5116 mask |= (1 << i);
5117
5118 value = CONST_DOUBLE_HIGH (x);
5119 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5120 i++, value >>= 8)
5121 if (value & 0xff)
5122 mask |= (1 << (i + sizeof (int)));
5123
5124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5125 }
5126
5127 else if (GET_CODE (x) == CONST_INT)
5128 {
5129 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5130
5131 for (i = 0; i < 8; i++, value >>= 8)
5132 if (value & 0xff)
5133 mask |= (1 << i);
5134
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5136 }
5137 else
5138 output_operand_lossage ("invalid %%m value");
5139 break;
5140
5141 case 'M':
5142 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5143 if (GET_CODE (x) != CONST_INT
5144 || (INTVAL (x) != 8 && INTVAL (x) != 16
5145 && INTVAL (x) != 32 && INTVAL (x) != 64))
5146 output_operand_lossage ("invalid %%M value");
5147
5148 fprintf (file, "%s",
5149 (INTVAL (x) == 8 ? "b"
5150 : INTVAL (x) == 16 ? "w"
5151 : INTVAL (x) == 32 ? "l"
5152 : "q"));
5153 break;
5154
5155 case 'U':
5156 /* Similar, except do it from the mask. */
5157 if (GET_CODE (x) == CONST_INT)
5158 {
5159 HOST_WIDE_INT value = INTVAL (x);
5160
5161 if (value == 0xff)
5162 {
5163 fputc ('b', file);
5164 break;
5165 }
5166 if (value == 0xffff)
5167 {
5168 fputc ('w', file);
5169 break;
5170 }
5171 if (value == 0xffffffff)
5172 {
5173 fputc ('l', file);
5174 break;
5175 }
5176 if (value == -1)
5177 {
5178 fputc ('q', file);
5179 break;
5180 }
5181 }
5182 else if (HOST_BITS_PER_WIDE_INT == 32
5183 && GET_CODE (x) == CONST_DOUBLE
5184 && CONST_DOUBLE_LOW (x) == 0xffffffff
5185 && CONST_DOUBLE_HIGH (x) == 0)
5186 {
5187 fputc ('l', file);
5188 break;
5189 }
5190 output_operand_lossage ("invalid %%U value");
5191 break;
5192
5193 case 's':
5194 /* Write the constant value divided by 8 for little-endian mode or
5195 (56 - value) / 8 for big-endian mode. */
5196
5197 if (GET_CODE (x) != CONST_INT
5198 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5199 ? 56
5200 : 64)
5201 || (INTVAL (x) & 7) != 0)
5202 output_operand_lossage ("invalid %%s value");
5203
5204 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5205 WORDS_BIG_ENDIAN
5206 ? (56 - INTVAL (x)) / 8
5207 : INTVAL (x) / 8);
5208 break;
5209
5210 case 'S':
5211 /* Same, except compute (64 - c) / 8. */
5212
5213 if (GET_CODE (x) != CONST_INT
5214 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5215 || (INTVAL (x) & 7) != 0)
5216 output_operand_lossage ("invalid %%S value");
5217
5218 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5219 break;
5220
5221 case 't':
5222 {
5223 /* On Unicos/Mk systems: use a DEX expression if the symbol
5224 clashes with a register name. */
5225 int dex = unicosmk_need_dex (x);
5226 if (dex)
5227 fprintf (file, "DEX(%d)", dex);
5228 else
5229 output_addr_const (file, x);
5230 }
5231 break;
5232
5233 case 'C': case 'D': case 'c': case 'd':
5234 /* Write out comparison name. */
5235 {
5236 enum rtx_code c = GET_CODE (x);
5237
5238 if (!COMPARISON_P (x))
5239 output_operand_lossage ("invalid %%C value");
5240
5241 else if (code == 'D')
5242 c = reverse_condition (c);
5243 else if (code == 'c')
5244 c = swap_condition (c);
5245 else if (code == 'd')
5246 c = swap_condition (reverse_condition (c));
5247
5248 if (c == LEU)
5249 fprintf (file, "ule");
5250 else if (c == LTU)
5251 fprintf (file, "ult");
5252 else if (c == UNORDERED)
5253 fprintf (file, "un");
5254 else
5255 fprintf (file, "%s", GET_RTX_NAME (c));
5256 }
5257 break;
5258
5259 case 'E':
5260 /* Write the divide or modulus operator. */
5261 switch (GET_CODE (x))
5262 {
5263 case DIV:
5264 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5265 break;
5266 case UDIV:
5267 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5268 break;
5269 case MOD:
5270 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5271 break;
5272 case UMOD:
5273 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5274 break;
5275 default:
5276 output_operand_lossage ("invalid %%E value");
5277 break;
5278 }
5279 break;
5280
5281 case 'A':
5282 /* Write "_u" for unaligned access. */
5283 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5284 fprintf (file, "_u");
5285 break;
5286
5287 case 0:
5288 if (GET_CODE (x) == REG)
5289 fprintf (file, "%s", reg_names[REGNO (x)]);
5290 else if (GET_CODE (x) == MEM)
5291 output_address (XEXP (x, 0));
5292 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5293 {
5294 switch (XINT (XEXP (x, 0), 1))
5295 {
5296 case UNSPEC_DTPREL:
5297 case UNSPEC_TPREL:
5298 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5299 break;
5300 default:
5301 output_operand_lossage ("unknown relocation unspec");
5302 break;
5303 }
5304 }
5305 else
5306 output_addr_const (file, x);
5307 break;
5308
5309 default:
5310 output_operand_lossage ("invalid %%xn code");
5311 }
5312 }
5313
5314 void
5315 print_operand_address (FILE *file, rtx addr)
5316 {
5317 int basereg = 31;
5318 HOST_WIDE_INT offset = 0;
5319
5320 if (GET_CODE (addr) == AND)
5321 addr = XEXP (addr, 0);
5322
5323 if (GET_CODE (addr) == PLUS
5324 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5325 {
5326 offset = INTVAL (XEXP (addr, 1));
5327 addr = XEXP (addr, 0);
5328 }
5329
5330 if (GET_CODE (addr) == LO_SUM)
5331 {
5332 const char *reloc16, *reloclo;
5333 rtx op1 = XEXP (addr, 1);
5334
5335 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5336 {
5337 op1 = XEXP (op1, 0);
5338 switch (XINT (op1, 1))
5339 {
5340 case UNSPEC_DTPREL:
5341 reloc16 = NULL;
5342 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5343 break;
5344 case UNSPEC_TPREL:
5345 reloc16 = NULL;
5346 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5347 break;
5348 default:
5349 output_operand_lossage ("unknown relocation unspec");
5350 return;
5351 }
5352
5353 output_addr_const (file, XVECEXP (op1, 0, 0));
5354 }
5355 else
5356 {
5357 reloc16 = "gprel";
5358 reloclo = "gprellow";
5359 output_addr_const (file, op1);
5360 }
5361
5362 if (offset)
5363 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5364
5365 addr = XEXP (addr, 0);
5366 switch (GET_CODE (addr))
5367 {
5368 case REG:
5369 basereg = REGNO (addr);
5370 break;
5371
5372 case SUBREG:
5373 basereg = subreg_regno (addr);
5374 break;
5375
5376 default:
5377 gcc_unreachable ();
5378 }
5379
5380 fprintf (file, "($%d)\t\t!%s", basereg,
5381 (basereg == 29 ? reloc16 : reloclo));
5382 return;
5383 }
5384
5385 switch (GET_CODE (addr))
5386 {
5387 case REG:
5388 basereg = REGNO (addr);
5389 break;
5390
5391 case SUBREG:
5392 basereg = subreg_regno (addr);
5393 break;
5394
5395 case CONST_INT:
5396 offset = INTVAL (addr);
5397 break;
5398
5399 #if TARGET_ABI_OPEN_VMS
5400 case SYMBOL_REF:
5401 fprintf (file, "%s", XSTR (addr, 0));
5402 return;
5403
5404 case CONST:
5405 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5406 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5407 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5408 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5409 INTVAL (XEXP (XEXP (addr, 0), 1)));
5410 return;
5411
5412 #endif
5413 default:
5414 gcc_unreachable ();
5415 }
5416
5417 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5418 }
5419 \f
5420 /* Emit RTL insns to initialize the variable parts of a trampoline at
5421 TRAMP. FNADDR is an RTX for the address of the function's pure
5422 code. CXT is an RTX for the static chain value for the function.
5423
5424 The three offset parameters are for the individual template's
5425 layout. A JMPOFS < 0 indicates that the trampoline does not
5426 contain instructions at all.
5427
5428 We assume here that a function will be called many more times than
5429 its address is taken (e.g., it might be passed to qsort), so we
5430 take the trouble to initialize the "hint" field in the JMP insn.
5431 Note that the hint field is PC (new) + 4 * bits 13:0. */
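/* Illustration of the hint encoding: if the JMP sits at address P inside
   the trampoline and the function's code starts at F, the hint that would
   be merged in is ((F - (P + 4)) >> 2) & 0x3fff, i.e. the instruction-word
   displacement truncated to 14 bits.  (The code doing this is disabled
   below, for the reason given there.)  */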
5432
5433 void
5434 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5435 int fnofs, int cxtofs, int jmpofs)
5436 {
5437 rtx temp, temp1, addr;
5438 /* VMS really uses DImode pointers in memory at this point. */
5439 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5440
5441 #ifdef POINTERS_EXTEND_UNSIGNED
5442 fnaddr = convert_memory_address (mode, fnaddr);
5443 cxt = convert_memory_address (mode, cxt);
5444 #endif
5445
5446 /* Store function address and CXT. */
5447 addr = memory_address (mode, plus_constant (tramp, fnofs));
5448 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5449 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5450 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5451
5452 /* This has been disabled since the hint only has a 32k range, and in
5453 no existing OS is the stack within 32k of the text segment. */
5454 if (0 && jmpofs >= 0)
5455 {
5456 /* Compute hint value. */
5457 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5458 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5459 OPTAB_WIDEN);
5460 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5461 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5462 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5463 GEN_INT (0x3fff), 0);
5464
5465 /* Merge in the hint. */
5466 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5467 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5468 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5469 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5470 OPTAB_WIDEN);
5471 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5472 }
5473
5474 #ifdef ENABLE_EXECUTE_STACK
5475 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5476 0, VOIDmode, 1, tramp, Pmode);
5477 #endif
5478
5479 if (jmpofs >= 0)
5480 emit_insn (gen_imb ());
5481 }
5482 \f
5483 /* Determine where to put an argument to a function.
5484 Value is zero to push the argument on the stack,
5485 or a hard register in which to store the argument.
5486
5487 MODE is the argument's machine mode.
5488 TYPE is the data type of the argument (as a tree).
5489 This is null for libcalls where that information may
5490 not be available.
5491 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5492 the preceding args and about the function being called.
5493 NAMED is nonzero if this argument is a named parameter
5494 (otherwise it is an extra parameter matching an ellipsis).
5495
5496 On Alpha the first 6 words of args are normally in registers
5497 and the rest are pushed. */
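/* Concretely, for the OSF ABI the first six integer argument words land in
   $16-$21 (basereg 16) and FP arguments in $f16-$f21 (basereg 48, i.e.
   32 + 16); anything beyond the sixth argument word goes on the stack.  */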
5498
5499 rtx
5500 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5501 int named ATTRIBUTE_UNUSED)
5502 {
5503 int basereg;
5504 int num_args;
5505
5506 /* Don't get confused and pass small structures in FP registers. */
5507 if (type && AGGREGATE_TYPE_P (type))
5508 basereg = 16;
5509 else
5510 {
5511 #ifdef ENABLE_CHECKING
5512 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5513 values here. */
5514 gcc_assert (!COMPLEX_MODE_P (mode));
5515 #endif
5516
5517 /* Set up defaults for FP operands passed in FP registers, and
5518 integral operands passed in integer registers. */
5519 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5520 basereg = 32 + 16;
5521 else
5522 basereg = 16;
5523 }
5524
5525 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5526 the three platforms, so we can't avoid conditional compilation. */
5527 #if TARGET_ABI_OPEN_VMS
5528 {
5529 if (mode == VOIDmode)
5530 return alpha_arg_info_reg_val (cum);
5531
5532 num_args = cum.num_args;
5533 if (num_args >= 6
5534 || targetm.calls.must_pass_in_stack (mode, type))
5535 return NULL_RTX;
5536 }
5537 #elif TARGET_ABI_UNICOSMK
5538 {
5539 int size;
5540
5541 /* If this is the last argument, generate the call info word (CIW). */
5542 /* ??? We don't include the caller's line number in the CIW because
5543 I don't know how to determine it if debug info is turned off. */
5544 if (mode == VOIDmode)
5545 {
5546 int i;
5547 HOST_WIDE_INT lo;
5548 HOST_WIDE_INT hi;
5549 rtx ciw;
5550
5551 lo = 0;
5552
5553 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5554 if (cum.reg_args_type[i])
5555 lo |= (1 << (7 - i));
5556
5557 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5558 lo |= 7;
5559 else
5560 lo |= cum.num_reg_words;
5561
5562 #if HOST_BITS_PER_WIDE_INT == 32
5563 hi = (cum.num_args << 20) | cum.num_arg_words;
5564 #else
5565 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5566 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5567 hi = 0;
5568 #endif
5569 ciw = immed_double_const (lo, hi, DImode);
5570
5571 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5572 UNSPEC_UMK_LOAD_CIW);
5573 }
5574
5575 size = ALPHA_ARG_SIZE (mode, type, named);
5576 num_args = cum.num_reg_words;
5577 if (cum.force_stack
5578 || cum.num_reg_words + size > 6
5579 || targetm.calls.must_pass_in_stack (mode, type))
5580 return NULL_RTX;
5581 else if (type && TYPE_MODE (type) == BLKmode)
5582 {
5583 rtx reg1, reg2;
5584
5585 reg1 = gen_rtx_REG (DImode, num_args + 16);
5586 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5587
5588 /* The argument fits in two registers. Note that we still need to
5589 reserve a register for empty structures. */
5590 if (size == 0)
5591 return NULL_RTX;
5592 else if (size == 1)
5593 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5594 else
5595 {
5596 reg2 = gen_rtx_REG (DImode, num_args + 17);
5597 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5598 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5599 }
5600 }
5601 }
5602 #elif TARGET_ABI_OSF
5603 {
5604 if (cum >= 6)
5605 return NULL_RTX;
5606 num_args = cum;
5607
5608 /* VOID is passed as a special flag for "last argument". */
5609 if (type == void_type_node)
5610 basereg = 16;
5611 else if (targetm.calls.must_pass_in_stack (mode, type))
5612 return NULL_RTX;
5613 }
5614 #else
5615 #error Unhandled ABI
5616 #endif
5617
5618 return gen_rtx_REG (mode, num_args + basereg);
5619 }
5620
5621 static int
5622 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5623 enum machine_mode mode ATTRIBUTE_UNUSED,
5624 tree type ATTRIBUTE_UNUSED,
5625 bool named ATTRIBUTE_UNUSED)
5626 {
5627 int words = 0;
5628
5629 #if TARGET_ABI_OPEN_VMS
5630 if (cum->num_args < 6
5631 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5632 words = 6 - cum->num_args;
5633 #elif TARGET_ABI_UNICOSMK
5634 /* Never any split arguments. */
5635 #elif TARGET_ABI_OSF
5636 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5637 words = 6 - *cum;
5638 #else
5639 #error Unhandled ABI
5640 #endif
5641
5642 return words * UNITS_PER_WORD;
5643 }
5644
5645
5646 /* Return true if TYPE must be returned in memory, instead of in registers. */
5647
5648 static bool
5649 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5650 {
5651 enum machine_mode mode = VOIDmode;
5652 int size;
5653
5654 if (type)
5655 {
5656 mode = TYPE_MODE (type);
5657
5658 /* All aggregates are returned in memory. */
5659 if (AGGREGATE_TYPE_P (type))
5660 return true;
5661 }
5662
5663 size = GET_MODE_SIZE (mode);
5664 switch (GET_MODE_CLASS (mode))
5665 {
5666 case MODE_VECTOR_FLOAT:
5667 /* Pass all float vectors in memory, like an aggregate. */
5668 return true;
5669
5670 case MODE_COMPLEX_FLOAT:
5671 /* We judge complex floats on the size of their element,
5672 not the size of the whole type. */
5673 size = GET_MODE_UNIT_SIZE (mode);
5674 break;
5675
5676 case MODE_INT:
5677 case MODE_FLOAT:
5678 case MODE_COMPLEX_INT:
5679 case MODE_VECTOR_INT:
5680 break;
5681
5682 default:
5683 /* ??? We get called on all sorts of random stuff from
5684 aggregate_value_p. We must return something, but it's not
5685 clear what's safe to return. Pretend it's a struct I
5686 guess. */
5687 return true;
5688 }
5689
5690 /* Otherwise types must fit in one register. */
5691 return size > UNITS_PER_WORD;
5692 }
5693
5694 /* Return true if TYPE should be passed by invisible reference. */
5695
5696 static bool
5697 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5698 enum machine_mode mode,
5699 tree type ATTRIBUTE_UNUSED,
5700 bool named ATTRIBUTE_UNUSED)
5701 {
5702 return mode == TFmode || mode == TCmode;
5703 }
5704
5705 /* Define how to find the value returned by a function. VALTYPE is the
5706 data type of the value (as a tree). If the precise function being
5707 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5708 MODE is set instead of VALTYPE for libcalls.
5709
5710 On Alpha the value is found in $0 for integer functions and
5711 $f0 for floating-point functions. */
5712
5713 rtx
5714 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5715 enum machine_mode mode)
5716 {
5717 unsigned int regnum, dummy;
5718 enum mode_class class;
5719
5720 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5721
5722 if (valtype)
5723 mode = TYPE_MODE (valtype);
5724
5725 class = GET_MODE_CLASS (mode);
5726 switch (class)
5727 {
5728 case MODE_INT:
5729 PROMOTE_MODE (mode, dummy, valtype);
5730 /* FALLTHRU */
5731
5732 case MODE_COMPLEX_INT:
5733 case MODE_VECTOR_INT:
5734 regnum = 0;
5735 break;
5736
5737 case MODE_FLOAT:
5738 regnum = 32;
5739 break;
5740
5741 case MODE_COMPLEX_FLOAT:
5742 {
5743 enum machine_mode cmode = GET_MODE_INNER (mode);
5744
5745 return gen_rtx_PARALLEL
5746 (VOIDmode,
5747 gen_rtvec (2,
5748 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5749 const0_rtx),
5750 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5751 GEN_INT (GET_MODE_SIZE (cmode)))));
5752 }
5753
5754 default:
5755 gcc_unreachable ();
5756 }
5757
5758 return gen_rtx_REG (mode, regnum);
5759 }
5760
5761 /* TCmode complex values are passed by invisible reference. We
5762 should not split these values. */
5763
5764 static bool
5765 alpha_split_complex_arg (tree type)
5766 {
5767 return TYPE_MODE (type) != TCmode;
5768 }
5769
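/* In C terms, the record built below for OSF targets is roughly

       struct __va_list_tag {
         void *__base;       -- pointer into the argument save area
         int   __offset;     -- byte offset of the next argument
         int   (unnamed);    -- dummy field to quiet alignment warnings
       };

   while VMS and Unicos/Mk simply use a plain pointer.  */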
5770 static tree
5771 alpha_build_builtin_va_list (void)
5772 {
5773 tree base, ofs, space, record, type_decl;
5774
5775 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5776 return ptr_type_node;
5777
5778 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5779 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5780 TREE_CHAIN (record) = type_decl;
5781 TYPE_NAME (record) = type_decl;
5782
5783 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5784
5785 /* Dummy field to prevent alignment warnings. */
5786 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5787 DECL_FIELD_CONTEXT (space) = record;
5788 DECL_ARTIFICIAL (space) = 1;
5789 DECL_IGNORED_P (space) = 1;
5790
5791 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5792 integer_type_node);
5793 DECL_FIELD_CONTEXT (ofs) = record;
5794 TREE_CHAIN (ofs) = space;
5795
5796 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5797 ptr_type_node);
5798 DECL_FIELD_CONTEXT (base) = record;
5799 TREE_CHAIN (base) = ofs;
5800
5801 TYPE_FIELDS (record) = base;
5802 layout_type (record);
5803
5804 va_list_gpr_counter_field = ofs;
5805 return record;
5806 }
5807
5808 #if TARGET_ABI_OSF
5809 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5810 and constant additions. */
5811
5812 static tree
5813 va_list_skip_additions (tree lhs)
5814 {
5815 tree rhs, stmt;
5816
5817 if (TREE_CODE (lhs) != SSA_NAME)
5818 return lhs;
5819
5820 for (;;)
5821 {
5822 stmt = SSA_NAME_DEF_STMT (lhs);
5823
5824 if (TREE_CODE (stmt) == PHI_NODE)
5825 return stmt;
5826
5827 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5828 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5829 return lhs;
5830
5831 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5832 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5833 rhs = TREE_OPERAND (rhs, 0);
5834
5835 if ((TREE_CODE (rhs) != NOP_EXPR
5836 && TREE_CODE (rhs) != CONVERT_EXPR
5837 && (TREE_CODE (rhs) != PLUS_EXPR
5838 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5839 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5840 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5841 return rhs;
5842
5843 lhs = TREE_OPERAND (rhs, 0);
5844 }
5845 }
5846
5847 /* Check if LHS = RHS statement is
5848 LHS = *(ap.__base + ap.__offset + cst)
5849 or
5850 LHS = *(ap.__base
5851 + ((ap.__offset + cst <= 47)
5852 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5853 If the former, indicate that GPR registers are needed,
5854 if the latter, indicate that FPR registers are needed.
5855
5856 Also look for LHS = (*ptr).field, where ptr is one of the forms
5857 listed above.
5858
5859 On alpha, cfun->va_list_gpr_size is used as the size of the needed
5860 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5861 registers are needed and bit 1 set if FPR registers are needed.
5862 Return true if va_list references should not be scanned for the
5863 current statement. */
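/* As an illustration (simplified, hypothetical gimple): an integer use such
   as "x = va_arg (ap, long)" appears as roughly

       t = *(ap.__base + (long) ap.__offset);

   which matches the first (GPR) form above, whereas a floating-point
   argument goes through the "offset <= 47 ? offset - 48 : offset"
   adjustment and therefore matches the second (FPR) form.  */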
5864
5865 static bool
5866 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5867 {
5868 tree base, offset, arg1, arg2;
5869 int offset_arg = 1;
5870
5871 while (handled_component_p (rhs))
5872 rhs = TREE_OPERAND (rhs, 0);
5873 if (TREE_CODE (rhs) != INDIRECT_REF
5874 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5875 return false;
5876
5877 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5878 if (lhs == NULL_TREE
5879 || TREE_CODE (lhs) != PLUS_EXPR)
5880 return false;
5881
5882 base = TREE_OPERAND (lhs, 0);
5883 if (TREE_CODE (base) == SSA_NAME)
5884 base = va_list_skip_additions (base);
5885
5886 if (TREE_CODE (base) != COMPONENT_REF
5887 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5888 {
5889 base = TREE_OPERAND (lhs, 0);
5890 if (TREE_CODE (base) == SSA_NAME)
5891 base = va_list_skip_additions (base);
5892
5893 if (TREE_CODE (base) != COMPONENT_REF
5894 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5895 return false;
5896
5897 offset_arg = 0;
5898 }
5899
5900 base = get_base_address (base);
5901 if (TREE_CODE (base) != VAR_DECL
5902 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5903 return false;
5904
5905 offset = TREE_OPERAND (lhs, offset_arg);
5906 if (TREE_CODE (offset) == SSA_NAME)
5907 offset = va_list_skip_additions (offset);
5908
5909 if (TREE_CODE (offset) == PHI_NODE)
5910 {
5911 HOST_WIDE_INT sub;
5912
5913 if (PHI_NUM_ARGS (offset) != 2)
5914 goto escapes;
5915
5916 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5917 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5918 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5919 {
5920 tree tem = arg1;
5921 arg1 = arg2;
5922 arg2 = tem;
5923
5924 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5925 goto escapes;
5926 }
5927 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5928 goto escapes;
5929
5930 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5931 if (TREE_CODE (arg2) == MINUS_EXPR)
5932 sub = -sub;
5933 if (sub < -48 || sub > -32)
5934 goto escapes;
5935
5936 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5937 if (arg1 != arg2)
5938 goto escapes;
5939
5940 if (TREE_CODE (arg1) == SSA_NAME)
5941 arg1 = va_list_skip_additions (arg1);
5942
5943 if (TREE_CODE (arg1) != COMPONENT_REF
5944 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5945 || get_base_address (arg1) != base)
5946 goto escapes;
5947
5948 /* Need floating point regs. */
5949 cfun->va_list_fpr_size |= 2;
5950 }
5951 else if (TREE_CODE (offset) != COMPONENT_REF
5952 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5953 || get_base_address (offset) != base)
5954 goto escapes;
5955 else
5956 /* Need general regs. */
5957 cfun->va_list_fpr_size |= 1;
5958 return false;
5959
5960 escapes:
5961 si->va_list_escapes = true;
5962 return false;
5963 }
5964 #endif
5965
5966 /* Perform any actions needed for a function that is receiving a
5967 variable number of arguments. */
5968
5969 static void
5970 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5971 tree type, int *pretend_size, int no_rtl)
5972 {
5973 CUMULATIVE_ARGS cum = *pcum;
5974
5975 /* Skip the current argument. */
5976 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5977
5978 #if TARGET_ABI_UNICOSMK
5979 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5980 arguments on the stack. Unfortunately, it doesn't always store the first
5981 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5982 with stdargs as we always have at least one named argument there. */
5983 if (cum.num_reg_words < 6)
5984 {
5985 if (!no_rtl)
5986 {
5987 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5988 emit_insn (gen_arg_home_umk ());
5989 }
5990 *pretend_size = 0;
5991 }
5992 #elif TARGET_ABI_OPEN_VMS
5993 /* For VMS, we allocate space for all 6 arg registers plus a count.
5994
5995 However, if NO registers need to be saved, don't allocate any space.
5996 This is not only because we won't need the space, but because AP
5997 includes the current_pretend_args_size and we don't want to mess up
5998 any ap-relative addresses already made. */
5999 if (cum.num_args < 6)
6000 {
6001 if (!no_rtl)
6002 {
6003 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6004 emit_insn (gen_arg_home ());
6005 }
6006 *pretend_size = 7 * UNITS_PER_WORD;
6007 }
6008 #else
6009 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6010 only push those that are remaining. However, if NO registers need to
6011 be saved, don't allocate any space. This is not only because we won't
6012 need the space, but because AP includes the current_pretend_args_size
6013 and we don't want to mess up any ap-relative addresses already made.
6014
6015 If we are not to use the floating-point registers, save the integer
6016 registers where we would put the floating-point registers. This is
6017 not the most efficient way to implement varargs with just one register
6018 class, but it isn't worth doing anything more efficient in this rare
6019 case. */
6020 if (cum >= 6)
6021 return;
6022
6023 if (!no_rtl)
6024 {
6025 int count;
6026 alias_set_type set = get_varargs_alias_set ();
6027 rtx tmp;
6028
6029 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6030 if (count > 6 - cum)
6031 count = 6 - cum;
6032
6033 /* Detect whether integer registers or floating-point registers
6034 are needed by the detected va_arg statements. See above for
6035 how these values are computed. Note that the "escape" value
6036 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6037 these bits set. */
6038 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6039
6040 if (cfun->va_list_fpr_size & 1)
6041 {
6042 tmp = gen_rtx_MEM (BLKmode,
6043 plus_constant (virtual_incoming_args_rtx,
6044 (cum + 6) * UNITS_PER_WORD));
6045 MEM_NOTRAP_P (tmp) = 1;
6046 set_mem_alias_set (tmp, set);
6047 move_block_from_reg (16 + cum, tmp, count);
6048 }
6049
6050 if (cfun->va_list_fpr_size & 2)
6051 {
6052 tmp = gen_rtx_MEM (BLKmode,
6053 plus_constant (virtual_incoming_args_rtx,
6054 cum * UNITS_PER_WORD));
6055 MEM_NOTRAP_P (tmp) = 1;
6056 set_mem_alias_set (tmp, set);
6057 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6058 }
6059 }
6060 *pretend_size = 12 * UNITS_PER_WORD;
6061 #endif
6062 }
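
/* A minimal host-side sketch (illustration only, not used by the
   compiler) of the OSF/1 save-area layout established above, assuming
   8-byte words: with CUM argument words already named, the remaining
   FP argument registers are spilled at AP + CUM*8 and the remaining
   integer argument registers at AP + (CUM+6)*8, twelve words in all.  */

static void
example_osf_varargs_layout (int cum, long *fpr_block_ofs,
			    long *gpr_block_ofs, int *regs_left)
{
  *fpr_block_ofs = cum * 8;		/* $f16+CUM .. $f21 are spilled here.  */
  *gpr_block_ofs = (cum + 6) * 8;	/* $16+CUM .. $21 are spilled here.  */
  *regs_left = cum < 6 ? 6 - cum : 0;	/* registers still to spill.  */
}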
6063
6064 void
6065 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6066 {
6067 HOST_WIDE_INT offset;
6068 tree t, offset_field, base_field;
6069
6070 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6071 return;
6072
6073 if (TARGET_ABI_UNICOSMK)
6074 std_expand_builtin_va_start (valist, nextarg);
6075
6076 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6077 up by 48, storing fp arg registers in the first 48 bytes, and the
6078 integer arg registers in the next 48 bytes. This is only done,
6079 however, if any integer registers need to be stored.
6080
6081 If no integer registers need be stored, then we must subtract 48
6082 in order to account for the integer arg registers which are counted
6083 in argsize above, but which are not actually stored on the stack.
6084 Must further be careful here about structures straddling the last
6085 integer argument register; that futzes with pretend_args_size,
6086 which changes the meaning of AP. */
6087
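/* For example, with two named integer arguments on OSF/1 the code below
   sets __base to AP + 48 and __offset to 16.  An integer va_arg then
   reads from __base + 16 = AP + 64, exactly where $18 was spilled by
   TARGET_SETUP_INCOMING_VARARGS above, while a floating-point va_arg
   subtracts 48 and reads from AP + 16, where $f18 was spilled.  With
   six or more named arguments nothing was spilled, hence the negative
   bias instead.  (Worked example for illustration only.)  */
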
6088 if (NUM_ARGS < 6)
6089 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6090 else
6091 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6092
6093 if (TARGET_ABI_OPEN_VMS)
6094 {
6095 nextarg = plus_constant (nextarg, offset);
6096 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6097 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6098 make_tree (ptr_type_node, nextarg));
6099 TREE_SIDE_EFFECTS (t) = 1;
6100
6101 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6102 }
6103 else
6104 {
6105 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6106 offset_field = TREE_CHAIN (base_field);
6107
6108 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6109 valist, base_field, NULL_TREE);
6110 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6111 valist, offset_field, NULL_TREE);
6112
6113 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6114 t = build2 (PLUS_EXPR, ptr_type_node, t,
6115 build_int_cst (NULL_TREE, offset));
6116 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6117 TREE_SIDE_EFFECTS (t) = 1;
6118 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6119
6120 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6121 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6122 offset_field, t);
6123 TREE_SIDE_EFFECTS (t) = 1;
6124 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6125 }
6126 }
6127
6128 static tree
6129 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6130 {
6131 tree type_size, ptr_type, addend, t, addr, internal_post;
6132
6133 /* If the type could not be passed in registers, skip the block
6134 reserved for the registers. */
6135 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6136 {
6137 t = build_int_cst (TREE_TYPE (offset), 6*8);
6138 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6139 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6140 gimplify_and_add (t, pre_p);
6141 }
6142
6143 addend = offset;
6144 ptr_type = build_pointer_type (type);
6145
6146 if (TREE_CODE (type) == COMPLEX_TYPE)
6147 {
6148 tree real_part, imag_part, real_temp;
6149
6150 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6151 offset, pre_p);
6152
6153 /* Copy the value into a new temporary, lest the formal temporary
6154 be reused out from under us. */
6155 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6156
6157 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6158 offset, pre_p);
6159
6160 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6161 }
6162 else if (TREE_CODE (type) == REAL_TYPE)
6163 {
6164 tree fpaddend, cond, fourtyeight;
6165
6166 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6167 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6168 addend, fourtyeight);
6169 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6170 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6171 fpaddend, addend);
6172 }
6173
6174 /* Build the final address and force that value into a temporary. */
6175 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6176 fold_convert (ptr_type, addend));
6177 internal_post = NULL;
6178 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6179 append_to_statement_list (internal_post, pre_p);
6180
6181 /* Update the offset field. */
6182 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6183 if (type_size == NULL || TREE_OVERFLOW (type_size))
6184 t = size_zero_node;
6185 else
6186 {
6187 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6188 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6189 t = size_binop (MULT_EXPR, t, size_int (8));
6190 }
6191 t = fold_convert (TREE_TYPE (offset), t);
6192 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6193 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6194 gimplify_and_add (t, pre_p);
6195
6196 return build_va_arg_indirect_ref (addr);
6197 }
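
/* Host-side sketch of the __offset update performed above (illustration
   only): the argument size is rounded up to a whole number of 8-byte
   slots before being added to the offset.  */

static unsigned long
example_va_slot_size (unsigned long type_size)
{
  return (type_size + 7) / 8 * 8;	/* e.g. 12 -> 16, 8 -> 8.  */
}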
6198
6199 static tree
6200 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6201 {
6202 tree offset_field, base_field, offset, base, t, r;
6203 bool indirect;
6204
6205 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6206 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6207
6208 base_field = TYPE_FIELDS (va_list_type_node);
6209 offset_field = TREE_CHAIN (base_field);
6210 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6211 valist, base_field, NULL_TREE);
6212 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6213 valist, offset_field, NULL_TREE);
6214
6215 /* Pull the fields of the structure out into temporaries. Since we never
6216 modify the base field, we can use a formal temporary. Sign-extend the
6217 offset field so that it's the proper width for pointer arithmetic. */
6218 base = get_formal_tmp_var (base_field, pre_p);
6219
6220 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6221 offset = get_initialized_tmp_var (t, pre_p, NULL);
6222
6223 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6224 if (indirect)
6225 type = build_pointer_type (type);
6226
6227 /* Find the value. Note that this will be a stable indirection, or
6228 a composite of stable indirections in the case of complex. */
6229 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6230
6231 /* Stuff the offset temporary back into its field. */
6232 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6233 fold_convert (TREE_TYPE (offset_field), offset));
6234 gimplify_and_add (t, pre_p);
6235
6236 if (indirect)
6237 r = build_va_arg_indirect_ref (r);
6238
6239 return r;
6240 }
6241 \f
6242 /* Builtins. */
6243
6244 enum alpha_builtin
6245 {
6246 ALPHA_BUILTIN_CMPBGE,
6247 ALPHA_BUILTIN_EXTBL,
6248 ALPHA_BUILTIN_EXTWL,
6249 ALPHA_BUILTIN_EXTLL,
6250 ALPHA_BUILTIN_EXTQL,
6251 ALPHA_BUILTIN_EXTWH,
6252 ALPHA_BUILTIN_EXTLH,
6253 ALPHA_BUILTIN_EXTQH,
6254 ALPHA_BUILTIN_INSBL,
6255 ALPHA_BUILTIN_INSWL,
6256 ALPHA_BUILTIN_INSLL,
6257 ALPHA_BUILTIN_INSQL,
6258 ALPHA_BUILTIN_INSWH,
6259 ALPHA_BUILTIN_INSLH,
6260 ALPHA_BUILTIN_INSQH,
6261 ALPHA_BUILTIN_MSKBL,
6262 ALPHA_BUILTIN_MSKWL,
6263 ALPHA_BUILTIN_MSKLL,
6264 ALPHA_BUILTIN_MSKQL,
6265 ALPHA_BUILTIN_MSKWH,
6266 ALPHA_BUILTIN_MSKLH,
6267 ALPHA_BUILTIN_MSKQH,
6268 ALPHA_BUILTIN_UMULH,
6269 ALPHA_BUILTIN_ZAP,
6270 ALPHA_BUILTIN_ZAPNOT,
6271 ALPHA_BUILTIN_AMASK,
6272 ALPHA_BUILTIN_IMPLVER,
6273 ALPHA_BUILTIN_RPCC,
6274 ALPHA_BUILTIN_THREAD_POINTER,
6275 ALPHA_BUILTIN_SET_THREAD_POINTER,
6276
6277 /* TARGET_MAX */
6278 ALPHA_BUILTIN_MINUB8,
6279 ALPHA_BUILTIN_MINSB8,
6280 ALPHA_BUILTIN_MINUW4,
6281 ALPHA_BUILTIN_MINSW4,
6282 ALPHA_BUILTIN_MAXUB8,
6283 ALPHA_BUILTIN_MAXSB8,
6284 ALPHA_BUILTIN_MAXUW4,
6285 ALPHA_BUILTIN_MAXSW4,
6286 ALPHA_BUILTIN_PERR,
6287 ALPHA_BUILTIN_PKLB,
6288 ALPHA_BUILTIN_PKWB,
6289 ALPHA_BUILTIN_UNPKBL,
6290 ALPHA_BUILTIN_UNPKBW,
6291
6292 /* TARGET_CIX */
6293 ALPHA_BUILTIN_CTTZ,
6294 ALPHA_BUILTIN_CTLZ,
6295 ALPHA_BUILTIN_CTPOP,
6296
6297 ALPHA_BUILTIN_max
6298 };
6299
6300 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6301 CODE_FOR_builtin_cmpbge,
6302 CODE_FOR_builtin_extbl,
6303 CODE_FOR_builtin_extwl,
6304 CODE_FOR_builtin_extll,
6305 CODE_FOR_builtin_extql,
6306 CODE_FOR_builtin_extwh,
6307 CODE_FOR_builtin_extlh,
6308 CODE_FOR_builtin_extqh,
6309 CODE_FOR_builtin_insbl,
6310 CODE_FOR_builtin_inswl,
6311 CODE_FOR_builtin_insll,
6312 CODE_FOR_builtin_insql,
6313 CODE_FOR_builtin_inswh,
6314 CODE_FOR_builtin_inslh,
6315 CODE_FOR_builtin_insqh,
6316 CODE_FOR_builtin_mskbl,
6317 CODE_FOR_builtin_mskwl,
6318 CODE_FOR_builtin_mskll,
6319 CODE_FOR_builtin_mskql,
6320 CODE_FOR_builtin_mskwh,
6321 CODE_FOR_builtin_msklh,
6322 CODE_FOR_builtin_mskqh,
6323 CODE_FOR_umuldi3_highpart,
6324 CODE_FOR_builtin_zap,
6325 CODE_FOR_builtin_zapnot,
6326 CODE_FOR_builtin_amask,
6327 CODE_FOR_builtin_implver,
6328 CODE_FOR_builtin_rpcc,
6329 CODE_FOR_load_tp,
6330 CODE_FOR_set_tp,
6331
6332 /* TARGET_MAX */
6333 CODE_FOR_builtin_minub8,
6334 CODE_FOR_builtin_minsb8,
6335 CODE_FOR_builtin_minuw4,
6336 CODE_FOR_builtin_minsw4,
6337 CODE_FOR_builtin_maxub8,
6338 CODE_FOR_builtin_maxsb8,
6339 CODE_FOR_builtin_maxuw4,
6340 CODE_FOR_builtin_maxsw4,
6341 CODE_FOR_builtin_perr,
6342 CODE_FOR_builtin_pklb,
6343 CODE_FOR_builtin_pkwb,
6344 CODE_FOR_builtin_unpkbl,
6345 CODE_FOR_builtin_unpkbw,
6346
6347 /* TARGET_CIX */
6348 CODE_FOR_ctzdi2,
6349 CODE_FOR_clzdi2,
6350 CODE_FOR_popcountdi2
6351 };
6352
6353 struct alpha_builtin_def
6354 {
6355 const char *name;
6356 enum alpha_builtin code;
6357 unsigned int target_mask;
6358 bool is_const;
6359 };
6360
6361 static struct alpha_builtin_def const zero_arg_builtins[] = {
6362 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6363 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6364 };
6365
6366 static struct alpha_builtin_def const one_arg_builtins[] = {
6367 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6368 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6369 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6370 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6371 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6372 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6373 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6374 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6375 };
6376
6377 static struct alpha_builtin_def const two_arg_builtins[] = {
6378 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6379 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6380 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6381 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6382 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6383 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6384 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6385 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6386 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6387 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6388 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6389 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6390 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6391 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6392 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6393 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6394 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6395 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6396 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6397 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6398 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6399 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6400 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6401 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6402 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6403 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6404 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6405 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6406 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6407 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6408 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6409 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6410 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6411 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6412 };
6413
6414 static GTY(()) tree alpha_v8qi_u;
6415 static GTY(()) tree alpha_v8qi_s;
6416 static GTY(()) tree alpha_v4hi_u;
6417 static GTY(()) tree alpha_v4hi_s;
6418
6419 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6420 functions pointed to by P, with function type FTYPE. */
6421
6422 static void
6423 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6424 tree ftype)
6425 {
6426 tree decl;
6427 size_t i;
6428
6429 for (i = 0; i < count; ++i, ++p)
6430 if ((target_flags & p->target_mask) == p->target_mask)
6431 {
6432 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6433 NULL, NULL);
6434 if (p->is_const)
6435 TREE_READONLY (decl) = 1;
6436 TREE_NOTHROW (decl) = 1;
6437 }
6438 }
6439
6440
6441 static void
6442 alpha_init_builtins (void)
6443 {
6444 tree dimode_integer_type_node;
6445 tree ftype, decl;
6446
6447 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6448
6449 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6450 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6451 ftype);
6452
6453 ftype = build_function_type_list (dimode_integer_type_node,
6454 dimode_integer_type_node, NULL_TREE);
6455 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6456 ftype);
6457
6458 ftype = build_function_type_list (dimode_integer_type_node,
6459 dimode_integer_type_node,
6460 dimode_integer_type_node, NULL_TREE);
6461 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6462 ftype);
6463
6464 ftype = build_function_type (ptr_type_node, void_list_node);
6465 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6466 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6467 NULL, NULL);
6468 TREE_NOTHROW (decl) = 1;
6469
6470 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6471 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6472 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6473 NULL, NULL);
6474 TREE_NOTHROW (decl) = 1;
6475
6476 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6477 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6478 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6479 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6480 }
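
/* The declarations above surface to users as __builtin_alpha_* functions.
   A usage sketch (illustration only; compiles only for an Alpha target):

	long low_two_bytes (long x)
	{
	  return __builtin_alpha_zapnot (x, 0x03);
	}

   keeps bytes 0 and 1 of X and clears the rest, per the ZAPNOT fold
   further below.  */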
6481
6482 /* Expand an expression EXP that calls a built-in function,
6483 with result going to TARGET if that's convenient
6484 (and in mode MODE if that's convenient).
6485 SUBTARGET may be used as the target for computing one of EXP's operands.
6486 IGNORE is nonzero if the value is to be ignored. */
6487
6488 static rtx
6489 alpha_expand_builtin (tree exp, rtx target,
6490 rtx subtarget ATTRIBUTE_UNUSED,
6491 enum machine_mode mode ATTRIBUTE_UNUSED,
6492 int ignore ATTRIBUTE_UNUSED)
6493 {
6494 #define MAX_ARGS 2
6495
6496 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6497 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6498 tree arg;
6499 call_expr_arg_iterator iter;
6500 enum insn_code icode;
6501 rtx op[MAX_ARGS], pat;
6502 int arity;
6503 bool nonvoid;
6504
6505 if (fcode >= ALPHA_BUILTIN_max)
6506 internal_error ("bad builtin fcode");
6507 icode = code_for_builtin[fcode];
6508 if (icode == 0)
6509 internal_error ("bad builtin fcode");
6510
6511 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6512
6513 arity = 0;
6514 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6515 {
6516 const struct insn_operand_data *insn_op;
6517
6518 if (arg == error_mark_node)
6519 return NULL_RTX;
6520 if (arity >= MAX_ARGS)
6521 return NULL_RTX;
6522
6523 insn_op = &insn_data[icode].operand[arity + nonvoid];
6524
6525 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6526
6527 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6528 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6529 arity++;
6530 }
6531
6532 if (nonvoid)
6533 {
6534 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6535 if (!target
6536 || GET_MODE (target) != tmode
6537 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6538 target = gen_reg_rtx (tmode);
6539 }
6540
6541 switch (arity)
6542 {
6543 case 0:
6544 pat = GEN_FCN (icode) (target);
6545 break;
6546 case 1:
6547 if (nonvoid)
6548 pat = GEN_FCN (icode) (target, op[0]);
6549 else
6550 pat = GEN_FCN (icode) (op[0]);
6551 break;
6552 case 2:
6553 pat = GEN_FCN (icode) (target, op[0], op[1]);
6554 break;
6555 default:
6556 gcc_unreachable ();
6557 }
6558 if (!pat)
6559 return NULL_RTX;
6560 emit_insn (pat);
6561
6562 if (nonvoid)
6563 return target;
6564 else
6565 return const0_rtx;
6566 }
6567
6568
6569 /* Several bits below assume HWI >= 64 bits. This should be enforced
6570 by config.gcc. */
6571 #if HOST_BITS_PER_WIDE_INT < 64
6572 # error "HOST_WIDE_INT too small"
6573 #endif
6574
6575 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6576 with an 8-bit output vector. OPINT contains the integer operands; bit N
6577 of OP_CONST is set if OPINT[N] is valid. */
6578
6579 static tree
6580 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6581 {
6582 if (op_const == 3)
6583 {
6584 int i, val;
6585 for (i = 0, val = 0; i < 8; ++i)
6586 {
6587 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6588 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6589 if (c0 >= c1)
6590 val |= 1 << i;
6591 }
6592 return build_int_cst (long_integer_type_node, val);
6593 }
6594 else if (op_const == 2 && opint[1] == 0)
6595 return build_int_cst (long_integer_type_node, 0xff);
6596 return NULL;
6597 }
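
/* Host-side model of the CMPBGE semantics folded above (illustration
   only, assumes a 64-bit unsigned long long): bit I of the result is
   set when byte I of A is unsigned-greater-or-equal to byte I of B,
   which is also why cmpbge (x, 0) is always 0xff.  */

static int
example_cmpbge (unsigned long long a, unsigned long long b)
{
  int i, val = 0;
  for (i = 0; i < 8; ++i)
    if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
      val |= 1 << i;
  return val;
}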
6598
6599 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6600 specialized form of an AND operation. Other byte manipulation instructions
6601 are defined in terms of this instruction, so this is also used as a
6602 subroutine for other builtins.
6603
6604 OP contains the tree operands; OPINT contains the extracted integer values.
6605 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6606 OPINT is to be considered. */
6607
6608 static tree
6609 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6610 long op_const)
6611 {
6612 if (op_const & 2)
6613 {
6614 unsigned HOST_WIDE_INT mask = 0;
6615 int i;
6616
6617 for (i = 0; i < 8; ++i)
6618 if ((opint[1] >> i) & 1)
6619 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6620
6621 if (op_const & 1)
6622 return build_int_cst (long_integer_type_node, opint[0] & mask);
6623
6624 if (op)
6625 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6626 build_int_cst (long_integer_type_node, mask));
6627 }
6628 else if ((op_const & 1) && opint[0] == 0)
6629 return build_int_cst (long_integer_type_node, 0);
6630 return NULL;
6631 }
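
/* Host-side model of ZAPNOT (illustration only): byte I of X survives
   exactly when bit I of MASK is set; ZAP is the same operation with
   MASK complemented, which is how it is folded further below.  */

static unsigned long long
example_zapnot (unsigned long long x, int mask)
{
  unsigned long long keep = 0;
  int i;
  for (i = 0; i < 8; ++i)
    if ((mask >> i) & 1)
      keep |= 0xffULL << (i * 8);
  return x & keep;
}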
6632
6633 /* Fold the builtins for the EXT family of instructions. */
6634
6635 static tree
6636 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6637 long op_const, unsigned HOST_WIDE_INT bytemask,
6638 bool is_high)
6639 {
6640 long zap_const = 2;
6641 tree *zap_op = NULL;
6642
6643 if (op_const & 2)
6644 {
6645 unsigned HOST_WIDE_INT loc;
6646
6647 loc = opint[1] & 7;
6648 if (BYTES_BIG_ENDIAN)
6649 loc ^= 7;
6650 loc *= 8;
6651
6652 if (loc != 0)
6653 {
6654 if (op_const & 1)
6655 {
6656 unsigned HOST_WIDE_INT temp = opint[0];
6657 if (is_high)
6658 temp <<= loc;
6659 else
6660 temp >>= loc;
6661 opint[0] = temp;
6662 zap_const = 3;
6663 }
6664 }
6665 else
6666 zap_op = op;
6667 }
6668
6669 opint[1] = bytemask;
6670 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6671 }
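
/* Host-side model of the "low" extracts handled above (illustration
   only): EXTBL returns the byte of X addressed by ADDR & 7, zero
   extended; EXTWL, EXTLL and EXTQL do the same for 2, 4 and 8 bytes
   via the wider byte masks passed in from alpha_fold_builtin.  */

static unsigned long long
example_extbl (unsigned long long x, unsigned long long addr)
{
  return (x >> ((addr & 7) * 8)) & 0xff;
}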
6672
6673 /* Fold the builtins for the INS family of instructions. */
6674
6675 static tree
6676 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6677 long op_const, unsigned HOST_WIDE_INT bytemask,
6678 bool is_high)
6679 {
6680 if ((op_const & 1) && opint[0] == 0)
6681 return build_int_cst (long_integer_type_node, 0);
6682
6683 if (op_const & 2)
6684 {
6685 unsigned HOST_WIDE_INT temp, loc, byteloc;
6686 tree *zap_op = NULL;
6687
6688 loc = opint[1] & 7;
6689 if (BYTES_BIG_ENDIAN)
6690 loc ^= 7;
6691 bytemask <<= loc;
6692
6693 temp = opint[0];
6694 if (is_high)
6695 {
6696 byteloc = (64 - (loc * 8)) & 0x3f;
6697 if (byteloc == 0)
6698 zap_op = op;
6699 else
6700 temp >>= byteloc;
6701 bytemask >>= 8;
6702 }
6703 else
6704 {
6705 byteloc = loc * 8;
6706 if (byteloc == 0)
6707 zap_op = op;
6708 else
6709 temp <<= byteloc;
6710 }
6711
6712 opint[0] = temp;
6713 opint[1] = bytemask;
6714 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6715 }
6716
6717 return NULL;
6718 }
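
/* Host-side model of the "low" inserts handled above (illustration
   only): INSBL places the low byte of X at byte position ADDR & 7 and
   zeroes everything else; the wider INS forms shift correspondingly
   wider byte masks.  */

static unsigned long long
example_insbl (unsigned long long x, unsigned long long addr)
{
  return (x & 0xff) << ((addr & 7) * 8);
}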
6719
6720 static tree
6721 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6722 long op_const, unsigned HOST_WIDE_INT bytemask,
6723 bool is_high)
6724 {
6725 if (op_const & 2)
6726 {
6727 unsigned HOST_WIDE_INT loc;
6728
6729 loc = opint[1] & 7;
6730 if (BYTES_BIG_ENDIAN)
6731 loc ^= 7;
6732 bytemask <<= loc;
6733
6734 if (is_high)
6735 bytemask >>= 8;
6736
6737 opint[1] = bytemask ^ 0xff;
6738 }
6739
6740 return alpha_fold_builtin_zapnot (op, opint, op_const);
6741 }
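
/* Host-side model of the "low" masks handled above (illustration only):
   MSKBL clears the byte of X addressed by ADDR & 7, which is what the
   bytemask ^ 0xff complement above feeds into the ZAPNOT fold.  */

static unsigned long long
example_mskbl (unsigned long long x, unsigned long long addr)
{
  return x & ~(0xffULL << ((addr & 7) * 8));
}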
6742
6743 static tree
6744 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6745 {
6746 switch (op_const)
6747 {
6748 case 3:
6749 {
6750 unsigned HOST_WIDE_INT l;
6751 HOST_WIDE_INT h;
6752
6753 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6754
6755 #if HOST_BITS_PER_WIDE_INT > 64
6756 # error fixme
6757 #endif
6758
6759 return build_int_cst (long_integer_type_node, h);
6760 }
6761
6762 case 1:
6763 opint[1] = opint[0];
6764 /* FALLTHRU */
6765 case 2:
6766 /* Note that (X*1) >> 64 == 0. */
6767 if (opint[1] == 0 || opint[1] == 1)
6768 return build_int_cst (long_integer_type_node, 0);
6769 break;
6770 }
6771 return NULL;
6772 }
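
/* Host-side model of UMULH (illustration only): the high 64 bits of an
   unsigned 64x64-bit product, computed here by splitting each operand
   into 32-bit halves so no wider host type is required.  */

static unsigned long long
example_umulh (unsigned long long a, unsigned long long b)
{
  unsigned long long a_lo = a & 0xffffffffULL, a_hi = a >> 32;
  unsigned long long b_lo = b & 0xffffffffULL, b_hi = b >> 32;
  unsigned long long p0 = a_lo * b_lo;
  unsigned long long p1 = a_lo * b_hi;
  unsigned long long p2 = a_hi * b_lo;
  unsigned long long mid = (p0 >> 32) + (p1 & 0xffffffffULL)
			   + (p2 & 0xffffffffULL);
  return a_hi * b_hi + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}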
6773
6774 static tree
6775 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6776 {
6777 tree op0 = fold_convert (vtype, op[0]);
6778 tree op1 = fold_convert (vtype, op[1]);
6779 tree val = fold_build2 (code, vtype, op0, op1);
6780 return fold_convert (long_integer_type_node, val);
6781 }
6782
6783 static tree
6784 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6785 {
6786 unsigned HOST_WIDE_INT temp = 0;
6787 int i;
6788
6789 if (op_const != 3)
6790 return NULL;
6791
6792 for (i = 0; i < 8; ++i)
6793 {
6794 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6795 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6796 if (a >= b)
6797 temp += a - b;
6798 else
6799 temp += b - a;
6800 }
6801
6802 return build_int_cst (long_integer_type_node, temp);
6803 }
6804
6805 static tree
6806 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6807 {
6808 unsigned HOST_WIDE_INT temp;
6809
6810 if (op_const == 0)
6811 return NULL;
6812
6813 temp = opint[0] & 0xff;
6814 temp |= (opint[0] >> 24) & 0xff00;
6815
6816 return build_int_cst (long_integer_type_node, temp);
6817 }
6818
6819 static tree
6820 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6821 {
6822 unsigned HOST_WIDE_INT temp;
6823
6824 if (op_const == 0)
6825 return NULL;
6826
6827 temp = opint[0] & 0xff;
6828 temp |= (opint[0] >> 8) & 0xff00;
6829 temp |= (opint[0] >> 16) & 0xff0000;
6830 temp |= (opint[0] >> 24) & 0xff000000;
6831
6832 return build_int_cst (long_integer_type_node, temp);
6833 }
6834
6835 static tree
6836 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6837 {
6838 unsigned HOST_WIDE_INT temp;
6839
6840 if (op_const == 0)
6841 return NULL;
6842
6843 temp = opint[0] & 0xff;
6844 temp |= (opint[0] & 0xff00) << 24;
6845
6846 return build_int_cst (long_integer_type_node, temp);
6847 }
6848
6849 static tree
6850 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6851 {
6852 unsigned HOST_WIDE_INT temp;
6853
6854 if (op_const == 0)
6855 return NULL;
6856
6857 temp = opint[0] & 0xff;
6858 temp |= (opint[0] & 0x0000ff00) << 8;
6859 temp |= (opint[0] & 0x00ff0000) << 16;
6860 temp |= (opint[0] & 0xff000000) << 24;
6861
6862 return build_int_cst (long_integer_type_node, temp);
6863 }
6864
6865 static tree
6866 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6867 {
6868 unsigned HOST_WIDE_INT temp;
6869
6870 if (op_const == 0)
6871 return NULL;
6872
6873 if (opint[0] == 0)
6874 temp = 64;
6875 else
6876 temp = exact_log2 (opint[0] & -opint[0]);
6877
6878 return build_int_cst (long_integer_type_node, temp);
6879 }
6880
6881 static tree
6882 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6883 {
6884 unsigned HOST_WIDE_INT temp;
6885
6886 if (op_const == 0)
6887 return NULL;
6888
6889 if (opint[0] == 0)
6890 temp = 64;
6891 else
6892 temp = 64 - floor_log2 (opint[0]) - 1;
6893
6894 return build_int_cst (long_integer_type_node, temp);
6895 }
6896
6897 static tree
6898 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6899 {
6900 unsigned HOST_WIDE_INT temp, op;
6901
6902 if (op_const == 0)
6903 return NULL;
6904
6905 op = opint[0];
6906 temp = 0;
6907 while (op)
6908 temp++, op &= op - 1;	/* clears the lowest set bit each time */
6909
6910 return build_int_cst (long_integer_type_node, temp);
6911 }
6912
6913 /* Fold one of our builtin functions. */
6914
6915 static tree
6916 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6917 {
6918 tree op[MAX_ARGS], t;
6919 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6920 long op_const = 0, arity = 0;
6921
6922 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6923 {
6924 tree arg = TREE_VALUE (t);
6925 if (arg == error_mark_node)
6926 return NULL;
6927 if (arity >= MAX_ARGS)
6928 return NULL;
6929
6930 op[arity] = arg;
6931 opint[arity] = 0;
6932 if (TREE_CODE (arg) == INTEGER_CST)
6933 {
6934 op_const |= 1L << arity;
6935 opint[arity] = int_cst_value (arg);
6936 }
6937 }
6938
6939 switch (DECL_FUNCTION_CODE (fndecl))
6940 {
6941 case ALPHA_BUILTIN_CMPBGE:
6942 return alpha_fold_builtin_cmpbge (opint, op_const);
6943
6944 case ALPHA_BUILTIN_EXTBL:
6945 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6946 case ALPHA_BUILTIN_EXTWL:
6947 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6948 case ALPHA_BUILTIN_EXTLL:
6949 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6950 case ALPHA_BUILTIN_EXTQL:
6951 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6952 case ALPHA_BUILTIN_EXTWH:
6953 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6954 case ALPHA_BUILTIN_EXTLH:
6955 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6956 case ALPHA_BUILTIN_EXTQH:
6957 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6958
6959 case ALPHA_BUILTIN_INSBL:
6960 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6961 case ALPHA_BUILTIN_INSWL:
6962 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6963 case ALPHA_BUILTIN_INSLL:
6964 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6965 case ALPHA_BUILTIN_INSQL:
6966 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6967 case ALPHA_BUILTIN_INSWH:
6968 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6969 case ALPHA_BUILTIN_INSLH:
6970 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6971 case ALPHA_BUILTIN_INSQH:
6972 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6973
6974 case ALPHA_BUILTIN_MSKBL:
6975 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6976 case ALPHA_BUILTIN_MSKWL:
6977 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6978 case ALPHA_BUILTIN_MSKLL:
6979 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6980 case ALPHA_BUILTIN_MSKQL:
6981 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6982 case ALPHA_BUILTIN_MSKWH:
6983 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6984 case ALPHA_BUILTIN_MSKLH:
6985 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6986 case ALPHA_BUILTIN_MSKQH:
6987 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6988
6989 case ALPHA_BUILTIN_UMULH:
6990 return alpha_fold_builtin_umulh (opint, op_const);
6991
6992 case ALPHA_BUILTIN_ZAP:
6993 opint[1] ^= 0xff;
6994 /* FALLTHRU */
6995 case ALPHA_BUILTIN_ZAPNOT:
6996 return alpha_fold_builtin_zapnot (op, opint, op_const);
6997
6998 case ALPHA_BUILTIN_MINUB8:
6999 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7000 case ALPHA_BUILTIN_MINSB8:
7001 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7002 case ALPHA_BUILTIN_MINUW4:
7003 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7004 case ALPHA_BUILTIN_MINSW4:
7005 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7006 case ALPHA_BUILTIN_MAXUB8:
7007 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7008 case ALPHA_BUILTIN_MAXSB8:
7009 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7010 case ALPHA_BUILTIN_MAXUW4:
7011 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7012 case ALPHA_BUILTIN_MAXSW4:
7013 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7014
7015 case ALPHA_BUILTIN_PERR:
7016 return alpha_fold_builtin_perr (opint, op_const);
7017 case ALPHA_BUILTIN_PKLB:
7018 return alpha_fold_builtin_pklb (opint, op_const);
7019 case ALPHA_BUILTIN_PKWB:
7020 return alpha_fold_builtin_pkwb (opint, op_const);
7021 case ALPHA_BUILTIN_UNPKBL:
7022 return alpha_fold_builtin_unpkbl (opint, op_const);
7023 case ALPHA_BUILTIN_UNPKBW:
7024 return alpha_fold_builtin_unpkbw (opint, op_const);
7025
7026 case ALPHA_BUILTIN_CTTZ:
7027 return alpha_fold_builtin_cttz (opint, op_const);
7028 case ALPHA_BUILTIN_CTLZ:
7029 return alpha_fold_builtin_ctlz (opint, op_const);
7030 case ALPHA_BUILTIN_CTPOP:
7031 return alpha_fold_builtin_ctpop (opint, op_const);
7032
7033 case ALPHA_BUILTIN_AMASK:
7034 case ALPHA_BUILTIN_IMPLVER:
7035 case ALPHA_BUILTIN_RPCC:
7036 case ALPHA_BUILTIN_THREAD_POINTER:
7037 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7038 /* None of these are foldable at compile-time. */
7039 default:
7040 return NULL;
7041 }
7042 }
7043 \f
7044 /* This page contains routines that are used to determine what the function
7045 prologue and epilogue code will do and write them out. */
7046
7047 /* Compute the size of the save area in the stack. */
7048
7049 /* These variables are used for communication between the following functions.
7050 They indicate various things about the current function being compiled
7051 that are used to tell what kind of prologue, epilogue and procedure
7052 descriptor to generate. */
7053
7054 /* Nonzero if we need a stack procedure. */
7055 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7056 static enum alpha_procedure_types alpha_procedure_type;
7057
7058 /* Register number (either FP or SP) that is used to unwind the frame. */
7059 static int vms_unwind_regno;
7060
7061 /* Register number used to save FP. We need not have one for RA since
7062 we don't modify it for register procedures. This is only defined
7063 for register frame procedures. */
7064 static int vms_save_fp_regno;
7065
7066 /* Register number used to reference objects off our PV. */
7067 static int vms_base_regno;
7068
7069 /* Compute register masks for saved registers. */
7070
7071 static void
7072 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7073 {
7074 unsigned long imask = 0;
7075 unsigned long fmask = 0;
7076 unsigned int i;
7077
7078 /* When outputting a thunk, we don't have valid register life info,
7079 but assemble_start_function wants to output .frame and .mask
7080 directives. */
7081 if (current_function_is_thunk)
7082 {
7083 *imaskP = 0;
7084 *fmaskP = 0;
7085 return;
7086 }
7087
7088 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7089 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7090
7091 /* One for every register we have to save. */
7092 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7093 if (! fixed_regs[i] && ! call_used_regs[i]
7094 && df_regs_ever_live_p (i) && i != REG_RA
7095 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7096 {
7097 if (i < 32)
7098 imask |= (1UL << i);
7099 else
7100 fmask |= (1UL << (i - 32));
7101 }
7102
7103 /* We need to restore these for the handler. */
7104 if (current_function_calls_eh_return)
7105 {
7106 for (i = 0; ; ++i)
7107 {
7108 unsigned regno = EH_RETURN_DATA_REGNO (i);
7109 if (regno == INVALID_REGNUM)
7110 break;
7111 imask |= 1UL << regno;
7112 }
7113 }
7114
7115 /* If any register spilled, then spill the return address also. */
7116 /* ??? This is required by the Digital stack unwind specification
7117 and isn't needed if we're doing Dwarf2 unwinding. */
7118 if (imask || fmask || alpha_ra_ever_killed ())
7119 imask |= (1UL << REG_RA);
7120
7121 *imaskP = imask;
7122 *fmaskP = fmask;
7123 }
7124
7125 int
7126 alpha_sa_size (void)
7127 {
7128 unsigned long mask[2];
7129 int sa_size = 0;
7130 int i, j;
7131
7132 alpha_sa_mask (&mask[0], &mask[1]);
7133
7134 if (TARGET_ABI_UNICOSMK)
7135 {
7136 if (mask[0] || mask[1])
7137 sa_size = 14;
7138 }
7139 else
7140 {
7141 for (j = 0; j < 2; ++j)
7142 for (i = 0; i < 32; ++i)
7143 if ((mask[j] >> i) & 1)
7144 sa_size++;
7145 }
7146
7147 if (TARGET_ABI_UNICOSMK)
7148 {
7149 /* We might not need to generate a frame if we don't make any calls
7150 (including calls to __T3E_MISMATCH if this is a vararg function),
7151 don't have any local variables which require stack slots, don't
7152 use alloca and have not determined that we need a frame for other
7153 reasons. */
7154
7155 alpha_procedure_type
7156 = (sa_size || get_frame_size() != 0
7157 || current_function_outgoing_args_size
7158 || current_function_stdarg || current_function_calls_alloca
7159 || frame_pointer_needed)
7160 ? PT_STACK : PT_REGISTER;
7161
7162 /* Always reserve space for saving callee-saved registers if we
7163 need a frame as required by the calling convention. */
7164 if (alpha_procedure_type == PT_STACK)
7165 sa_size = 14;
7166 }
7167 else if (TARGET_ABI_OPEN_VMS)
7168 {
7169 /* Start by assuming we can use a register procedure if we don't
7170 make any calls (REG_RA not used) or need to save any
7171 registers and a stack procedure if we do. */
7172 if ((mask[0] >> REG_RA) & 1)
7173 alpha_procedure_type = PT_STACK;
7174 else if (get_frame_size() != 0)
7175 alpha_procedure_type = PT_REGISTER;
7176 else
7177 alpha_procedure_type = PT_NULL;
7178
7179 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7180 made the final decision on stack procedure vs register procedure. */
7181 if (alpha_procedure_type == PT_STACK)
7182 sa_size -= 2;
7183
7184 /* Decide whether to refer to objects off our PV via FP or PV.
7185 If we need FP for something else or if we receive a nonlocal
7186 goto (which expects PV to contain the value), we must use PV.
7187 Otherwise, start by assuming we can use FP. */
7188
7189 vms_base_regno
7190 = (frame_pointer_needed
7191 || current_function_has_nonlocal_label
7192 || alpha_procedure_type == PT_STACK
7193 || current_function_outgoing_args_size)
7194 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7195
7196 /* If we want to copy PV into FP, we need to find some register
7197 in which to save FP. */
7198
7199 vms_save_fp_regno = -1;
7200 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7201 for (i = 0; i < 32; i++)
7202 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7203 vms_save_fp_regno = i;
7204
7205 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7206 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7207 else if (alpha_procedure_type == PT_NULL)
7208 vms_base_regno = REG_PV;
7209
7210 /* Stack unwinding should be done via FP unless we use it for PV. */
7211 vms_unwind_regno = (vms_base_regno == REG_PV
7212 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7213
7214 /* If this is a stack procedure, allow space for saving FP and RA. */
7215 if (alpha_procedure_type == PT_STACK)
7216 sa_size += 2;
7217 }
7218 else
7219 {
7220 /* Our size must be even (multiple of 16 bytes). */
7221 if (sa_size & 1)
7222 sa_size++;
7223 }
7224
7225 return sa_size * 8;
7226 }
7227
7228 /* Define the offset between two registers, one to be eliminated,
7229 and the other its replacement, at the start of a routine. */
7230
7231 HOST_WIDE_INT
7232 alpha_initial_elimination_offset (unsigned int from,
7233 unsigned int to ATTRIBUTE_UNUSED)
7234 {
7235 HOST_WIDE_INT ret;
7236
7237 ret = alpha_sa_size ();
7238 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7239
7240 switch (from)
7241 {
7242 case FRAME_POINTER_REGNUM:
7243 break;
7244
7245 case ARG_POINTER_REGNUM:
7246 ret += (ALPHA_ROUND (get_frame_size ()
7247 + current_function_pretend_args_size)
7248 - current_function_pretend_args_size);
7249 break;
7250
7251 default:
7252 gcc_unreachable ();
7253 }
7254
7255 return ret;
7256 }
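
/* Worked example for the arithmetic above (illustration only, assuming
   ALPHA_ROUND rounds up to a multiple of 16): with a 16-byte register
   save area, 24 bytes of outgoing arguments (rounded to 32), a 40-byte
   local frame and no pretend args, eliminating FRAME_POINTER_REGNUM
   yields 16 + 32 = 48, while eliminating ARG_POINTER_REGNUM yields
   48 + ALPHA_ROUND (40) = 96.  */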
7257
7258 int
7259 alpha_pv_save_size (void)
7260 {
7261 alpha_sa_size ();
7262 return alpha_procedure_type == PT_STACK ? 8 : 0;
7263 }
7264
7265 int
7266 alpha_using_fp (void)
7267 {
7268 alpha_sa_size ();
7269 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7270 }
7271
7272 #if TARGET_ABI_OPEN_VMS
7273
7274 const struct attribute_spec vms_attribute_table[] =
7275 {
7276 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7277 { "overlaid", 0, 0, true, false, false, NULL },
7278 { "global", 0, 0, true, false, false, NULL },
7279 { "initialize", 0, 0, true, false, false, NULL },
7280 { NULL, 0, 0, false, false, false, NULL }
7281 };
7282
7283 #endif
7284
7285 static int
7286 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7287 {
7288 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7289 }
7290
7291 int
7292 alpha_find_lo_sum_using_gp (rtx insn)
7293 {
7294 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7295 }
7296
7297 static int
7298 alpha_does_function_need_gp (void)
7299 {
7300 rtx insn;
7301
7302 /* The GP being variable is an OSF abi thing. */
7303 if (! TARGET_ABI_OSF)
7304 return 0;
7305
7306 /* We need the gp to load the address of __mcount. */
7307 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7308 return 1;
7309
7310 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7311 if (current_function_is_thunk)
7312 return 1;
7313
7314 /* The nonlocal receiver pattern assumes that the gp is valid for
7315 the nested function. Reasonable because it's almost always set
7316 correctly already. For the cases where that's wrong, make sure
7317 the nested function loads its gp on entry. */
7318 if (current_function_has_nonlocal_goto)
7319 return 1;
7320
7321 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7322 Even if we are a static function, we still need to do this in case
7323 our address is taken and passed to something like qsort. */
7324
7325 push_topmost_sequence ();
7326 insn = get_insns ();
7327 pop_topmost_sequence ();
7328
7329 for (; insn; insn = NEXT_INSN (insn))
7330 if (INSN_P (insn)
7331 && ! JUMP_TABLE_DATA_P (insn)
7332 && GET_CODE (PATTERN (insn)) != USE
7333 && GET_CODE (PATTERN (insn)) != CLOBBER
7334 && get_attr_usegp (insn))
7335 return 1;
7336
7337 return 0;
7338 }
7339
7340 \f
7341 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7342 sequences. */
7343
7344 static rtx
7345 set_frame_related_p (void)
7346 {
7347 rtx seq = get_insns ();
7348 rtx insn;
7349
7350 end_sequence ();
7351
7352 if (!seq)
7353 return NULL_RTX;
7354
7355 if (INSN_P (seq))
7356 {
7357 insn = seq;
7358 while (insn != NULL_RTX)
7359 {
7360 RTX_FRAME_RELATED_P (insn) = 1;
7361 insn = NEXT_INSN (insn);
7362 }
7363 seq = emit_insn (seq);
7364 }
7365 else
7366 {
7367 seq = emit_insn (seq);
7368 RTX_FRAME_RELATED_P (seq) = 1;
7369 }
7370 return seq;
7371 }
7372
7373 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7374
7375 /* Generates a store with the proper unwind info attached. VALUE is
7376 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7377 contains SP+FRAME_BIAS, and that is the unwind info that should be
7378 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7379 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7380
7381 static void
7382 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7383 HOST_WIDE_INT base_ofs, rtx frame_reg)
7384 {
7385 rtx addr, mem, insn;
7386
7387 addr = plus_constant (base_reg, base_ofs);
7388 mem = gen_rtx_MEM (DImode, addr);
7389 set_mem_alias_set (mem, alpha_sr_alias_set);
7390
7391 insn = emit_move_insn (mem, value);
7392 RTX_FRAME_RELATED_P (insn) = 1;
7393
7394 if (frame_bias || value != frame_reg)
7395 {
7396 if (frame_bias)
7397 {
7398 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7399 mem = gen_rtx_MEM (DImode, addr);
7400 }
7401
7402 REG_NOTES (insn)
7403 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7404 gen_rtx_SET (VOIDmode, mem, frame_reg),
7405 REG_NOTES (insn));
7406 }
7407 }
7408
7409 static void
7410 emit_frame_store (unsigned int regno, rtx base_reg,
7411 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7412 {
7413 rtx reg = gen_rtx_REG (DImode, regno);
7414 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7415 }
7416
7417 /* Write function prologue. */
7418
7419 /* On vms we have two kinds of functions:
7420
7421 - stack frame (PROC_STACK)
7422 these are 'normal' functions with local vars and which are
7423 calling other functions
7424 - register frame (PROC_REGISTER)
7425 keeps all data in registers, needs no stack
7426
7427 We must pass this to the assembler so it can generate the
7428 proper pdsc (procedure descriptor)
7429 This is done with the '.pdesc' command.
7430
7431 On not-vms, we don't really differentiate between the two, as we can
7432 simply allocate stack without saving registers. */
7433
7434 void
7435 alpha_expand_prologue (void)
7436 {
7437 /* Registers to save. */
7438 unsigned long imask = 0;
7439 unsigned long fmask = 0;
7440 /* Stack space needed for pushing registers clobbered by us. */
7441 HOST_WIDE_INT sa_size;
7442 /* Complete stack size needed. */
7443 HOST_WIDE_INT frame_size;
7444 /* Offset from base reg to register save area. */
7445 HOST_WIDE_INT reg_offset;
7446 rtx sa_reg;
7447 int i;
7448
7449 sa_size = alpha_sa_size ();
7450
7451 frame_size = get_frame_size ();
7452 if (TARGET_ABI_OPEN_VMS)
7453 frame_size = ALPHA_ROUND (sa_size
7454 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7455 + frame_size
7456 + current_function_pretend_args_size);
7457 else if (TARGET_ABI_UNICOSMK)
7458 /* We have to allocate space for the DSIB if we generate a frame. */
7459 frame_size = ALPHA_ROUND (sa_size
7460 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7461 + ALPHA_ROUND (frame_size
7462 + current_function_outgoing_args_size);
7463 else
7464 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7465 + sa_size
7466 + ALPHA_ROUND (frame_size
7467 + current_function_pretend_args_size));
7468
7469 if (TARGET_ABI_OPEN_VMS)
7470 reg_offset = 8;
7471 else
7472 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7473
7474 alpha_sa_mask (&imask, &fmask);
7475
7476 /* Emit an insn to reload GP, if needed. */
7477 if (TARGET_ABI_OSF)
7478 {
7479 alpha_function_needs_gp = alpha_does_function_need_gp ();
7480 if (alpha_function_needs_gp)
7481 emit_insn (gen_prologue_ldgp ());
7482 }
7483
7484 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7485 the call to mcount ourselves, rather than having the linker do it
7486 magically in response to -pg. Since _mcount has special linkage,
7487 don't represent the call as a call. */
7488 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7489 emit_insn (gen_prologue_mcount ());
7490
7491 if (TARGET_ABI_UNICOSMK)
7492 unicosmk_gen_dsib (&imask);
7493
7494 /* Adjust the stack by the frame size. If the frame size is > 4096
7495 bytes, we need to be sure we probe somewhere in the first and last
7496 4096 bytes (we can probably get away without the latter test) and
7497 every 8192 bytes in between. If the frame size is > 32768, we
7498 do this in a loop. Otherwise, we generate the explicit probe
7499 instructions.
7500
7501 Note that we are only allowed to adjust sp once in the prologue. */
7502
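  /* Worked example: for frame_size == 20000 the loop below emits probes
     at sp-4096 and sp-12288, and, when no registers are being saved,
     the follow-up test adds one more probe at sp-20000 so the final
     4096 bytes are touched too.  Frame sizes above 32768 switch to the
     probe loop in the else arm.  (Illustration only.)  */
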
7503 if (frame_size <= 32768)
7504 {
7505 if (frame_size > 4096)
7506 {
7507 int probed;
7508
7509 for (probed = 4096; probed < frame_size; probed += 8192)
7510 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7511 ? -probed + 64
7512 : -probed)));
7513
7514 /* We only have to do this probe if we aren't saving registers. */
7515 if (sa_size == 0 && frame_size > probed - 4096)
7516 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7517 }
7518
7519 if (frame_size != 0)
7520 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7521 GEN_INT (TARGET_ABI_UNICOSMK
7522 ? -frame_size + 64
7523 : -frame_size))));
7524 }
7525 else
7526 {
7527 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7528 number of 8192 byte blocks to probe. We then probe each block
7529 in the loop and then set SP to the proper location. If the
7530 amount remaining is > 4096, we have to do one more probe if we
7531 are not saving any registers. */
7532
7533 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7534 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7535 rtx ptr = gen_rtx_REG (DImode, 22);
7536 rtx count = gen_rtx_REG (DImode, 23);
7537 rtx seq;
7538
7539 emit_move_insn (count, GEN_INT (blocks));
7540 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7541 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7542
7543 /* Because of the difficulty in emitting a new basic block this
7544 late in the compilation, generate the loop as a single insn. */
7545 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7546
7547 if (leftover > 4096 && sa_size == 0)
7548 {
7549 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7550 MEM_VOLATILE_P (last) = 1;
7551 emit_move_insn (last, const0_rtx);
7552 }
7553
7554 if (TARGET_ABI_WINDOWS_NT)
7555 {
7556 /* For NT stack unwind (done by 'reverse execution'), it's
7557 not OK to take the result of a loop, even though the value
7558 is already in ptr, so we reload it via a single operation
7559 and subtract it from sp.
7560
7561 Yes, that's correct -- we have to reload the whole constant
7562 into a temporary via ldah+lda then subtract from sp. */
7563
7564 HOST_WIDE_INT lo, hi;
7565 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7566 hi = frame_size - lo;
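  /* Worked example (illustration only): for frame_size == 40000 this
     yields lo == -25536 and hi == 65536, so the ldah+lda pair below
     rebuilds 65536 - 25536 == 40000 in ptr before the subtraction.  */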
7567
7568 emit_move_insn (ptr, GEN_INT (hi));
7569 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7570 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7571 ptr));
7572 }
7573 else
7574 {
7575 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7576 GEN_INT (-leftover)));
7577 }
7578
7579 /* This alternative is special, because the DWARF code cannot
7580 possibly intuit through the loop above. So we invent this
7581 note for it to look at instead. */
7582 RTX_FRAME_RELATED_P (seq) = 1;
7583 REG_NOTES (seq)
7584 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7585 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7586 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7587 GEN_INT (TARGET_ABI_UNICOSMK
7588 ? -frame_size + 64
7589 : -frame_size))),
7590 REG_NOTES (seq));
7591 }
7592
7593 if (!TARGET_ABI_UNICOSMK)
7594 {
7595 HOST_WIDE_INT sa_bias = 0;
7596
7597 /* Cope with very large offsets to the register save area. */
7598 sa_reg = stack_pointer_rtx;
7599 if (reg_offset + sa_size > 0x8000)
7600 {
7601 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7602 rtx sa_bias_rtx;
7603
7604 if (low + sa_size <= 0x8000)
7605 sa_bias = reg_offset - low, reg_offset = low;
7606 else
7607 sa_bias = reg_offset, reg_offset = 0;
7608
7609 sa_reg = gen_rtx_REG (DImode, 24);
7610 sa_bias_rtx = GEN_INT (sa_bias);
7611
7612 if (add_operand (sa_bias_rtx, DImode))
7613 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7614 else
7615 {
7616 emit_move_insn (sa_reg, sa_bias_rtx);
7617 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7618 }
7619 }
7620
7621 /* Save regs in stack order. Beginning with VMS PV. */
7622 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7623 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7624
7625 /* Save register RA next. */
7626 if (imask & (1UL << REG_RA))
7627 {
7628 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7629 imask &= ~(1UL << REG_RA);
7630 reg_offset += 8;
7631 }
7632
7633 /* Now save any other registers required to be saved. */
7634 for (i = 0; i < 31; i++)
7635 if (imask & (1UL << i))
7636 {
7637 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7638 reg_offset += 8;
7639 }
7640
7641 for (i = 0; i < 31; i++)
7642 if (fmask & (1UL << i))
7643 {
7644 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7645 reg_offset += 8;
7646 }
7647 }
7648 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7649 {
7650 /* The standard frame on the T3E includes space for saving registers.
7651 We just have to use it. We don't have to save the return address and
7652 the old frame pointer here - they are saved in the DSIB. */
7653
7654 reg_offset = -56;
7655 for (i = 9; i < 15; i++)
7656 if (imask & (1UL << i))
7657 {
7658 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7659 reg_offset -= 8;
7660 }
7661 for (i = 2; i < 10; i++)
7662 if (fmask & (1UL << i))
7663 {
7664 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7665 reg_offset -= 8;
7666 }
7667 }
7668
7669 if (TARGET_ABI_OPEN_VMS)
7670 {
7671 if (alpha_procedure_type == PT_REGISTER)
7672 /* Register frame procedures save the fp.
7673 ?? Ought to have a dwarf2 save for this. */
7674 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7675 hard_frame_pointer_rtx);
7676
7677 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7678 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7679 gen_rtx_REG (DImode, REG_PV)));
7680
7681 if (alpha_procedure_type != PT_NULL
7682 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7683 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7684
7685 /* If we have to allocate space for outgoing args, do it now. */
7686 if (current_function_outgoing_args_size != 0)
7687 {
7688 rtx seq
7689 = emit_move_insn (stack_pointer_rtx,
7690 plus_constant
7691 (hard_frame_pointer_rtx,
7692 - (ALPHA_ROUND
7693 (current_function_outgoing_args_size))));
7694
7695 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7696 if ! frame_pointer_needed. Setting the bit will change the CFA
7697 computation rule to use sp again, which would be wrong if we had
7698 frame_pointer_needed, as this means sp might move unpredictably
7699 later on.
7700
7701 Also, note that
7702 frame_pointer_needed
7703 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7704 and
7705 current_function_outgoing_args_size != 0
7706 => alpha_procedure_type != PT_NULL,
7707
7708 so when we are not setting the bit here, we are guaranteed to
7709 have emitted an FRP frame pointer update just before. */
7710 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7711 }
7712 }
7713 else if (!TARGET_ABI_UNICOSMK)
7714 {
7715 /* If we need a frame pointer, set it from the stack pointer. */
7716 if (frame_pointer_needed)
7717 {
7718 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7719 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7720 else
7721 /* This must always be the last instruction in the
7722 prologue, thus we emit a special move + clobber. */
7723 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7724 stack_pointer_rtx, sa_reg)));
7725 }
7726 }
7727
7728 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7729 the prologue, for exception handling reasons, we cannot do this for
7730 any insn that might fault. We could prevent this for mems with a
7731 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7732 have to prevent all such scheduling with a blockage.
7733
7734 Linux, on the other hand, never bothered to implement OSF/1's
7735 exception handling, and so doesn't care about such things. Anyone
7736 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7737
7738 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7739 emit_insn (gen_blockage ());
7740 }
7741
7742 /* Count the number of .file directives, so that .loc is up to date. */
7743 int num_source_filenames = 0;
7744
7745 /* Output the textual info surrounding the prologue. */
7746
7747 void
7748 alpha_start_function (FILE *file, const char *fnname,
7749 tree decl ATTRIBUTE_UNUSED)
7750 {
7751 unsigned long imask = 0;
7752 unsigned long fmask = 0;
7753 /* Stack space needed for pushing registers clobbered by us. */
7754 HOST_WIDE_INT sa_size;
7755 /* Complete stack size needed. */
7756 unsigned HOST_WIDE_INT frame_size;
7757 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7758 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7759 ? 524288
7760 : 1UL << 31;
7761 /* Offset from base reg to register save area. */
7762 HOST_WIDE_INT reg_offset;
7763 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7764 int i;
7765
7766 /* Don't emit an extern directive for functions defined in the same file. */
7767 if (TARGET_ABI_UNICOSMK)
7768 {
7769 tree name_tree;
7770 name_tree = get_identifier (fnname);
7771 TREE_ASM_WRITTEN (name_tree) = 1;
7772 }
7773
7774 alpha_fnname = fnname;
7775 sa_size = alpha_sa_size ();
7776
7777 frame_size = get_frame_size ();
7778 if (TARGET_ABI_OPEN_VMS)
7779 frame_size = ALPHA_ROUND (sa_size
7780 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7781 + frame_size
7782 + current_function_pretend_args_size);
7783 else if (TARGET_ABI_UNICOSMK)
7784 frame_size = ALPHA_ROUND (sa_size
7785 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7786 + ALPHA_ROUND (frame_size
7787 + current_function_outgoing_args_size);
7788 else
7789 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7790 + sa_size
7791 + ALPHA_ROUND (frame_size
7792 + current_function_pretend_args_size));
7793
7794 if (TARGET_ABI_OPEN_VMS)
7795 reg_offset = 8;
7796 else
7797 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7798
7799 alpha_sa_mask (&imask, &fmask);
7800
7801 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7802 We have to do that before the .ent directive as we cannot switch
7803 files within procedures with native ecoff because line numbers are
7804 linked to procedure descriptors.
7805 Outputting the lineno helps debugging of one line functions as they
7806 would otherwise get no line number at all. Please note that we would
7807 like to put out last_linenum from final.c, but it is not accessible. */
7808
7809 if (write_symbols == SDB_DEBUG)
7810 {
7811 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7812 ASM_OUTPUT_SOURCE_FILENAME (file,
7813 DECL_SOURCE_FILE (current_function_decl));
7814 #endif
7815 #ifdef SDB_OUTPUT_SOURCE_LINE
7816 if (debug_info_level != DINFO_LEVEL_TERSE)
7817 SDB_OUTPUT_SOURCE_LINE (file,
7818 DECL_SOURCE_LINE (current_function_decl));
7819 #endif
7820 }
7821
7822 /* Issue function start and label. */
7823 if (TARGET_ABI_OPEN_VMS
7824 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7825 {
7826 fputs ("\t.ent ", file);
7827 assemble_name (file, fnname);
7828 putc ('\n', file);
7829
7830 /* If the function needs GP, we'll write the "..ng" label there.
7831 Otherwise, do it here. */
7832 if (TARGET_ABI_OSF
7833 && ! alpha_function_needs_gp
7834 && ! current_function_is_thunk)
7835 {
7836 putc ('$', file);
7837 assemble_name (file, fnname);
7838 fputs ("..ng:\n", file);
7839 }
7840 }
7841
7842 strcpy (entry_label, fnname);
7843 if (TARGET_ABI_OPEN_VMS)
7844 strcat (entry_label, "..en");
7845
7846 /* For public functions, the label must be globalized by appending an
7847 additional colon. */
7848 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7849 strcat (entry_label, ":");
7850
7851 ASM_OUTPUT_LABEL (file, entry_label);
7852 inside_function = TRUE;
7853
7854 if (TARGET_ABI_OPEN_VMS)
7855 fprintf (file, "\t.base $%d\n", vms_base_regno);
7856
7857 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7858 && !flag_inhibit_size_directive)
7859 {
7860 /* Set flags in procedure descriptor to request IEEE-conformant
7861 math-library routines. The value we set it to is PDSC_EXC_IEEE
7862 (/usr/include/pdsc.h). */
7863 fputs ("\t.eflag 48\n", file);
7864 }
7865
7866 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7867 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7868 alpha_arg_offset = -frame_size + 48;
7869
7870 /* Describe our frame. If the frame size is larger than an integer,
7871 print it as zero to avoid an assembler error. We won't be
7872 properly describing such a frame, but that's the best we can do. */
7873 if (TARGET_ABI_UNICOSMK)
7874 ;
7875 else if (TARGET_ABI_OPEN_VMS)
7876 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7877 HOST_WIDE_INT_PRINT_DEC "\n",
7878 vms_unwind_regno,
7879 frame_size >= (1UL << 31) ? 0 : frame_size,
7880 reg_offset);
7881 else if (!flag_inhibit_size_directive)
7882 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7883 (frame_pointer_needed
7884 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7885 frame_size >= max_frame_size ? 0 : frame_size,
7886 current_function_pretend_args_size);
7887
7888 /* Describe which registers were spilled. */
7889 if (TARGET_ABI_UNICOSMK)
7890 ;
7891 else if (TARGET_ABI_OPEN_VMS)
7892 {
7893 if (imask)
7894 /* ??? Does VMS care if mask contains ra? The old code didn't
7895 set it, so I don't here. */
7896 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7897 if (fmask)
7898 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7899 if (alpha_procedure_type == PT_REGISTER)
7900 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7901 }
7902 else if (!flag_inhibit_size_directive)
7903 {
7904 if (imask)
7905 {
7906 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7907 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7908
7909 for (i = 0; i < 32; ++i)
7910 if (imask & (1UL << i))
7911 reg_offset += 8;
7912 }
7913
7914 if (fmask)
7915 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7916 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7917 }
7918
7919 #if TARGET_ABI_OPEN_VMS
7920 /* Ifdef'ed because link_section is only available then. */
7921 switch_to_section (readonly_data_section);
7922 fprintf (file, "\t.align 3\n");
7923 assemble_name (file, fnname); fputs ("..na:\n", file);
7924 fputs ("\t.ascii \"", file);
7925 assemble_name (file, fnname);
7926 fputs ("\\0\"\n", file);
7927 alpha_need_linkage (fnname, 1);
7928 switch_to_section (text_section);
7929 #endif
7930 }
7931
7932 /* Emit the .prologue note at the scheduled end of the prologue. */
7933
7934 static void
7935 alpha_output_function_end_prologue (FILE *file)
7936 {
7937 if (TARGET_ABI_UNICOSMK)
7938 ;
7939 else if (TARGET_ABI_OPEN_VMS)
7940 fputs ("\t.prologue\n", file);
7941 else if (TARGET_ABI_WINDOWS_NT)
7942 fputs ("\t.prologue 0\n", file);
7943 else if (!flag_inhibit_size_directive)
7944 fprintf (file, "\t.prologue %d\n",
7945 alpha_function_needs_gp || current_function_is_thunk);
7946 }
7947
7948 /* Write function epilogue. */
7949
7950 /* ??? At some point we will want to support full unwind, and so will
7951 need to mark the epilogue as well. At the moment, we just confuse
7952 dwarf2out. */
7953 #undef FRP
7954 #define FRP(exp) exp
7955
7956 void
7957 alpha_expand_epilogue (void)
7958 {
7959 /* Registers to save. */
7960 unsigned long imask = 0;
7961 unsigned long fmask = 0;
7962 /* Stack space needed for pushing registers clobbered by us. */
7963 HOST_WIDE_INT sa_size;
7964 /* Complete stack size needed. */
7965 HOST_WIDE_INT frame_size;
7966 /* Offset from base reg to register save area. */
7967 HOST_WIDE_INT reg_offset;
7968 int fp_is_frame_pointer, fp_offset;
7969 rtx sa_reg, sa_reg_exp = NULL;
7970 rtx sp_adj1, sp_adj2, mem;
7971 rtx eh_ofs;
7972 int i;
7973
7974 sa_size = alpha_sa_size ();
7975
7976 frame_size = get_frame_size ();
7977 if (TARGET_ABI_OPEN_VMS)
7978 frame_size = ALPHA_ROUND (sa_size
7979 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7980 + frame_size
7981 + current_function_pretend_args_size);
7982 else if (TARGET_ABI_UNICOSMK)
7983 frame_size = ALPHA_ROUND (sa_size
7984 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7985 + ALPHA_ROUND (frame_size
7986 + current_function_outgoing_args_size);
7987 else
7988 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7989 + sa_size
7990 + ALPHA_ROUND (frame_size
7991 + current_function_pretend_args_size));
7992
7993 if (TARGET_ABI_OPEN_VMS)
7994 {
7995 if (alpha_procedure_type == PT_STACK)
7996 reg_offset = 8;
7997 else
7998 reg_offset = 0;
7999 }
8000 else
8001 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8002
8003 alpha_sa_mask (&imask, &fmask);
8004
8005 fp_is_frame_pointer
8006 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8007 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8008 fp_offset = 0;
8009 sa_reg = stack_pointer_rtx;
8010
8011 if (current_function_calls_eh_return)
8012 eh_ofs = EH_RETURN_STACKADJ_RTX;
8013 else
8014 eh_ofs = NULL_RTX;
8015
8016 if (!TARGET_ABI_UNICOSMK && sa_size)
8017 {
8018 /* If we have a frame pointer, restore SP from it. */
8019 if ((TARGET_ABI_OPEN_VMS
8020 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8021 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8022 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8023
8024 /* Cope with very large offsets to the register save area. */
8025 if (reg_offset + sa_size > 0x8000)
8026 {
8027 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8028 HOST_WIDE_INT bias;
8029
8030 if (low + sa_size <= 0x8000)
8031 bias = reg_offset - low, reg_offset = low;
8032 else
8033 bias = reg_offset, reg_offset = 0;
8034
8035 sa_reg = gen_rtx_REG (DImode, 22);
8036 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8037
8038 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8039 }
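/* A worked example of the bias computation above, using illustrative
   values that are not taken from the source: with reg_offset == 0x9000
   and sa_size == 0x100, low == ((0x9000 ^ 0x8000) - 0x8000) == -0x7000,
   so bias == 0x10000 and reg_offset becomes -0x7000.  Register $22 then
   holds sp + 0x10000, and every save-area access uses a displacement
   that fits the signed 16-bit offset field of a load or store.  */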
8040
8041 /* Restore registers in order, excepting a true frame pointer. */
8042
8043 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8044 if (! eh_ofs)
8045 set_mem_alias_set (mem, alpha_sr_alias_set);
8046 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8047
8048 reg_offset += 8;
8049 imask &= ~(1UL << REG_RA);
8050
8051 for (i = 0; i < 31; ++i)
8052 if (imask & (1UL << i))
8053 {
8054 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8055 fp_offset = reg_offset;
8056 else
8057 {
8058 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8059 set_mem_alias_set (mem, alpha_sr_alias_set);
8060 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8061 }
8062 reg_offset += 8;
8063 }
8064
8065 for (i = 0; i < 31; ++i)
8066 if (fmask & (1UL << i))
8067 {
8068 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8069 set_mem_alias_set (mem, alpha_sr_alias_set);
8070 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8071 reg_offset += 8;
8072 }
8073 }
8074 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8075 {
8076 /* Restore callee-saved general-purpose registers. */
8077
8078 reg_offset = -56;
8079
8080 for (i = 9; i < 15; i++)
8081 if (imask & (1UL << i))
8082 {
8083 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8084 reg_offset));
8085 set_mem_alias_set (mem, alpha_sr_alias_set);
8086 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8087 reg_offset -= 8;
8088 }
8089
8090 for (i = 2; i < 10; i++)
8091 if (fmask & (1UL << i))
8092 {
8093 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8094 reg_offset));
8095 set_mem_alias_set (mem, alpha_sr_alias_set);
8096 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8097 reg_offset -= 8;
8098 }
8099
8100 /* Restore the return address from the DSIB. */
8101
8102 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8103 set_mem_alias_set (mem, alpha_sr_alias_set);
8104 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8105 }
8106
8107 if (frame_size || eh_ofs)
8108 {
8109 sp_adj1 = stack_pointer_rtx;
8110
8111 if (eh_ofs)
8112 {
8113 sp_adj1 = gen_rtx_REG (DImode, 23);
8114 emit_move_insn (sp_adj1,
8115 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8116 }
8117
8118 /* If the stack size is large, begin computation into a temporary
8119 register so as not to interfere with a potential fp restore,
8120 which must be consecutive with an SP restore. */
8121 if (frame_size < 32768
8122 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8123 sp_adj2 = GEN_INT (frame_size);
8124 else if (TARGET_ABI_UNICOSMK)
8125 {
8126 sp_adj1 = gen_rtx_REG (DImode, 23);
8127 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8128 sp_adj2 = const0_rtx;
8129 }
8130 else if (frame_size < 0x40007fffL)
8131 {
8132 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8133
8134 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8135 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8136 sp_adj1 = sa_reg;
8137 else
8138 {
8139 sp_adj1 = gen_rtx_REG (DImode, 23);
8140 FRP (emit_move_insn (sp_adj1, sp_adj2));
8141 }
8142 sp_adj2 = GEN_INT (low);
8143 }
8144 else
8145 {
8146 rtx tmp = gen_rtx_REG (DImode, 23);
8147 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8148 3, false));
8149 if (!sp_adj2)
8150 {
8151 /* We can't drop new things to memory this late, afaik,
8152 so build it up by pieces. */
8153 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8154 -(frame_size < 0)));
8155 gcc_assert (sp_adj2);
8156 }
8157 }
8158
8159 /* From now on, things must be in order. So emit blockages. */
8160
8161 /* Restore the frame pointer. */
8162 if (TARGET_ABI_UNICOSMK)
8163 {
8164 emit_insn (gen_blockage ());
8165 mem = gen_rtx_MEM (DImode,
8166 plus_constant (hard_frame_pointer_rtx, -16));
8167 set_mem_alias_set (mem, alpha_sr_alias_set);
8168 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8169 }
8170 else if (fp_is_frame_pointer)
8171 {
8172 emit_insn (gen_blockage ());
8173 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8174 set_mem_alias_set (mem, alpha_sr_alias_set);
8175 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8176 }
8177 else if (TARGET_ABI_OPEN_VMS)
8178 {
8179 emit_insn (gen_blockage ());
8180 FRP (emit_move_insn (hard_frame_pointer_rtx,
8181 gen_rtx_REG (DImode, vms_save_fp_regno)));
8182 }
8183
8184 /* Restore the stack pointer. */
8185 emit_insn (gen_blockage ());
8186 if (sp_adj2 == const0_rtx)
8187 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8188 else
8189 FRP (emit_move_insn (stack_pointer_rtx,
8190 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8191 }
8192 else
8193 {
8194 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8195 {
8196 emit_insn (gen_blockage ());
8197 FRP (emit_move_insn (hard_frame_pointer_rtx,
8198 gen_rtx_REG (DImode, vms_save_fp_regno)));
8199 }
8200 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8201 {
8202 /* Decrement the frame pointer if the function does not have a
8203 frame. */
8204
8205 emit_insn (gen_blockage ());
8206 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8207 hard_frame_pointer_rtx, constm1_rtx)));
8208 }
8209 }
8210 }
8211 \f
8212 /* Output the rest of the textual info surrounding the epilogue. */
8213
8214 void
8215 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8216 {
8217 rtx insn;
8218
8219 /* We output a nop after noreturn calls at the very end of the function to
8220 ensure that the return address always remains in the caller's code range,
8221 as not doing so might confuse unwinding engines. */
8222 insn = get_last_insn ();
8223 if (!INSN_P (insn))
8224 insn = prev_active_insn (insn);
8225 if (GET_CODE (insn) == CALL_INSN)
8226 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8227
8228 #if TARGET_ABI_OPEN_VMS
8229 alpha_write_linkage (file, fnname, decl);
8230 #endif
8231
8232 /* End the function. */
8233 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8234 {
8235 fputs ("\t.end ", file);
8236 assemble_name (file, fnname);
8237 putc ('\n', file);
8238 }
8239 inside_function = FALSE;
8240
8241 /* Output jump tables and the static subroutine information block. */
8242 if (TARGET_ABI_UNICOSMK)
8243 {
8244 unicosmk_output_ssib (file, fnname);
8245 unicosmk_output_deferred_case_vectors (file);
8246 }
8247 }
8248
8249 #if TARGET_ABI_OSF
8250 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8251
8252 In order to avoid the hordes of differences between generated code
8253 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8254 lots of code loading up large constants, generate rtl and emit it
8255 instead of going straight to text.
8256
8257 Not sure why this idea hasn't been explored before... */
8258
8259 static void
8260 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8261 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8262 tree function)
8263 {
8264 HOST_WIDE_INT hi, lo;
8265 rtx this, insn, funexp;
8266
8267 /* We always require a valid GP. */
8268 emit_insn (gen_prologue_ldgp ());
8269 emit_note (NOTE_INSN_PROLOGUE_END);
8270
8271 /* Find the "this" pointer. If the function returns a structure,
8272 the structure return pointer is in $16. */
8273 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8274 this = gen_rtx_REG (Pmode, 17);
8275 else
8276 this = gen_rtx_REG (Pmode, 16);
8277
8278 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8279 entire constant for the add. */
8280 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8281 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
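/* An illustrative value (not from the source): for delta == 0x18000,
   lo == (0x8000 ^ 0x8000) - 0x8000 == -0x8000 and hi == 0x20000, so
   hi + lo == delta and the adjustment becomes the ldah+lda pair
   mentioned above (ldah adds 2 * 65536, lda adds -0x8000).  Otherwise
   the full constant is materialized in $0 via alpha_emit_set_long_const
   below.  */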
8282 if (hi + lo == delta)
8283 {
8284 if (hi)
8285 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8286 if (lo)
8287 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8288 }
8289 else
8290 {
8291 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8292 delta, -(delta < 0));
8293 emit_insn (gen_adddi3 (this, this, tmp));
8294 }
8295
8296 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8297 if (vcall_offset)
8298 {
8299 rtx tmp, tmp2;
8300
8301 tmp = gen_rtx_REG (Pmode, 0);
8302 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8303
8304 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8305 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8306 if (hi + lo == vcall_offset)
8307 {
8308 if (hi)
8309 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8310 }
8311 else
8312 {
8313 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8314 vcall_offset, -(vcall_offset < 0));
8315 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8316 lo = 0;
8317 }
8318 if (lo)
8319 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8320 else
8321 tmp2 = tmp;
8322 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8323
8324 emit_insn (gen_adddi3 (this, this, tmp));
8325 }
8326
8327 /* Generate a tail call to the target function. */
8328 if (! TREE_USED (function))
8329 {
8330 assemble_external (function);
8331 TREE_USED (function) = 1;
8332 }
8333 funexp = XEXP (DECL_RTL (function), 0);
8334 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8335 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8336 SIBLING_CALL_P (insn) = 1;
8337
8338 /* Run just enough of rest_of_compilation to get the insns emitted.
8339 There's not really enough bulk here to make other passes such as
8340 instruction scheduling worthwhile. Note that use_thunk calls
8341 assemble_start_function and assemble_end_function. */
8342 insn = get_insns ();
8343 insn_locators_alloc ();
8344 shorten_branches (insn);
8345 final_start_function (insn, file, 1);
8346 final (insn, file, 1);
8347 final_end_function ();
8348 }
8349 #endif /* TARGET_ABI_OSF */
8350 \f
8351 /* Debugging support. */
8352
8353 #include "gstab.h"
8354
8355 /* Count the number of sdb-related labels that are generated (to find block
8356 start and end boundaries). */
8357
8358 int sdb_label_count = 0;
8359
8360 /* Name of the file containing the current function. */
8361
8362 static const char *current_function_file = "";
8363
8364 /* Offsets to alpha virtual arg/local debugging pointers. */
8365
8366 long alpha_arg_offset;
8367 long alpha_auto_offset;
8368 \f
8369 /* Emit a new filename to a stream. */
8370
8371 void
8372 alpha_output_filename (FILE *stream, const char *name)
8373 {
8374 static int first_time = TRUE;
8375
8376 if (first_time)
8377 {
8378 first_time = FALSE;
8379 ++num_source_filenames;
8380 current_function_file = name;
8381 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8382 output_quoted_string (stream, name);
8383 fprintf (stream, "\n");
8384 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8385 fprintf (stream, "\t#@stabs\n");
8386 }
8387
8388 else if (write_symbols == DBX_DEBUG)
8389 /* dbxout.c will emit an appropriate .stabs directive. */
8390 return;
8391
8392 else if (name != current_function_file
8393 && strcmp (name, current_function_file) != 0)
8394 {
8395 if (inside_function && ! TARGET_GAS)
8396 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8397 else
8398 {
8399 ++num_source_filenames;
8400 current_function_file = name;
8401 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8402 }
8403
8404 output_quoted_string (stream, name);
8405 fprintf (stream, "\n");
8406 }
8407 }
8408 \f
8409 /* Structure to show the current status of registers and memory. */
8410
8411 struct shadow_summary
8412 {
8413 struct {
8414 unsigned int i : 31; /* Mask of int regs */
8415 unsigned int fp : 31; /* Mask of fp regs */
8416 unsigned int mem : 1; /* mem == imem | fpmem */
8417 } used, defd;
8418 };
8419
8420 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8421 to the summary structure. SET is nonzero if the insn is setting the
8422 object, otherwise zero. */
8423
8424 static void
8425 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8426 {
8427 const char *format_ptr;
8428 int i, j;
8429
8430 if (x == 0)
8431 return;
8432
8433 switch (GET_CODE (x))
8434 {
8435 /* ??? Note that this case would be incorrect if the Alpha had a
8436 ZERO_EXTRACT in SET_DEST. */
8437 case SET:
8438 summarize_insn (SET_SRC (x), sum, 0);
8439 summarize_insn (SET_DEST (x), sum, 1);
8440 break;
8441
8442 case CLOBBER:
8443 summarize_insn (XEXP (x, 0), sum, 1);
8444 break;
8445
8446 case USE:
8447 summarize_insn (XEXP (x, 0), sum, 0);
8448 break;
8449
8450 case ASM_OPERANDS:
8451 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8452 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8453 break;
8454
8455 case PARALLEL:
8456 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8457 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8458 break;
8459
8460 case SUBREG:
8461 summarize_insn (SUBREG_REG (x), sum, 0);
8462 break;
8463
8464 case REG:
8465 {
8466 int regno = REGNO (x);
8467 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8468
8469 if (regno == 31 || regno == 63)
8470 break;
8471
8472 if (set)
8473 {
8474 if (regno < 32)
8475 sum->defd.i |= mask;
8476 else
8477 sum->defd.fp |= mask;
8478 }
8479 else
8480 {
8481 if (regno < 32)
8482 sum->used.i |= mask;
8483 else
8484 sum->used.fp |= mask;
8485 }
8486 }
8487 break;
8488
8489 case MEM:
8490 if (set)
8491 sum->defd.mem = 1;
8492 else
8493 sum->used.mem = 1;
8494
8495 /* Find the regs used in memory address computation: */
8496 summarize_insn (XEXP (x, 0), sum, 0);
8497 break;
8498
8499 case CONST_INT: case CONST_DOUBLE:
8500 case SYMBOL_REF: case LABEL_REF: case CONST:
8501 case SCRATCH: case ASM_INPUT:
8502 break;
8503
8504 /* Handle common unary and binary ops for efficiency. */
8505 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8506 case MOD: case UDIV: case UMOD: case AND: case IOR:
8507 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8508 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8509 case NE: case EQ: case GE: case GT: case LE:
8510 case LT: case GEU: case GTU: case LEU: case LTU:
8511 summarize_insn (XEXP (x, 0), sum, 0);
8512 summarize_insn (XEXP (x, 1), sum, 0);
8513 break;
8514
8515 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8516 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8517 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8518 case SQRT: case FFS:
8519 summarize_insn (XEXP (x, 0), sum, 0);
8520 break;
8521
8522 default:
8523 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8524 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8525 switch (format_ptr[i])
8526 {
8527 case 'e':
8528 summarize_insn (XEXP (x, i), sum, 0);
8529 break;
8530
8531 case 'E':
8532 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8533 summarize_insn (XVECEXP (x, i, j), sum, 0);
8534 break;
8535
8536 case 'i':
8537 break;
8538
8539 default:
8540 gcc_unreachable ();
8541 }
8542 }
8543 }
8544
8545 /* Ensure a sufficient number of `trapb' insns are in the code when
8546 the user requests code with a trap precision of functions or
8547 instructions.
8548
8549 In naive mode, when the user requests a trap-precision of
8550 "instruction", a trapb is needed after every instruction that may
8551 generate a trap. This ensures that the code is resumption safe but
8552 it is also slow.
8553
8554 When optimizations are turned on, we delay issuing a trapb as long
8555 as possible. In this context, a trap shadow is the sequence of
8556 instructions that starts with a (potentially) trap generating
8557 instruction and extends to the next trapb or call_pal instruction
8558 (but GCC never generates call_pal by itself). We can delay (and
8559 therefore sometimes omit) a trapb subject to the following
8560 conditions:
8561
8562 (a) On entry to the trap shadow, if any Alpha register or memory
8563 location contains a value that is used as an operand value by some
8564 instruction in the trap shadow (live on entry), then no instruction
8565 in the trap shadow may modify the register or memory location.
8566
8567 (b) Within the trap shadow, the computation of the base register
8568 for a memory load or store instruction may not involve using the
8569 result of an instruction that might generate an UNPREDICTABLE
8570 result.
8571
8572 (c) Within the trap shadow, no register may be used more than once
8573 as a destination register. (This is to make life easier for the
8574 trap-handler.)
8575
8576 (d) The trap shadow may not include any branch instructions. */
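/* As a hand-written illustration (not generated from this file): with
   -mfp-trap-mode=su -mtrap-precision=i, a shadow such as

	addt/sui $f1,$f2,$f3	# potentially trapping; shadow opens
	mult/sui $f4,$f5,$f6	# ok: new destination, operands live on entry
	trapb			# shadow closes

   satisfies (a)-(d); writing $f3 or $f6 again, or branching, before the
   trapb would force the trapb to be emitted earlier.  */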
8577
8578 static void
8579 alpha_handle_trap_shadows (void)
8580 {
8581 struct shadow_summary shadow;
8582 int trap_pending, exception_nesting;
8583 rtx i, n;
8584
8585 trap_pending = 0;
8586 exception_nesting = 0;
8587 shadow.used.i = 0;
8588 shadow.used.fp = 0;
8589 shadow.used.mem = 0;
8590 shadow.defd = shadow.used;
8591
8592 for (i = get_insns (); i ; i = NEXT_INSN (i))
8593 {
8594 if (GET_CODE (i) == NOTE)
8595 {
8596 switch (NOTE_KIND (i))
8597 {
8598 case NOTE_INSN_EH_REGION_BEG:
8599 exception_nesting++;
8600 if (trap_pending)
8601 goto close_shadow;
8602 break;
8603
8604 case NOTE_INSN_EH_REGION_END:
8605 exception_nesting--;
8606 if (trap_pending)
8607 goto close_shadow;
8608 break;
8609
8610 case NOTE_INSN_EPILOGUE_BEG:
8611 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8612 goto close_shadow;
8613 break;
8614 }
8615 }
8616 else if (trap_pending)
8617 {
8618 if (alpha_tp == ALPHA_TP_FUNC)
8619 {
8620 if (GET_CODE (i) == JUMP_INSN
8621 && GET_CODE (PATTERN (i)) == RETURN)
8622 goto close_shadow;
8623 }
8624 else if (alpha_tp == ALPHA_TP_INSN)
8625 {
8626 if (optimize > 0)
8627 {
8628 struct shadow_summary sum;
8629
8630 sum.used.i = 0;
8631 sum.used.fp = 0;
8632 sum.used.mem = 0;
8633 sum.defd = sum.used;
8634
8635 switch (GET_CODE (i))
8636 {
8637 case INSN:
8638 /* Annoyingly, get_attr_trap will die on these. */
8639 if (GET_CODE (PATTERN (i)) == USE
8640 || GET_CODE (PATTERN (i)) == CLOBBER)
8641 break;
8642
8643 summarize_insn (PATTERN (i), &sum, 0);
8644
8645 if ((sum.defd.i & shadow.defd.i)
8646 || (sum.defd.fp & shadow.defd.fp))
8647 {
8648 /* (c) would be violated */
8649 goto close_shadow;
8650 }
8651
8652 /* Combine shadow with summary of current insn: */
8653 shadow.used.i |= sum.used.i;
8654 shadow.used.fp |= sum.used.fp;
8655 shadow.used.mem |= sum.used.mem;
8656 shadow.defd.i |= sum.defd.i;
8657 shadow.defd.fp |= sum.defd.fp;
8658 shadow.defd.mem |= sum.defd.mem;
8659
8660 if ((sum.defd.i & shadow.used.i)
8661 || (sum.defd.fp & shadow.used.fp)
8662 || (sum.defd.mem & shadow.used.mem))
8663 {
8664 /* (a) would be violated (also takes care of (b)) */
8665 gcc_assert (get_attr_trap (i) != TRAP_YES
8666 || (!(sum.defd.i & sum.used.i)
8667 && !(sum.defd.fp & sum.used.fp)));
8668
8669 goto close_shadow;
8670 }
8671 break;
8672
8673 case JUMP_INSN:
8674 case CALL_INSN:
8675 case CODE_LABEL:
8676 goto close_shadow;
8677
8678 default:
8679 gcc_unreachable ();
8680 }
8681 }
8682 else
8683 {
8684 close_shadow:
8685 n = emit_insn_before (gen_trapb (), i);
8686 PUT_MODE (n, TImode);
8687 PUT_MODE (i, TImode);
8688 trap_pending = 0;
8689 shadow.used.i = 0;
8690 shadow.used.fp = 0;
8691 shadow.used.mem = 0;
8692 shadow.defd = shadow.used;
8693 }
8694 }
8695 }
8696
8697 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8698 && GET_CODE (i) == INSN
8699 && GET_CODE (PATTERN (i)) != USE
8700 && GET_CODE (PATTERN (i)) != CLOBBER
8701 && get_attr_trap (i) == TRAP_YES)
8702 {
8703 if (optimize && !trap_pending)
8704 summarize_insn (PATTERN (i), &shadow, 0);
8705 trap_pending = 1;
8706 }
8707 }
8708 }
8709 \f
8710 /* Alpha can only issue instruction groups simultaneously if they are
8711 suitably aligned. This is very processor-specific. */
8712 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8713 that are marked "fake". These instructions do not exist on that target,
8714 but it is possible to see these insns with deranged combinations of
8715 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8716 choose a result at random. */
8717
8718 enum alphaev4_pipe {
8719 EV4_STOP = 0,
8720 EV4_IB0 = 1,
8721 EV4_IB1 = 2,
8722 EV4_IBX = 4
8723 };
8724
8725 enum alphaev5_pipe {
8726 EV5_STOP = 0,
8727 EV5_NONE = 1,
8728 EV5_E01 = 2,
8729 EV5_E0 = 4,
8730 EV5_E1 = 8,
8731 EV5_FAM = 16,
8732 EV5_FA = 32,
8733 EV5_FM = 64
8734 };
8735
8736 static enum alphaev4_pipe
8737 alphaev4_insn_pipe (rtx insn)
8738 {
8739 if (recog_memoized (insn) < 0)
8740 return EV4_STOP;
8741 if (get_attr_length (insn) != 4)
8742 return EV4_STOP;
8743
8744 switch (get_attr_type (insn))
8745 {
8746 case TYPE_ILD:
8747 case TYPE_LDSYM:
8748 case TYPE_FLD:
8749 case TYPE_LD_L:
8750 return EV4_IBX;
8751
8752 case TYPE_IADD:
8753 case TYPE_ILOG:
8754 case TYPE_ICMOV:
8755 case TYPE_ICMP:
8756 case TYPE_FST:
8757 case TYPE_SHIFT:
8758 case TYPE_IMUL:
8759 case TYPE_FBR:
8760 case TYPE_MVI: /* fake */
8761 return EV4_IB0;
8762
8763 case TYPE_IST:
8764 case TYPE_MISC:
8765 case TYPE_IBR:
8766 case TYPE_JSR:
8767 case TYPE_CALLPAL:
8768 case TYPE_FCPYS:
8769 case TYPE_FCMOV:
8770 case TYPE_FADD:
8771 case TYPE_FDIV:
8772 case TYPE_FMUL:
8773 case TYPE_ST_C:
8774 case TYPE_MB:
8775 case TYPE_FSQRT: /* fake */
8776 case TYPE_FTOI: /* fake */
8777 case TYPE_ITOF: /* fake */
8778 return EV4_IB1;
8779
8780 default:
8781 gcc_unreachable ();
8782 }
8783 }
8784
8785 static enum alphaev5_pipe
8786 alphaev5_insn_pipe (rtx insn)
8787 {
8788 if (recog_memoized (insn) < 0)
8789 return EV5_STOP;
8790 if (get_attr_length (insn) != 4)
8791 return EV5_STOP;
8792
8793 switch (get_attr_type (insn))
8794 {
8795 case TYPE_ILD:
8796 case TYPE_FLD:
8797 case TYPE_LDSYM:
8798 case TYPE_IADD:
8799 case TYPE_ILOG:
8800 case TYPE_ICMOV:
8801 case TYPE_ICMP:
8802 return EV5_E01;
8803
8804 case TYPE_IST:
8805 case TYPE_FST:
8806 case TYPE_SHIFT:
8807 case TYPE_IMUL:
8808 case TYPE_MISC:
8809 case TYPE_MVI:
8810 case TYPE_LD_L:
8811 case TYPE_ST_C:
8812 case TYPE_MB:
8813 case TYPE_FTOI: /* fake */
8814 case TYPE_ITOF: /* fake */
8815 return EV5_E0;
8816
8817 case TYPE_IBR:
8818 case TYPE_JSR:
8819 case TYPE_CALLPAL:
8820 return EV5_E1;
8821
8822 case TYPE_FCPYS:
8823 return EV5_FAM;
8824
8825 case TYPE_FBR:
8826 case TYPE_FCMOV:
8827 case TYPE_FADD:
8828 case TYPE_FDIV:
8829 case TYPE_FSQRT: /* fake */
8830 return EV5_FA;
8831
8832 case TYPE_FMUL:
8833 return EV5_FM;
8834
8835 default:
8836 gcc_unreachable ();
8837 }
8838 }
8839
8840 /* IN_USE is a mask of the slots currently filled within the insn group.
8841 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8842 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8843
8844 LEN is, of course, the length of the group in bytes. */
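/* An illustrative walk through the state machine below (not from the
   source): a load classified as EV4_IBX records EV4_IB0 | EV4_IBX in
   IN_USE; a following EV4_IB0 insn is still accepted because the load
   can be swapped into IB1, leaving EV4_IB0 | EV4_IB1 | EV4_IBX, after
   which any further insn ends the group.  */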
8845
8846 static rtx
8847 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8848 {
8849 int len, in_use;
8850
8851 len = in_use = 0;
8852
8853 if (! INSN_P (insn)
8854 || GET_CODE (PATTERN (insn)) == CLOBBER
8855 || GET_CODE (PATTERN (insn)) == USE)
8856 goto next_and_done;
8857
8858 while (1)
8859 {
8860 enum alphaev4_pipe pipe;
8861
8862 pipe = alphaev4_insn_pipe (insn);
8863 switch (pipe)
8864 {
8865 case EV4_STOP:
8866 /* Force complex instructions to start new groups. */
8867 if (in_use)
8868 goto done;
8869
8870 /* If this is a completely unrecognized insn, it's an asm.
8871 We don't know how long it is, so record length as -1 to
8872 signal a needed realignment. */
8873 if (recog_memoized (insn) < 0)
8874 len = -1;
8875 else
8876 len = get_attr_length (insn);
8877 goto next_and_done;
8878
8879 case EV4_IBX:
8880 if (in_use & EV4_IB0)
8881 {
8882 if (in_use & EV4_IB1)
8883 goto done;
8884 in_use |= EV4_IB1;
8885 }
8886 else
8887 in_use |= EV4_IB0 | EV4_IBX;
8888 break;
8889
8890 case EV4_IB0:
8891 if (in_use & EV4_IB0)
8892 {
8893 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8894 goto done;
8895 in_use |= EV4_IB1;
8896 }
8897 in_use |= EV4_IB0;
8898 break;
8899
8900 case EV4_IB1:
8901 if (in_use & EV4_IB1)
8902 goto done;
8903 in_use |= EV4_IB1;
8904 break;
8905
8906 default:
8907 gcc_unreachable ();
8908 }
8909 len += 4;
8910
8911 /* Haifa doesn't do well scheduling branches. */
8912 if (GET_CODE (insn) == JUMP_INSN)
8913 goto next_and_done;
8914
8915 next:
8916 insn = next_nonnote_insn (insn);
8917
8918 if (!insn || ! INSN_P (insn))
8919 goto done;
8920
8921 /* Let Haifa tell us where it thinks insn group boundaries are. */
8922 if (GET_MODE (insn) == TImode)
8923 goto done;
8924
8925 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8926 goto next;
8927 }
8928
8929 next_and_done:
8930 insn = next_nonnote_insn (insn);
8931
8932 done:
8933 *plen = len;
8934 *pin_use = in_use;
8935 return insn;
8936 }
8937
8938 /* IN_USE is a mask of the slots currently filled within the insn group.
8939 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8940 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8941
8942 LEN is, of course, the length of the group in bytes. */
8943
8944 static rtx
8945 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8946 {
8947 int len, in_use;
8948
8949 len = in_use = 0;
8950
8951 if (! INSN_P (insn)
8952 || GET_CODE (PATTERN (insn)) == CLOBBER
8953 || GET_CODE (PATTERN (insn)) == USE)
8954 goto next_and_done;
8955
8956 while (1)
8957 {
8958 enum alphaev5_pipe pipe;
8959
8960 pipe = alphaev5_insn_pipe (insn);
8961 switch (pipe)
8962 {
8963 case EV5_STOP:
8964 /* Force complex instructions to start new groups. */
8965 if (in_use)
8966 goto done;
8967
8968 /* If this is a completely unrecognized insn, it's an asm.
8969 We don't know how long it is, so record length as -1 to
8970 signal a needed realignment. */
8971 if (recog_memoized (insn) < 0)
8972 len = -1;
8973 else
8974 len = get_attr_length (insn);
8975 goto next_and_done;
8976
8977 /* ??? In most of the places below, we would like to assert that
8978 these cases never happen, as they would indicate an error either
8979 in Haifa or in the scheduling description. Unfortunately, Haifa
8980 never schedules the last instruction of the BB, so we don't have
8981 an accurate TI bit to go off of. */
8982 case EV5_E01:
8983 if (in_use & EV5_E0)
8984 {
8985 if (in_use & EV5_E1)
8986 goto done;
8987 in_use |= EV5_E1;
8988 }
8989 else
8990 in_use |= EV5_E0 | EV5_E01;
8991 break;
8992
8993 case EV5_E0:
8994 if (in_use & EV5_E0)
8995 {
8996 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8997 goto done;
8998 in_use |= EV5_E1;
8999 }
9000 in_use |= EV5_E0;
9001 break;
9002
9003 case EV5_E1:
9004 if (in_use & EV5_E1)
9005 goto done;
9006 in_use |= EV5_E1;
9007 break;
9008
9009 case EV5_FAM:
9010 if (in_use & EV5_FA)
9011 {
9012 if (in_use & EV5_FM)
9013 goto done;
9014 in_use |= EV5_FM;
9015 }
9016 else
9017 in_use |= EV5_FA | EV5_FAM;
9018 break;
9019
9020 case EV5_FA:
9021 if (in_use & EV5_FA)
9022 goto done;
9023 in_use |= EV5_FA;
9024 break;
9025
9026 case EV5_FM:
9027 if (in_use & EV5_FM)
9028 goto done;
9029 in_use |= EV5_FM;
9030 break;
9031
9032 case EV5_NONE:
9033 break;
9034
9035 default:
9036 gcc_unreachable ();
9037 }
9038 len += 4;
9039
9040 /* Haifa doesn't do well scheduling branches. */
9041 /* ??? If this is predicted not-taken, slotting continues, except
9042 that no more IBR, FBR, or JSR insns may be slotted. */
9043 if (GET_CODE (insn) == JUMP_INSN)
9044 goto next_and_done;
9045
9046 next:
9047 insn = next_nonnote_insn (insn);
9048
9049 if (!insn || ! INSN_P (insn))
9050 goto done;
9051
9052 /* Let Haifa tell us where it thinks insn group boundaries are. */
9053 if (GET_MODE (insn) == TImode)
9054 goto done;
9055
9056 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9057 goto next;
9058 }
9059
9060 next_and_done:
9061 insn = next_nonnote_insn (insn);
9062
9063 done:
9064 *plen = len;
9065 *pin_use = in_use;
9066 return insn;
9067 }
9068
9069 static rtx
9070 alphaev4_next_nop (int *pin_use)
9071 {
9072 int in_use = *pin_use;
9073 rtx nop;
9074
9075 if (!(in_use & EV4_IB0))
9076 {
9077 in_use |= EV4_IB0;
9078 nop = gen_nop ();
9079 }
9080 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9081 {
9082 in_use |= EV4_IB1;
9083 nop = gen_nop ();
9084 }
9085 else if (TARGET_FP && !(in_use & EV4_IB1))
9086 {
9087 in_use |= EV4_IB1;
9088 nop = gen_fnop ();
9089 }
9090 else
9091 nop = gen_unop ();
9092
9093 *pin_use = in_use;
9094 return nop;
9095 }
9096
9097 static rtx
9098 alphaev5_next_nop (int *pin_use)
9099 {
9100 int in_use = *pin_use;
9101 rtx nop;
9102
9103 if (!(in_use & EV5_E1))
9104 {
9105 in_use |= EV5_E1;
9106 nop = gen_nop ();
9107 }
9108 else if (TARGET_FP && !(in_use & EV5_FA))
9109 {
9110 in_use |= EV5_FA;
9111 nop = gen_fnop ();
9112 }
9113 else if (TARGET_FP && !(in_use & EV5_FM))
9114 {
9115 in_use |= EV5_FM;
9116 nop = gen_fnop ();
9117 }
9118 else
9119 nop = gen_unop ();
9120
9121 *pin_use = in_use;
9122 return nop;
9123 }
9124
9125 /* The instruction group alignment main loop. */
9126
9127 static void
9128 alpha_align_insns (unsigned int max_align,
9129 rtx (*next_group) (rtx, int *, int *),
9130 rtx (*next_nop) (int *))
9131 {
9132 /* ALIGN is the known alignment for the insn group. */
9133 unsigned int align;
9134 /* OFS is the offset of the current insn in the insn group. */
9135 int ofs;
9136 int prev_in_use, in_use, len, ldgp;
9137 rtx i, next;
9138
9139 /* Let shorten_branches take care of assigning alignments to code labels. */
9140 shorten_branches (get_insns ());
9141
9142 if (align_functions < 4)
9143 align = 4;
9144 else if ((unsigned int) align_functions < max_align)
9145 align = align_functions;
9146 else
9147 align = max_align;
9148
9149 ofs = prev_in_use = 0;
9150 i = get_insns ();
9151 if (GET_CODE (i) == NOTE)
9152 i = next_nonnote_insn (i);
9153
9154 ldgp = alpha_function_needs_gp ? 8 : 0;
9155
9156 while (i)
9157 {
9158 next = (*next_group) (i, &in_use, &len);
9159
9160 /* When we see a label, resync alignment etc. */
9161 if (GET_CODE (i) == CODE_LABEL)
9162 {
9163 unsigned int new_align = 1 << label_to_alignment (i);
9164
9165 if (new_align >= align)
9166 {
9167 align = new_align < max_align ? new_align : max_align;
9168 ofs = 0;
9169 }
9170
9171 else if (ofs & (new_align-1))
9172 ofs = (ofs | (new_align-1)) + 1;
9173 gcc_assert (!len);
9174 }
9175
9176 /* Handle complex instructions specially. */
9177 else if (in_use == 0)
9178 {
9179 /* Asms will have length < 0. This is a signal that we have
9180 lost alignment knowledge. Assume, however, that the asm
9181 will not mis-align instructions. */
9182 if (len < 0)
9183 {
9184 ofs = 0;
9185 align = 4;
9186 len = 0;
9187 }
9188 }
9189
9190 /* If the known alignment is smaller than the recognized insn group,
9191 realign the output. */
9192 else if ((int) align < len)
9193 {
9194 unsigned int new_log_align = len > 8 ? 4 : 3;
9195 rtx prev, where;
9196
9197 where = prev = prev_nonnote_insn (i);
9198 if (!where || GET_CODE (where) != CODE_LABEL)
9199 where = i;
9200
9201 /* Can't realign between a call and its gp reload. */
9202 if (! (TARGET_EXPLICIT_RELOCS
9203 && prev && GET_CODE (prev) == CALL_INSN))
9204 {
9205 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9206 align = 1 << new_log_align;
9207 ofs = 0;
9208 }
9209 }
9210
9211 /* We may not insert padding inside the initial ldgp sequence. */
9212 else if (ldgp > 0)
9213 ldgp -= len;
9214
9215 /* If the group won't fit in the same INT16 as the previous,
9216 we need to add padding to keep the group together. Rather
9217 than simply leaving the insn filling to the assembler, we
9218 can make use of the knowledge of what sorts of instructions
9219 were issued in the previous group to make sure that all of
9220 the added nops are really free. */
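/* A small illustration with assumed values: if ALIGN is 16, OFS is 12
   and the next group is 8 bytes long, the group would straddle the
   16-byte fetch block, so (16 - 12) / 4 == 1 nop is emitted and the
   group starts at the next boundary with OFS reset to zero.  */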
9221 else if (ofs + len > (int) align)
9222 {
9223 int nop_count = (align - ofs) / 4;
9224 rtx where;
9225
9226 /* Insert nops before labels, branches, and calls to truly merge
9227 the execution of the nops with the previous instruction group. */
9228 where = prev_nonnote_insn (i);
9229 if (where)
9230 {
9231 if (GET_CODE (where) == CODE_LABEL)
9232 {
9233 rtx where2 = prev_nonnote_insn (where);
9234 if (where2 && GET_CODE (where2) == JUMP_INSN)
9235 where = where2;
9236 }
9237 else if (GET_CODE (where) == INSN)
9238 where = i;
9239 }
9240 else
9241 where = i;
9242
9243 do
9244 emit_insn_before ((*next_nop)(&prev_in_use), where);
9245 while (--nop_count);
9246 ofs = 0;
9247 }
9248
9249 ofs = (ofs + len) & (align - 1);
9250 prev_in_use = in_use;
9251 i = next;
9252 }
9253 }
9254 \f
9255 /* Machine dependent reorg pass. */
9256
9257 static void
9258 alpha_reorg (void)
9259 {
9260 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9261 alpha_handle_trap_shadows ();
9262
9263 /* Due to the number of extra trapb insns, don't bother fixing up
9264 alignment when trap precision is instruction. Moreover, we can
9265 only do our job when sched2 is run. */
9266 if (optimize && !optimize_size
9267 && alpha_tp != ALPHA_TP_INSN
9268 && flag_schedule_insns_after_reload)
9269 {
9270 if (alpha_tune == PROCESSOR_EV4)
9271 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9272 else if (alpha_tune == PROCESSOR_EV5)
9273 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9274 }
9275 }
9276 \f
9277 #if !TARGET_ABI_UNICOSMK
9278
9279 #ifdef HAVE_STAMP_H
9280 #include <stamp.h>
9281 #endif
9282
9283 static void
9284 alpha_file_start (void)
9285 {
9286 #ifdef OBJECT_FORMAT_ELF
9287 /* If emitting dwarf2 debug information, we cannot generate a .file
9288 directive to start the file, as it will conflict with dwarf2out
9289 file numbers. So it's only useful when emitting mdebug output. */
9290 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9291 #endif
9292
9293 default_file_start ();
9294 #ifdef MS_STAMP
9295 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9296 #endif
9297
9298 fputs ("\t.set noreorder\n", asm_out_file);
9299 fputs ("\t.set volatile\n", asm_out_file);
9300 if (!TARGET_ABI_OPEN_VMS)
9301 fputs ("\t.set noat\n", asm_out_file);
9302 if (TARGET_EXPLICIT_RELOCS)
9303 fputs ("\t.set nomacro\n", asm_out_file);
9304 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9305 {
9306 const char *arch;
9307
9308 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9309 arch = "ev6";
9310 else if (TARGET_MAX)
9311 arch = "pca56";
9312 else if (TARGET_BWX)
9313 arch = "ev56";
9314 else if (alpha_cpu == PROCESSOR_EV5)
9315 arch = "ev5";
9316 else
9317 arch = "ev4";
9318
9319 fprintf (asm_out_file, "\t.arch %s\n", arch);
9320 }
9321 }
9322 #endif
9323
9324 #ifdef OBJECT_FORMAT_ELF
9325 /* Since we don't have a .dynbss section, we should not allow global
9326 relocations in the .rodata section. */
9327
9328 static int
9329 alpha_elf_reloc_rw_mask (void)
9330 {
9331 return flag_pic ? 3 : 2;
9332 }
9333
9334 /* Return a section for X. The only special thing we do here is to
9335 honor small data. */
9336
9337 static section *
9338 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9339 unsigned HOST_WIDE_INT align)
9340 {
9341 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9342 /* ??? Consider using mergeable sdata sections. */
9343 return sdata_section;
9344 else
9345 return default_elf_select_rtx_section (mode, x, align);
9346 }
9347
9348 static unsigned int
9349 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9350 {
9351 unsigned int flags = 0;
9352
9353 if (strcmp (name, ".sdata") == 0
9354 || strncmp (name, ".sdata.", 7) == 0
9355 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9356 || strcmp (name, ".sbss") == 0
9357 || strncmp (name, ".sbss.", 6) == 0
9358 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9359 flags = SECTION_SMALL;
9360
9361 flags |= default_section_type_flags (decl, name, reloc);
9362 return flags;
9363 }
9364 #endif /* OBJECT_FORMAT_ELF */
9365 \f
9366 /* Structure to collect function names for final output in link section. */
9367 /* Note that items marked with GTY can't be ifdef'ed out. */
9368
9369 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9370 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9371
9372 struct alpha_links GTY(())
9373 {
9374 int num;
9375 rtx linkage;
9376 enum links_kind lkind;
9377 enum reloc_kind rkind;
9378 };
9379
9380 struct alpha_funcs GTY(())
9381 {
9382 int num;
9383 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9384 links;
9385 };
9386
9387 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9388 splay_tree alpha_links_tree;
9389 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9390 splay_tree alpha_funcs_tree;
9391
9392 static GTY(()) int alpha_funcs_num;
9393
9394 #if TARGET_ABI_OPEN_VMS
9395
9396 /* Return the VMS argument type corresponding to MODE. */
9397
9398 enum avms_arg_type
9399 alpha_arg_type (enum machine_mode mode)
9400 {
9401 switch (mode)
9402 {
9403 case SFmode:
9404 return TARGET_FLOAT_VAX ? FF : FS;
9405 case DFmode:
9406 return TARGET_FLOAT_VAX ? FD : FT;
9407 default:
9408 return I64;
9409 }
9410 }
9411
9412 /* Return an rtx for an integer representing the VMS Argument Information
9413 register value. */
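/* Sketch of the encoding performed below, inferred from the code rather
   than from VMS documentation: REGVAL starts as cum.num_args, and each
   argument's type code cum.atypes[i] is OR'd into the 3-bit field at
   bit i * 3 + 8 for the first six arguments.  */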
9414
9415 rtx
9416 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9417 {
9418 unsigned HOST_WIDE_INT regval = cum.num_args;
9419 int i;
9420
9421 for (i = 0; i < 6; i++)
9422 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9423
9424 return GEN_INT (regval);
9425 }
9426 \f
9427 /* Make (or fake) a .linkage entry for a function call.
9428
9429 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9430
9431 Return a SYMBOL_REF rtx for the linkage. */
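/* For example, derived from the name construction below: a call to
   alpha_need_linkage ("foo", 0) produces the symbol "$foo..lk", while
   alpha_use_linkage later creates per-function entries of the form
   "$<N>..foo..lk", where N is the calling function's sequence number.  */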
9432
9433 rtx
9434 alpha_need_linkage (const char *name, int is_local)
9435 {
9436 splay_tree_node node;
9437 struct alpha_links *al;
9438
9439 if (name[0] == '*')
9440 name++;
9441
9442 if (is_local)
9443 {
9444 struct alpha_funcs *cfaf;
9445
9446 if (!alpha_funcs_tree)
9447 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9448 splay_tree_compare_pointers);
9449
9450 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9451
9452 cfaf->links = 0;
9453 cfaf->num = ++alpha_funcs_num;
9454
9455 splay_tree_insert (alpha_funcs_tree,
9456 (splay_tree_key) current_function_decl,
9457 (splay_tree_value) cfaf);
9458 }
9459
9460 if (alpha_links_tree)
9461 {
9462 /* Is this name already defined? */
9463
9464 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9465 if (node)
9466 {
9467 al = (struct alpha_links *) node->value;
9468 if (is_local)
9469 {
9470 /* Defined here but external assumed. */
9471 if (al->lkind == KIND_EXTERN)
9472 al->lkind = KIND_LOCAL;
9473 }
9474 else
9475 {
9476 /* Used here but unused assumed. */
9477 if (al->lkind == KIND_UNUSED)
9478 al->lkind = KIND_LOCAL;
9479 }
9480 return al->linkage;
9481 }
9482 }
9483 else
9484 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9485
9486 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9487 name = ggc_strdup (name);
9488
9489 /* Assume external if no definition. */
9490 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9491
9492 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9493 get_identifier (name);
9494
9495 /* Construct a SYMBOL_REF for us to call. */
9496 {
9497 size_t name_len = strlen (name);
9498 char *linksym = alloca (name_len + 6);
9499 linksym[0] = '$';
9500 memcpy (linksym + 1, name, name_len);
9501 memcpy (linksym + 1 + name_len, "..lk", 5);
9502 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9503 ggc_alloc_string (linksym, name_len + 5));
9504 }
9505
9506 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9507 (splay_tree_value) al);
9508
9509 return al->linkage;
9510 }
9511
9512 rtx
9513 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9514 {
9515 splay_tree_node cfunnode;
9516 struct alpha_funcs *cfaf;
9517 struct alpha_links *al;
9518 const char *name = XSTR (linkage, 0);
9519
9520 cfaf = (struct alpha_funcs *) 0;
9521 al = (struct alpha_links *) 0;
9522
9523 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9524 cfaf = (struct alpha_funcs *) cfunnode->value;
9525
9526 if (cfaf->links)
9527 {
9528 splay_tree_node lnode;
9529
9530 /* Is this name already defined? */
9531
9532 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9533 if (lnode)
9534 al = (struct alpha_links *) lnode->value;
9535 }
9536 else
9537 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9538
9539 if (!al)
9540 {
9541 size_t name_len;
9542 size_t buflen;
9543 char buf [512];
9544 char *linksym;
9545 splay_tree_node node = 0;
9546 struct alpha_links *anl;
9547
9548 if (name[0] == '*')
9549 name++;
9550
9551 name_len = strlen (name);
9552
9553 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9554 al->num = cfaf->num;
9555
9556 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9557 if (node)
9558 {
9559 anl = (struct alpha_links *) node->value;
9560 al->lkind = anl->lkind;
9561 }
9562
9563 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9564 buflen = strlen (buf);
9565 linksym = alloca (buflen + 1);
9566 memcpy (linksym, buf, buflen + 1);
9567
9568 al->linkage = gen_rtx_SYMBOL_REF
9569 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9570
9571 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9572 (splay_tree_value) al);
9573 }
9574
9575 if (rflag)
9576 al->rkind = KIND_CODEADDR;
9577 else
9578 al->rkind = KIND_LINKAGE;
9579
9580 if (lflag)
9581 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9582 else
9583 return al->linkage;
9584 }
9585
9586 static int
9587 alpha_write_one_linkage (splay_tree_node node, void *data)
9588 {
9589 const char *const name = (const char *) node->key;
9590 struct alpha_links *link = (struct alpha_links *) node->value;
9591 FILE *stream = (FILE *) data;
9592
9593 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9594 if (link->rkind == KIND_CODEADDR)
9595 {
9596 if (link->lkind == KIND_LOCAL)
9597 {
9598 /* Local and used */
9599 fprintf (stream, "\t.quad %s..en\n", name);
9600 }
9601 else
9602 {
9603 /* External and used, request code address. */
9604 fprintf (stream, "\t.code_address %s\n", name);
9605 }
9606 }
9607 else
9608 {
9609 if (link->lkind == KIND_LOCAL)
9610 {
9611 /* Local and used, build linkage pair. */
9612 fprintf (stream, "\t.quad %s..en\n", name);
9613 fprintf (stream, "\t.quad %s\n", name);
9614 }
9615 else
9616 {
9617 /* External and used, request linkage pair. */
9618 fprintf (stream, "\t.linkage %s\n", name);
9619 }
9620 }
9621
9622 return 0;
9623 }
9624
9625 static void
9626 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9627 {
9628 splay_tree_node node;
9629 struct alpha_funcs *func;
9630
9631 fprintf (stream, "\t.link\n");
9632 fprintf (stream, "\t.align 3\n");
9633 in_section = NULL;
9634
9635 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9636 func = (struct alpha_funcs *) node->value;
9637
9638 fputs ("\t.name ", stream);
9639 assemble_name (stream, funname);
9640 fputs ("..na\n", stream);
9641 ASM_OUTPUT_LABEL (stream, funname);
9642 fprintf (stream, "\t.pdesc ");
9643 assemble_name (stream, funname);
9644 fprintf (stream, "..en,%s\n",
9645 alpha_procedure_type == PT_STACK ? "stack"
9646 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9647
9648 if (func->links)
9649 {
9650 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9651 /* splay_tree_delete (func->links); */
9652 }
9653 }
9654
9655 /* Given a decl, a section name, and whether the decl initializer
9656 has relocs, choose attributes for the section. */
9657
9658 #define SECTION_VMS_OVERLAY SECTION_FORGET
9659 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9660 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9661
9662 static unsigned int
9663 vms_section_type_flags (tree decl, const char *name, int reloc)
9664 {
9665 unsigned int flags = default_section_type_flags (decl, name, reloc);
9666
9667 if (decl && DECL_ATTRIBUTES (decl)
9668 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9669 flags |= SECTION_VMS_OVERLAY;
9670 if (decl && DECL_ATTRIBUTES (decl)
9671 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9672 flags |= SECTION_VMS_GLOBAL;
9673 if (decl && DECL_ATTRIBUTES (decl)
9674 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9675 flags |= SECTION_VMS_INITIALIZE;
9676
9677 return flags;
9678 }
9679
9680 /* Switch to an arbitrary section NAME with attributes as specified
9681 by FLAGS. ALIGN specifies any known alignment requirements for
9682 the section; 0 if the default should be used. */
9683
9684 static void
9685 vms_asm_named_section (const char *name, unsigned int flags,
9686 tree decl ATTRIBUTE_UNUSED)
9687 {
9688 fputc ('\n', asm_out_file);
9689 fprintf (asm_out_file, ".section\t%s", name);
9690
9691 if (flags & SECTION_VMS_OVERLAY)
9692 fprintf (asm_out_file, ",OVR");
9693 if (flags & SECTION_VMS_GLOBAL)
9694 fprintf (asm_out_file, ",GBL");
9695 if (flags & SECTION_VMS_INITIALIZE)
9696 fprintf (asm_out_file, ",NOMOD");
9697 if (flags & SECTION_DEBUG)
9698 fprintf (asm_out_file, ",NOWRT");
9699
9700 fputc ('\n', asm_out_file);
9701 }
9702
9703 /* Record an element in the table of global constructors. SYMBOL is
9704 a SYMBOL_REF of the function to be called; PRIORITY is a number
9705 between 0 and MAX_INIT_PRIORITY.
9706
9707 Differs from default_ctors_section_asm_out_constructor in that the
9708 width of the .ctors entry is always 64 bits, rather than the 32 bits
9709 used by a normal pointer. */
9710
9711 static void
9712 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9713 {
9714 switch_to_section (ctors_section);
9715 assemble_align (BITS_PER_WORD);
9716 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9717 }
9718
9719 static void
9720 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9721 {
9722 switch_to_section (dtors_section);
9723 assemble_align (BITS_PER_WORD);
9724 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9725 }
9726 #else
9727
9728 rtx
9729 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9730 int is_local ATTRIBUTE_UNUSED)
9731 {
9732 return NULL_RTX;
9733 }
9734
9735 rtx
9736 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9737 tree cfundecl ATTRIBUTE_UNUSED,
9738 int lflag ATTRIBUTE_UNUSED,
9739 int rflag ATTRIBUTE_UNUSED)
9740 {
9741 return NULL_RTX;
9742 }
9743
9744 #endif /* TARGET_ABI_OPEN_VMS */
9745 \f
9746 #if TARGET_ABI_UNICOSMK
9747
9748 /* This evaluates to true if we do not know how to pass TYPE solely in
9749 registers. This is the case for all arguments that do not fit in two
9750 registers. */
9751
9752 static bool
9753 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9754 {
9755 if (type == NULL)
9756 return false;
9757
9758 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9759 return true;
9760 if (TREE_ADDRESSABLE (type))
9761 return true;
9762
9763 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9764 }
9765
9766 /* Define the offset between two registers, one to be eliminated, and the
9767 other its replacement, at the start of a routine. */
9768
9769 int
9770 unicosmk_initial_elimination_offset (int from, int to)
9771 {
9772 int fixed_size;
9773
9774 fixed_size = alpha_sa_size();
9775 if (fixed_size != 0)
9776 fixed_size += 48;
9777
9778 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9779 return -fixed_size;
9780 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9781 return 0;
9782 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9783 return (ALPHA_ROUND (current_function_outgoing_args_size)
9784 + ALPHA_ROUND (get_frame_size()));
9785 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9786 return (ALPHA_ROUND (fixed_size)
9787 + ALPHA_ROUND (get_frame_size()
9788 + current_function_outgoing_args_size));
9789 else
9790 gcc_unreachable ();
9791 }
9792
9793 /* Output the module name for .ident and .end directives. We have to strip
9794 directories and make sure that the module name starts with a letter
9795 or '$'. */
9796
9797 static void
9798 unicosmk_output_module_name (FILE *file)
9799 {
9800 const char *name = lbasename (main_input_filename);
9801 unsigned len = strlen (name);
9802 char *clean_name = alloca (len + 2);
9803 char *ptr = clean_name;
9804
9805 /* CAM only accepts module names that start with a letter or '$'. We
9806 prefix the module name with a '$' if necessary. */
9807
9808 if (!ISALPHA (*name))
9809 *ptr++ = '$';
9810 memcpy (ptr, name, len + 1);
9811 clean_symbol_name (clean_name);
9812 fputs (clean_name, file);
9813 }
9814
9815 /* Output the definition of a common variable. */
9816
9817 void
9818 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9819 {
9820 tree name_tree;
9821 printf ("T3E__: common %s\n", name);
9822
9823 in_section = NULL;
9824 fputs ("\t.endp\n\n\t.psect ", file);
9825 assemble_name (file, name);
9826 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9827 fprintf (file, "\t.byte\t0:%d\n", size);
9828
9829 /* Mark the symbol as defined in this module. */
9830 name_tree = get_identifier (name);
9831 TREE_ASM_WRITTEN (name_tree) = 1;
9832 }
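
/* For instance, a hypothetical 16-byte common symbol "foo" requesting
   64-bit (8-byte) alignment would be emitted roughly as

	.endp

	.psect foo,3,common
	.byte	0:16

   since the alignment operand is the base-2 logarithm of the byte
   alignment.  */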
9833
9834 #define SECTION_PUBLIC SECTION_MACH_DEP
9835 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9836 static int current_section_align;
9837
9838 /* A get_unnamed_section callback for switching to the text section. */
9839
9840 static void
9841 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9842 {
9843 static int count = 0;
9844 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9845 }
9846
9847 /* A get_unnamed_section callback for switching to the data section. */
9848
9849 static void
9850 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9851 {
9852 static int count = 1;
9853 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9854 }
9855
9856 /* Implement TARGET_ASM_INIT_SECTIONS.
9857
9858 The Cray assembler is really weird with respect to sections. It has only
9859 named sections and you can't reopen a section once it has been closed.
9860 This means that we have to generate unique names whenever we want to
9861 reenter the text or the data section. */
9862
9863 static void
9864 unicosmk_init_sections (void)
9865 {
9866 text_section = get_unnamed_section (SECTION_CODE,
9867 unicosmk_output_text_section_asm_op,
9868 NULL);
9869 data_section = get_unnamed_section (SECTION_WRITE,
9870 unicosmk_output_data_section_asm_op,
9871 NULL);
9872 readonly_data_section = data_section;
9873 }
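
/* Each time we re-enter the text or data section, the callbacks above
   close the current psect and open a fresh, uniquely numbered one:
   gcc@text___0, gcc@text___1, ... for code and gcc@data___1,
   gcc@data___2, ... for data.  Read-only data simply shares the data
   section.  */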
9874
9875 static unsigned int
9876 unicosmk_section_type_flags (tree decl, const char *name,
9877 int reloc ATTRIBUTE_UNUSED)
9878 {
9879 unsigned int flags = default_section_type_flags (decl, name, reloc);
9880
9881 if (!decl)
9882 return flags;
9883
9884 if (TREE_CODE (decl) == FUNCTION_DECL)
9885 {
9886 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9887 if (align_functions_log > current_section_align)
9888 current_section_align = align_functions_log;
9889
9890 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9891 flags |= SECTION_MAIN;
9892 }
9893 else
9894 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9895
9896 if (TREE_PUBLIC (decl))
9897 flags |= SECTION_PUBLIC;
9898
9899 return flags;
9900 }
9901
9902 /* Generate a section name for decl and associate it with the
9903 declaration. */
9904
9905 static void
9906 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9907 {
9908 const char *name;
9909 int len;
9910
9911 gcc_assert (decl);
9912
9913 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9914 name = default_strip_name_encoding (name);
9915 len = strlen (name);
9916
9917 if (TREE_CODE (decl) == FUNCTION_DECL)
9918 {
9919 char *string;
9920
9921 /* It is essential that we prefix the section name here because
9922 otherwise the section names generated for constructors and
9923 destructors confuse collect2. */
9924
9925 string = alloca (len + 6);
9926 sprintf (string, "code@%s", name);
9927 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9928 }
9929 else if (TREE_PUBLIC (decl))
9930 DECL_SECTION_NAME (decl) = build_string (len, name);
9931 else
9932 {
9933 char *string;
9934
9935 string = alloca (len + 6);
9936 sprintf (string, "data@%s", name);
9937 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9938 }
9939 }
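
/* For example, a function "bar" ends up in a section named "code@bar", a
   public variable keeps its own name as the section name, and a static
   variable "baz" goes into "data@baz" (all names hypothetical).  */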
9940
9941 /* Switch to an arbitrary section NAME with attributes as specified
9942 by FLAGS.  DECL, if non-null, is the VAR_DECL or FUNCTION_DECL
9943 with which the section is associated.  */
9944
9945 static void
9946 unicosmk_asm_named_section (const char *name, unsigned int flags,
9947 tree decl ATTRIBUTE_UNUSED)
9948 {
9949 const char *kind;
9950
9951 /* Close the previous section. */
9952
9953 fputs ("\t.endp\n\n", asm_out_file);
9954
9955 /* Find out what kind of section we are opening. */
9956
9957 if (flags & SECTION_MAIN)
9958 fputs ("\t.start\tmain\n", asm_out_file);
9959
9960 if (flags & SECTION_CODE)
9961 kind = "code";
9962 else if (flags & SECTION_PUBLIC)
9963 kind = "common";
9964 else
9965 kind = "data";
9966
9967 if (current_section_align != 0)
9968 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9969 current_section_align, kind);
9970 else
9971 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9972 }
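
/* For example, switching to the section of a hypothetical function "main"
   would produce something like

	.endp

	.start	main
	.psect	code@main,2,code

   where the numeric operand (the section alignment, as a power of two) is
   omitted entirely when current_section_align is zero.  */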
9973
9974 static void
9975 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9976 {
9977 if (DECL_P (decl)
9978 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9979 unicosmk_unique_section (decl, 0);
9980 }
9981
9982 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9983 in code sections because .align fills unused space with zeroes. */
9984
9985 void
9986 unicosmk_output_align (FILE *file, int align)
9987 {
9988 if (inside_function)
9989 fprintf (file, "\tgcc@code@align\t%d\n", align);
9990 else
9991 fprintf (file, "\t.align\t%d\n", align);
9992 }
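
/* For instance, assuming ALIGN is the usual log2 value handed down by
   ASM_OUTPUT_ALIGN, a request for 8-byte alignment comes out as
   "\tgcc@code@align\t3" inside a function body (expanding to nops via the
   macro emitted in unicosmk_file_start) and as "\t.align\t3" elsewhere.  */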
9993
9994 /* Add a case vector to the current function's list of deferred case
9995 vectors. Case vectors have to be put into a separate section because CAM
9996 does not allow data definitions in code sections. */
9997
9998 void
9999 unicosmk_defer_case_vector (rtx lab, rtx vec)
10000 {
10001 struct machine_function *machine = cfun->machine;
10002
10003 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10004 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10005 machine->addr_list);
10006 }
10007
10008 /* Output a case vector. */
10009
10010 static void
10011 unicosmk_output_addr_vec (FILE *file, rtx vec)
10012 {
10013 rtx lab = XEXP (vec, 0);
10014 rtx body = XEXP (vec, 1);
10015 int vlen = XVECLEN (body, 0);
10016 int idx;
10017
10018 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10019
10020 for (idx = 0; idx < vlen; idx++)
10021 {
10022 ASM_OUTPUT_ADDR_VEC_ELT
10023 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10024 }
10025 }
10026
10027 /* Output current function's deferred case vectors. */
10028
10029 static void
10030 unicosmk_output_deferred_case_vectors (FILE *file)
10031 {
10032 struct machine_function *machine = cfun->machine;
10033 rtx t;
10034
10035 if (machine->addr_list == NULL_RTX)
10036 return;
10037
10038 switch_to_section (data_section);
10039 for (t = machine->addr_list; t; t = XEXP (t, 1))
10040 unicosmk_output_addr_vec (file, XEXP (t, 0));
10041 }
10042
10043 /* Generate the name of the SSIB section for the current function. */
10044
10045 #define SSIB_PREFIX "__SSIB_"
10046 #define SSIB_PREFIX_LEN 7
10047
10048 static const char *
10049 unicosmk_ssib_name (void)
10050 {
10051 /* This is ok since CAM won't be able to deal with names longer than that
10052 anyway. */
10053
10054 static char name[256];
10055
10056 rtx x;
10057 const char *fnname;
10058 int len;
10059
10060 x = DECL_RTL (cfun->decl);
10061 gcc_assert (GET_CODE (x) == MEM);
10062 x = XEXP (x, 0);
10063 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10064 fnname = XSTR (x, 0);
10065
10066 len = strlen (fnname);
10067 if (len + SSIB_PREFIX_LEN > 255)
10068 len = 255 - SSIB_PREFIX_LEN;
10069
10070 strcpy (name, SSIB_PREFIX);
10071 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10072 name[len + SSIB_PREFIX_LEN] = 0;
10073
10074 return name;
10075 }
10076
10077 /* Set up the dynamic subprogram information block (DSIB) and update the
10078 frame pointer register ($15) for subroutines which have a frame. If the
10079 subroutine doesn't have a frame, simply increment $15. */
10080
10081 static void
10082 unicosmk_gen_dsib (unsigned long *imaskP)
10083 {
10084 if (alpha_procedure_type == PT_STACK)
10085 {
10086 const char *ssib_name;
10087 rtx mem;
10088
10089 /* Allocate 64 bytes for the DSIB. */
10090
10091 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10092 GEN_INT (-64))));
10093 emit_insn (gen_blockage ());
10094
10095 /* Save the return address. */
10096
10097 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10098 set_mem_alias_set (mem, alpha_sr_alias_set);
10099 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10100 (*imaskP) &= ~(1UL << REG_RA);
10101
10102 /* Save the old frame pointer. */
10103
10104 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10105 set_mem_alias_set (mem, alpha_sr_alias_set);
10106 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10107 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10108
10109 emit_insn (gen_blockage ());
10110
10111 /* Store the SSIB pointer. */
10112
10113 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10114 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10115 set_mem_alias_set (mem, alpha_sr_alias_set);
10116
10117 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10118 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10119 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10120
10121 /* Save the CIW index. */
10122
10123 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10124 set_mem_alias_set (mem, alpha_sr_alias_set);
10125 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10126
10127 emit_insn (gen_blockage ());
10128
10129 /* Set the new frame pointer. */
10130
10131 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10132 stack_pointer_rtx, GEN_INT (64))));
10133
10134 }
10135 else
10136 {
10137 /* Increment the frame pointer register to indicate that we do not
10138 have a frame. */
10139
10140 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10141 hard_frame_pointer_rtx, const1_rtx)));
10142 }
10143 }
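
/* To summarize the layout built above: within the 64-byte DSIB allocated
   below the incoming stack pointer, the return address is stored at
   offset 56, the caller's frame pointer at 48, the SSIB address at 32 and
   the CIW index (taken from register 25) at 24, all relative to the new
   stack pointer; the new frame pointer then points just past the block,
   i.e. at the incoming stack pointer value.  */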
10144
10145 /* Output the static subroutine information block for the current
10146 function. */
10147
10148 static void
10149 unicosmk_output_ssib (FILE *file, const char *fnname)
10150 {
10151 int len;
10152 int i;
10153 rtx x;
10154 rtx ciw;
10155 struct machine_function *machine = cfun->machine;
10156
10157 in_section = NULL;
10158 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10159 unicosmk_ssib_name ());
10160
10161 /* Some required stuff and the function name length. */
10162
10163 len = strlen (fnname);
10164 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10165
10166 /* Saved registers
10167 ??? We don't do that yet. */
10168
10169 fputs ("\t.quad\t0\n", file);
10170
10171 /* Function address. */
10172
10173 fputs ("\t.quad\t", file);
10174 assemble_name (file, fnname);
10175 putc ('\n', file);
10176
10177 fputs ("\t.quad\t0\n", file);
10178 fputs ("\t.quad\t0\n", file);
10179
10180 /* Function name.
10181 ??? We do it the same way Cray CC does it but this could be
10182 simplified. */
10183
10184 for (i = 0; i < len; i++)
10185 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10186 if ((len % 8) == 0)
10187 fputs ("\t.quad\t0\n", file);
10188 else
10189 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10190
10191 /* All call information words used in the function. */
10192
10193 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10194 {
10195 ciw = XEXP (x, 0);
10196 #if HOST_BITS_PER_WIDE_INT == 32
10197 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10198 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10199 #else
10200 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10201 #endif
10202 }
10203 }
10204
10205 /* Add a call information word (CIW) to the list of the current function's
10206 CIWs and return its index.
10207
10208 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10209
10210 rtx
10211 unicosmk_add_call_info_word (rtx x)
10212 {
10213 rtx node;
10214 struct machine_function *machine = cfun->machine;
10215
10216 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10217 if (machine->first_ciw == NULL_RTX)
10218 machine->first_ciw = node;
10219 else
10220 XEXP (machine->last_ciw, 1) = node;
10221
10222 machine->last_ciw = node;
10223 ++machine->ciw_count;
10224
10225 return GEN_INT (machine->ciw_count
10226 + strlen (current_function_name ())/8 + 5);
10227 }
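
/* The value returned above is, in effect, the zero-based quadword index
   of this CIW within the SSIB laid out by unicosmk_output_ssib: five
   fixed quadwords (header, saved-register word, function address and two
   zero words), plus the quadwords occupied by the padded function name,
   plus the CIW's position in the list.  */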
10228
10229 /* The Cray assembler doesn't accept extern declarations for symbols which
10230 are defined in the same file. We have to keep track of all global
10231 symbols which are referenced and/or defined in a source file and, at the
10232 end of the file, output extern declarations for those which are
10233 referenced but not defined. */
10234
10235 /* List of identifiers for which an extern declaration might have to be
10236 emitted. */
10237 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10238
10239 struct unicosmk_extern_list
10240 {
10241 struct unicosmk_extern_list *next;
10242 const char *name;
10243 };
10244
10245 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10246
10247 /* Output extern declarations which are required for every asm file. */
10248
10249 static void
10250 unicosmk_output_default_externs (FILE *file)
10251 {
10252 static const char *const externs[] =
10253 { "__T3E_MISMATCH" };
10254
10255 int i;
10256 int n;
10257
10258 n = ARRAY_SIZE (externs);
10259
10260 for (i = 0; i < n; i++)
10261 fprintf (file, "\t.extern\t%s\n", externs[i]);
10262 }
10263
10264 /* Output extern declarations for global symbols which have been
10265 referenced but not defined. */
10266
10267 static void
10268 unicosmk_output_externs (FILE *file)
10269 {
10270 struct unicosmk_extern_list *p;
10271 const char *real_name;
10272 int len;
10273 tree name_tree;
10274
10275 len = strlen (user_label_prefix);
10276 for (p = unicosmk_extern_head; p != 0; p = p->next)
10277 {
10278 /* We have to strip the encoding and possibly remove user_label_prefix
10279 from the identifier in order to handle -fleading-underscore and
10280 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10281 real_name = default_strip_name_encoding (p->name);
10282 if (len && p->name[0] == '*'
10283 && !memcmp (real_name, user_label_prefix, len))
10284 real_name += len;
10285
10286 name_tree = get_identifier (real_name);
10287 if (! TREE_ASM_WRITTEN (name_tree))
10288 {
10289 TREE_ASM_WRITTEN (name_tree) = 1;
10290 fputs ("\t.extern\t", file);
10291 assemble_name (file, p->name);
10292 putc ('\n', file);
10293 }
10294 }
10295 }
10296
10297 /* Record an extern. */
10298
10299 void
10300 unicosmk_add_extern (const char *name)
10301 {
10302 struct unicosmk_extern_list *p;
10303
10304 p = (struct unicosmk_extern_list *)
10305 xmalloc (sizeof (struct unicosmk_extern_list));
10306 p->next = unicosmk_extern_head;
10307 p->name = name;
10308 unicosmk_extern_head = p;
10309 }
10310
10311 /* The Cray assembler generates incorrect code if identifiers which
10312 conflict with register names are used as instruction operands. We have
10313 to replace such identifiers with DEX expressions. */
10314
10315 /* Structure to collect identifiers which have been replaced by DEX
10316 expressions. */
10317 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10318
10319 struct unicosmk_dex {
10320 struct unicosmk_dex *next;
10321 const char *name;
10322 };
10323
10324 /* List of identifiers which have been replaced by DEX expressions. The DEX
10325 number is determined by the position in the list. */
10326
10327 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10328
10329 /* The number of elements in the DEX list. */
10330
10331 static int unicosmk_dex_count = 0;
10332
10333 /* Check if NAME must be replaced by a DEX expression. */
10334
10335 static int
10336 unicosmk_special_name (const char *name)
10337 {
10338 if (name[0] == '*')
10339 ++name;
10340
10341 if (name[0] == '$')
10342 ++name;
10343
10344 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10345 return 0;
10346
10347 switch (name[1])
10348 {
10349 case '1': case '2':
10350 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10351
10352 case '3':
10353 return (name[2] == '\0'
10354 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10355
10356 default:
10357 return (ISDIGIT (name[1]) && name[2] == '\0');
10358 }
10359 }
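
/* For example, this accepts names such as "r7", "f10", "R31" or "$r2"
   (an optional '*' or '$' prefix followed by a register-style name in the
   range 0..31) and rejects "r32", "raw" or "foo".  */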
10360
10361 /* Return the DEX number if X must be replaced by a DEX expression and 0
10362 otherwise. */
10363
10364 static int
10365 unicosmk_need_dex (rtx x)
10366 {
10367 struct unicosmk_dex *dex;
10368 const char *name;
10369 int i;
10370
10371 if (GET_CODE (x) != SYMBOL_REF)
10372 return 0;
10373
10374 name = XSTR (x,0);
10375 if (! unicosmk_special_name (name))
10376 return 0;
10377
10378 i = unicosmk_dex_count;
10379 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10380 {
10381 if (! strcmp (name, dex->name))
10382 return i;
10383 --i;
10384 }
10385
10386 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10387 dex->name = name;
10388 dex->next = unicosmk_dex_list;
10389 unicosmk_dex_list = dex;
10390
10391 ++unicosmk_dex_count;
10392 return unicosmk_dex_count;
10393 }
10394
10395 /* Output the DEX definitions for this file. */
10396
10397 static void
10398 unicosmk_output_dex (FILE *file)
10399 {
10400 struct unicosmk_dex *dex;
10401 int i;
10402
10403 if (unicosmk_dex_list == NULL)
10404 return;
10405
10406 fprintf (file, "\t.dexstart\n");
10407
10408 i = unicosmk_dex_count;
10409 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10410 {
10411 fprintf (file, "\tDEX (%d) = ", i);
10412 assemble_name (file, dex->name);
10413 putc ('\n', file);
10414 --i;
10415 }
10416
10417 fprintf (file, "\t.dexend\n");
10418 }
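
/* With two replaced identifiers, say "f31" recorded first and "r2"
   second (hypothetical names), the emitted block would look like

	.dexstart
	DEX (2) = r2
	DEX (1) = f31
	.dexend
   */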
10419
10420 /* Output text to appear at the beginning of an assembler file. */
10421
10422 static void
10423 unicosmk_file_start (void)
10424 {
10425 int i;
10426
10427 fputs ("\t.ident\t", asm_out_file);
10428 unicosmk_output_module_name (asm_out_file);
10429 fputs ("\n\n", asm_out_file);
10430
10431 /* The Unicos/Mk assembler uses different register names. Instead of trying
10432 to support them, we simply use micro definitions. */
10433
10434 /* CAM has different register names: rN for the integer register N and fN
10435 for the floating-point register N. Instead of trying to use these in
10436 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10437 register. */
10438
10439 for (i = 0; i < 32; ++i)
10440 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10441
10442 for (i = 0; i < 32; ++i)
10443 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10444
10445 putc ('\n', asm_out_file);
10446
10447 /* The .align directive fills unused space with zeroes, which does not work
10448 in code sections. We define the macro 'gcc@code@align' which uses nops
10449 instead. Note that it assumes that code sections always have the
10450 biggest possible alignment since . refers to the current offset from
10451 the beginning of the section. */
10452
10453 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10454 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10455 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10456 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10457 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10458 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10459 fputs ("\t.endr\n", asm_out_file);
10460 fputs ("\t.endif\n", asm_out_file);
10461 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10462
10463 /* Output extern declarations which should always be visible. */
10464 unicosmk_output_default_externs (asm_out_file);
10465
10466 /* Open a dummy section. We always need to be inside a section for the
10467 section-switching code to work correctly.
10468 ??? This should be a module id or something like that. I still have to
10469 figure out what the rules for those are. */
10470 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10471 }
10472
10473 /* Output text to appear at the end of an assembler file. This includes all
10474 pending extern declarations and DEX expressions. */
10475
10476 static void
10477 unicosmk_file_end (void)
10478 {
10479 fputs ("\t.endp\n\n", asm_out_file);
10480
10481 /* Output all pending externs. */
10482
10483 unicosmk_output_externs (asm_out_file);
10484
10485 /* Output dex definitions used for functions whose names conflict with
10486 register names. */
10487
10488 unicosmk_output_dex (asm_out_file);
10489
10490 fputs ("\t.end\t", asm_out_file);
10491 unicosmk_output_module_name (asm_out_file);
10492 putc ('\n', asm_out_file);
10493 }
10494
10495 #else
10496
10497 static void
10498 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10499 {}
10500
10501 static void
10502 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10503 {}
10504
10505 static void
10506 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10507 const char * fnname ATTRIBUTE_UNUSED)
10508 {}
10509
10510 rtx
10511 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10512 {
10513 return NULL_RTX;
10514 }
10515
10516 static int
10517 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10518 {
10519 return 0;
10520 }
10521
10522 #endif /* TARGET_ABI_UNICOSMK */
10523
10524 static void
10525 alpha_init_libfuncs (void)
10526 {
10527 if (TARGET_ABI_UNICOSMK)
10528 {
10529 /* Prevent gcc from generating calls to __divsi3. */
10530 set_optab_libfunc (sdiv_optab, SImode, 0);
10531 set_optab_libfunc (udiv_optab, SImode, 0);
10532
10533 /* Use the functions provided by the system library
10534 for DImode integer division. */
10535 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10536 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10537 }
10538 else if (TARGET_ABI_OPEN_VMS)
10539 {
10540 /* Use the VMS runtime library functions for division and
10541 remainder. */
10542 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10543 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10544 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10545 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10546 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10547 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10548 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10549 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10550 }
10551 }
10552
10553 \f
10554 /* Initialize the GCC target structure. */
10555 #if TARGET_ABI_OPEN_VMS
10556 # undef TARGET_ATTRIBUTE_TABLE
10557 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10558 # undef TARGET_SECTION_TYPE_FLAGS
10559 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10560 #endif
10561
10562 #undef TARGET_IN_SMALL_DATA_P
10563 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10564
10565 #if TARGET_ABI_UNICOSMK
10566 # undef TARGET_INSERT_ATTRIBUTES
10567 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10568 # undef TARGET_SECTION_TYPE_FLAGS
10569 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10570 # undef TARGET_ASM_UNIQUE_SECTION
10571 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10572 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10573 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10574 # undef TARGET_ASM_GLOBALIZE_LABEL
10575 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10576 # undef TARGET_MUST_PASS_IN_STACK
10577 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10578 #endif
10579
10580 #undef TARGET_ASM_ALIGNED_HI_OP
10581 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10582 #undef TARGET_ASM_ALIGNED_DI_OP
10583 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10584
10585 /* Default unaligned ops are provided for ELF systems. To get unaligned
10586 data for non-ELF systems, we have to turn off auto alignment. */
10587 #ifndef OBJECT_FORMAT_ELF
10588 #undef TARGET_ASM_UNALIGNED_HI_OP
10589 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10590 #undef TARGET_ASM_UNALIGNED_SI_OP
10591 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10592 #undef TARGET_ASM_UNALIGNED_DI_OP
10593 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10594 #endif
10595
10596 #ifdef OBJECT_FORMAT_ELF
10597 #undef TARGET_ASM_RELOC_RW_MASK
10598 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10599 #undef TARGET_ASM_SELECT_RTX_SECTION
10600 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10601 #undef TARGET_SECTION_TYPE_FLAGS
10602 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10603 #endif
10604
10605 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10606 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10607
10608 #undef TARGET_INIT_LIBFUNCS
10609 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10610
10611 #if TARGET_ABI_UNICOSMK
10612 #undef TARGET_ASM_FILE_START
10613 #define TARGET_ASM_FILE_START unicosmk_file_start
10614 #undef TARGET_ASM_FILE_END
10615 #define TARGET_ASM_FILE_END unicosmk_file_end
10616 #else
10617 #undef TARGET_ASM_FILE_START
10618 #define TARGET_ASM_FILE_START alpha_file_start
10619 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10620 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10621 #endif
10622
10623 #undef TARGET_SCHED_ADJUST_COST
10624 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10625 #undef TARGET_SCHED_ISSUE_RATE
10626 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10627 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10628 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10629 alpha_multipass_dfa_lookahead
10630
10631 #undef TARGET_HAVE_TLS
10632 #define TARGET_HAVE_TLS HAVE_AS_TLS
10633
10634 #undef TARGET_INIT_BUILTINS
10635 #define TARGET_INIT_BUILTINS alpha_init_builtins
10636 #undef TARGET_EXPAND_BUILTIN
10637 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10638 #undef TARGET_FOLD_BUILTIN
10639 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10640
10641 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10642 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10643 #undef TARGET_CANNOT_COPY_INSN_P
10644 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10645 #undef TARGET_CANNOT_FORCE_CONST_MEM
10646 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10647
10648 #if TARGET_ABI_OSF
10649 #undef TARGET_ASM_OUTPUT_MI_THUNK
10650 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10651 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10652 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10653 #undef TARGET_STDARG_OPTIMIZE_HOOK
10654 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10655 #endif
10656
10657 #undef TARGET_RTX_COSTS
10658 #define TARGET_RTX_COSTS alpha_rtx_costs
10659 #undef TARGET_ADDRESS_COST
10660 #define TARGET_ADDRESS_COST hook_int_rtx_0
10661
10662 #undef TARGET_MACHINE_DEPENDENT_REORG
10663 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10664
10665 #undef TARGET_PROMOTE_FUNCTION_ARGS
10666 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10667 #undef TARGET_PROMOTE_FUNCTION_RETURN
10668 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10669 #undef TARGET_PROMOTE_PROTOTYPES
10670 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10671 #undef TARGET_RETURN_IN_MEMORY
10672 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10673 #undef TARGET_PASS_BY_REFERENCE
10674 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10675 #undef TARGET_SETUP_INCOMING_VARARGS
10676 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10677 #undef TARGET_STRICT_ARGUMENT_NAMING
10678 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10679 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10680 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10681 #undef TARGET_SPLIT_COMPLEX_ARG
10682 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10683 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10684 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10685 #undef TARGET_ARG_PARTIAL_BYTES
10686 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10687
10688 #undef TARGET_SECONDARY_RELOAD
10689 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10690
10691 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10692 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10693 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10694 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10695
10696 #undef TARGET_BUILD_BUILTIN_VA_LIST
10697 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10698
10699 /* The Alpha architecture does not require sequential consistency. See
10700 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10701 for an example of how it can be violated in practice. */
10702 #undef TARGET_RELAXED_ORDERING
10703 #define TARGET_RELAXED_ORDERING true
10704
10705 #undef TARGET_DEFAULT_TARGET_FLAGS
10706 #define TARGET_DEFAULT_TARGET_FLAGS \
10707 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10708 #undef TARGET_HANDLE_OPTION
10709 #define TARGET_HANDLE_OPTION alpha_handle_option
10710
10711 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10712 #undef TARGET_MANGLE_TYPE
10713 #define TARGET_MANGLE_TYPE alpha_mangle_type
10714 #endif
10715
10716 struct gcc_target targetm = TARGET_INITIALIZER;
10717
10718 \f
10719 #include "gt-alpha.h"