]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/alpha/alpha.c
alpha.c (alpha_mangle_type, [...]): Constify.
[thirdparty/gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
59
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
62
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
65
66 static const char * const alpha_cpu_name[] =
67 {
68 "ev4", "ev5", "ev6"
69 };
70
71 /* Specify how accurate floating-point traps need to be. */
72
73 enum alpha_trap_precision alpha_tp;
74
75 /* Specify the floating-point rounding mode. */
76
77 enum alpha_fp_rounding_mode alpha_fprm;
78
79 /* Specify which things cause traps. */
80
81 enum alpha_fp_trap_mode alpha_fptm;
82
83 /* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
85
86 struct alpha_compare alpha_compare;
87
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
90
91 static int inside_function = FALSE;
92
93 /* The number of cycles of latency we should assume on memory reads. */
94
95 int alpha_memory_latency = 3;
96
97 /* Whether the function needs the GP. */
98
99 static int alpha_function_needs_gp;
100
101 /* The alias set for prologue/epilogue register save/restore. */
102
103 static GTY(()) alias_set_type alpha_sr_alias_set;
104
105 /* The assembler name of the current function. */
106
107 static const char *alpha_fnname;
108
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
112
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
119
120 /* Costs of various operations on the different architectures. */
121
122 struct alpha_rtx_cost_data
123 {
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
133 };
134
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 {
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
147 },
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
158 },
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
169 },
170 };
171
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
176
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 {
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
188 };
189
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS current_function_args_info.num_args
193 #else
194 #define NUM_ARGS current_function_args_info
195 #endif
196
197 #define REG_PV 27
198 #define REG_RA 26
199
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
206 #endif
207
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
212 \f
213 /* Implement TARGET_HANDLE_OPTION. */
214
215 static bool
216 alpha_handle_option (size_t code, const char *arg, int value)
217 {
218 switch (code)
219 {
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
224
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
229
230 case OPT_mtls_size_:
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
234 }
235
236 return true;
237 }
238
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
241
242 static const char *
243 alpha_mangle_type (const_tree type)
244 {
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
248
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
251 }
252 #endif
253
254 /* Parse target option strings. */
255
256 void
257 override_options (void)
258 {
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
263 } cpu_table[] = {
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { 0, 0, 0 }
279 };
280
281 int i;
282
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
285 {
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
289 }
290
291 /* On Unicos/Mk, the native compiler consistently generates /d suffices for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
297
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
300
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
306
307 if (TARGET_IEEE)
308 {
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
311 else
312 {
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
315 }
316 }
317
318 if (TARGET_IEEE_WITH_INEXACT)
319 {
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
322 else
323 {
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
326 }
327 }
328
329 if (alpha_tp_string)
330 {
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
337 else
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
339 }
340
341 if (alpha_fprm_string)
342 {
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string,"d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
351 else
352 error ("bad value %qs for -mfp-rounding-mode switch",
353 alpha_fprm_string);
354 }
355
356 if (alpha_fptm_string)
357 {
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
368 }
369
370 if (alpha_cpu_string)
371 {
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
374 {
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
378 break;
379 }
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
382 }
383
384 if (alpha_tune_string)
385 {
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
388 {
389 alpha_tune = cpu_table [i].processor;
390 break;
391 }
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
394 }
395
396 /* Do some sanity checks on the above options. */
397
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
399 {
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
402 }
403
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
406 {
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
409 }
410
411 if (alpha_cpu == PROCESSOR_EV6)
412 {
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps. Which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
417 }
418
419 if (TARGET_FLOAT_VAX)
420 {
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
422 {
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
425 }
426 if (alpha_fptm == ALPHA_FPTM_SUI)
427 {
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
430 }
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
434 }
435
436 {
437 char *end;
438 int lat;
439
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
442
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
445 ;
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
449 {
450 static int const cache_latency[][4] =
451 {
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
455 };
456
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
459 {
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
462 lat = 3;
463 }
464 else
465 lat = cache_latency[alpha_tune][lat-1];
466 }
467 else if (! strcmp (alpha_mlat_string, "main"))
468 {
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
472 }
473 else
474 {
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
476 lat = 3;
477 }
478
479 alpha_memory_latency = lat;
480 }
481
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
485
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
491
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
496 {
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
501 }
502 if (align_functions <= 0)
503 align_functions = 16;
504
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
507
508 /* Register variables and functions with the garbage collector. */
509
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
512
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
515 {
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
519 }
520
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524 #endif
525 }
526 \f
527 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
528
529 int
530 zap_mask (HOST_WIDE_INT value)
531 {
532 int i;
533
534 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
535 i++, value >>= 8)
536 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
537 return 0;
538
539 return 1;
540 }
541
542 /* Return true if OP is valid for a particular TLS relocation.
543 We are already guaranteed that OP is a CONST. */
544
545 int
546 tls_symbolic_operand_1 (rtx op, int size, int unspec)
547 {
548 op = XEXP (op, 0);
549
550 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
551 return 0;
552 op = XVECEXP (op, 0, 0);
553
554 if (GET_CODE (op) != SYMBOL_REF)
555 return 0;
556
557 switch (SYMBOL_REF_TLS_MODEL (op))
558 {
559 case TLS_MODEL_LOCAL_DYNAMIC:
560 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
561 case TLS_MODEL_INITIAL_EXEC:
562 return unspec == UNSPEC_TPREL && size == 64;
563 case TLS_MODEL_LOCAL_EXEC:
564 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
565 default:
566 gcc_unreachable ();
567 }
568 }
569
570 /* Used by aligned_memory_operand and unaligned_memory_operand to
571 resolve what reload is going to do with OP if it's a register. */
572
573 rtx
574 resolve_reload_operand (rtx op)
575 {
576 if (reload_in_progress)
577 {
578 rtx tmp = op;
579 if (GET_CODE (tmp) == SUBREG)
580 tmp = SUBREG_REG (tmp);
581 if (GET_CODE (tmp) == REG
582 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
583 {
584 op = reg_equiv_memory_loc[REGNO (tmp)];
585 if (op == 0)
586 return 0;
587 }
588 }
589 return op;
590 }
591
592 /* The scalar modes supported differs from the default check-what-c-supports
593 version in that sometimes TFmode is available even when long double
594 indicates only DFmode. On unicosmk, we have the situation that HImode
595 doesn't map to any C type, but of course we still support that. */
596
597 static bool
598 alpha_scalar_mode_supported_p (enum machine_mode mode)
599 {
600 switch (mode)
601 {
602 case QImode:
603 case HImode:
604 case SImode:
605 case DImode:
606 case TImode: /* via optabs.c */
607 return true;
608
609 case SFmode:
610 case DFmode:
611 return true;
612
613 case TFmode:
614 return TARGET_HAS_XFLOATING_LIBS;
615
616 default:
617 return false;
618 }
619 }
620
621 /* Alpha implements a couple of integer vector mode operations when
622 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
623 which allows the vectorizer to operate on e.g. move instructions,
624 or when expand_vector_operations can do something useful. */
625
626 static bool
627 alpha_vector_mode_supported_p (enum machine_mode mode)
628 {
629 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
630 }
631
632 /* Return 1 if this function can directly return via $26. */
633
634 int
635 direct_return (void)
636 {
637 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
638 && reload_completed
639 && alpha_sa_size () == 0
640 && get_frame_size () == 0
641 && current_function_outgoing_args_size == 0
642 && current_function_pretend_args_size == 0);
643 }
644
645 /* Return the ADDR_VEC associated with a tablejump insn. */
646
647 rtx
648 alpha_tablejump_addr_vec (rtx insn)
649 {
650 rtx tmp;
651
652 tmp = JUMP_LABEL (insn);
653 if (!tmp)
654 return NULL_RTX;
655 tmp = NEXT_INSN (tmp);
656 if (!tmp)
657 return NULL_RTX;
658 if (GET_CODE (tmp) == JUMP_INSN
659 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
660 return PATTERN (tmp);
661 return NULL_RTX;
662 }
663
664 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
665
666 rtx
667 alpha_tablejump_best_label (rtx insn)
668 {
669 rtx jump_table = alpha_tablejump_addr_vec (insn);
670 rtx best_label = NULL_RTX;
671
672 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
673 there for edge frequency counts from profile data. */
674
675 if (jump_table)
676 {
677 int n_labels = XVECLEN (jump_table, 1);
678 int best_count = -1;
679 int i, j;
680
681 for (i = 0; i < n_labels; i++)
682 {
683 int count = 1;
684
685 for (j = i + 1; j < n_labels; j++)
686 if (XEXP (XVECEXP (jump_table, 1, i), 0)
687 == XEXP (XVECEXP (jump_table, 1, j), 0))
688 count++;
689
690 if (count > best_count)
691 best_count = count, best_label = XVECEXP (jump_table, 1, i);
692 }
693 }
694
695 return best_label ? best_label : const0_rtx;
696 }
697
698 /* Return the TLS model to use for SYMBOL. */
699
700 static enum tls_model
701 tls_symbolic_operand_type (rtx symbol)
702 {
703 enum tls_model model;
704
705 if (GET_CODE (symbol) != SYMBOL_REF)
706 return 0;
707 model = SYMBOL_REF_TLS_MODEL (symbol);
708
709 /* Local-exec with a 64-bit size is the same code as initial-exec. */
710 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
711 model = TLS_MODEL_INITIAL_EXEC;
712
713 return model;
714 }
715 \f
716 /* Return true if the function DECL will share the same GP as any
717 function in the current unit of translation. */
718
719 static bool
720 decl_has_samegp (const_tree decl)
721 {
722 /* Functions that are not local can be overridden, and thus may
723 not share the same gp. */
724 if (!(*targetm.binds_local_p) (decl))
725 return false;
726
727 /* If -msmall-data is in effect, assume that there is only one GP
728 for the module, and so any local symbol has this property. We
729 need explicit relocations to be able to enforce this for symbols
730 not defined in this unit of translation, however. */
731 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
732 return true;
733
734 /* Functions that are not external are defined in this UoT. */
735 /* ??? Irritatingly, static functions not yet emitted are still
736 marked "external". Apply this to non-static functions only. */
737 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
738 }
739
740 /* Return true if EXP should be placed in the small data section. */
741
742 static bool
743 alpha_in_small_data_p (const_tree exp)
744 {
745 /* We want to merge strings, so we never consider them small data. */
746 if (TREE_CODE (exp) == STRING_CST)
747 return false;
748
749 /* Functions are never in the small data area. Duh. */
750 if (TREE_CODE (exp) == FUNCTION_DECL)
751 return false;
752
753 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
754 {
755 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
756 if (strcmp (section, ".sdata") == 0
757 || strcmp (section, ".sbss") == 0)
758 return true;
759 }
760 else
761 {
762 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
763
764 /* If this is an incomplete type with size 0, then we can't put it
765 in sdata because it might be too big when completed. */
766 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
767 return true;
768 }
769
770 return false;
771 }
772
773 #if TARGET_ABI_OPEN_VMS
774 static bool
775 alpha_linkage_symbol_p (const char *symname)
776 {
777 int symlen = strlen (symname);
778
779 if (symlen > 4)
780 return strcmp (&symname [symlen - 4], "..lk") == 0;
781
782 return false;
783 }
784
785 #define LINKAGE_SYMBOL_REF_P(X) \
786 ((GET_CODE (X) == SYMBOL_REF \
787 && alpha_linkage_symbol_p (XSTR (X, 0))) \
788 || (GET_CODE (X) == CONST \
789 && GET_CODE (XEXP (X, 0)) == PLUS \
790 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
791 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
792 #endif
793
794 /* legitimate_address_p recognizes an RTL expression that is a valid
795 memory address for an instruction. The MODE argument is the
796 machine mode for the MEM expression that wants to use this address.
797
798 For Alpha, we have either a constant address or the sum of a
799 register and a constant address, or just a register. For DImode,
800 any of those forms can be surrounded with an AND that clear the
801 low-order three bits; this is an "unaligned" access. */
802
803 bool
804 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
805 {
806 /* If this is an ldq_u type address, discard the outer AND. */
807 if (mode == DImode
808 && GET_CODE (x) == AND
809 && GET_CODE (XEXP (x, 1)) == CONST_INT
810 && INTVAL (XEXP (x, 1)) == -8)
811 x = XEXP (x, 0);
812
813 /* Discard non-paradoxical subregs. */
814 if (GET_CODE (x) == SUBREG
815 && (GET_MODE_SIZE (GET_MODE (x))
816 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
817 x = SUBREG_REG (x);
818
819 /* Unadorned general registers are valid. */
820 if (REG_P (x)
821 && (strict
822 ? STRICT_REG_OK_FOR_BASE_P (x)
823 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
824 return true;
825
826 /* Constant addresses (i.e. +/- 32k) are valid. */
827 if (CONSTANT_ADDRESS_P (x))
828 return true;
829
830 #if TARGET_ABI_OPEN_VMS
831 if (LINKAGE_SYMBOL_REF_P (x))
832 return true;
833 #endif
834
835 /* Register plus a small constant offset is valid. */
836 if (GET_CODE (x) == PLUS)
837 {
838 rtx ofs = XEXP (x, 1);
839 x = XEXP (x, 0);
840
841 /* Discard non-paradoxical subregs. */
842 if (GET_CODE (x) == SUBREG
843 && (GET_MODE_SIZE (GET_MODE (x))
844 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
845 x = SUBREG_REG (x);
846
847 if (REG_P (x))
848 {
849 if (! strict
850 && NONSTRICT_REG_OK_FP_BASE_P (x)
851 && GET_CODE (ofs) == CONST_INT)
852 return true;
853 if ((strict
854 ? STRICT_REG_OK_FOR_BASE_P (x)
855 : NONSTRICT_REG_OK_FOR_BASE_P (x))
856 && CONSTANT_ADDRESS_P (ofs))
857 return true;
858 }
859 }
860
861 /* If we're managing explicit relocations, LO_SUM is valid, as
862 are small data symbols. */
863 else if (TARGET_EXPLICIT_RELOCS)
864 {
865 if (small_symbolic_operand (x, Pmode))
866 return true;
867
868 if (GET_CODE (x) == LO_SUM)
869 {
870 rtx ofs = XEXP (x, 1);
871 x = XEXP (x, 0);
872
873 /* Discard non-paradoxical subregs. */
874 if (GET_CODE (x) == SUBREG
875 && (GET_MODE_SIZE (GET_MODE (x))
876 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
877 x = SUBREG_REG (x);
878
879 /* Must have a valid base register. */
880 if (! (REG_P (x)
881 && (strict
882 ? STRICT_REG_OK_FOR_BASE_P (x)
883 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
884 return false;
885
886 /* The symbol must be local. */
887 if (local_symbolic_operand (ofs, Pmode)
888 || dtp32_symbolic_operand (ofs, Pmode)
889 || tp32_symbolic_operand (ofs, Pmode))
890 return true;
891 }
892 }
893
894 return false;
895 }
896
897 /* Build the SYMBOL_REF for __tls_get_addr. */
898
899 static GTY(()) rtx tls_get_addr_libfunc;
900
901 static rtx
902 get_tls_get_addr (void)
903 {
904 if (!tls_get_addr_libfunc)
905 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
906 return tls_get_addr_libfunc;
907 }
908
909 /* Try machine-dependent ways of modifying an illegitimate address
910 to be legitimate. If we find one, return the new, valid address. */
911
912 rtx
913 alpha_legitimize_address (rtx x, rtx scratch,
914 enum machine_mode mode ATTRIBUTE_UNUSED)
915 {
916 HOST_WIDE_INT addend;
917
918 /* If the address is (plus reg const_int) and the CONST_INT is not a
919 valid offset, compute the high part of the constant and add it to
920 the register. Then our address is (plus temp low-part-const). */
921 if (GET_CODE (x) == PLUS
922 && GET_CODE (XEXP (x, 0)) == REG
923 && GET_CODE (XEXP (x, 1)) == CONST_INT
924 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
925 {
926 addend = INTVAL (XEXP (x, 1));
927 x = XEXP (x, 0);
928 goto split_addend;
929 }
930
931 /* If the address is (const (plus FOO const_int)), find the low-order
932 part of the CONST_INT. Then load FOO plus any high-order part of the
933 CONST_INT into a register. Our address is (plus reg low-part-const).
934 This is done to reduce the number of GOT entries. */
935 if (can_create_pseudo_p ()
936 && GET_CODE (x) == CONST
937 && GET_CODE (XEXP (x, 0)) == PLUS
938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
939 {
940 addend = INTVAL (XEXP (XEXP (x, 0), 1));
941 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
942 goto split_addend;
943 }
944
945 /* If we have a (plus reg const), emit the load as in (2), then add
946 the two registers, and finally generate (plus reg low-part-const) as
947 our address. */
948 if (can_create_pseudo_p ()
949 && GET_CODE (x) == PLUS
950 && GET_CODE (XEXP (x, 0)) == REG
951 && GET_CODE (XEXP (x, 1)) == CONST
952 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
953 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
954 {
955 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
956 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
957 XEXP (XEXP (XEXP (x, 1), 0), 0),
958 NULL_RTX, 1, OPTAB_LIB_WIDEN);
959 goto split_addend;
960 }
961
962 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
963 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
964 {
965 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
966
967 switch (tls_symbolic_operand_type (x))
968 {
969 case TLS_MODEL_NONE:
970 break;
971
972 case TLS_MODEL_GLOBAL_DYNAMIC:
973 start_sequence ();
974
975 r0 = gen_rtx_REG (Pmode, 0);
976 r16 = gen_rtx_REG (Pmode, 16);
977 tga = get_tls_get_addr ();
978 dest = gen_reg_rtx (Pmode);
979 seq = GEN_INT (alpha_next_sequence_number++);
980
981 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
982 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
983 insn = emit_call_insn (insn);
984 CONST_OR_PURE_CALL_P (insn) = 1;
985 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
986
987 insn = get_insns ();
988 end_sequence ();
989
990 emit_libcall_block (insn, dest, r0, x);
991 return dest;
992
993 case TLS_MODEL_LOCAL_DYNAMIC:
994 start_sequence ();
995
996 r0 = gen_rtx_REG (Pmode, 0);
997 r16 = gen_rtx_REG (Pmode, 16);
998 tga = get_tls_get_addr ();
999 scratch = gen_reg_rtx (Pmode);
1000 seq = GEN_INT (alpha_next_sequence_number++);
1001
1002 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1003 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1004 insn = emit_call_insn (insn);
1005 CONST_OR_PURE_CALL_P (insn) = 1;
1006 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1007
1008 insn = get_insns ();
1009 end_sequence ();
1010
1011 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1012 UNSPEC_TLSLDM_CALL);
1013 emit_libcall_block (insn, scratch, r0, eqv);
1014
1015 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1016 eqv = gen_rtx_CONST (Pmode, eqv);
1017
1018 if (alpha_tls_size == 64)
1019 {
1020 dest = gen_reg_rtx (Pmode);
1021 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1022 emit_insn (gen_adddi3 (dest, dest, scratch));
1023 return dest;
1024 }
1025 if (alpha_tls_size == 32)
1026 {
1027 insn = gen_rtx_HIGH (Pmode, eqv);
1028 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1029 scratch = gen_reg_rtx (Pmode);
1030 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1031 }
1032 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1033
1034 case TLS_MODEL_INITIAL_EXEC:
1035 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1036 eqv = gen_rtx_CONST (Pmode, eqv);
1037 tp = gen_reg_rtx (Pmode);
1038 scratch = gen_reg_rtx (Pmode);
1039 dest = gen_reg_rtx (Pmode);
1040
1041 emit_insn (gen_load_tp (tp));
1042 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1043 emit_insn (gen_adddi3 (dest, tp, scratch));
1044 return dest;
1045
1046 case TLS_MODEL_LOCAL_EXEC:
1047 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1048 eqv = gen_rtx_CONST (Pmode, eqv);
1049 tp = gen_reg_rtx (Pmode);
1050
1051 emit_insn (gen_load_tp (tp));
1052 if (alpha_tls_size == 32)
1053 {
1054 insn = gen_rtx_HIGH (Pmode, eqv);
1055 insn = gen_rtx_PLUS (Pmode, tp, insn);
1056 tp = gen_reg_rtx (Pmode);
1057 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1058 }
1059 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1060
1061 default:
1062 gcc_unreachable ();
1063 }
1064
1065 if (local_symbolic_operand (x, Pmode))
1066 {
1067 if (small_symbolic_operand (x, Pmode))
1068 return x;
1069 else
1070 {
1071 if (can_create_pseudo_p ())
1072 scratch = gen_reg_rtx (Pmode);
1073 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1074 gen_rtx_HIGH (Pmode, x)));
1075 return gen_rtx_LO_SUM (Pmode, scratch, x);
1076 }
1077 }
1078 }
1079
1080 return NULL;
1081
1082 split_addend:
1083 {
1084 HOST_WIDE_INT low, high;
1085
1086 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1087 addend -= low;
1088 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1089 addend -= high;
1090
1091 if (addend)
1092 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1093 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1094 1, OPTAB_LIB_WIDEN);
1095 if (high)
1096 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1097 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1098 1, OPTAB_LIB_WIDEN);
1099
1100 return plus_constant (x, low);
1101 }
1102 }
1103
1104 /* Primarily this is required for TLS symbols, but given that our move
1105 patterns *ought* to be able to handle any symbol at any time, we
1106 should never be spilling symbolic operands to the constant pool, ever. */
1107
1108 static bool
1109 alpha_cannot_force_const_mem (rtx x)
1110 {
1111 enum rtx_code code = GET_CODE (x);
1112 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1113 }
1114
1115 /* We do not allow indirect calls to be optimized into sibling calls, nor
1116 can we allow a call to a function with a different GP to be optimized
1117 into a sibcall. */
1118
1119 static bool
1120 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1121 {
1122 /* Can't do indirect tail calls, since we don't know if the target
1123 uses the same GP. */
1124 if (!decl)
1125 return false;
1126
1127 /* Otherwise, we can make a tail call if the target function shares
1128 the same GP. */
1129 return decl_has_samegp (decl);
1130 }
1131
1132 int
1133 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1134 {
1135 rtx x = *px;
1136
1137 /* Don't re-split. */
1138 if (GET_CODE (x) == LO_SUM)
1139 return -1;
1140
1141 return small_symbolic_operand (x, Pmode) != 0;
1142 }
1143
1144 static int
1145 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1146 {
1147 rtx x = *px;
1148
1149 /* Don't re-split. */
1150 if (GET_CODE (x) == LO_SUM)
1151 return -1;
1152
1153 if (small_symbolic_operand (x, Pmode))
1154 {
1155 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1156 *px = x;
1157 return -1;
1158 }
1159
1160 return 0;
1161 }
1162
1163 rtx
1164 split_small_symbolic_operand (rtx x)
1165 {
1166 x = copy_insn (x);
1167 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1168 return x;
1169 }
1170
1171 /* Indicate that INSN cannot be duplicated. This is true for any insn
1172 that we've marked with gpdisp relocs, since those have to stay in
1173 1-1 correspondence with one another.
1174
1175 Technically we could copy them if we could set up a mapping from one
1176 sequence number to another, across the set of insns to be duplicated.
1177 This seems overly complicated and error-prone since interblock motion
1178 from sched-ebb could move one of the pair of insns to a different block.
1179
1180 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1181 then they'll be in a different block from their ldgp. Which could lead
1182 the bb reorder code to think that it would be ok to copy just the block
1183 containing the call and branch to the block containing the ldgp. */
1184
1185 static bool
1186 alpha_cannot_copy_insn_p (rtx insn)
1187 {
1188 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1189 return false;
1190 if (recog_memoized (insn) >= 0)
1191 return get_attr_cannot_copy (insn);
1192 else
1193 return false;
1194 }
1195
1196
1197 /* Try a machine-dependent way of reloading an illegitimate address
1198 operand. If we find one, push the reload and return the new rtx. */
1199
1200 rtx
1201 alpha_legitimize_reload_address (rtx x,
1202 enum machine_mode mode ATTRIBUTE_UNUSED,
1203 int opnum, int type,
1204 int ind_levels ATTRIBUTE_UNUSED)
1205 {
1206 /* We must recognize output that we have already generated ourselves. */
1207 if (GET_CODE (x) == PLUS
1208 && GET_CODE (XEXP (x, 0)) == PLUS
1209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1210 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1211 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1212 {
1213 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1214 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1215 opnum, type);
1216 return x;
1217 }
1218
1219 /* We wish to handle large displacements off a base register by
1220 splitting the addend across an ldah and the mem insn. This
1221 cuts number of extra insns needed from 3 to 1. */
1222 if (GET_CODE (x) == PLUS
1223 && GET_CODE (XEXP (x, 0)) == REG
1224 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1225 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1226 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1227 {
1228 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1229 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1230 HOST_WIDE_INT high
1231 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1232
1233 /* Check for 32-bit overflow. */
1234 if (high + low != val)
1235 return NULL_RTX;
1236
1237 /* Reload the high part into a base reg; leave the low part
1238 in the mem directly. */
1239 x = gen_rtx_PLUS (GET_MODE (x),
1240 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1241 GEN_INT (high)),
1242 GEN_INT (low));
1243
1244 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1245 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1246 opnum, type);
1247 return x;
1248 }
1249
1250 return NULL_RTX;
1251 }
1252 \f
1253 /* Compute a (partial) cost for rtx X. Return true if the complete
1254 cost has been computed, and false if subexpressions should be
1255 scanned. In either case, *TOTAL contains the cost result. */
1256
1257 static bool
1258 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1259 {
1260 enum machine_mode mode = GET_MODE (x);
1261 bool float_mode_p = FLOAT_MODE_P (mode);
1262 const struct alpha_rtx_cost_data *cost_data;
1263
1264 if (optimize_size)
1265 cost_data = &alpha_rtx_cost_size;
1266 else
1267 cost_data = &alpha_rtx_cost_data[alpha_tune];
1268
1269 switch (code)
1270 {
1271 case CONST_INT:
1272 /* If this is an 8-bit constant, return zero since it can be used
1273 nearly anywhere with no cost. If it is a valid operand for an
1274 ADD or AND, likewise return 0 if we know it will be used in that
1275 context. Otherwise, return 2 since it might be used there later.
1276 All other constants take at least two insns. */
1277 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1278 {
1279 *total = 0;
1280 return true;
1281 }
1282 /* FALLTHRU */
1283
1284 case CONST_DOUBLE:
1285 if (x == CONST0_RTX (mode))
1286 *total = 0;
1287 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1288 || (outer_code == AND && and_operand (x, VOIDmode)))
1289 *total = 0;
1290 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1291 *total = 2;
1292 else
1293 *total = COSTS_N_INSNS (2);
1294 return true;
1295
1296 case CONST:
1297 case SYMBOL_REF:
1298 case LABEL_REF:
1299 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1300 *total = COSTS_N_INSNS (outer_code != MEM);
1301 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1302 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1303 else if (tls_symbolic_operand_type (x))
1304 /* Estimate of cost for call_pal rduniq. */
1305 /* ??? How many insns do we emit here? More than one... */
1306 *total = COSTS_N_INSNS (15);
1307 else
1308 /* Otherwise we do a load from the GOT. */
1309 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1310 return true;
1311
1312 case HIGH:
1313 /* This is effectively an add_operand. */
1314 *total = 2;
1315 return true;
1316
1317 case PLUS:
1318 case MINUS:
1319 if (float_mode_p)
1320 *total = cost_data->fp_add;
1321 else if (GET_CODE (XEXP (x, 0)) == MULT
1322 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1323 {
1324 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1325 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1326 return true;
1327 }
1328 return false;
1329
1330 case MULT:
1331 if (float_mode_p)
1332 *total = cost_data->fp_mult;
1333 else if (mode == DImode)
1334 *total = cost_data->int_mult_di;
1335 else
1336 *total = cost_data->int_mult_si;
1337 return false;
1338
1339 case ASHIFT:
1340 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1341 && INTVAL (XEXP (x, 1)) <= 3)
1342 {
1343 *total = COSTS_N_INSNS (1);
1344 return false;
1345 }
1346 /* FALLTHRU */
1347
1348 case ASHIFTRT:
1349 case LSHIFTRT:
1350 *total = cost_data->int_shift;
1351 return false;
1352
1353 case IF_THEN_ELSE:
1354 if (float_mode_p)
1355 *total = cost_data->fp_add;
1356 else
1357 *total = cost_data->int_cmov;
1358 return false;
1359
1360 case DIV:
1361 case UDIV:
1362 case MOD:
1363 case UMOD:
1364 if (!float_mode_p)
1365 *total = cost_data->int_div;
1366 else if (mode == SFmode)
1367 *total = cost_data->fp_div_sf;
1368 else
1369 *total = cost_data->fp_div_df;
1370 return false;
1371
1372 case MEM:
1373 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1374 return true;
1375
1376 case NEG:
1377 if (! float_mode_p)
1378 {
1379 *total = COSTS_N_INSNS (1);
1380 return false;
1381 }
1382 /* FALLTHRU */
1383
1384 case ABS:
1385 if (! float_mode_p)
1386 {
1387 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1388 return false;
1389 }
1390 /* FALLTHRU */
1391
1392 case FLOAT:
1393 case UNSIGNED_FLOAT:
1394 case FIX:
1395 case UNSIGNED_FIX:
1396 case FLOAT_TRUNCATE:
1397 *total = cost_data->fp_add;
1398 return false;
1399
1400 case FLOAT_EXTEND:
1401 if (GET_CODE (XEXP (x, 0)) == MEM)
1402 *total = 0;
1403 else
1404 *total = cost_data->fp_add;
1405 return false;
1406
1407 default:
1408 return false;
1409 }
1410 }
1411 \f
1412 /* REF is an alignable memory location. Place an aligned SImode
1413 reference into *PALIGNED_MEM and the number of bits to shift into
1414 *PBITNUM. SCRATCH is a free register for use in reloading out
1415 of range stack slots. */
1416
1417 void
1418 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1419 {
1420 rtx base;
1421 HOST_WIDE_INT disp, offset;
1422
1423 gcc_assert (GET_CODE (ref) == MEM);
1424
1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1427 {
1428 base = find_replacement (&XEXP (ref, 0));
1429 gcc_assert (memory_address_p (GET_MODE (ref), base));
1430 }
1431 else
1432 base = XEXP (ref, 0);
1433
1434 if (GET_CODE (base) == PLUS)
1435 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 else
1437 disp = 0;
1438
1439 /* Find the byte offset within an aligned word. If the memory itself is
1440 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1441 will have examined the base register and determined it is aligned, and
1442 thus displacements from it are naturally alignable. */
1443 if (MEM_ALIGN (ref) >= 32)
1444 offset = 0;
1445 else
1446 offset = disp & 3;
1447
1448 /* Access the entire aligned word. */
1449 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1450
1451 /* Convert the byte offset within the word to a bit offset. */
1452 if (WORDS_BIG_ENDIAN)
1453 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1454 else
1455 offset *= 8;
1456 *pbitnum = GEN_INT (offset);
1457 }
1458
1459 /* Similar, but just get the address. Handle the two reload cases.
1460 Add EXTRA_OFFSET to the address we return. */
1461
1462 rtx
1463 get_unaligned_address (rtx ref)
1464 {
1465 rtx base;
1466 HOST_WIDE_INT offset = 0;
1467
1468 gcc_assert (GET_CODE (ref) == MEM);
1469
1470 if (reload_in_progress
1471 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1472 {
1473 base = find_replacement (&XEXP (ref, 0));
1474
1475 gcc_assert (memory_address_p (GET_MODE (ref), base));
1476 }
1477 else
1478 base = XEXP (ref, 0);
1479
1480 if (GET_CODE (base) == PLUS)
1481 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1482
1483 return plus_constant (base, offset);
1484 }
1485
1486 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1487 X is always returned in a register. */
1488
1489 rtx
1490 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1491 {
1492 if (GET_CODE (addr) == PLUS)
1493 {
1494 ofs += INTVAL (XEXP (addr, 1));
1495 addr = XEXP (addr, 0);
1496 }
1497
1498 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1499 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1500 }
1501
1502 /* On the Alpha, all (non-symbolic) constants except zero go into
1503 a floating-point register via memory. Note that we cannot
1504 return anything that is not a subset of CLASS, and that some
1505 symbolic constants cannot be dropped to memory. */
1506
1507 enum reg_class
1508 alpha_preferred_reload_class(rtx x, enum reg_class class)
1509 {
1510 /* Zero is present in any register class. */
1511 if (x == CONST0_RTX (GET_MODE (x)))
1512 return class;
1513
1514 /* These sorts of constants we can easily drop to memory. */
1515 if (GET_CODE (x) == CONST_INT
1516 || GET_CODE (x) == CONST_DOUBLE
1517 || GET_CODE (x) == CONST_VECTOR)
1518 {
1519 if (class == FLOAT_REGS)
1520 return NO_REGS;
1521 if (class == ALL_REGS)
1522 return GENERAL_REGS;
1523 return class;
1524 }
1525
1526 /* All other kinds of constants should not (and in the case of HIGH
1527 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1528 secondary reload. */
1529 if (CONSTANT_P (x))
1530 return (class == ALL_REGS ? GENERAL_REGS : class);
1531
1532 return class;
1533 }
1534
1535 /* Inform reload about cases where moving X with a mode MODE to a register in
1536 CLASS requires an extra scratch or immediate register. Return the class
1537 needed for the immediate register. */
1538
1539 static enum reg_class
1540 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1541 enum machine_mode mode, secondary_reload_info *sri)
1542 {
1543 /* Loading and storing HImode or QImode values to and from memory
1544 usually requires a scratch register. */
1545 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1546 {
1547 if (any_memory_operand (x, mode))
1548 {
1549 if (in_p)
1550 {
1551 if (!aligned_memory_operand (x, mode))
1552 sri->icode = reload_in_optab[mode];
1553 }
1554 else
1555 sri->icode = reload_out_optab[mode];
1556 return NO_REGS;
1557 }
1558 }
1559
1560 /* We also cannot do integral arithmetic into FP regs, as might result
1561 from register elimination into a DImode fp register. */
1562 if (class == FLOAT_REGS)
1563 {
1564 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1565 return GENERAL_REGS;
1566 if (in_p && INTEGRAL_MODE_P (mode)
1567 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1568 return GENERAL_REGS;
1569 }
1570
1571 return NO_REGS;
1572 }
1573 \f
1574 /* Subfunction of the following function. Update the flags of any MEM
1575 found in part of X. */
1576
1577 static int
1578 alpha_set_memflags_1 (rtx *xp, void *data)
1579 {
1580 rtx x = *xp, orig = (rtx) data;
1581
1582 if (GET_CODE (x) != MEM)
1583 return 0;
1584
1585 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1586 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1587 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1588 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1589 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1590
1591 /* Sadly, we cannot use alias sets because the extra aliasing
1592 produced by the AND interferes. Given that two-byte quantities
1593 are the only thing we would be able to differentiate anyway,
1594 there does not seem to be any point in convoluting the early
1595 out of the alias check. */
1596
1597 return -1;
1598 }
1599
1600 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1601 generated to perform a memory operation, look for any MEMs in either
1602 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1603 volatile flags from REF into each of the MEMs found. If REF is not
1604 a MEM, don't do anything. */
1605
1606 void
1607 alpha_set_memflags (rtx insn, rtx ref)
1608 {
1609 rtx *base_ptr;
1610
1611 if (GET_CODE (ref) != MEM)
1612 return;
1613
1614 /* This is only called from alpha.md, after having had something
1615 generated from one of the insn patterns. So if everything is
1616 zero, the pattern is already up-to-date. */
1617 if (!MEM_VOLATILE_P (ref)
1618 && !MEM_IN_STRUCT_P (ref)
1619 && !MEM_SCALAR_P (ref)
1620 && !MEM_NOTRAP_P (ref)
1621 && !MEM_READONLY_P (ref))
1622 return;
1623
1624 if (INSN_P (insn))
1625 base_ptr = &PATTERN (insn);
1626 else
1627 base_ptr = &insn;
1628 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1629 }
1630 \f
1631 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1632 int, bool);
1633
1634 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1635 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1636 and return pc_rtx if successful. */
1637
1638 static rtx
1639 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1640 HOST_WIDE_INT c, int n, bool no_output)
1641 {
1642 HOST_WIDE_INT new;
1643 int i, bits;
1644 /* Use a pseudo if highly optimizing and still generating RTL. */
1645 rtx subtarget
1646 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1647 rtx temp, insn;
1648
1649 /* If this is a sign-extended 32-bit constant, we can do this in at most
1650 three insns, so do it if we have enough insns left. We always have
1651 a sign-extended 32-bit constant when compiling on a narrow machine. */
1652
1653 if (HOST_BITS_PER_WIDE_INT != 64
1654 || c >> 31 == -1 || c >> 31 == 0)
1655 {
1656 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1657 HOST_WIDE_INT tmp1 = c - low;
1658 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1659 HOST_WIDE_INT extra = 0;
1660
1661 /* If HIGH will be interpreted as negative but the constant is
1662 positive, we must adjust it to do two ldha insns. */
1663
1664 if ((high & 0x8000) != 0 && c >= 0)
1665 {
1666 extra = 0x4000;
1667 tmp1 -= 0x40000000;
1668 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1669 }
1670
1671 if (c == low || (low == 0 && extra == 0))
1672 {
1673 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1674 but that meant that we can't handle INT_MIN on 32-bit machines
1675 (like NT/Alpha), because we recurse indefinitely through
1676 emit_move_insn to gen_movdi. So instead, since we know exactly
1677 what we want, create it explicitly. */
1678
1679 if (no_output)
1680 return pc_rtx;
1681 if (target == NULL)
1682 target = gen_reg_rtx (mode);
1683 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1684 return target;
1685 }
1686 else if (n >= 2 + (extra != 0))
1687 {
1688 if (no_output)
1689 return pc_rtx;
1690 if (!can_create_pseudo_p ())
1691 {
1692 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1693 temp = target;
1694 }
1695 else
1696 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1697 subtarget, mode);
1698
1699 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1700 This means that if we go through expand_binop, we'll try to
1701 generate extensions, etc, which will require new pseudos, which
1702 will fail during some split phases. The SImode add patterns
1703 still exist, but are not named. So build the insns by hand. */
1704
1705 if (extra != 0)
1706 {
1707 if (! subtarget)
1708 subtarget = gen_reg_rtx (mode);
1709 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1710 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1711 emit_insn (insn);
1712 temp = subtarget;
1713 }
1714
1715 if (target == NULL)
1716 target = gen_reg_rtx (mode);
1717 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1718 insn = gen_rtx_SET (VOIDmode, target, insn);
1719 emit_insn (insn);
1720 return target;
1721 }
1722 }
1723
1724 /* If we couldn't do it that way, try some other methods. But if we have
1725 no instructions left, don't bother. Likewise, if this is SImode and
1726 we can't make pseudos, we can't do anything since the expand_binop
1727 and expand_unop calls will widen and try to make pseudos. */
1728
1729 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1730 return 0;
1731
1732 /* Next, see if we can load a related constant and then shift and possibly
1733 negate it to get the constant we want. Try this once each increasing
1734 numbers of insns. */
1735
1736 for (i = 1; i < n; i++)
1737 {
1738 /* First, see if minus some low bits, we've an easy load of
1739 high bits. */
1740
1741 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1742 if (new != 0)
1743 {
1744 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1745 if (temp)
1746 {
1747 if (no_output)
1748 return temp;
1749 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1750 target, 0, OPTAB_WIDEN);
1751 }
1752 }
1753
1754 /* Next try complementing. */
1755 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1756 if (temp)
1757 {
1758 if (no_output)
1759 return temp;
1760 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1761 }
1762
1763 /* Next try to form a constant and do a left shift. We can do this
1764 if some low-order bits are zero; the exact_log2 call below tells
1765 us that information. The bits we are shifting out could be any
1766 value, but here we'll just try the 0- and sign-extended forms of
1767 the constant. To try to increase the chance of having the same
1768 constant in more than one insn, start at the highest number of
1769 bits to shift, but try all possibilities in case a ZAPNOT will
1770 be useful. */
1771
1772 bits = exact_log2 (c & -c);
1773 if (bits > 0)
1774 for (; bits > 0; bits--)
1775 {
1776 new = c >> bits;
1777 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1778 if (!temp && c < 0)
1779 {
1780 new = (unsigned HOST_WIDE_INT)c >> bits;
1781 temp = alpha_emit_set_const (subtarget, mode, new,
1782 i, no_output);
1783 }
1784 if (temp)
1785 {
1786 if (no_output)
1787 return temp;
1788 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1789 target, 0, OPTAB_WIDEN);
1790 }
1791 }
1792
1793 /* Now try high-order zero bits. Here we try the shifted-in bits as
1794 all zero and all ones. Be careful to avoid shifting outside the
1795 mode and to avoid shifting outside the host wide int size. */
1796 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1797 confuse the recursive call and set all of the high 32 bits. */
1798
1799 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1800 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1801 if (bits > 0)
1802 for (; bits > 0; bits--)
1803 {
1804 new = c << bits;
1805 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1806 if (!temp)
1807 {
1808 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1809 temp = alpha_emit_set_const (subtarget, mode, new,
1810 i, no_output);
1811 }
1812 if (temp)
1813 {
1814 if (no_output)
1815 return temp;
1816 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1817 target, 1, OPTAB_WIDEN);
1818 }
1819 }
1820
1821 /* Now try high-order 1 bits. We get that with a sign-extension.
1822 But one bit isn't enough here. Be careful to avoid shifting outside
1823 the mode and to avoid shifting outside the host wide int size. */
1824
1825 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1826 - floor_log2 (~ c) - 2);
1827 if (bits > 0)
1828 for (; bits > 0; bits--)
1829 {
1830 new = c << bits;
1831 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1832 if (!temp)
1833 {
1834 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1835 temp = alpha_emit_set_const (subtarget, mode, new,
1836 i, no_output);
1837 }
1838 if (temp)
1839 {
1840 if (no_output)
1841 return temp;
1842 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1843 target, 0, OPTAB_WIDEN);
1844 }
1845 }
1846 }
1847
1848 #if HOST_BITS_PER_WIDE_INT == 64
1849 /* Finally, see if can load a value into the target that is the same as the
1850 constant except that all bytes that are 0 are changed to be 0xff. If we
1851 can, then we can do a ZAPNOT to obtain the desired constant. */
1852
1853 new = c;
1854 for (i = 0; i < 64; i += 8)
1855 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1856 new |= (HOST_WIDE_INT) 0xff << i;
1857
1858 /* We are only called for SImode and DImode. If this is SImode, ensure that
1859 we are sign extended to a full word. */
1860
1861 if (mode == SImode)
1862 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1863
1864 if (new != c)
1865 {
1866 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1867 if (temp)
1868 {
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1872 target, 0, OPTAB_WIDEN);
1873 }
1874 }
1875 #endif
1876
1877 return 0;
1878 }
1879
1880 /* Try to output insns to set TARGET equal to the constant C if it can be
1881 done in less than N insns. Do all computations in MODE. Returns the place
1882 where the output has been placed if it can be done and the insns have been
1883 emitted. If it would take more than N insns, zero is returned and no
1884 insns and emitted. */
1885
1886 static rtx
1887 alpha_emit_set_const (rtx target, enum machine_mode mode,
1888 HOST_WIDE_INT c, int n, bool no_output)
1889 {
1890 enum machine_mode orig_mode = mode;
1891 rtx orig_target = target;
1892 rtx result = 0;
1893 int i;
1894
1895 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1896 can't load this constant in one insn, do this in DImode. */
1897 if (!can_create_pseudo_p () && mode == SImode
1898 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1899 {
1900 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1901 if (result)
1902 return result;
1903
1904 target = no_output ? NULL : gen_lowpart (DImode, target);
1905 mode = DImode;
1906 }
1907 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1908 {
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1910 mode = DImode;
1911 }
1912
1913 /* Try 1 insn, then 2, then up to N. */
1914 for (i = 1; i <= n; i++)
1915 {
1916 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1917 if (result)
1918 {
1919 rtx insn, set;
1920
1921 if (no_output)
1922 return result;
1923
1924 insn = get_last_insn ();
1925 set = single_set (insn);
1926 if (! CONSTANT_P (SET_SRC (set)))
1927 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1928 break;
1929 }
1930 }
1931
1932 /* Allow for the case where we changed the mode of TARGET. */
1933 if (result)
1934 {
1935 if (result == target)
1936 result = orig_target;
1937 else if (mode != orig_mode)
1938 result = gen_lowpart (orig_mode, result);
1939 }
1940
1941 return result;
1942 }
1943
1944 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1945 fall back to a straight forward decomposition. We do this to avoid
1946 exponential run times encountered when looking for longer sequences
1947 with alpha_emit_set_const. */
1948
1949 static rtx
1950 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1951 {
1952 HOST_WIDE_INT d1, d2, d3, d4;
1953
1954 /* Decompose the entire word */
1955 #if HOST_BITS_PER_WIDE_INT >= 64
1956 gcc_assert (c2 == -(c1 < 0));
1957 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1958 c1 -= d1;
1959 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1960 c1 = (c1 - d2) >> 32;
1961 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1962 c1 -= d3;
1963 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1964 gcc_assert (c1 == d4);
1965 #else
1966 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d1;
1968 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d2);
1970 c2 += (d2 < 0);
1971 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1972 c2 -= d3;
1973 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c2 == d4);
1975 #endif
1976
1977 /* Construct the high word */
1978 if (d4)
1979 {
1980 emit_move_insn (target, GEN_INT (d4));
1981 if (d3)
1982 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1983 }
1984 else
1985 emit_move_insn (target, GEN_INT (d3));
1986
1987 /* Shift it into place */
1988 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1989
1990 /* Add in the low bits. */
1991 if (d2)
1992 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1993 if (d1)
1994 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1995
1996 return target;
1997 }
1998
1999 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2000 the low 64 bits. */
2001
2002 static void
2003 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2004 {
2005 HOST_WIDE_INT i0, i1;
2006
2007 if (GET_CODE (x) == CONST_VECTOR)
2008 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2009
2010
2011 if (GET_CODE (x) == CONST_INT)
2012 {
2013 i0 = INTVAL (x);
2014 i1 = -(i0 < 0);
2015 }
2016 else if (HOST_BITS_PER_WIDE_INT >= 64)
2017 {
2018 i0 = CONST_DOUBLE_LOW (x);
2019 i1 = -(i0 < 0);
2020 }
2021 else
2022 {
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = CONST_DOUBLE_HIGH (x);
2025 }
2026
2027 *p0 = i0;
2028 *p1 = i1;
2029 }
2030
2031 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2032 are willing to load the value into a register via a move pattern.
2033 Normally this is all symbolic constants, integral constants that
2034 take three or fewer instructions, and floating-point zero. */
2035
2036 bool
2037 alpha_legitimate_constant_p (rtx x)
2038 {
2039 enum machine_mode mode = GET_MODE (x);
2040 HOST_WIDE_INT i0, i1;
2041
2042 switch (GET_CODE (x))
2043 {
2044 case CONST:
2045 case LABEL_REF:
2046 case HIGH:
2047 return true;
2048
2049 case SYMBOL_REF:
2050 /* TLS symbols are never valid. */
2051 return SYMBOL_REF_TLS_MODEL (x) == 0;
2052
2053 case CONST_DOUBLE:
2054 if (x == CONST0_RTX (mode))
2055 return true;
2056 if (FLOAT_MODE_P (mode))
2057 return false;
2058 goto do_integer;
2059
2060 case CONST_VECTOR:
2061 if (x == CONST0_RTX (mode))
2062 return true;
2063 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2064 return false;
2065 if (GET_MODE_SIZE (mode) != 8)
2066 return false;
2067 goto do_integer;
2068
2069 case CONST_INT:
2070 do_integer:
2071 if (TARGET_BUILD_CONSTANTS)
2072 return true;
2073 alpha_extract_integer (x, &i0, &i1);
2074 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2075 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2076 return false;
2077
2078 default:
2079 return false;
2080 }
2081 }
2082
2083 /* Operand 1 is known to be a constant, and should require more than one
2084 instruction to load. Emit that multi-part load. */
2085
2086 bool
2087 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2088 {
2089 HOST_WIDE_INT i0, i1;
2090 rtx temp = NULL_RTX;
2091
2092 alpha_extract_integer (operands[1], &i0, &i1);
2093
2094 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2095 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2096
2097 if (!temp && TARGET_BUILD_CONSTANTS)
2098 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2099
2100 if (temp)
2101 {
2102 if (!rtx_equal_p (operands[0], temp))
2103 emit_move_insn (operands[0], temp);
2104 return true;
2105 }
2106
2107 return false;
2108 }
2109
2110 /* Expand a move instruction; return true if all work is done.
2111 We don't handle non-bwx subword loads here. */
2112
2113 bool
2114 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2115 {
2116 /* If the output is not a register, the input must be. */
2117 if (GET_CODE (operands[0]) == MEM
2118 && ! reg_or_0_operand (operands[1], mode))
2119 operands[1] = force_reg (mode, operands[1]);
2120
2121 /* Allow legitimize_address to perform some simplifications. */
2122 if (mode == Pmode && symbolic_operand (operands[1], mode))
2123 {
2124 rtx tmp;
2125
2126 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2127 if (tmp)
2128 {
2129 if (tmp == operands[0])
2130 return true;
2131 operands[1] = tmp;
2132 return false;
2133 }
2134 }
2135
2136 /* Early out for non-constants and valid constants. */
2137 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2138 return false;
2139
2140 /* Split large integers. */
2141 if (GET_CODE (operands[1]) == CONST_INT
2142 || GET_CODE (operands[1]) == CONST_DOUBLE
2143 || GET_CODE (operands[1]) == CONST_VECTOR)
2144 {
2145 if (alpha_split_const_mov (mode, operands))
2146 return true;
2147 }
2148
2149 /* Otherwise we've nothing left but to drop the thing to memory. */
2150 operands[1] = force_const_mem (mode, operands[1]);
2151 if (reload_in_progress)
2152 {
2153 emit_move_insn (operands[0], XEXP (operands[1], 0));
2154 operands[1] = replace_equiv_address (operands[1], operands[0]);
2155 }
2156 else
2157 operands[1] = validize_mem (operands[1]);
2158 return false;
2159 }
2160
2161 /* Expand a non-bwx QImode or HImode move instruction;
2162 return true if all work is done. */
2163
2164 bool
2165 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2166 {
2167 rtx seq;
2168
2169 /* If the output is not a register, the input must be. */
2170 if (MEM_P (operands[0]))
2171 operands[1] = force_reg (mode, operands[1]);
2172
2173 /* Handle four memory cases, unaligned and aligned for either the input
2174 or the output. The only case where we can be called during reload is
2175 for aligned loads; all other cases require temporaries. */
2176
2177 if (any_memory_operand (operands[1], mode))
2178 {
2179 if (aligned_memory_operand (operands[1], mode))
2180 {
2181 if (reload_in_progress)
2182 {
2183 if (mode == QImode)
2184 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2185 else
2186 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2187 emit_insn (seq);
2188 }
2189 else
2190 {
2191 rtx aligned_mem, bitnum;
2192 rtx scratch = gen_reg_rtx (SImode);
2193 rtx subtarget;
2194 bool copyout;
2195
2196 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2197
2198 subtarget = operands[0];
2199 if (GET_CODE (subtarget) == REG)
2200 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 else
2202 subtarget = gen_reg_rtx (DImode), copyout = true;
2203
2204 if (mode == QImode)
2205 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2206 bitnum, scratch);
2207 else
2208 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2209 bitnum, scratch);
2210 emit_insn (seq);
2211
2212 if (copyout)
2213 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2214 }
2215 }
2216 else
2217 {
2218 /* Don't pass these as parameters since that makes the generated
2219 code depend on parameter evaluation order which will cause
2220 bootstrap failures. */
2221
2222 rtx temp1, temp2, subtarget, ua;
2223 bool copyout;
2224
2225 temp1 = gen_reg_rtx (DImode);
2226 temp2 = gen_reg_rtx (DImode);
2227
2228 subtarget = operands[0];
2229 if (GET_CODE (subtarget) == REG)
2230 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2231 else
2232 subtarget = gen_reg_rtx (DImode), copyout = true;
2233
2234 ua = get_unaligned_address (operands[1]);
2235 if (mode == QImode)
2236 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2237 else
2238 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2239
2240 alpha_set_memflags (seq, operands[1]);
2241 emit_insn (seq);
2242
2243 if (copyout)
2244 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2245 }
2246 return true;
2247 }
2248
2249 if (any_memory_operand (operands[0], mode))
2250 {
2251 if (aligned_memory_operand (operands[0], mode))
2252 {
2253 rtx aligned_mem, bitnum;
2254 rtx temp1 = gen_reg_rtx (SImode);
2255 rtx temp2 = gen_reg_rtx (SImode);
2256
2257 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2258
2259 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2260 temp1, temp2));
2261 }
2262 else
2263 {
2264 rtx temp1 = gen_reg_rtx (DImode);
2265 rtx temp2 = gen_reg_rtx (DImode);
2266 rtx temp3 = gen_reg_rtx (DImode);
2267 rtx ua = get_unaligned_address (operands[0]);
2268
2269 if (mode == QImode)
2270 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2271 else
2272 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2273
2274 alpha_set_memflags (seq, operands[0]);
2275 emit_insn (seq);
2276 }
2277 return true;
2278 }
2279
2280 return false;
2281 }
2282
2283 /* Implement the movmisalign patterns. One of the operands is a memory
2284 that is not naturally aligned. Emit instructions to load it. */
2285
2286 void
2287 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2288 {
2289 /* Honor misaligned loads, for those we promised to do so. */
2290 if (MEM_P (operands[1]))
2291 {
2292 rtx tmp;
2293
2294 if (register_operand (operands[0], mode))
2295 tmp = operands[0];
2296 else
2297 tmp = gen_reg_rtx (mode);
2298
2299 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2300 if (tmp != operands[0])
2301 emit_move_insn (operands[0], tmp);
2302 }
2303 else if (MEM_P (operands[0]))
2304 {
2305 if (!reg_or_0_operand (operands[1], mode))
2306 operands[1] = force_reg (mode, operands[1]);
2307 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2308 }
2309 else
2310 gcc_unreachable ();
2311 }
2312
2313 /* Generate an unsigned DImode to FP conversion. This is the same code
2314 optabs would emit if we didn't have TFmode patterns.
2315
2316 For SFmode, this is the only construction I've found that can pass
2317 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2318 intermediates will work, because you'll get intermediate rounding
2319 that ruins the end result. Some of this could be fixed by turning
2320 on round-to-positive-infinity, but that requires diddling the fpsr,
2321 which kills performance. I tried turning this around and converting
2322 to a negative number, so that I could turn on /m, but either I did
2323 it wrong or there's something else cause I wound up with the exact
2324 same single-bit error. There is a branch-less form of this same code:
2325
2326 srl $16,1,$1
2327 and $16,1,$2
2328 cmplt $16,0,$3
2329 or $1,$2,$2
2330 cmovge $16,$16,$2
2331 itoft $3,$f10
2332 itoft $2,$f11
2333 cvtqs $f11,$f11
2334 adds $f11,$f11,$f0
2335 fcmoveq $f10,$f11,$f0
2336
2337 I'm not using it because it's the same number of instructions as
2338 this branch-full form, and it has more serialized long latency
2339 instructions on the critical path.
2340
2341 For DFmode, we can avoid rounding errors by breaking up the word
2342 into two pieces, converting them separately, and adding them back:
2343
2344 LC0: .long 0,0x5f800000
2345
2346 itoft $16,$f11
2347 lda $2,LC0
2348 cmplt $16,0,$1
2349 cpyse $f11,$f31,$f10
2350 cpyse $f31,$f11,$f11
2351 s4addq $1,$2,$1
2352 lds $f12,0($1)
2353 cvtqt $f10,$f10
2354 cvtqt $f11,$f11
2355 addt $f12,$f10,$f0
2356 addt $f0,$f11,$f0
2357
2358 This doesn't seem to be a clear-cut win over the optabs form.
2359 It probably all depends on the distribution of numbers being
2360 converted -- in the optabs form, all but high-bit-set has a
2361 much lower minimum execution time. */
2362
2363 void
2364 alpha_emit_floatuns (rtx operands[2])
2365 {
2366 rtx neglab, donelab, i0, i1, f0, in, out;
2367 enum machine_mode mode;
2368
2369 out = operands[0];
2370 in = force_reg (DImode, operands[1]);
2371 mode = GET_MODE (out);
2372 neglab = gen_label_rtx ();
2373 donelab = gen_label_rtx ();
2374 i0 = gen_reg_rtx (DImode);
2375 i1 = gen_reg_rtx (DImode);
2376 f0 = gen_reg_rtx (mode);
2377
2378 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2379
2380 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2381 emit_jump_insn (gen_jump (donelab));
2382 emit_barrier ();
2383
2384 emit_label (neglab);
2385
2386 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2387 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2388 emit_insn (gen_iordi3 (i0, i0, i1));
2389 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2390 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2391
2392 emit_label (donelab);
2393 }
2394
2395 /* Generate the comparison for a conditional branch. */
2396
2397 rtx
2398 alpha_emit_conditional_branch (enum rtx_code code)
2399 {
2400 enum rtx_code cmp_code, branch_code;
2401 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2402 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2403 rtx tem;
2404
2405 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2406 {
2407 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2408 op1 = const0_rtx;
2409 alpha_compare.fp_p = 0;
2410 }
2411
2412 /* The general case: fold the comparison code to the types of compares
2413 that we have, choosing the branch as necessary. */
2414 switch (code)
2415 {
2416 case EQ: case LE: case LT: case LEU: case LTU:
2417 case UNORDERED:
2418 /* We have these compares: */
2419 cmp_code = code, branch_code = NE;
2420 break;
2421
2422 case NE:
2423 case ORDERED:
2424 /* These must be reversed. */
2425 cmp_code = reverse_condition (code), branch_code = EQ;
2426 break;
2427
2428 case GE: case GT: case GEU: case GTU:
2429 /* For FP, we swap them, for INT, we reverse them. */
2430 if (alpha_compare.fp_p)
2431 {
2432 cmp_code = swap_condition (code);
2433 branch_code = NE;
2434 tem = op0, op0 = op1, op1 = tem;
2435 }
2436 else
2437 {
2438 cmp_code = reverse_condition (code);
2439 branch_code = EQ;
2440 }
2441 break;
2442
2443 default:
2444 gcc_unreachable ();
2445 }
2446
2447 if (alpha_compare.fp_p)
2448 {
2449 cmp_mode = DFmode;
2450 if (flag_unsafe_math_optimizations)
2451 {
2452 /* When we are not as concerned about non-finite values, and we
2453 are comparing against zero, we can branch directly. */
2454 if (op1 == CONST0_RTX (DFmode))
2455 cmp_code = UNKNOWN, branch_code = code;
2456 else if (op0 == CONST0_RTX (DFmode))
2457 {
2458 /* Undo the swap we probably did just above. */
2459 tem = op0, op0 = op1, op1 = tem;
2460 branch_code = swap_condition (cmp_code);
2461 cmp_code = UNKNOWN;
2462 }
2463 }
2464 else
2465 {
2466 /* ??? We mark the branch mode to be CCmode to prevent the
2467 compare and branch from being combined, since the compare
2468 insn follows IEEE rules that the branch does not. */
2469 branch_mode = CCmode;
2470 }
2471 }
2472 else
2473 {
2474 cmp_mode = DImode;
2475
2476 /* The following optimizations are only for signed compares. */
2477 if (code != LEU && code != LTU && code != GEU && code != GTU)
2478 {
2479 /* Whee. Compare and branch against 0 directly. */
2480 if (op1 == const0_rtx)
2481 cmp_code = UNKNOWN, branch_code = code;
2482
2483 /* If the constants doesn't fit into an immediate, but can
2484 be generated by lda/ldah, we adjust the argument and
2485 compare against zero, so we can use beq/bne directly. */
2486 /* ??? Don't do this when comparing against symbols, otherwise
2487 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2488 be declared false out of hand (at least for non-weak). */
2489 else if (GET_CODE (op1) == CONST_INT
2490 && (code == EQ || code == NE)
2491 && !(symbolic_operand (op0, VOIDmode)
2492 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2493 {
2494 rtx n_op1 = GEN_INT (-INTVAL (op1));
2495
2496 if (! satisfies_constraint_I (op1)
2497 && (satisfies_constraint_K (n_op1)
2498 || satisfies_constraint_L (n_op1)))
2499 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2500 }
2501 }
2502
2503 if (!reg_or_0_operand (op0, DImode))
2504 op0 = force_reg (DImode, op0);
2505 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2506 op1 = force_reg (DImode, op1);
2507 }
2508
2509 /* Emit an initial compare instruction, if necessary. */
2510 tem = op0;
2511 if (cmp_code != UNKNOWN)
2512 {
2513 tem = gen_reg_rtx (cmp_mode);
2514 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2515 }
2516
2517 /* Zero the operands. */
2518 memset (&alpha_compare, 0, sizeof (alpha_compare));
2519
2520 /* Return the branch comparison. */
2521 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2522 }
2523
2524 /* Certain simplifications can be done to make invalid setcc operations
2525 valid. Return the final comparison, or NULL if we can't work. */
2526
2527 rtx
2528 alpha_emit_setcc (enum rtx_code code)
2529 {
2530 enum rtx_code cmp_code;
2531 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2532 int fp_p = alpha_compare.fp_p;
2533 rtx tmp;
2534
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2537
2538 if (fp_p && GET_MODE (op0) == TFmode)
2539 {
2540 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2541 op1 = const0_rtx;
2542 fp_p = 0;
2543 }
2544
2545 if (fp_p && !TARGET_FIX)
2546 return NULL_RTX;
2547
2548 /* The general case: fold the comparison code to the types of compares
2549 that we have, choosing the branch as necessary. */
2550
2551 cmp_code = UNKNOWN;
2552 switch (code)
2553 {
2554 case EQ: case LE: case LT: case LEU: case LTU:
2555 case UNORDERED:
2556 /* We have these compares. */
2557 if (fp_p)
2558 cmp_code = code, code = NE;
2559 break;
2560
2561 case NE:
2562 if (!fp_p && op1 == const0_rtx)
2563 break;
2564 /* FALLTHRU */
2565
2566 case ORDERED:
2567 cmp_code = reverse_condition (code);
2568 code = EQ;
2569 break;
2570
2571 case GE: case GT: case GEU: case GTU:
2572 /* These normally need swapping, but for integer zero we have
2573 special patterns that recognize swapped operands. */
2574 if (!fp_p && op1 == const0_rtx)
2575 break;
2576 code = swap_condition (code);
2577 if (fp_p)
2578 cmp_code = code, code = NE;
2579 tmp = op0, op0 = op1, op1 = tmp;
2580 break;
2581
2582 default:
2583 gcc_unreachable ();
2584 }
2585
2586 if (!fp_p)
2587 {
2588 if (!register_operand (op0, DImode))
2589 op0 = force_reg (DImode, op0);
2590 if (!reg_or_8bit_operand (op1, DImode))
2591 op1 = force_reg (DImode, op1);
2592 }
2593
2594 /* Emit an initial compare instruction, if necessary. */
2595 if (cmp_code != UNKNOWN)
2596 {
2597 enum machine_mode mode = fp_p ? DFmode : DImode;
2598
2599 tmp = gen_reg_rtx (mode);
2600 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2601 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2602
2603 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2604 op1 = const0_rtx;
2605 }
2606
2607 /* Return the setcc comparison. */
2608 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2609 }
2610
2611
2612 /* Rewrite a comparison against zero CMP of the form
2613 (CODE (cc0) (const_int 0)) so it can be written validly in
2614 a conditional move (if_then_else CMP ...).
2615 If both of the operands that set cc0 are nonzero we must emit
2616 an insn to perform the compare (it can't be done within
2617 the conditional move). */
2618
2619 rtx
2620 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2621 {
2622 enum rtx_code code = GET_CODE (cmp);
2623 enum rtx_code cmov_code = NE;
2624 rtx op0 = alpha_compare.op0;
2625 rtx op1 = alpha_compare.op1;
2626 int fp_p = alpha_compare.fp_p;
2627 enum machine_mode cmp_mode
2628 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2629 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2630 enum machine_mode cmov_mode = VOIDmode;
2631 int local_fast_math = flag_unsafe_math_optimizations;
2632 rtx tem;
2633
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2636
2637 if (fp_p != FLOAT_MODE_P (mode))
2638 {
2639 enum rtx_code cmp_code;
2640
2641 if (! TARGET_FIX)
2642 return 0;
2643
2644 /* If we have fp<->int register move instructions, do a cmov by
2645 performing the comparison in fp registers, and move the
2646 zero/nonzero value to integer registers, where we can then
2647 use a normal cmov, or vice-versa. */
2648
2649 switch (code)
2650 {
2651 case EQ: case LE: case LT: case LEU: case LTU:
2652 /* We have these compares. */
2653 cmp_code = code, code = NE;
2654 break;
2655
2656 case NE:
2657 /* This must be reversed. */
2658 cmp_code = EQ, code = EQ;
2659 break;
2660
2661 case GE: case GT: case GEU: case GTU:
2662 /* These normally need swapping, but for integer zero we have
2663 special patterns that recognize swapped operands. */
2664 if (!fp_p && op1 == const0_rtx)
2665 cmp_code = code, code = NE;
2666 else
2667 {
2668 cmp_code = swap_condition (code);
2669 code = NE;
2670 tem = op0, op0 = op1, op1 = tem;
2671 }
2672 break;
2673
2674 default:
2675 gcc_unreachable ();
2676 }
2677
2678 tem = gen_reg_rtx (cmp_op_mode);
2679 emit_insn (gen_rtx_SET (VOIDmode, tem,
2680 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2681 op0, op1)));
2682
2683 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2684 op0 = gen_lowpart (cmp_op_mode, tem);
2685 op1 = CONST0_RTX (cmp_op_mode);
2686 fp_p = !fp_p;
2687 local_fast_math = 1;
2688 }
2689
2690 /* We may be able to use a conditional move directly.
2691 This avoids emitting spurious compares. */
2692 if (signed_comparison_operator (cmp, VOIDmode)
2693 && (!fp_p || local_fast_math)
2694 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2695 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2696
2697 /* We can't put the comparison inside the conditional move;
2698 emit a compare instruction and put that inside the
2699 conditional move. Make sure we emit only comparisons we have;
2700 swap or reverse as necessary. */
2701
2702 if (!can_create_pseudo_p ())
2703 return NULL_RTX;
2704
2705 switch (code)
2706 {
2707 case EQ: case LE: case LT: case LEU: case LTU:
2708 /* We have these compares: */
2709 break;
2710
2711 case NE:
2712 /* This must be reversed. */
2713 code = reverse_condition (code);
2714 cmov_code = EQ;
2715 break;
2716
2717 case GE: case GT: case GEU: case GTU:
2718 /* These must be swapped. */
2719 if (op1 != CONST0_RTX (cmp_mode))
2720 {
2721 code = swap_condition (code);
2722 tem = op0, op0 = op1, op1 = tem;
2723 }
2724 break;
2725
2726 default:
2727 gcc_unreachable ();
2728 }
2729
2730 if (!fp_p)
2731 {
2732 if (!reg_or_0_operand (op0, DImode))
2733 op0 = force_reg (DImode, op0);
2734 if (!reg_or_8bit_operand (op1, DImode))
2735 op1 = force_reg (DImode, op1);
2736 }
2737
2738 /* ??? We mark the branch mode to be CCmode to prevent the compare
2739 and cmov from being combined, since the compare insn follows IEEE
2740 rules that the cmov does not. */
2741 if (fp_p && !local_fast_math)
2742 cmov_mode = CCmode;
2743
2744 tem = gen_reg_rtx (cmp_op_mode);
2745 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2746 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2747 }
2748
2749 /* Simplify a conditional move of two constants into a setcc with
2750 arithmetic. This is done with a splitter since combine would
2751 just undo the work if done during code generation. It also catches
2752 cases we wouldn't have before cse. */
2753
2754 int
2755 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2756 rtx t_rtx, rtx f_rtx)
2757 {
2758 HOST_WIDE_INT t, f, diff;
2759 enum machine_mode mode;
2760 rtx target, subtarget, tmp;
2761
2762 mode = GET_MODE (dest);
2763 t = INTVAL (t_rtx);
2764 f = INTVAL (f_rtx);
2765 diff = t - f;
2766
2767 if (((code == NE || code == EQ) && diff < 0)
2768 || (code == GE || code == GT))
2769 {
2770 code = reverse_condition (code);
2771 diff = t, t = f, f = diff;
2772 diff = t - f;
2773 }
2774
2775 subtarget = target = dest;
2776 if (mode != DImode)
2777 {
2778 target = gen_lowpart (DImode, dest);
2779 if (can_create_pseudo_p ())
2780 subtarget = gen_reg_rtx (DImode);
2781 else
2782 subtarget = target;
2783 }
2784 /* Below, we must be careful to use copy_rtx on target and subtarget
2785 in intermediate insns, as they may be a subreg rtx, which may not
2786 be shared. */
2787
2788 if (f == 0 && exact_log2 (diff) > 0
2789 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2790 viable over a longer latency cmove. On EV5, the E0 slot is a
2791 scarce resource, and on EV4 shift has the same latency as a cmove. */
2792 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2793 {
2794 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2795 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2796
2797 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2798 GEN_INT (exact_log2 (t)));
2799 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2800 }
2801 else if (f == 0 && t == -1)
2802 {
2803 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2804 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2805
2806 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2807 }
2808 else if (diff == 1 || diff == 4 || diff == 8)
2809 {
2810 rtx add_op;
2811
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2814
2815 if (diff == 1)
2816 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2817 else
2818 {
2819 add_op = GEN_INT (f);
2820 if (sext_add_operand (add_op, mode))
2821 {
2822 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2823 GEN_INT (diff));
2824 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2825 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2826 }
2827 else
2828 return 0;
2829 }
2830 }
2831 else
2832 return 0;
2833
2834 return 1;
2835 }
2836 \f
2837 /* Look up the function X_floating library function name for the
2838 given operation. */
2839
2840 struct xfloating_op GTY(())
2841 {
2842 const enum rtx_code code;
2843 const char *const GTY((skip)) osf_func;
2844 const char *const GTY((skip)) vms_func;
2845 rtx libcall;
2846 };
2847
2848 static GTY(()) struct xfloating_op xfloating_ops[] =
2849 {
2850 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2851 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2852 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2853 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2854 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2855 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2856 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2857 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2858 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2859 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2860 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2861 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2862 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2863 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2864 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2865 };
2866
2867 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2868 {
2869 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2870 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2871 };
2872
2873 static rtx
2874 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2875 {
2876 struct xfloating_op *ops = xfloating_ops;
2877 long n = ARRAY_SIZE (xfloating_ops);
2878 long i;
2879
2880 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2881
2882 /* How irritating. Nothing to key off for the main table. */
2883 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2884 {
2885 ops = vax_cvt_ops;
2886 n = ARRAY_SIZE (vax_cvt_ops);
2887 }
2888
2889 for (i = 0; i < n; ++i, ++ops)
2890 if (ops->code == code)
2891 {
2892 rtx func = ops->libcall;
2893 if (!func)
2894 {
2895 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2896 ? ops->vms_func : ops->osf_func);
2897 ops->libcall = func;
2898 }
2899 return func;
2900 }
2901
2902 gcc_unreachable ();
2903 }
2904
2905 /* Most X_floating operations take the rounding mode as an argument.
2906 Compute that here. */
2907
2908 static int
2909 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2910 enum alpha_fp_rounding_mode round)
2911 {
2912 int mode;
2913
2914 switch (round)
2915 {
2916 case ALPHA_FPRM_NORM:
2917 mode = 2;
2918 break;
2919 case ALPHA_FPRM_MINF:
2920 mode = 1;
2921 break;
2922 case ALPHA_FPRM_CHOP:
2923 mode = 0;
2924 break;
2925 case ALPHA_FPRM_DYN:
2926 mode = 4;
2927 break;
2928 default:
2929 gcc_unreachable ();
2930
2931 /* XXX For reference, round to +inf is mode = 3. */
2932 }
2933
2934 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2935 mode |= 0x10000;
2936
2937 return mode;
2938 }
2939
2940 /* Emit an X_floating library function call.
2941
2942 Note that these functions do not follow normal calling conventions:
2943 TFmode arguments are passed in two integer registers (as opposed to
2944 indirect); TFmode return values appear in R16+R17.
2945
2946 FUNC is the function to call.
2947 TARGET is where the output belongs.
2948 OPERANDS are the inputs.
2949 NOPERANDS is the count of inputs.
2950 EQUIV is the expression equivalent for the function.
2951 */
2952
2953 static void
2954 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2955 int noperands, rtx equiv)
2956 {
2957 rtx usage = NULL_RTX, tmp, reg;
2958 int regno = 16, i;
2959
2960 start_sequence ();
2961
2962 for (i = 0; i < noperands; ++i)
2963 {
2964 switch (GET_MODE (operands[i]))
2965 {
2966 case TFmode:
2967 reg = gen_rtx_REG (TFmode, regno);
2968 regno += 2;
2969 break;
2970
2971 case DFmode:
2972 reg = gen_rtx_REG (DFmode, regno + 32);
2973 regno += 1;
2974 break;
2975
2976 case VOIDmode:
2977 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2978 /* FALLTHRU */
2979 case DImode:
2980 reg = gen_rtx_REG (DImode, regno);
2981 regno += 1;
2982 break;
2983
2984 default:
2985 gcc_unreachable ();
2986 }
2987
2988 emit_move_insn (reg, operands[i]);
2989 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2990 }
2991
2992 switch (GET_MODE (target))
2993 {
2994 case TFmode:
2995 reg = gen_rtx_REG (TFmode, 16);
2996 break;
2997 case DFmode:
2998 reg = gen_rtx_REG (DFmode, 32);
2999 break;
3000 case DImode:
3001 reg = gen_rtx_REG (DImode, 0);
3002 break;
3003 default:
3004 gcc_unreachable ();
3005 }
3006
3007 tmp = gen_rtx_MEM (QImode, func);
3008 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3009 const0_rtx, const0_rtx));
3010 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3011 CONST_OR_PURE_CALL_P (tmp) = 1;
3012
3013 tmp = get_insns ();
3014 end_sequence ();
3015
3016 emit_libcall_block (tmp, target, reg, equiv);
3017 }
3018
3019 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3020
3021 void
3022 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3023 {
3024 rtx func;
3025 int mode;
3026 rtx out_operands[3];
3027
3028 func = alpha_lookup_xfloating_lib_func (code);
3029 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3030
3031 out_operands[0] = operands[1];
3032 out_operands[1] = operands[2];
3033 out_operands[2] = GEN_INT (mode);
3034 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3035 gen_rtx_fmt_ee (code, TFmode, operands[1],
3036 operands[2]));
3037 }
3038
3039 /* Emit an X_floating library function call for a comparison. */
3040
3041 static rtx
3042 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3043 {
3044 enum rtx_code cmp_code, res_code;
3045 rtx func, out, operands[2];
3046
3047 /* X_floating library comparison functions return
3048 -1 unordered
3049 0 false
3050 1 true
3051 Convert the compare against the raw return value. */
3052
3053 cmp_code = *pcode;
3054 switch (cmp_code)
3055 {
3056 case UNORDERED:
3057 cmp_code = EQ;
3058 res_code = LT;
3059 break;
3060 case ORDERED:
3061 cmp_code = EQ;
3062 res_code = GE;
3063 break;
3064 case NE:
3065 res_code = NE;
3066 break;
3067 case EQ:
3068 case LT:
3069 case GT:
3070 case LE:
3071 case GE:
3072 res_code = GT;
3073 break;
3074 default:
3075 gcc_unreachable ();
3076 }
3077 *pcode = res_code;
3078
3079 func = alpha_lookup_xfloating_lib_func (cmp_code);
3080
3081 operands[0] = op0;
3082 operands[1] = op1;
3083 out = gen_reg_rtx (DImode);
3084
3085 /* ??? Strange mode for equiv because what's actually returned
3086 is -1,0,1, not a proper boolean value. */
3087 alpha_emit_xfloating_libcall (func, out, operands, 2,
3088 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3089
3090 return out;
3091 }
3092
3093 /* Emit an X_floating library function call for a conversion. */
3094
3095 void
3096 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3097 {
3098 int noperands = 1, mode;
3099 rtx out_operands[2];
3100 rtx func;
3101 enum rtx_code code = orig_code;
3102
3103 if (code == UNSIGNED_FIX)
3104 code = FIX;
3105
3106 func = alpha_lookup_xfloating_lib_func (code);
3107
3108 out_operands[0] = operands[1];
3109
3110 switch (code)
3111 {
3112 case FIX:
3113 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3114 out_operands[1] = GEN_INT (mode);
3115 noperands = 2;
3116 break;
3117 case FLOAT_TRUNCATE:
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3119 out_operands[1] = GEN_INT (mode);
3120 noperands = 2;
3121 break;
3122 default:
3123 break;
3124 }
3125
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3127 gen_rtx_fmt_e (orig_code,
3128 GET_MODE (operands[0]),
3129 operands[1]));
3130 }
3131
3132 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3133 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3134 guarantee that the sequence
3135 set (OP[0] OP[2])
3136 set (OP[1] OP[3])
3137 is valid. Naturally, output operand ordering is little-endian.
3138 This is used by *movtf_internal and *movti_internal. */
3139
3140 void
3141 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3142 bool fixup_overlap)
3143 {
3144 switch (GET_CODE (operands[1]))
3145 {
3146 case REG:
3147 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3148 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3149 break;
3150
3151 case MEM:
3152 operands[3] = adjust_address (operands[1], DImode, 8);
3153 operands[2] = adjust_address (operands[1], DImode, 0);
3154 break;
3155
3156 case CONST_INT:
3157 case CONST_DOUBLE:
3158 gcc_assert (operands[1] == CONST0_RTX (mode));
3159 operands[2] = operands[3] = const0_rtx;
3160 break;
3161
3162 default:
3163 gcc_unreachable ();
3164 }
3165
3166 switch (GET_CODE (operands[0]))
3167 {
3168 case REG:
3169 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3170 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3171 break;
3172
3173 case MEM:
3174 operands[1] = adjust_address (operands[0], DImode, 8);
3175 operands[0] = adjust_address (operands[0], DImode, 0);
3176 break;
3177
3178 default:
3179 gcc_unreachable ();
3180 }
3181
3182 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3183 {
3184 rtx tmp;
3185 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3186 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3187 }
3188 }
3189
3190 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3191 op2 is a register containing the sign bit, operation is the
3192 logical operation to be performed. */
3193
3194 void
3195 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3196 {
3197 rtx high_bit = operands[2];
3198 rtx scratch;
3199 int move;
3200
3201 alpha_split_tmode_pair (operands, TFmode, false);
3202
3203 /* Detect three flavors of operand overlap. */
3204 move = 1;
3205 if (rtx_equal_p (operands[0], operands[2]))
3206 move = 0;
3207 else if (rtx_equal_p (operands[1], operands[2]))
3208 {
3209 if (rtx_equal_p (operands[0], high_bit))
3210 move = 2;
3211 else
3212 move = -1;
3213 }
3214
3215 if (move < 0)
3216 emit_move_insn (operands[0], operands[2]);
3217
3218 /* ??? If the destination overlaps both source tf and high_bit, then
3219 assume source tf is dead in its entirety and use the other half
3220 for a scratch register. Otherwise "scratch" is just the proper
3221 destination register. */
3222 scratch = operands[move < 2 ? 1 : 3];
3223
3224 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3225
3226 if (move > 0)
3227 {
3228 emit_move_insn (operands[0], operands[2]);
3229 if (move > 1)
3230 emit_move_insn (operands[1], scratch);
3231 }
3232 }
3233 \f
3234 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3235 unaligned data:
3236
3237 unsigned: signed:
3238 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3239 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3240 lda r3,X(r11) lda r3,X+2(r11)
3241 extwl r1,r3,r1 extql r1,r3,r1
3242 extwh r2,r3,r2 extqh r2,r3,r2
3243 or r1.r2.r1 or r1,r2,r1
3244 sra r1,48,r1
3245
3246 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3248 lda r3,X(r11) lda r3,X(r11)
3249 extll r1,r3,r1 extll r1,r3,r1
3250 extlh r2,r3,r2 extlh r2,r3,r2
3251 or r1.r2.r1 addl r1,r2,r1
3252
3253 quad: ldq_u r1,X(r11)
3254 ldq_u r2,X+7(r11)
3255 lda r3,X(r11)
3256 extql r1,r3,r1
3257 extqh r2,r3,r2
3258 or r1.r2.r1
3259 */
3260
3261 void
3262 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3263 HOST_WIDE_INT ofs, int sign)
3264 {
3265 rtx meml, memh, addr, extl, exth, tmp, mema;
3266 enum machine_mode mode;
3267
3268 if (TARGET_BWX && size == 2)
3269 {
3270 meml = adjust_address (mem, QImode, ofs);
3271 memh = adjust_address (mem, QImode, ofs+1);
3272 if (BYTES_BIG_ENDIAN)
3273 tmp = meml, meml = memh, memh = tmp;
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276 emit_insn (gen_zero_extendqidi2 (extl, meml));
3277 emit_insn (gen_zero_extendqidi2 (exth, memh));
3278 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3279 NULL, 1, OPTAB_LIB_WIDEN);
3280 addr = expand_simple_binop (DImode, IOR, extl, exth,
3281 NULL, 1, OPTAB_LIB_WIDEN);
3282
3283 if (sign && GET_MODE (tgt) != HImode)
3284 {
3285 addr = gen_lowpart (HImode, addr);
3286 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3287 }
3288 else
3289 {
3290 if (GET_MODE (tgt) != DImode)
3291 addr = gen_lowpart (GET_MODE (tgt), addr);
3292 emit_move_insn (tgt, addr);
3293 }
3294 return;
3295 }
3296
3297 meml = gen_reg_rtx (DImode);
3298 memh = gen_reg_rtx (DImode);
3299 addr = gen_reg_rtx (DImode);
3300 extl = gen_reg_rtx (DImode);
3301 exth = gen_reg_rtx (DImode);
3302
3303 mema = XEXP (mem, 0);
3304 if (GET_CODE (mema) == LO_SUM)
3305 mema = force_reg (Pmode, mema);
3306
3307 /* AND addresses cannot be in any alias set, since they may implicitly
3308 alias surrounding code. Ideally we'd have some alias set that
3309 covered all types except those with alignment 8 or higher. */
3310
3311 tmp = change_address (mem, DImode,
3312 gen_rtx_AND (DImode,
3313 plus_constant (mema, ofs),
3314 GEN_INT (-8)));
3315 set_mem_alias_set (tmp, 0);
3316 emit_move_insn (meml, tmp);
3317
3318 tmp = change_address (mem, DImode,
3319 gen_rtx_AND (DImode,
3320 plus_constant (mema, ofs + size - 1),
3321 GEN_INT (-8)));
3322 set_mem_alias_set (tmp, 0);
3323 emit_move_insn (memh, tmp);
3324
3325 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3326 {
3327 emit_move_insn (addr, plus_constant (mema, -1));
3328
3329 emit_insn (gen_extqh_be (extl, meml, addr));
3330 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3331
3332 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3333 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3334 addr, 1, OPTAB_WIDEN);
3335 }
3336 else if (sign && size == 2)
3337 {
3338 emit_move_insn (addr, plus_constant (mema, ofs+2));
3339
3340 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3341 emit_insn (gen_extqh_le (exth, memh, addr));
3342
3343 /* We must use tgt here for the target. Alpha-vms port fails if we use
3344 addr for the target, because addr is marked as a pointer and combine
3345 knows that pointers are always sign-extended 32-bit values. */
3346 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3347 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3348 addr, 1, OPTAB_WIDEN);
3349 }
3350 else
3351 {
3352 if (WORDS_BIG_ENDIAN)
3353 {
3354 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3355 switch ((int) size)
3356 {
3357 case 2:
3358 emit_insn (gen_extwh_be (extl, meml, addr));
3359 mode = HImode;
3360 break;
3361
3362 case 4:
3363 emit_insn (gen_extlh_be (extl, meml, addr));
3364 mode = SImode;
3365 break;
3366
3367 case 8:
3368 emit_insn (gen_extqh_be (extl, meml, addr));
3369 mode = DImode;
3370 break;
3371
3372 default:
3373 gcc_unreachable ();
3374 }
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3376 }
3377 else
3378 {
3379 emit_move_insn (addr, plus_constant (mema, ofs));
3380 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3381 switch ((int) size)
3382 {
3383 case 2:
3384 emit_insn (gen_extwh_le (exth, memh, addr));
3385 mode = HImode;
3386 break;
3387
3388 case 4:
3389 emit_insn (gen_extlh_le (exth, memh, addr));
3390 mode = SImode;
3391 break;
3392
3393 case 8:
3394 emit_insn (gen_extqh_le (exth, memh, addr));
3395 mode = DImode;
3396 break;
3397
3398 default:
3399 gcc_unreachable ();
3400 }
3401 }
3402
3403 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3404 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3405 sign, OPTAB_WIDEN);
3406 }
3407
3408 if (addr != tgt)
3409 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3410 }
3411
3412 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3413
3414 void
3415 alpha_expand_unaligned_store (rtx dst, rtx src,
3416 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3417 {
3418 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3419
3420 if (TARGET_BWX && size == 2)
3421 {
3422 if (src != const0_rtx)
3423 {
3424 dstl = gen_lowpart (QImode, src);
3425 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3426 NULL, 1, OPTAB_LIB_WIDEN);
3427 dsth = gen_lowpart (QImode, dsth);
3428 }
3429 else
3430 dstl = dsth = const0_rtx;
3431
3432 meml = adjust_address (dst, QImode, ofs);
3433 memh = adjust_address (dst, QImode, ofs+1);
3434 if (BYTES_BIG_ENDIAN)
3435 addr = meml, meml = memh, memh = addr;
3436
3437 emit_move_insn (meml, dstl);
3438 emit_move_insn (memh, dsth);
3439 return;
3440 }
3441
3442 dstl = gen_reg_rtx (DImode);
3443 dsth = gen_reg_rtx (DImode);
3444 insl = gen_reg_rtx (DImode);
3445 insh = gen_reg_rtx (DImode);
3446
3447 dsta = XEXP (dst, 0);
3448 if (GET_CODE (dsta) == LO_SUM)
3449 dsta = force_reg (Pmode, dsta);
3450
3451 /* AND addresses cannot be in any alias set, since they may implicitly
3452 alias surrounding code. Ideally we'd have some alias set that
3453 covered all types except those with alignment 8 or higher. */
3454
3455 meml = change_address (dst, DImode,
3456 gen_rtx_AND (DImode,
3457 plus_constant (dsta, ofs),
3458 GEN_INT (-8)));
3459 set_mem_alias_set (meml, 0);
3460
3461 memh = change_address (dst, DImode,
3462 gen_rtx_AND (DImode,
3463 plus_constant (dsta, ofs + size - 1),
3464 GEN_INT (-8)));
3465 set_mem_alias_set (memh, 0);
3466
3467 emit_move_insn (dsth, memh);
3468 emit_move_insn (dstl, meml);
3469 if (WORDS_BIG_ENDIAN)
3470 {
3471 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3472
3473 if (src != const0_rtx)
3474 {
3475 switch ((int) size)
3476 {
3477 case 2:
3478 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3479 break;
3480 case 4:
3481 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3482 break;
3483 case 8:
3484 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3485 break;
3486 }
3487 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3488 GEN_INT (size*8), addr));
3489 }
3490
3491 switch ((int) size)
3492 {
3493 case 2:
3494 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3495 break;
3496 case 4:
3497 {
3498 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3499 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3500 break;
3501 }
3502 case 8:
3503 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3504 break;
3505 }
3506
3507 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3508 }
3509 else
3510 {
3511 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3512
3513 if (src != CONST0_RTX (GET_MODE (src)))
3514 {
3515 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3516 GEN_INT (size*8), addr));
3517
3518 switch ((int) size)
3519 {
3520 case 2:
3521 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3522 break;
3523 case 4:
3524 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3525 break;
3526 case 8:
3527 emit_insn (gen_insql_le (insl, src, addr));
3528 break;
3529 }
3530 }
3531
3532 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3533
3534 switch ((int) size)
3535 {
3536 case 2:
3537 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3538 break;
3539 case 4:
3540 {
3541 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3542 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3543 break;
3544 }
3545 case 8:
3546 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3547 break;
3548 }
3549 }
3550
3551 if (src != CONST0_RTX (GET_MODE (src)))
3552 {
3553 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3554 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3555 }
3556
3557 if (WORDS_BIG_ENDIAN)
3558 {
3559 emit_move_insn (meml, dstl);
3560 emit_move_insn (memh, dsth);
3561 }
3562 else
3563 {
3564 /* Must store high before low for degenerate case of aligned. */
3565 emit_move_insn (memh, dsth);
3566 emit_move_insn (meml, dstl);
3567 }
3568 }
3569
3570 /* The block move code tries to maximize speed by separating loads and
3571 stores at the expense of register pressure: we load all of the data
3572 before we store it back out. There are two secondary effects worth
3573 mentioning, that this speeds copying to/from aligned and unaligned
3574 buffers, and that it makes the code significantly easier to write. */
3575
3576 #define MAX_MOVE_WORDS 8
3577
3578 /* Load an integral number of consecutive unaligned quadwords. */
3579
3580 static void
3581 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3582 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3583 {
3584 rtx const im8 = GEN_INT (-8);
3585 rtx const i64 = GEN_INT (64);
3586 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3587 rtx sreg, areg, tmp, smema;
3588 HOST_WIDE_INT i;
3589
3590 smema = XEXP (smem, 0);
3591 if (GET_CODE (smema) == LO_SUM)
3592 smema = force_reg (Pmode, smema);
3593
3594 /* Generate all the tmp registers we need. */
3595 for (i = 0; i < words; ++i)
3596 {
3597 data_regs[i] = out_regs[i];
3598 ext_tmps[i] = gen_reg_rtx (DImode);
3599 }
3600 data_regs[words] = gen_reg_rtx (DImode);
3601
3602 if (ofs != 0)
3603 smem = adjust_address (smem, GET_MODE (smem), ofs);
3604
3605 /* Load up all of the source data. */
3606 for (i = 0; i < words; ++i)
3607 {
3608 tmp = change_address (smem, DImode,
3609 gen_rtx_AND (DImode,
3610 plus_constant (smema, 8*i),
3611 im8));
3612 set_mem_alias_set (tmp, 0);
3613 emit_move_insn (data_regs[i], tmp);
3614 }
3615
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*words - 1),
3619 im8));
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[words], tmp);
3622
3623 /* Extract the half-word fragments. Unfortunately DEC decided to make
3624 extxh with offset zero a noop instead of zeroing the register, so
3625 we must take care of that edge condition ourselves with cmov. */
3626
3627 sreg = copy_addr_to_reg (smema);
3628 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3629 1, OPTAB_WIDEN);
3630 if (WORDS_BIG_ENDIAN)
3631 emit_move_insn (sreg, plus_constant (sreg, 7));
3632 for (i = 0; i < words; ++i)
3633 {
3634 if (WORDS_BIG_ENDIAN)
3635 {
3636 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3637 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3638 }
3639 else
3640 {
3641 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3642 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3643 }
3644 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3645 gen_rtx_IF_THEN_ELSE (DImode,
3646 gen_rtx_EQ (DImode, areg,
3647 const0_rtx),
3648 const0_rtx, ext_tmps[i])));
3649 }
3650
3651 /* Merge the half-words into whole words. */
3652 for (i = 0; i < words; ++i)
3653 {
3654 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3655 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3656 }
3657 }
3658
3659 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3660 may be NULL to store zeros. */
3661
3662 static void
3663 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3664 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3665 {
3666 rtx const im8 = GEN_INT (-8);
3667 rtx const i64 = GEN_INT (64);
3668 rtx ins_tmps[MAX_MOVE_WORDS];
3669 rtx st_tmp_1, st_tmp_2, dreg;
3670 rtx st_addr_1, st_addr_2, dmema;
3671 HOST_WIDE_INT i;
3672
3673 dmema = XEXP (dmem, 0);
3674 if (GET_CODE (dmema) == LO_SUM)
3675 dmema = force_reg (Pmode, dmema);
3676
3677 /* Generate all the tmp registers we need. */
3678 if (data_regs != NULL)
3679 for (i = 0; i < words; ++i)
3680 ins_tmps[i] = gen_reg_rtx(DImode);
3681 st_tmp_1 = gen_reg_rtx(DImode);
3682 st_tmp_2 = gen_reg_rtx(DImode);
3683
3684 if (ofs != 0)
3685 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3686
3687 st_addr_2 = change_address (dmem, DImode,
3688 gen_rtx_AND (DImode,
3689 plus_constant (dmema, words*8 - 1),
3690 im8));
3691 set_mem_alias_set (st_addr_2, 0);
3692
3693 st_addr_1 = change_address (dmem, DImode,
3694 gen_rtx_AND (DImode, dmema, im8));
3695 set_mem_alias_set (st_addr_1, 0);
3696
3697 /* Load up the destination end bits. */
3698 emit_move_insn (st_tmp_2, st_addr_2);
3699 emit_move_insn (st_tmp_1, st_addr_1);
3700
3701 /* Shift the input data into place. */
3702 dreg = copy_addr_to_reg (dmema);
3703 if (WORDS_BIG_ENDIAN)
3704 emit_move_insn (dreg, plus_constant (dreg, 7));
3705 if (data_regs != NULL)
3706 {
3707 for (i = words-1; i >= 0; --i)
3708 {
3709 if (WORDS_BIG_ENDIAN)
3710 {
3711 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3712 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3713 }
3714 else
3715 {
3716 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3718 }
3719 }
3720 for (i = words-1; i > 0; --i)
3721 {
3722 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3723 ins_tmps[i-1], ins_tmps[i-1], 1,
3724 OPTAB_WIDEN);
3725 }
3726 }
3727
3728 /* Split and merge the ends with the destination data. */
3729 if (WORDS_BIG_ENDIAN)
3730 {
3731 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3732 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3733 }
3734 else
3735 {
3736 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3737 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3738 }
3739
3740 if (data_regs != NULL)
3741 {
3742 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3743 st_tmp_2, 1, OPTAB_WIDEN);
3744 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3745 st_tmp_1, 1, OPTAB_WIDEN);
3746 }
3747
3748 /* Store it all. */
3749 if (WORDS_BIG_ENDIAN)
3750 emit_move_insn (st_addr_1, st_tmp_1);
3751 else
3752 emit_move_insn (st_addr_2, st_tmp_2);
3753 for (i = words-1; i > 0; --i)
3754 {
3755 rtx tmp = change_address (dmem, DImode,
3756 gen_rtx_AND (DImode,
3757 plus_constant(dmema,
3758 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3759 im8));
3760 set_mem_alias_set (tmp, 0);
3761 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3762 }
3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (st_addr_2, st_tmp_2);
3765 else
3766 emit_move_insn (st_addr_1, st_tmp_1);
3767 }
3768
3769
3770 /* Expand string/block move operations.
3771
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3776
3777 int
3778 alpha_expand_block_move (rtx operands[])
3779 {
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3789 rtx tmp;
3790 unsigned int i, words, ofs, nregs = 0;
3791
3792 if (orig_bytes <= 0)
3793 return 1;
3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3795 return 0;
3796
3797 /* Look for additional alignment information from recorded register info. */
3798
3799 tmp = XEXP (orig_src, 0);
3800 if (GET_CODE (tmp) == REG)
3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3802 else if (GET_CODE (tmp) == PLUS
3803 && GET_CODE (XEXP (tmp, 0)) == REG
3804 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3805 {
3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3808
3809 if (a > src_align)
3810 {
3811 if (a >= 64 && c % 8 == 0)
3812 src_align = 64;
3813 else if (a >= 32 && c % 4 == 0)
3814 src_align = 32;
3815 else if (a >= 16 && c % 2 == 0)
3816 src_align = 16;
3817 }
3818 }
3819
3820 tmp = XEXP (orig_dst, 0);
3821 if (GET_CODE (tmp) == REG)
3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && GET_CODE (XEXP (tmp, 0)) == REG
3825 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3826 {
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3829
3830 if (a > dst_align)
3831 {
3832 if (a >= 64 && c % 8 == 0)
3833 dst_align = 64;
3834 else if (a >= 32 && c % 4 == 0)
3835 dst_align = 32;
3836 else if (a >= 16 && c % 2 == 0)
3837 dst_align = 16;
3838 }
3839 }
3840
3841 ofs = 0;
3842 if (src_align >= 64 && bytes >= 8)
3843 {
3844 words = bytes / 8;
3845
3846 for (i = 0; i < words; ++i)
3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
3848
3849 for (i = 0; i < words; ++i)
3850 emit_move_insn (data_regs[nregs + i],
3851 adjust_address (orig_src, DImode, ofs + i * 8));
3852
3853 nregs += words;
3854 bytes -= words * 8;
3855 ofs += words * 8;
3856 }
3857
3858 if (src_align >= 32 && bytes >= 4)
3859 {
3860 words = bytes / 4;
3861
3862 for (i = 0; i < words; ++i)
3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
3864
3865 for (i = 0; i < words; ++i)
3866 emit_move_insn (data_regs[nregs + i],
3867 adjust_address (orig_src, SImode, ofs + i * 4));
3868
3869 nregs += words;
3870 bytes -= words * 4;
3871 ofs += words * 4;
3872 }
3873
3874 if (bytes >= 8)
3875 {
3876 words = bytes / 8;
3877
3878 for (i = 0; i < words+1; ++i)
3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
3880
3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3882 words, ofs);
3883
3884 nregs += words;
3885 bytes -= words * 8;
3886 ofs += words * 8;
3887 }
3888
3889 if (! TARGET_BWX && bytes >= 4)
3890 {
3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3893 bytes -= 4;
3894 ofs += 4;
3895 }
3896
3897 if (bytes >= 2)
3898 {
3899 if (src_align >= 16)
3900 {
3901 do {
3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3904 bytes -= 2;
3905 ofs += 2;
3906 } while (bytes >= 2);
3907 }
3908 else if (! TARGET_BWX)
3909 {
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3912 bytes -= 2;
3913 ofs += 2;
3914 }
3915 }
3916
3917 while (bytes > 0)
3918 {
3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3921 bytes -= 1;
3922 ofs += 1;
3923 }
3924
3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3926
3927 /* Now save it back out again. */
3928
3929 i = 0, ofs = 0;
3930
3931 /* Write out the data in whatever chunks reading the source allowed. */
3932 if (dst_align >= 64)
3933 {
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3935 {
3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3937 data_regs[i]);
3938 ofs += 8;
3939 i++;
3940 }
3941 }
3942
3943 if (dst_align >= 32)
3944 {
3945 /* If the source has remaining DImode regs, write them out in
3946 two pieces. */
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3948 {
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3951
3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3953 gen_lowpart (SImode, data_regs[i]));
3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3955 gen_lowpart (SImode, tmp));
3956 ofs += 8;
3957 i++;
3958 }
3959
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3961 {
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3963 data_regs[i]);
3964 ofs += 4;
3965 i++;
3966 }
3967 }
3968
3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3970 {
3971 /* Write out a remaining block of words using unaligned methods. */
3972
3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
3975 break;
3976
3977 if (words == 1)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3979 else
3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3981 words, ofs);
3982
3983 i += words;
3984 ofs += words * 8;
3985 }
3986
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3991 {
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3993 ofs += 4;
3994 i++;
3995 }
3996
3997 if (dst_align >= 16)
3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3999 {
4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4001 i++;
4002 ofs += 2;
4003 }
4004 else
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4006 {
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4008 i++;
4009 ofs += 2;
4010 }
4011
4012 /* The remainder must be byte copies. */
4013 while (i < nregs)
4014 {
4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4017 i++;
4018 ofs += 1;
4019 }
4020
4021 return 1;
4022 }
4023
4024 int
4025 alpha_expand_block_clear (rtx operands[])
4026 {
4027 rtx bytes_rtx = operands[1];
4028 rtx align_rtx = operands[3];
4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
4033 rtx orig_dst = operands[0];
4034 rtx tmp;
4035 int i, words, ofs = 0;
4036
4037 if (orig_bytes <= 0)
4038 return 1;
4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4040 return 0;
4041
4042 /* Look for stricter alignment. */
4043 tmp = XEXP (orig_dst, 0);
4044 if (GET_CODE (tmp) == REG)
4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4046 else if (GET_CODE (tmp) == PLUS
4047 && GET_CODE (XEXP (tmp, 0)) == REG
4048 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4049 {
4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4052
4053 if (a > align)
4054 {
4055 if (a >= 64)
4056 align = a, alignofs = 8 - c % 8;
4057 else if (a >= 32)
4058 align = a, alignofs = 4 - c % 4;
4059 else if (a >= 16)
4060 align = a, alignofs = 2 - c % 2;
4061 }
4062 }
4063
4064 /* Handle an unaligned prefix first. */
4065
4066 if (alignofs > 0)
4067 {
4068 #if HOST_BITS_PER_WIDE_INT >= 64
4069 /* Given that alignofs is bounded by align, the only time BWX could
4070 generate three stores is for a 7 byte fill. Prefer two individual
4071 stores over a load/mask/store sequence. */
4072 if ((!TARGET_BWX || alignofs == 7)
4073 && align >= 32
4074 && !(alignofs == 4 && bytes >= 4))
4075 {
4076 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4077 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4078 rtx mem, tmp;
4079 HOST_WIDE_INT mask;
4080
4081 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4082 set_mem_alias_set (mem, 0);
4083
4084 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4085 if (bytes < alignofs)
4086 {
4087 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4088 ofs += bytes;
4089 bytes = 0;
4090 }
4091 else
4092 {
4093 bytes -= alignofs;
4094 ofs += alignofs;
4095 }
4096 alignofs = 0;
4097
4098 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4099 NULL_RTX, 1, OPTAB_WIDEN);
4100
4101 emit_move_insn (mem, tmp);
4102 }
4103 #endif
4104
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4106 {
4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4108 bytes -= 1;
4109 ofs += 1;
4110 alignofs -= 1;
4111 }
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4113 {
4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4115 bytes -= 2;
4116 ofs += 2;
4117 alignofs -= 2;
4118 }
4119 if (alignofs == 4 && bytes >= 4)
4120 {
4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4122 bytes -= 4;
4123 ofs += 4;
4124 alignofs = 0;
4125 }
4126
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4129 if (alignofs > 0)
4130 {
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4133 }
4134 }
4135
4136 /* Handle a block of contiguous long-words. */
4137
4138 if (align >= 64 && bytes >= 8)
4139 {
4140 words = bytes / 8;
4141
4142 for (i = 0; i < words; ++i)
4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4144 const0_rtx);
4145
4146 bytes -= words * 8;
4147 ofs += words * 8;
4148 }
4149
4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4152
4153 if (align >= 32 && bytes > 16)
4154 {
4155 rtx orig_dsta;
4156
4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4158 bytes -= 4;
4159 ofs += 4;
4160
4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4164
4165 words = bytes / 8;
4166 for (i = 0; i < words; ++i)
4167 {
4168 rtx mem
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
4171 plus_constant (orig_dsta, ofs + i*8),
4172 GEN_INT (-8)));
4173 set_mem_alias_set (mem, 0);
4174 emit_move_insn (mem, const0_rtx);
4175 }
4176
4177 /* Depending on the alignment, the first stq_u may have overlapped
4178 with the initial stl, which means that the last stq_u didn't
4179 write as much as it would appear. Leave those questionable bytes
4180 unaccounted for. */
4181 bytes -= words * 8 - 4;
4182 ofs += words * 8 - 4;
4183 }
4184
4185 /* Handle a smaller block of aligned words. */
4186
4187 if ((align >= 64 && bytes == 4)
4188 || (align == 32 && bytes >= 4))
4189 {
4190 words = bytes / 4;
4191
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4194 const0_rtx);
4195
4196 bytes -= words * 4;
4197 ofs += words * 4;
4198 }
4199
4200 /* An unaligned block uses stq_u stores for as many as possible. */
4201
4202 if (bytes >= 8)
4203 {
4204 words = bytes / 8;
4205
4206 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4207
4208 bytes -= words * 8;
4209 ofs += words * 8;
4210 }
4211
4212 /* Next clean up any trailing pieces. */
4213
4214 #if HOST_BITS_PER_WIDE_INT >= 64
4215 /* Count the number of bits in BYTES for which aligned stores could
4216 be emitted. */
4217 words = 0;
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4219 if (bytes & i)
4220 words += 1;
4221
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4225 {
4226 if (align >= 64)
4227 {
4228 rtx mem, tmp;
4229 HOST_WIDE_INT mask;
4230
4231 mem = adjust_address (orig_dst, DImode, ofs);
4232 set_mem_alias_set (mem, 0);
4233
4234 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4235
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4238
4239 emit_move_insn (mem, tmp);
4240 return 1;
4241 }
4242 else if (align >= 32 && bytes < 4)
4243 {
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4246
4247 mem = adjust_address (orig_dst, SImode, ofs);
4248 set_mem_alias_set (mem, 0);
4249
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4251
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4254
4255 emit_move_insn (mem, tmp);
4256 return 1;
4257 }
4258 }
4259 #endif
4260
4261 if (!TARGET_BWX && bytes >= 4)
4262 {
4263 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4264 bytes -= 4;
4265 ofs += 4;
4266 }
4267
4268 if (bytes >= 2)
4269 {
4270 if (align >= 16)
4271 {
4272 do {
4273 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4274 const0_rtx);
4275 bytes -= 2;
4276 ofs += 2;
4277 } while (bytes >= 2);
4278 }
4279 else if (! TARGET_BWX)
4280 {
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4282 bytes -= 2;
4283 ofs += 2;
4284 }
4285 }
4286
4287 while (bytes > 0)
4288 {
4289 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4290 bytes -= 1;
4291 ofs += 1;
4292 }
4293
4294 return 1;
4295 }
4296
4297 /* Returns a mask so that zap(x, value) == x & mask. */
4298
4299 rtx
4300 alpha_expand_zap_mask (HOST_WIDE_INT value)
4301 {
4302 rtx result;
4303 int i;
4304
4305 if (HOST_BITS_PER_WIDE_INT >= 64)
4306 {
4307 HOST_WIDE_INT mask = 0;
4308
4309 for (i = 7; i >= 0; --i)
4310 {
4311 mask <<= 8;
4312 if (!((value >> i) & 1))
4313 mask |= 0xff;
4314 }
4315
4316 result = gen_int_mode (mask, DImode);
4317 }
4318 else
4319 {
4320 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4321
4322 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4323
4324 for (i = 7; i >= 4; --i)
4325 {
4326 mask_hi <<= 8;
4327 if (!((value >> i) & 1))
4328 mask_hi |= 0xff;
4329 }
4330
4331 for (i = 3; i >= 0; --i)
4332 {
4333 mask_lo <<= 8;
4334 if (!((value >> i) & 1))
4335 mask_lo |= 0xff;
4336 }
4337
4338 result = immed_double_const (mask_lo, mask_hi, DImode);
4339 }
4340
4341 return result;
4342 }
4343
4344 void
4345 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4346 enum machine_mode mode,
4347 rtx op0, rtx op1, rtx op2)
4348 {
4349 op0 = gen_lowpart (mode, op0);
4350
4351 if (op1 == const0_rtx)
4352 op1 = CONST0_RTX (mode);
4353 else
4354 op1 = gen_lowpart (mode, op1);
4355
4356 if (op2 == const0_rtx)
4357 op2 = CONST0_RTX (mode);
4358 else
4359 op2 = gen_lowpart (mode, op2);
4360
4361 emit_insn ((*gen) (op0, op1, op2));
4362 }
4363
4364 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4365 COND is true. Mark the jump as unlikely to be taken. */
4366
4367 static void
4368 emit_unlikely_jump (rtx cond, rtx label)
4369 {
4370 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4371 rtx x;
4372
4373 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4374 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4375 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4376 }
4377
4378 /* A subroutine of the atomic operation splitters. Emit a load-locked
4379 instruction in MODE. */
4380
4381 static void
4382 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4383 {
4384 rtx (*fn) (rtx, rtx) = NULL;
4385 if (mode == SImode)
4386 fn = gen_load_locked_si;
4387 else if (mode == DImode)
4388 fn = gen_load_locked_di;
4389 emit_insn (fn (reg, mem));
4390 }
4391
4392 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4393 instruction in MODE. */
4394
4395 static void
4396 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4397 {
4398 rtx (*fn) (rtx, rtx, rtx) = NULL;
4399 if (mode == SImode)
4400 fn = gen_store_conditional_si;
4401 else if (mode == DImode)
4402 fn = gen_store_conditional_di;
4403 emit_insn (fn (res, mem, val));
4404 }
4405
4406 /* A subroutine of the atomic operation splitters. Emit an insxl
4407 instruction in MODE. */
4408
4409 static rtx
4410 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4411 {
4412 rtx ret = gen_reg_rtx (DImode);
4413 rtx (*fn) (rtx, rtx, rtx);
4414
4415 if (WORDS_BIG_ENDIAN)
4416 {
4417 if (mode == QImode)
4418 fn = gen_insbl_be;
4419 else
4420 fn = gen_inswl_be;
4421 }
4422 else
4423 {
4424 if (mode == QImode)
4425 fn = gen_insbl_le;
4426 else
4427 fn = gen_inswl_le;
4428 }
4429 /* The insbl and inswl patterns require a register operand. */
4430 op1 = force_reg (mode, op1);
4431 emit_insn (fn (ret, op1, op2));
4432
4433 return ret;
4434 }
4435
4436 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4437 to perform. MEM is the memory on which to operate. VAL is the second
4438 operand of the binary operator. BEFORE and AFTER are optional locations to
4439 return the value of MEM either before of after the operation. SCRATCH is
4440 a scratch register. */
4441
4442 void
4443 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4444 rtx before, rtx after, rtx scratch)
4445 {
4446 enum machine_mode mode = GET_MODE (mem);
4447 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4448
4449 emit_insn (gen_memory_barrier ());
4450
4451 label = gen_label_rtx ();
4452 emit_label (label);
4453 label = gen_rtx_LABEL_REF (DImode, label);
4454
4455 if (before == NULL)
4456 before = scratch;
4457 emit_load_locked (mode, before, mem);
4458
4459 if (code == NOT)
4460 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4461 else
4462 x = gen_rtx_fmt_ee (code, mode, before, val);
4463 if (after)
4464 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4465 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4466
4467 emit_store_conditional (mode, cond, mem, scratch);
4468
4469 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4470 emit_unlikely_jump (x, label);
4471
4472 emit_insn (gen_memory_barrier ());
4473 }
4474
4475 /* Expand a compare and swap operation. */
4476
4477 void
4478 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4479 rtx scratch)
4480 {
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4483
4484 emit_insn (gen_memory_barrier ());
4485
4486 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4487 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 emit_label (XEXP (label1, 0));
4489
4490 emit_load_locked (mode, retval, mem);
4491
4492 x = gen_lowpart (DImode, retval);
4493 if (oldval == const0_rtx)
4494 x = gen_rtx_NE (DImode, x, const0_rtx);
4495 else
4496 {
4497 x = gen_rtx_EQ (DImode, x, oldval);
4498 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4499 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4500 }
4501 emit_unlikely_jump (x, label2);
4502
4503 emit_move_insn (scratch, newval);
4504 emit_store_conditional (mode, cond, mem, scratch);
4505
4506 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4507 emit_unlikely_jump (x, label1);
4508
4509 emit_insn (gen_memory_barrier ());
4510 emit_label (XEXP (label2, 0));
4511 }
4512
4513 void
4514 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4515 {
4516 enum machine_mode mode = GET_MODE (mem);
4517 rtx addr, align, wdst;
4518 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4519
4520 addr = force_reg (DImode, XEXP (mem, 0));
4521 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4522 NULL_RTX, 1, OPTAB_DIRECT);
4523
4524 oldval = convert_modes (DImode, mode, oldval, 1);
4525 newval = emit_insxl (mode, newval, addr);
4526
4527 wdst = gen_reg_rtx (DImode);
4528 if (mode == QImode)
4529 fn5 = gen_sync_compare_and_swapqi_1;
4530 else
4531 fn5 = gen_sync_compare_and_swaphi_1;
4532 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4533
4534 emit_move_insn (dst, gen_lowpart (mode, wdst));
4535 }
4536
4537 void
4538 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4539 rtx oldval, rtx newval, rtx align,
4540 rtx scratch, rtx cond)
4541 {
4542 rtx label1, label2, mem, width, mask, x;
4543
4544 mem = gen_rtx_MEM (DImode, align);
4545 MEM_VOLATILE_P (mem) = 1;
4546
4547 emit_insn (gen_memory_barrier ());
4548 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4549 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 emit_label (XEXP (label1, 0));
4551
4552 emit_load_locked (DImode, scratch, mem);
4553
4554 width = GEN_INT (GET_MODE_BITSIZE (mode));
4555 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4556 if (WORDS_BIG_ENDIAN)
4557 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4558 else
4559 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4560
4561 if (oldval == const0_rtx)
4562 x = gen_rtx_NE (DImode, dest, const0_rtx);
4563 else
4564 {
4565 x = gen_rtx_EQ (DImode, dest, oldval);
4566 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4567 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4568 }
4569 emit_unlikely_jump (x, label2);
4570
4571 if (WORDS_BIG_ENDIAN)
4572 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4573 else
4574 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4575 emit_insn (gen_iordi3 (scratch, scratch, newval));
4576
4577 emit_store_conditional (DImode, scratch, mem, scratch);
4578
4579 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4580 emit_unlikely_jump (x, label1);
4581
4582 emit_insn (gen_memory_barrier ());
4583 emit_label (XEXP (label2, 0));
4584 }
4585
4586 /* Expand an atomic exchange operation. */
4587
4588 void
4589 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4590 {
4591 enum machine_mode mode = GET_MODE (mem);
4592 rtx label, x, cond = gen_lowpart (DImode, scratch);
4593
4594 emit_insn (gen_memory_barrier ());
4595
4596 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 emit_label (XEXP (label, 0));
4598
4599 emit_load_locked (mode, retval, mem);
4600 emit_move_insn (scratch, val);
4601 emit_store_conditional (mode, cond, mem, scratch);
4602
4603 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4604 emit_unlikely_jump (x, label);
4605 }
4606
4607 void
4608 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4609 {
4610 enum machine_mode mode = GET_MODE (mem);
4611 rtx addr, align, wdst;
4612 rtx (*fn4) (rtx, rtx, rtx, rtx);
4613
4614 /* Force the address into a register. */
4615 addr = force_reg (DImode, XEXP (mem, 0));
4616
4617 /* Align it to a multiple of 8. */
4618 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4619 NULL_RTX, 1, OPTAB_DIRECT);
4620
4621 /* Insert val into the correct byte location within the word. */
4622 val = emit_insxl (mode, val, addr);
4623
4624 wdst = gen_reg_rtx (DImode);
4625 if (mode == QImode)
4626 fn4 = gen_sync_lock_test_and_setqi_1;
4627 else
4628 fn4 = gen_sync_lock_test_and_sethi_1;
4629 emit_insn (fn4 (wdst, addr, val, align));
4630
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4632 }
4633
4634 void
4635 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4636 rtx val, rtx align, rtx scratch)
4637 {
4638 rtx label, mem, width, mask, x;
4639
4640 mem = gen_rtx_MEM (DImode, align);
4641 MEM_VOLATILE_P (mem) = 1;
4642
4643 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4646
4647 emit_load_locked (DImode, scratch, mem);
4648
4649 width = GEN_INT (GET_MODE_BITSIZE (mode));
4650 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4651 if (WORDS_BIG_ENDIAN)
4652 {
4653 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4655 }
4656 else
4657 {
4658 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4659 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4660 }
4661 emit_insn (gen_iordi3 (scratch, scratch, val));
4662
4663 emit_store_conditional (DImode, scratch, mem, scratch);
4664
4665 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4666 emit_unlikely_jump (x, label);
4667 }
4668 \f
4669 /* Adjust the cost of a scheduling dependency. Return the new cost of
4670 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4671
4672 static int
4673 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4674 {
4675 enum attr_type insn_type, dep_insn_type;
4676
4677 /* If the dependence is an anti-dependence, there is no cost. For an
4678 output dependence, there is sometimes a cost, but it doesn't seem
4679 worth handling those few cases. */
4680 if (REG_NOTE_KIND (link) != 0)
4681 return cost;
4682
4683 /* If we can't recognize the insns, we can't really do anything. */
4684 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4685 return cost;
4686
4687 insn_type = get_attr_type (insn);
4688 dep_insn_type = get_attr_type (dep_insn);
4689
4690 /* Bring in the user-defined memory latency. */
4691 if (dep_insn_type == TYPE_ILD
4692 || dep_insn_type == TYPE_FLD
4693 || dep_insn_type == TYPE_LDSYM)
4694 cost += alpha_memory_latency-1;
4695
4696 /* Everything else handled in DFA bypasses now. */
4697
4698 return cost;
4699 }
4700
4701 /* The number of instructions that can be issued per cycle. */
4702
4703 static int
4704 alpha_issue_rate (void)
4705 {
4706 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4707 }
4708
4709 /* How many alternative schedules to try. This should be as wide as the
4710 scheduling freedom in the DFA, but no wider. Making this value too
4711 large results extra work for the scheduler.
4712
4713 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4714 alternative schedules. For EV5, we can choose between E0/E1 and
4715 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4716
4717 static int
4718 alpha_multipass_dfa_lookahead (void)
4719 {
4720 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4721 }
4722 \f
4723 /* Machine-specific function data. */
4724
4725 struct machine_function GTY(())
4726 {
4727 /* For unicosmk. */
4728 /* List of call information words for calls from this function. */
4729 struct rtx_def *first_ciw;
4730 struct rtx_def *last_ciw;
4731 int ciw_count;
4732
4733 /* List of deferred case vectors. */
4734 struct rtx_def *addr_list;
4735
4736 /* For OSF. */
4737 const char *some_ld_name;
4738
4739 /* For TARGET_LD_BUGGY_LDGP. */
4740 struct rtx_def *gp_save_rtx;
4741 };
4742
4743 /* How to allocate a 'struct machine_function'. */
4744
4745 static struct machine_function *
4746 alpha_init_machine_status (void)
4747 {
4748 return ((struct machine_function *)
4749 ggc_alloc_cleared (sizeof (struct machine_function)));
4750 }
4751
4752 /* Functions to save and restore alpha_return_addr_rtx. */
4753
4754 /* Start the ball rolling with RETURN_ADDR_RTX. */
4755
4756 rtx
4757 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4758 {
4759 if (count != 0)
4760 return const0_rtx;
4761
4762 return get_hard_reg_initial_val (Pmode, REG_RA);
4763 }
4764
4765 /* Return or create a memory slot containing the gp value for the current
4766 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4767
4768 rtx
4769 alpha_gp_save_rtx (void)
4770 {
4771 rtx seq, m = cfun->machine->gp_save_rtx;
4772
4773 if (m == NULL)
4774 {
4775 start_sequence ();
4776
4777 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778 m = validize_mem (m);
4779 emit_move_insn (m, pic_offset_table_rtx);
4780
4781 seq = get_insns ();
4782 end_sequence ();
4783 emit_insn_at_entry (seq);
4784
4785 cfun->machine->gp_save_rtx = m;
4786 }
4787
4788 return m;
4789 }
4790
4791 static int
4792 alpha_ra_ever_killed (void)
4793 {
4794 rtx top;
4795
4796 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4797 return (int)df_regs_ever_live_p (REG_RA);
4798
4799 push_topmost_sequence ();
4800 top = get_insns ();
4801 pop_topmost_sequence ();
4802
4803 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4804 }
4805
4806 \f
4807 /* Return the trap mode suffix applicable to the current
4808 instruction, or NULL. */
4809
4810 static const char *
4811 get_trap_mode_suffix (void)
4812 {
4813 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4814
4815 switch (s)
4816 {
4817 case TRAP_SUFFIX_NONE:
4818 return NULL;
4819
4820 case TRAP_SUFFIX_SU:
4821 if (alpha_fptm >= ALPHA_FPTM_SU)
4822 return "su";
4823 return NULL;
4824
4825 case TRAP_SUFFIX_SUI:
4826 if (alpha_fptm >= ALPHA_FPTM_SUI)
4827 return "sui";
4828 return NULL;
4829
4830 case TRAP_SUFFIX_V_SV:
4831 switch (alpha_fptm)
4832 {
4833 case ALPHA_FPTM_N:
4834 return NULL;
4835 case ALPHA_FPTM_U:
4836 return "v";
4837 case ALPHA_FPTM_SU:
4838 case ALPHA_FPTM_SUI:
4839 return "sv";
4840 default:
4841 gcc_unreachable ();
4842 }
4843
4844 case TRAP_SUFFIX_V_SV_SVI:
4845 switch (alpha_fptm)
4846 {
4847 case ALPHA_FPTM_N:
4848 return NULL;
4849 case ALPHA_FPTM_U:
4850 return "v";
4851 case ALPHA_FPTM_SU:
4852 return "sv";
4853 case ALPHA_FPTM_SUI:
4854 return "svi";
4855 default:
4856 gcc_unreachable ();
4857 }
4858 break;
4859
4860 case TRAP_SUFFIX_U_SU_SUI:
4861 switch (alpha_fptm)
4862 {
4863 case ALPHA_FPTM_N:
4864 return NULL;
4865 case ALPHA_FPTM_U:
4866 return "u";
4867 case ALPHA_FPTM_SU:
4868 return "su";
4869 case ALPHA_FPTM_SUI:
4870 return "sui";
4871 default:
4872 gcc_unreachable ();
4873 }
4874 break;
4875
4876 default:
4877 gcc_unreachable ();
4878 }
4879 gcc_unreachable ();
4880 }
4881
4882 /* Return the rounding mode suffix applicable to the current
4883 instruction, or NULL. */
4884
4885 static const char *
4886 get_round_mode_suffix (void)
4887 {
4888 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4889
4890 switch (s)
4891 {
4892 case ROUND_SUFFIX_NONE:
4893 return NULL;
4894 case ROUND_SUFFIX_NORMAL:
4895 switch (alpha_fprm)
4896 {
4897 case ALPHA_FPRM_NORM:
4898 return NULL;
4899 case ALPHA_FPRM_MINF:
4900 return "m";
4901 case ALPHA_FPRM_CHOP:
4902 return "c";
4903 case ALPHA_FPRM_DYN:
4904 return "d";
4905 default:
4906 gcc_unreachable ();
4907 }
4908 break;
4909
4910 case ROUND_SUFFIX_C:
4911 return "c";
4912
4913 default:
4914 gcc_unreachable ();
4915 }
4916 gcc_unreachable ();
4917 }
4918
4919 /* Locate some local-dynamic symbol still in use by this function
4920 so that we can print its name in some movdi_er_tlsldm pattern. */
4921
4922 static int
4923 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4924 {
4925 rtx x = *px;
4926
4927 if (GET_CODE (x) == SYMBOL_REF
4928 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4929 {
4930 cfun->machine->some_ld_name = XSTR (x, 0);
4931 return 1;
4932 }
4933
4934 return 0;
4935 }
4936
4937 static const char *
4938 get_some_local_dynamic_name (void)
4939 {
4940 rtx insn;
4941
4942 if (cfun->machine->some_ld_name)
4943 return cfun->machine->some_ld_name;
4944
4945 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4946 if (INSN_P (insn)
4947 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4948 return cfun->machine->some_ld_name;
4949
4950 gcc_unreachable ();
4951 }
4952
4953 /* Print an operand. Recognize special options, documented below. */
4954
4955 void
4956 print_operand (FILE *file, rtx x, int code)
4957 {
4958 int i;
4959
4960 switch (code)
4961 {
4962 case '~':
4963 /* Print the assembler name of the current function. */
4964 assemble_name (file, alpha_fnname);
4965 break;
4966
4967 case '&':
4968 assemble_name (file, get_some_local_dynamic_name ());
4969 break;
4970
4971 case '/':
4972 {
4973 const char *trap = get_trap_mode_suffix ();
4974 const char *round = get_round_mode_suffix ();
4975
4976 if (trap || round)
4977 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4978 (trap ? trap : ""), (round ? round : ""));
4979 break;
4980 }
4981
4982 case ',':
4983 /* Generates single precision instruction suffix. */
4984 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4985 break;
4986
4987 case '-':
4988 /* Generates double precision instruction suffix. */
4989 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4990 break;
4991
4992 case '#':
4993 if (alpha_this_literal_sequence_number == 0)
4994 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4995 fprintf (file, "%d", alpha_this_literal_sequence_number);
4996 break;
4997
4998 case '*':
4999 if (alpha_this_gpdisp_sequence_number == 0)
5000 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5001 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5002 break;
5003
5004 case 'H':
5005 if (GET_CODE (x) == HIGH)
5006 output_addr_const (file, XEXP (x, 0));
5007 else
5008 output_operand_lossage ("invalid %%H value");
5009 break;
5010
5011 case 'J':
5012 {
5013 const char *lituse;
5014
5015 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5016 {
5017 x = XVECEXP (x, 0, 0);
5018 lituse = "lituse_tlsgd";
5019 }
5020 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5021 {
5022 x = XVECEXP (x, 0, 0);
5023 lituse = "lituse_tlsldm";
5024 }
5025 else if (GET_CODE (x) == CONST_INT)
5026 lituse = "lituse_jsr";
5027 else
5028 {
5029 output_operand_lossage ("invalid %%J value");
5030 break;
5031 }
5032
5033 if (x != const0_rtx)
5034 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5035 }
5036 break;
5037
5038 case 'j':
5039 {
5040 const char *lituse;
5041
5042 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5043 lituse = "lituse_jsrdirect";
5044 #else
5045 lituse = "lituse_jsr";
5046 #endif
5047
5048 gcc_assert (INTVAL (x) != 0);
5049 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5050 }
5051 break;
5052 case 'r':
5053 /* If this operand is the constant zero, write it as "$31". */
5054 if (GET_CODE (x) == REG)
5055 fprintf (file, "%s", reg_names[REGNO (x)]);
5056 else if (x == CONST0_RTX (GET_MODE (x)))
5057 fprintf (file, "$31");
5058 else
5059 output_operand_lossage ("invalid %%r value");
5060 break;
5061
5062 case 'R':
5063 /* Similar, but for floating-point. */
5064 if (GET_CODE (x) == REG)
5065 fprintf (file, "%s", reg_names[REGNO (x)]);
5066 else if (x == CONST0_RTX (GET_MODE (x)))
5067 fprintf (file, "$f31");
5068 else
5069 output_operand_lossage ("invalid %%R value");
5070 break;
5071
5072 case 'N':
5073 /* Write the 1's complement of a constant. */
5074 if (GET_CODE (x) != CONST_INT)
5075 output_operand_lossage ("invalid %%N value");
5076
5077 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5078 break;
5079
5080 case 'P':
5081 /* Write 1 << C, for a constant C. */
5082 if (GET_CODE (x) != CONST_INT)
5083 output_operand_lossage ("invalid %%P value");
5084
5085 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5086 break;
5087
5088 case 'h':
5089 /* Write the high-order 16 bits of a constant, sign-extended. */
5090 if (GET_CODE (x) != CONST_INT)
5091 output_operand_lossage ("invalid %%h value");
5092
5093 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5094 break;
5095
5096 case 'L':
5097 /* Write the low-order 16 bits of a constant, sign-extended. */
5098 if (GET_CODE (x) != CONST_INT)
5099 output_operand_lossage ("invalid %%L value");
5100
5101 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5102 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5103 break;
5104
5105 case 'm':
5106 /* Write mask for ZAP insn. */
5107 if (GET_CODE (x) == CONST_DOUBLE)
5108 {
5109 HOST_WIDE_INT mask = 0;
5110 HOST_WIDE_INT value;
5111
5112 value = CONST_DOUBLE_LOW (x);
5113 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5114 i++, value >>= 8)
5115 if (value & 0xff)
5116 mask |= (1 << i);
5117
5118 value = CONST_DOUBLE_HIGH (x);
5119 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5120 i++, value >>= 8)
5121 if (value & 0xff)
5122 mask |= (1 << (i + sizeof (int)));
5123
5124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5125 }
5126
5127 else if (GET_CODE (x) == CONST_INT)
5128 {
5129 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5130
5131 for (i = 0; i < 8; i++, value >>= 8)
5132 if (value & 0xff)
5133 mask |= (1 << i);
5134
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5136 }
5137 else
5138 output_operand_lossage ("invalid %%m value");
5139 break;
5140
5141 case 'M':
5142 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5143 if (GET_CODE (x) != CONST_INT
5144 || (INTVAL (x) != 8 && INTVAL (x) != 16
5145 && INTVAL (x) != 32 && INTVAL (x) != 64))
5146 output_operand_lossage ("invalid %%M value");
5147
5148 fprintf (file, "%s",
5149 (INTVAL (x) == 8 ? "b"
5150 : INTVAL (x) == 16 ? "w"
5151 : INTVAL (x) == 32 ? "l"
5152 : "q"));
5153 break;
5154
5155 case 'U':
5156 /* Similar, except do it from the mask. */
5157 if (GET_CODE (x) == CONST_INT)
5158 {
5159 HOST_WIDE_INT value = INTVAL (x);
5160
5161 if (value == 0xff)
5162 {
5163 fputc ('b', file);
5164 break;
5165 }
5166 if (value == 0xffff)
5167 {
5168 fputc ('w', file);
5169 break;
5170 }
5171 if (value == 0xffffffff)
5172 {
5173 fputc ('l', file);
5174 break;
5175 }
5176 if (value == -1)
5177 {
5178 fputc ('q', file);
5179 break;
5180 }
5181 }
5182 else if (HOST_BITS_PER_WIDE_INT == 32
5183 && GET_CODE (x) == CONST_DOUBLE
5184 && CONST_DOUBLE_LOW (x) == 0xffffffff
5185 && CONST_DOUBLE_HIGH (x) == 0)
5186 {
5187 fputc ('l', file);
5188 break;
5189 }
5190 output_operand_lossage ("invalid %%U value");
5191 break;
5192
5193 case 's':
5194 /* Write the constant value divided by 8 for little-endian mode or
5195 (56 - value) / 8 for big-endian mode. */
5196
5197 if (GET_CODE (x) != CONST_INT
5198 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5199 ? 56
5200 : 64)
5201 || (INTVAL (x) & 7) != 0)
5202 output_operand_lossage ("invalid %%s value");
5203
5204 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5205 WORDS_BIG_ENDIAN
5206 ? (56 - INTVAL (x)) / 8
5207 : INTVAL (x) / 8);
5208 break;
5209
5210 case 'S':
5211 /* Same, except compute (64 - c) / 8 */
5212
5213 if (GET_CODE (x) != CONST_INT
5214 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5215 && (INTVAL (x) & 7) != 8)
5216 output_operand_lossage ("invalid %%s value");
5217
5218 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5219 break;
5220
5221 case 't':
5222 {
5223 /* On Unicos/Mk systems: use a DEX expression if the symbol
5224 clashes with a register name. */
5225 int dex = unicosmk_need_dex (x);
5226 if (dex)
5227 fprintf (file, "DEX(%d)", dex);
5228 else
5229 output_addr_const (file, x);
5230 }
5231 break;
5232
5233 case 'C': case 'D': case 'c': case 'd':
5234 /* Write out comparison name. */
5235 {
5236 enum rtx_code c = GET_CODE (x);
5237
5238 if (!COMPARISON_P (x))
5239 output_operand_lossage ("invalid %%C value");
5240
5241 else if (code == 'D')
5242 c = reverse_condition (c);
5243 else if (code == 'c')
5244 c = swap_condition (c);
5245 else if (code == 'd')
5246 c = swap_condition (reverse_condition (c));
5247
5248 if (c == LEU)
5249 fprintf (file, "ule");
5250 else if (c == LTU)
5251 fprintf (file, "ult");
5252 else if (c == UNORDERED)
5253 fprintf (file, "un");
5254 else
5255 fprintf (file, "%s", GET_RTX_NAME (c));
5256 }
5257 break;
5258
5259 case 'E':
5260 /* Write the divide or modulus operator. */
5261 switch (GET_CODE (x))
5262 {
5263 case DIV:
5264 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5265 break;
5266 case UDIV:
5267 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5268 break;
5269 case MOD:
5270 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5271 break;
5272 case UMOD:
5273 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5274 break;
5275 default:
5276 output_operand_lossage ("invalid %%E value");
5277 break;
5278 }
5279 break;
5280
5281 case 'A':
5282 /* Write "_u" for unaligned access. */
5283 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5284 fprintf (file, "_u");
5285 break;
5286
5287 case 0:
5288 if (GET_CODE (x) == REG)
5289 fprintf (file, "%s", reg_names[REGNO (x)]);
5290 else if (GET_CODE (x) == MEM)
5291 output_address (XEXP (x, 0));
5292 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5293 {
5294 switch (XINT (XEXP (x, 0), 1))
5295 {
5296 case UNSPEC_DTPREL:
5297 case UNSPEC_TPREL:
5298 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5299 break;
5300 default:
5301 output_operand_lossage ("unknown relocation unspec");
5302 break;
5303 }
5304 }
5305 else
5306 output_addr_const (file, x);
5307 break;
5308
5309 default:
5310 output_operand_lossage ("invalid %%xn code");
5311 }
5312 }
5313
5314 void
5315 print_operand_address (FILE *file, rtx addr)
5316 {
5317 int basereg = 31;
5318 HOST_WIDE_INT offset = 0;
5319
5320 if (GET_CODE (addr) == AND)
5321 addr = XEXP (addr, 0);
5322
5323 if (GET_CODE (addr) == PLUS
5324 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5325 {
5326 offset = INTVAL (XEXP (addr, 1));
5327 addr = XEXP (addr, 0);
5328 }
5329
5330 if (GET_CODE (addr) == LO_SUM)
5331 {
5332 const char *reloc16, *reloclo;
5333 rtx op1 = XEXP (addr, 1);
5334
5335 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5336 {
5337 op1 = XEXP (op1, 0);
5338 switch (XINT (op1, 1))
5339 {
5340 case UNSPEC_DTPREL:
5341 reloc16 = NULL;
5342 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5343 break;
5344 case UNSPEC_TPREL:
5345 reloc16 = NULL;
5346 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5347 break;
5348 default:
5349 output_operand_lossage ("unknown relocation unspec");
5350 return;
5351 }
5352
5353 output_addr_const (file, XVECEXP (op1, 0, 0));
5354 }
5355 else
5356 {
5357 reloc16 = "gprel";
5358 reloclo = "gprellow";
5359 output_addr_const (file, op1);
5360 }
5361
5362 if (offset)
5363 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5364
5365 addr = XEXP (addr, 0);
5366 switch (GET_CODE (addr))
5367 {
5368 case REG:
5369 basereg = REGNO (addr);
5370 break;
5371
5372 case SUBREG:
5373 basereg = subreg_regno (addr);
5374 break;
5375
5376 default:
5377 gcc_unreachable ();
5378 }
5379
5380 fprintf (file, "($%d)\t\t!%s", basereg,
5381 (basereg == 29 ? reloc16 : reloclo));
5382 return;
5383 }
5384
5385 switch (GET_CODE (addr))
5386 {
5387 case REG:
5388 basereg = REGNO (addr);
5389 break;
5390
5391 case SUBREG:
5392 basereg = subreg_regno (addr);
5393 break;
5394
5395 case CONST_INT:
5396 offset = INTVAL (addr);
5397 break;
5398
5399 #if TARGET_ABI_OPEN_VMS
5400 case SYMBOL_REF:
5401 fprintf (file, "%s", XSTR (addr, 0));
5402 return;
5403
5404 case CONST:
5405 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5406 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5407 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5408 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5409 INTVAL (XEXP (XEXP (addr, 0), 1)));
5410 return;
5411
5412 #endif
5413 default:
5414 gcc_unreachable ();
5415 }
5416
5417 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5418 }
5419 \f
5420 /* Emit RTL insns to initialize the variable parts of a trampoline at
5421 TRAMP. FNADDR is an RTX for the address of the function's pure
5422 code. CXT is an RTX for the static chain value for the function.
5423
5424 The three offset parameters are for the individual template's
5425 layout. A JMPOFS < 0 indicates that the trampoline does not
5426 contain instructions at all.
5427
5428 We assume here that a function will be called many more times than
5429 its address is taken (e.g., it might be passed to qsort), so we
5430 take the trouble to initialize the "hint" field in the JMP insn.
5431 Note that the hint field is PC (new) + 4 * bits 13:0. */
5432
5433 void
5434 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5435 int fnofs, int cxtofs, int jmpofs)
5436 {
5437 rtx temp, temp1, addr;
5438 /* VMS really uses DImode pointers in memory at this point. */
5439 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5440
5441 #ifdef POINTERS_EXTEND_UNSIGNED
5442 fnaddr = convert_memory_address (mode, fnaddr);
5443 cxt = convert_memory_address (mode, cxt);
5444 #endif
5445
5446 /* Store function address and CXT. */
5447 addr = memory_address (mode, plus_constant (tramp, fnofs));
5448 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5449 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5450 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5451
5452 /* This has been disabled since the hint only has a 32k range, and in
5453 no existing OS is the stack within 32k of the text segment. */
5454 if (0 && jmpofs >= 0)
5455 {
5456 /* Compute hint value. */
5457 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5458 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5459 OPTAB_WIDEN);
5460 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5461 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5462 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5463 GEN_INT (0x3fff), 0);
5464
5465 /* Merge in the hint. */
5466 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5467 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5468 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5469 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5470 OPTAB_WIDEN);
5471 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5472 }
5473
5474 #ifdef ENABLE_EXECUTE_STACK
5475 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5476 0, VOIDmode, 1, tramp, Pmode);
5477 #endif
5478
5479 if (jmpofs >= 0)
5480 emit_insn (gen_imb ());
5481 }
5482 \f
5483 /* Determine where to put an argument to a function.
5484 Value is zero to push the argument on the stack,
5485 or a hard register in which to store the argument.
5486
5487 MODE is the argument's machine mode.
5488 TYPE is the data type of the argument (as a tree).
5489 This is null for libcalls where that information may
5490 not be available.
5491 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5492 the preceding args and about the function being called.
5493 NAMED is nonzero if this argument is a named parameter
5494 (otherwise it is an extra parameter matching an ellipsis).
5495
5496 On Alpha the first 6 words of args are normally in registers
5497 and the rest are pushed. */
5498
5499 rtx
5500 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5501 int named ATTRIBUTE_UNUSED)
5502 {
5503 int basereg;
5504 int num_args;
5505
5506 /* Don't get confused and pass small structures in FP registers. */
5507 if (type && AGGREGATE_TYPE_P (type))
5508 basereg = 16;
5509 else
5510 {
5511 #ifdef ENABLE_CHECKING
5512 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5513 values here. */
5514 gcc_assert (!COMPLEX_MODE_P (mode));
5515 #endif
5516
5517 /* Set up defaults for FP operands passed in FP registers, and
5518 integral operands passed in integer registers. */
5519 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5520 basereg = 32 + 16;
5521 else
5522 basereg = 16;
5523 }
5524
5525 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5526 the three platforms, so we can't avoid conditional compilation. */
5527 #if TARGET_ABI_OPEN_VMS
5528 {
5529 if (mode == VOIDmode)
5530 return alpha_arg_info_reg_val (cum);
5531
5532 num_args = cum.num_args;
5533 if (num_args >= 6
5534 || targetm.calls.must_pass_in_stack (mode, type))
5535 return NULL_RTX;
5536 }
5537 #elif TARGET_ABI_UNICOSMK
5538 {
5539 int size;
5540
5541 /* If this is the last argument, generate the call info word (CIW). */
5542 /* ??? We don't include the caller's line number in the CIW because
5543 I don't know how to determine it if debug infos are turned off. */
5544 if (mode == VOIDmode)
5545 {
5546 int i;
5547 HOST_WIDE_INT lo;
5548 HOST_WIDE_INT hi;
5549 rtx ciw;
5550
5551 lo = 0;
5552
5553 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5554 if (cum.reg_args_type[i])
5555 lo |= (1 << (7 - i));
5556
5557 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5558 lo |= 7;
5559 else
5560 lo |= cum.num_reg_words;
5561
5562 #if HOST_BITS_PER_WIDE_INT == 32
5563 hi = (cum.num_args << 20) | cum.num_arg_words;
5564 #else
5565 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5566 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5567 hi = 0;
5568 #endif
5569 ciw = immed_double_const (lo, hi, DImode);
5570
5571 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5572 UNSPEC_UMK_LOAD_CIW);
5573 }
5574
5575 size = ALPHA_ARG_SIZE (mode, type, named);
5576 num_args = cum.num_reg_words;
5577 if (cum.force_stack
5578 || cum.num_reg_words + size > 6
5579 || targetm.calls.must_pass_in_stack (mode, type))
5580 return NULL_RTX;
5581 else if (type && TYPE_MODE (type) == BLKmode)
5582 {
5583 rtx reg1, reg2;
5584
5585 reg1 = gen_rtx_REG (DImode, num_args + 16);
5586 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5587
5588 /* The argument fits in two registers. Note that we still need to
5589 reserve a register for empty structures. */
5590 if (size == 0)
5591 return NULL_RTX;
5592 else if (size == 1)
5593 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5594 else
5595 {
5596 reg2 = gen_rtx_REG (DImode, num_args + 17);
5597 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5598 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5599 }
5600 }
5601 }
5602 #elif TARGET_ABI_OSF
5603 {
5604 if (cum >= 6)
5605 return NULL_RTX;
5606 num_args = cum;
5607
5608 /* VOID is passed as a special flag for "last argument". */
5609 if (type == void_type_node)
5610 basereg = 16;
5611 else if (targetm.calls.must_pass_in_stack (mode, type))
5612 return NULL_RTX;
5613 }
5614 #else
5615 #error Unhandled ABI
5616 #endif
5617
5618 return gen_rtx_REG (mode, num_args + basereg);
5619 }
5620
5621 static int
5622 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5623 enum machine_mode mode ATTRIBUTE_UNUSED,
5624 tree type ATTRIBUTE_UNUSED,
5625 bool named ATTRIBUTE_UNUSED)
5626 {
5627 int words = 0;
5628
5629 #if TARGET_ABI_OPEN_VMS
5630 if (cum->num_args < 6
5631 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5632 words = 6 - cum->num_args;
5633 #elif TARGET_ABI_UNICOSMK
5634 /* Never any split arguments. */
5635 #elif TARGET_ABI_OSF
5636 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5637 words = 6 - *cum;
5638 #else
5639 #error Unhandled ABI
5640 #endif
5641
5642 return words * UNITS_PER_WORD;
5643 }
5644
5645
5646 /* Return true if TYPE must be returned in memory, instead of in registers. */
5647
5648 static bool
5649 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5650 {
5651 enum machine_mode mode = VOIDmode;
5652 int size;
5653
5654 if (type)
5655 {
5656 mode = TYPE_MODE (type);
5657
5658 /* All aggregates are returned in memory. */
5659 if (AGGREGATE_TYPE_P (type))
5660 return true;
5661 }
5662
5663 size = GET_MODE_SIZE (mode);
5664 switch (GET_MODE_CLASS (mode))
5665 {
5666 case MODE_VECTOR_FLOAT:
5667 /* Pass all float vectors in memory, like an aggregate. */
5668 return true;
5669
5670 case MODE_COMPLEX_FLOAT:
5671 /* We judge complex floats on the size of their element,
5672 not the size of the whole type. */
5673 size = GET_MODE_UNIT_SIZE (mode);
5674 break;
5675
5676 case MODE_INT:
5677 case MODE_FLOAT:
5678 case MODE_COMPLEX_INT:
5679 case MODE_VECTOR_INT:
5680 break;
5681
5682 default:
5683 /* ??? We get called on all sorts of random stuff from
5684 aggregate_value_p. We must return something, but it's not
5685 clear what's safe to return. Pretend it's a struct I
5686 guess. */
5687 return true;
5688 }
5689
5690 /* Otherwise types must fit in one register. */
5691 return size > UNITS_PER_WORD;
5692 }
5693
5694 /* Return true if TYPE should be passed by invisible reference. */
5695
5696 static bool
5697 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5698 enum machine_mode mode,
5699 const_tree type ATTRIBUTE_UNUSED,
5700 bool named ATTRIBUTE_UNUSED)
5701 {
5702 return mode == TFmode || mode == TCmode;
5703 }
5704
5705 /* Define how to find the value returned by a function. VALTYPE is the
5706 data type of the value (as a tree). If the precise function being
5707 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5708 MODE is set instead of VALTYPE for libcalls.
5709
5710 On Alpha the value is found in $0 for integer functions and
5711 $f0 for floating-point functions. */
5712
5713 rtx
5714 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5715 enum machine_mode mode)
5716 {
5717 unsigned int regnum, dummy;
5718 enum mode_class class;
5719
5720 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5721
5722 if (valtype)
5723 mode = TYPE_MODE (valtype);
5724
5725 class = GET_MODE_CLASS (mode);
5726 switch (class)
5727 {
5728 case MODE_INT:
5729 PROMOTE_MODE (mode, dummy, valtype);
5730 /* FALLTHRU */
5731
5732 case MODE_COMPLEX_INT:
5733 case MODE_VECTOR_INT:
5734 regnum = 0;
5735 break;
5736
5737 case MODE_FLOAT:
5738 regnum = 32;
5739 break;
5740
5741 case MODE_COMPLEX_FLOAT:
5742 {
5743 enum machine_mode cmode = GET_MODE_INNER (mode);
5744
5745 return gen_rtx_PARALLEL
5746 (VOIDmode,
5747 gen_rtvec (2,
5748 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5749 const0_rtx),
5750 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5751 GEN_INT (GET_MODE_SIZE (cmode)))));
5752 }
5753
5754 default:
5755 gcc_unreachable ();
5756 }
5757
5758 return gen_rtx_REG (mode, regnum);
5759 }
5760
5761 /* TCmode complex values are passed by invisible reference. We
5762 should not split these values. */
5763
5764 static bool
5765 alpha_split_complex_arg (const_tree type)
5766 {
5767 return TYPE_MODE (type) != TCmode;
5768 }
5769
5770 static tree
5771 alpha_build_builtin_va_list (void)
5772 {
5773 tree base, ofs, space, record, type_decl;
5774
5775 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5776 return ptr_type_node;
5777
5778 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5779 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5780 TREE_CHAIN (record) = type_decl;
5781 TYPE_NAME (record) = type_decl;
5782
5783 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5784
5785 /* Dummy field to prevent alignment warnings. */
5786 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5787 DECL_FIELD_CONTEXT (space) = record;
5788 DECL_ARTIFICIAL (space) = 1;
5789 DECL_IGNORED_P (space) = 1;
5790
5791 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5792 integer_type_node);
5793 DECL_FIELD_CONTEXT (ofs) = record;
5794 TREE_CHAIN (ofs) = space;
5795
5796 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5797 ptr_type_node);
5798 DECL_FIELD_CONTEXT (base) = record;
5799 TREE_CHAIN (base) = ofs;
5800
5801 TYPE_FIELDS (record) = base;
5802 layout_type (record);
5803
5804 va_list_gpr_counter_field = ofs;
5805 return record;
5806 }
5807
5808 #if TARGET_ABI_OSF
5809 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5810 and constant additions. */
5811
5812 static tree
5813 va_list_skip_additions (tree lhs)
5814 {
5815 tree rhs, stmt;
5816
5817 if (TREE_CODE (lhs) != SSA_NAME)
5818 return lhs;
5819
5820 for (;;)
5821 {
5822 stmt = SSA_NAME_DEF_STMT (lhs);
5823
5824 if (TREE_CODE (stmt) == PHI_NODE)
5825 return stmt;
5826
5827 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5828 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5829 return lhs;
5830
5831 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5832 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5833 rhs = TREE_OPERAND (rhs, 0);
5834
5835 if ((TREE_CODE (rhs) != NOP_EXPR
5836 && TREE_CODE (rhs) != CONVERT_EXPR
5837 && ((TREE_CODE (rhs) != PLUS_EXPR
5838 && TREE_CODE (rhs) != POINTER_PLUS_EXPR)
5839 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5840 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5841 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5842 return rhs;
5843
5844 lhs = TREE_OPERAND (rhs, 0);
5845 }
5846 }
5847
5848 /* Check if LHS = RHS statement is
5849 LHS = *(ap.__base + ap.__offset + cst)
5850 or
5851 LHS = *(ap.__base
5852 + ((ap.__offset + cst <= 47)
5853 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5854 If the former, indicate that GPR registers are needed,
5855 if the latter, indicate that FPR registers are needed.
5856
5857 Also look for LHS = (*ptr).field, where ptr is one of the forms
5858 listed above.
5859
5860 On alpha, cfun->va_list_gpr_size is used as size of the needed
5861 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5862 registers are needed and bit 1 set if FPR registers are needed.
5863 Return true if va_list references should not be scanned for the
5864 current statement. */
5865
5866 static bool
5867 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree rhs)
5868 {
5869 tree base, offset, arg1, arg2;
5870 int offset_arg = 1;
5871
5872 while (handled_component_p (rhs))
5873 rhs = TREE_OPERAND (rhs, 0);
5874 if (TREE_CODE (rhs) != INDIRECT_REF
5875 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5876 return false;
5877
5878 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5879 if (lhs == NULL_TREE
5880 || TREE_CODE (lhs) != POINTER_PLUS_EXPR)
5881 return false;
5882
5883 base = TREE_OPERAND (lhs, 0);
5884 if (TREE_CODE (base) == SSA_NAME)
5885 base = va_list_skip_additions (base);
5886
5887 if (TREE_CODE (base) != COMPONENT_REF
5888 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5889 {
5890 base = TREE_OPERAND (lhs, 0);
5891 if (TREE_CODE (base) == SSA_NAME)
5892 base = va_list_skip_additions (base);
5893
5894 if (TREE_CODE (base) != COMPONENT_REF
5895 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5896 return false;
5897
5898 offset_arg = 0;
5899 }
5900
5901 base = get_base_address (base);
5902 if (TREE_CODE (base) != VAR_DECL
5903 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5904 return false;
5905
5906 offset = TREE_OPERAND (lhs, offset_arg);
5907 if (TREE_CODE (offset) == SSA_NAME)
5908 offset = va_list_skip_additions (offset);
5909
5910 if (TREE_CODE (offset) == PHI_NODE)
5911 {
5912 HOST_WIDE_INT sub;
5913
5914 if (PHI_NUM_ARGS (offset) != 2)
5915 goto escapes;
5916
5917 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5918 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5919 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5920 {
5921 tree tem = arg1;
5922 arg1 = arg2;
5923 arg2 = tem;
5924
5925 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5926 goto escapes;
5927 }
5928 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5929 goto escapes;
5930
5931 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5932 if (TREE_CODE (arg2) == MINUS_EXPR)
5933 sub = -sub;
5934 if (sub < -48 || sub > -32)
5935 goto escapes;
5936
5937 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5938 if (arg1 != arg2)
5939 goto escapes;
5940
5941 if (TREE_CODE (arg1) == SSA_NAME)
5942 arg1 = va_list_skip_additions (arg1);
5943
5944 if (TREE_CODE (arg1) != COMPONENT_REF
5945 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5946 || get_base_address (arg1) != base)
5947 goto escapes;
5948
5949 /* Need floating point regs. */
5950 cfun->va_list_fpr_size |= 2;
5951 }
5952 else if (TREE_CODE (offset) != COMPONENT_REF
5953 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5954 || get_base_address (offset) != base)
5955 goto escapes;
5956 else
5957 /* Need general regs. */
5958 cfun->va_list_fpr_size |= 1;
5959 return false;
5960
5961 escapes:
5962 si->va_list_escapes = true;
5963 return false;
5964 }
5965 #endif
5966
5967 /* Perform any needed actions needed for a function that is receiving a
5968 variable number of arguments. */
5969
5970 static void
5971 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5972 tree type, int *pretend_size, int no_rtl)
5973 {
5974 CUMULATIVE_ARGS cum = *pcum;
5975
5976 /* Skip the current argument. */
5977 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5978
5979 #if TARGET_ABI_UNICOSMK
5980 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5981 arguments on the stack. Unfortunately, it doesn't always store the first
5982 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5983 with stdargs as we always have at least one named argument there. */
5984 if (cum.num_reg_words < 6)
5985 {
5986 if (!no_rtl)
5987 {
5988 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5989 emit_insn (gen_arg_home_umk ());
5990 }
5991 *pretend_size = 0;
5992 }
5993 #elif TARGET_ABI_OPEN_VMS
5994 /* For VMS, we allocate space for all 6 arg registers plus a count.
5995
5996 However, if NO registers need to be saved, don't allocate any space.
5997 This is not only because we won't need the space, but because AP
5998 includes the current_pretend_args_size and we don't want to mess up
5999 any ap-relative addresses already made. */
6000 if (cum.num_args < 6)
6001 {
6002 if (!no_rtl)
6003 {
6004 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6005 emit_insn (gen_arg_home ());
6006 }
6007 *pretend_size = 7 * UNITS_PER_WORD;
6008 }
6009 #else
6010 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6011 only push those that are remaining. However, if NO registers need to
6012 be saved, don't allocate any space. This is not only because we won't
6013 need the space, but because AP includes the current_pretend_args_size
6014 and we don't want to mess up any ap-relative addresses already made.
6015
6016 If we are not to use the floating-point registers, save the integer
6017 registers where we would put the floating-point registers. This is
6018 not the most efficient way to implement varargs with just one register
6019 class, but it isn't worth doing anything more efficient in this rare
6020 case. */
6021 if (cum >= 6)
6022 return;
6023
6024 if (!no_rtl)
6025 {
6026 int count;
6027 alias_set_type set = get_varargs_alias_set ();
6028 rtx tmp;
6029
6030 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6031 if (count > 6 - cum)
6032 count = 6 - cum;
6033
6034 /* Detect whether integer registers or floating-point registers
6035 are needed by the detected va_arg statements. See above for
6036 how these values are computed. Note that the "escape" value
6037 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6038 these bits set. */
6039 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6040
6041 if (cfun->va_list_fpr_size & 1)
6042 {
6043 tmp = gen_rtx_MEM (BLKmode,
6044 plus_constant (virtual_incoming_args_rtx,
6045 (cum + 6) * UNITS_PER_WORD));
6046 MEM_NOTRAP_P (tmp) = 1;
6047 set_mem_alias_set (tmp, set);
6048 move_block_from_reg (16 + cum, tmp, count);
6049 }
6050
6051 if (cfun->va_list_fpr_size & 2)
6052 {
6053 tmp = gen_rtx_MEM (BLKmode,
6054 plus_constant (virtual_incoming_args_rtx,
6055 cum * UNITS_PER_WORD));
6056 MEM_NOTRAP_P (tmp) = 1;
6057 set_mem_alias_set (tmp, set);
6058 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6059 }
6060 }
6061 *pretend_size = 12 * UNITS_PER_WORD;
6062 #endif
6063 }
6064
6065 void
6066 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6067 {
6068 HOST_WIDE_INT offset;
6069 tree t, offset_field, base_field;
6070
6071 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6072 return;
6073
6074 if (TARGET_ABI_UNICOSMK)
6075 std_expand_builtin_va_start (valist, nextarg);
6076
6077 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6078 up by 48, storing fp arg registers in the first 48 bytes, and the
6079 integer arg registers in the next 48 bytes. This is only done,
6080 however, if any integer registers need to be stored.
6081
6082 If no integer registers need be stored, then we must subtract 48
6083 in order to account for the integer arg registers which are counted
6084 in argsize above, but which are not actually stored on the stack.
6085 Must further be careful here about structures straddling the last
6086 integer argument register; that futzes with pretend_args_size,
6087 which changes the meaning of AP. */
6088
6089 if (NUM_ARGS < 6)
6090 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6091 else
6092 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6093
6094 if (TARGET_ABI_OPEN_VMS)
6095 {
6096 nextarg = plus_constant (nextarg, offset);
6097 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6098 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6099 make_tree (ptr_type_node, nextarg));
6100 TREE_SIDE_EFFECTS (t) = 1;
6101
6102 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6103 }
6104 else
6105 {
6106 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6107 offset_field = TREE_CHAIN (base_field);
6108
6109 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6110 valist, base_field, NULL_TREE);
6111 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6112 valist, offset_field, NULL_TREE);
6113
6114 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6115 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6116 size_int (offset));
6117 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6118 TREE_SIDE_EFFECTS (t) = 1;
6119 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6120
6121 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6122 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6123 offset_field, t);
6124 TREE_SIDE_EFFECTS (t) = 1;
6125 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6126 }
6127 }
6128
6129 static tree
6130 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6131 {
6132 tree type_size, ptr_type, addend, t, addr, internal_post;
6133
6134 /* If the type could not be passed in registers, skip the block
6135 reserved for the registers. */
6136 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6137 {
6138 t = build_int_cst (TREE_TYPE (offset), 6*8);
6139 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6140 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6141 gimplify_and_add (t, pre_p);
6142 }
6143
6144 addend = offset;
6145 ptr_type = build_pointer_type (type);
6146
6147 if (TREE_CODE (type) == COMPLEX_TYPE)
6148 {
6149 tree real_part, imag_part, real_temp;
6150
6151 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6152 offset, pre_p);
6153
6154 /* Copy the value into a new temporary, lest the formal temporary
6155 be reused out from under us. */
6156 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6157
6158 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6159 offset, pre_p);
6160
6161 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6162 }
6163 else if (TREE_CODE (type) == REAL_TYPE)
6164 {
6165 tree fpaddend, cond, fourtyeight;
6166
6167 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6168 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6169 addend, fourtyeight);
6170 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6171 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6172 fpaddend, addend);
6173 }
6174
6175 /* Build the final address and force that value into a temporary. */
6176 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6177 fold_convert (sizetype, addend));
6178 internal_post = NULL;
6179 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6180 append_to_statement_list (internal_post, pre_p);
6181
6182 /* Update the offset field. */
6183 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6184 if (type_size == NULL || TREE_OVERFLOW (type_size))
6185 t = size_zero_node;
6186 else
6187 {
6188 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6189 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6190 t = size_binop (MULT_EXPR, t, size_int (8));
6191 }
6192 t = fold_convert (TREE_TYPE (offset), t);
6193 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6194 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6195 gimplify_and_add (t, pre_p);
6196
6197 return build_va_arg_indirect_ref (addr);
6198 }
6199
6200 static tree
6201 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6202 {
6203 tree offset_field, base_field, offset, base, t, r;
6204 bool indirect;
6205
6206 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6207 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6208
6209 base_field = TYPE_FIELDS (va_list_type_node);
6210 offset_field = TREE_CHAIN (base_field);
6211 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6212 valist, base_field, NULL_TREE);
6213 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6214 valist, offset_field, NULL_TREE);
6215
6216 /* Pull the fields of the structure out into temporaries. Since we never
6217 modify the base field, we can use a formal temporary. Sign-extend the
6218 offset field so that it's the proper width for pointer arithmetic. */
6219 base = get_formal_tmp_var (base_field, pre_p);
6220
6221 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6222 offset = get_initialized_tmp_var (t, pre_p, NULL);
6223
6224 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6225 if (indirect)
6226 type = build_pointer_type (type);
6227
6228 /* Find the value. Note that this will be a stable indirection, or
6229 a composite of stable indirections in the case of complex. */
6230 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6231
6232 /* Stuff the offset temporary back into its field. */
6233 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6234 fold_convert (TREE_TYPE (offset_field), offset));
6235 gimplify_and_add (t, pre_p);
6236
6237 if (indirect)
6238 r = build_va_arg_indirect_ref (r);
6239
6240 return r;
6241 }
6242 \f
6243 /* Builtins. */
6244
6245 enum alpha_builtin
6246 {
6247 ALPHA_BUILTIN_CMPBGE,
6248 ALPHA_BUILTIN_EXTBL,
6249 ALPHA_BUILTIN_EXTWL,
6250 ALPHA_BUILTIN_EXTLL,
6251 ALPHA_BUILTIN_EXTQL,
6252 ALPHA_BUILTIN_EXTWH,
6253 ALPHA_BUILTIN_EXTLH,
6254 ALPHA_BUILTIN_EXTQH,
6255 ALPHA_BUILTIN_INSBL,
6256 ALPHA_BUILTIN_INSWL,
6257 ALPHA_BUILTIN_INSLL,
6258 ALPHA_BUILTIN_INSQL,
6259 ALPHA_BUILTIN_INSWH,
6260 ALPHA_BUILTIN_INSLH,
6261 ALPHA_BUILTIN_INSQH,
6262 ALPHA_BUILTIN_MSKBL,
6263 ALPHA_BUILTIN_MSKWL,
6264 ALPHA_BUILTIN_MSKLL,
6265 ALPHA_BUILTIN_MSKQL,
6266 ALPHA_BUILTIN_MSKWH,
6267 ALPHA_BUILTIN_MSKLH,
6268 ALPHA_BUILTIN_MSKQH,
6269 ALPHA_BUILTIN_UMULH,
6270 ALPHA_BUILTIN_ZAP,
6271 ALPHA_BUILTIN_ZAPNOT,
6272 ALPHA_BUILTIN_AMASK,
6273 ALPHA_BUILTIN_IMPLVER,
6274 ALPHA_BUILTIN_RPCC,
6275 ALPHA_BUILTIN_THREAD_POINTER,
6276 ALPHA_BUILTIN_SET_THREAD_POINTER,
6277
6278 /* TARGET_MAX */
6279 ALPHA_BUILTIN_MINUB8,
6280 ALPHA_BUILTIN_MINSB8,
6281 ALPHA_BUILTIN_MINUW4,
6282 ALPHA_BUILTIN_MINSW4,
6283 ALPHA_BUILTIN_MAXUB8,
6284 ALPHA_BUILTIN_MAXSB8,
6285 ALPHA_BUILTIN_MAXUW4,
6286 ALPHA_BUILTIN_MAXSW4,
6287 ALPHA_BUILTIN_PERR,
6288 ALPHA_BUILTIN_PKLB,
6289 ALPHA_BUILTIN_PKWB,
6290 ALPHA_BUILTIN_UNPKBL,
6291 ALPHA_BUILTIN_UNPKBW,
6292
6293 /* TARGET_CIX */
6294 ALPHA_BUILTIN_CTTZ,
6295 ALPHA_BUILTIN_CTLZ,
6296 ALPHA_BUILTIN_CTPOP,
6297
6298 ALPHA_BUILTIN_max
6299 };
6300
6301 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6302 CODE_FOR_builtin_cmpbge,
6303 CODE_FOR_builtin_extbl,
6304 CODE_FOR_builtin_extwl,
6305 CODE_FOR_builtin_extll,
6306 CODE_FOR_builtin_extql,
6307 CODE_FOR_builtin_extwh,
6308 CODE_FOR_builtin_extlh,
6309 CODE_FOR_builtin_extqh,
6310 CODE_FOR_builtin_insbl,
6311 CODE_FOR_builtin_inswl,
6312 CODE_FOR_builtin_insll,
6313 CODE_FOR_builtin_insql,
6314 CODE_FOR_builtin_inswh,
6315 CODE_FOR_builtin_inslh,
6316 CODE_FOR_builtin_insqh,
6317 CODE_FOR_builtin_mskbl,
6318 CODE_FOR_builtin_mskwl,
6319 CODE_FOR_builtin_mskll,
6320 CODE_FOR_builtin_mskql,
6321 CODE_FOR_builtin_mskwh,
6322 CODE_FOR_builtin_msklh,
6323 CODE_FOR_builtin_mskqh,
6324 CODE_FOR_umuldi3_highpart,
6325 CODE_FOR_builtin_zap,
6326 CODE_FOR_builtin_zapnot,
6327 CODE_FOR_builtin_amask,
6328 CODE_FOR_builtin_implver,
6329 CODE_FOR_builtin_rpcc,
6330 CODE_FOR_load_tp,
6331 CODE_FOR_set_tp,
6332
6333 /* TARGET_MAX */
6334 CODE_FOR_builtin_minub8,
6335 CODE_FOR_builtin_minsb8,
6336 CODE_FOR_builtin_minuw4,
6337 CODE_FOR_builtin_minsw4,
6338 CODE_FOR_builtin_maxub8,
6339 CODE_FOR_builtin_maxsb8,
6340 CODE_FOR_builtin_maxuw4,
6341 CODE_FOR_builtin_maxsw4,
6342 CODE_FOR_builtin_perr,
6343 CODE_FOR_builtin_pklb,
6344 CODE_FOR_builtin_pkwb,
6345 CODE_FOR_builtin_unpkbl,
6346 CODE_FOR_builtin_unpkbw,
6347
6348 /* TARGET_CIX */
6349 CODE_FOR_ctzdi2,
6350 CODE_FOR_clzdi2,
6351 CODE_FOR_popcountdi2
6352 };
6353
6354 struct alpha_builtin_def
6355 {
6356 const char *name;
6357 enum alpha_builtin code;
6358 unsigned int target_mask;
6359 bool is_const;
6360 };
6361
6362 static struct alpha_builtin_def const zero_arg_builtins[] = {
6363 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6364 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6365 };
6366
6367 static struct alpha_builtin_def const one_arg_builtins[] = {
6368 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6369 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6370 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6371 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6372 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6373 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6374 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6375 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6376 };
6377
6378 static struct alpha_builtin_def const two_arg_builtins[] = {
6379 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6380 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6381 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6382 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6383 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6384 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6385 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6386 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6387 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6388 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6389 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6390 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6391 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6392 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6393 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6394 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6395 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6396 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6397 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6398 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6399 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6400 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6401 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6402 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6403 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6404 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6405 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6406 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6407 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6408 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6409 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6410 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6411 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6412 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6413 };
6414
6415 static GTY(()) tree alpha_v8qi_u;
6416 static GTY(()) tree alpha_v8qi_s;
6417 static GTY(()) tree alpha_v4hi_u;
6418 static GTY(()) tree alpha_v4hi_s;
6419
6420 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6421 functions pointed to by P, with function type FTYPE. */
6422
6423 static void
6424 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6425 tree ftype)
6426 {
6427 tree decl;
6428 size_t i;
6429
6430 for (i = 0; i < count; ++i, ++p)
6431 if ((target_flags & p->target_mask) == p->target_mask)
6432 {
6433 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6434 NULL, NULL);
6435 if (p->is_const)
6436 TREE_READONLY (decl) = 1;
6437 TREE_NOTHROW (decl) = 1;
6438 }
6439 }
6440
6441
6442 static void
6443 alpha_init_builtins (void)
6444 {
6445 tree dimode_integer_type_node;
6446 tree ftype, decl;
6447
6448 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6449
6450 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6451 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6452 ftype);
6453
6454 ftype = build_function_type_list (dimode_integer_type_node,
6455 dimode_integer_type_node, NULL_TREE);
6456 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6457 ftype);
6458
6459 ftype = build_function_type_list (dimode_integer_type_node,
6460 dimode_integer_type_node,
6461 dimode_integer_type_node, NULL_TREE);
6462 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6463 ftype);
6464
6465 ftype = build_function_type (ptr_type_node, void_list_node);
6466 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6467 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6468 NULL, NULL);
6469 TREE_NOTHROW (decl) = 1;
6470
6471 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6472 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6473 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6474 NULL, NULL);
6475 TREE_NOTHROW (decl) = 1;
6476
6477 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6478 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6479 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6480 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6481 }
6482
6483 /* Expand an expression EXP that calls a built-in function,
6484 with result going to TARGET if that's convenient
6485 (and in mode MODE if that's convenient).
6486 SUBTARGET may be used as the target for computing one of EXP's operands.
6487 IGNORE is nonzero if the value is to be ignored. */
6488
6489 static rtx
6490 alpha_expand_builtin (tree exp, rtx target,
6491 rtx subtarget ATTRIBUTE_UNUSED,
6492 enum machine_mode mode ATTRIBUTE_UNUSED,
6493 int ignore ATTRIBUTE_UNUSED)
6494 {
6495 #define MAX_ARGS 2
6496
6497 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6498 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6499 tree arg;
6500 call_expr_arg_iterator iter;
6501 enum insn_code icode;
6502 rtx op[MAX_ARGS], pat;
6503 int arity;
6504 bool nonvoid;
6505
6506 if (fcode >= ALPHA_BUILTIN_max)
6507 internal_error ("bad builtin fcode");
6508 icode = code_for_builtin[fcode];
6509 if (icode == 0)
6510 internal_error ("bad builtin fcode");
6511
6512 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6513
6514 arity = 0;
6515 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6516 {
6517 const struct insn_operand_data *insn_op;
6518
6519 if (arg == error_mark_node)
6520 return NULL_RTX;
6521 if (arity > MAX_ARGS)
6522 return NULL_RTX;
6523
6524 insn_op = &insn_data[icode].operand[arity + nonvoid];
6525
6526 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6527
6528 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6529 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6530 arity++;
6531 }
6532
6533 if (nonvoid)
6534 {
6535 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6536 if (!target
6537 || GET_MODE (target) != tmode
6538 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6539 target = gen_reg_rtx (tmode);
6540 }
6541
6542 switch (arity)
6543 {
6544 case 0:
6545 pat = GEN_FCN (icode) (target);
6546 break;
6547 case 1:
6548 if (nonvoid)
6549 pat = GEN_FCN (icode) (target, op[0]);
6550 else
6551 pat = GEN_FCN (icode) (op[0]);
6552 break;
6553 case 2:
6554 pat = GEN_FCN (icode) (target, op[0], op[1]);
6555 break;
6556 default:
6557 gcc_unreachable ();
6558 }
6559 if (!pat)
6560 return NULL_RTX;
6561 emit_insn (pat);
6562
6563 if (nonvoid)
6564 return target;
6565 else
6566 return const0_rtx;
6567 }
6568
6569
6570 /* Several bits below assume HWI >= 64 bits. This should be enforced
6571 by config.gcc. */
6572 #if HOST_BITS_PER_WIDE_INT < 64
6573 # error "HOST_WIDE_INT too small"
6574 #endif
6575
6576 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6577 with an 8-bit output vector. OPINT contains the integer operands; bit N
6578 of OP_CONST is set if OPINT[N] is valid. */
6579
6580 static tree
6581 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6582 {
6583 if (op_const == 3)
6584 {
6585 int i, val;
6586 for (i = 0, val = 0; i < 8; ++i)
6587 {
6588 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6589 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6590 if (c0 >= c1)
6591 val |= 1 << i;
6592 }
6593 return build_int_cst (long_integer_type_node, val);
6594 }
6595 else if (op_const == 2 && opint[1] == 0)
6596 return build_int_cst (long_integer_type_node, 0xff);
6597 return NULL;
6598 }
6599
6600 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6601 specialized form of an AND operation. Other byte manipulation instructions
6602 are defined in terms of this instruction, so this is also used as a
6603 subroutine for other builtins.
6604
6605 OP contains the tree operands; OPINT contains the extracted integer values.
6606 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6607 OPINT may be considered. */
6608
6609 static tree
6610 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6611 long op_const)
6612 {
6613 if (op_const & 2)
6614 {
6615 unsigned HOST_WIDE_INT mask = 0;
6616 int i;
6617
6618 for (i = 0; i < 8; ++i)
6619 if ((opint[1] >> i) & 1)
6620 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6621
6622 if (op_const & 1)
6623 return build_int_cst (long_integer_type_node, opint[0] & mask);
6624
6625 if (op)
6626 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6627 build_int_cst (long_integer_type_node, mask));
6628 }
6629 else if ((op_const & 1) && opint[0] == 0)
6630 return build_int_cst (long_integer_type_node, 0);
6631 return NULL;
6632 }
6633
6634 /* Fold the builtins for the EXT family of instructions. */
6635
6636 static tree
6637 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6638 long op_const, unsigned HOST_WIDE_INT bytemask,
6639 bool is_high)
6640 {
6641 long zap_const = 2;
6642 tree *zap_op = NULL;
6643
6644 if (op_const & 2)
6645 {
6646 unsigned HOST_WIDE_INT loc;
6647
6648 loc = opint[1] & 7;
6649 if (BYTES_BIG_ENDIAN)
6650 loc ^= 7;
6651 loc *= 8;
6652
6653 if (loc != 0)
6654 {
6655 if (op_const & 1)
6656 {
6657 unsigned HOST_WIDE_INT temp = opint[0];
6658 if (is_high)
6659 temp <<= loc;
6660 else
6661 temp >>= loc;
6662 opint[0] = temp;
6663 zap_const = 3;
6664 }
6665 }
6666 else
6667 zap_op = op;
6668 }
6669
6670 opint[1] = bytemask;
6671 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6672 }
6673
6674 /* Fold the builtins for the INS family of instructions. */
6675
6676 static tree
6677 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6678 long op_const, unsigned HOST_WIDE_INT bytemask,
6679 bool is_high)
6680 {
6681 if ((op_const & 1) && opint[0] == 0)
6682 return build_int_cst (long_integer_type_node, 0);
6683
6684 if (op_const & 2)
6685 {
6686 unsigned HOST_WIDE_INT temp, loc, byteloc;
6687 tree *zap_op = NULL;
6688
6689 loc = opint[1] & 7;
6690 if (BYTES_BIG_ENDIAN)
6691 loc ^= 7;
6692 bytemask <<= loc;
6693
6694 temp = opint[0];
6695 if (is_high)
6696 {
6697 byteloc = (64 - (loc * 8)) & 0x3f;
6698 if (byteloc == 0)
6699 zap_op = op;
6700 else
6701 temp >>= byteloc;
6702 bytemask >>= 8;
6703 }
6704 else
6705 {
6706 byteloc = loc * 8;
6707 if (byteloc == 0)
6708 zap_op = op;
6709 else
6710 temp <<= byteloc;
6711 }
6712
6713 opint[0] = temp;
6714 opint[1] = bytemask;
6715 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6716 }
6717
6718 return NULL;
6719 }
6720
6721 static tree
6722 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6723 long op_const, unsigned HOST_WIDE_INT bytemask,
6724 bool is_high)
6725 {
6726 if (op_const & 2)
6727 {
6728 unsigned HOST_WIDE_INT loc;
6729
6730 loc = opint[1] & 7;
6731 if (BYTES_BIG_ENDIAN)
6732 loc ^= 7;
6733 bytemask <<= loc;
6734
6735 if (is_high)
6736 bytemask >>= 8;
6737
6738 opint[1] = bytemask ^ 0xff;
6739 }
6740
6741 return alpha_fold_builtin_zapnot (op, opint, op_const);
6742 }
6743
6744 static tree
6745 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6746 {
6747 switch (op_const)
6748 {
6749 case 3:
6750 {
6751 unsigned HOST_WIDE_INT l;
6752 HOST_WIDE_INT h;
6753
6754 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6755
6756 #if HOST_BITS_PER_WIDE_INT > 64
6757 # error fixme
6758 #endif
6759
6760 return build_int_cst (long_integer_type_node, h);
6761 }
6762
6763 case 1:
6764 opint[1] = opint[0];
6765 /* FALLTHRU */
6766 case 2:
6767 /* Note that (X*1) >> 64 == 0. */
6768 if (opint[1] == 0 || opint[1] == 1)
6769 return build_int_cst (long_integer_type_node, 0);
6770 break;
6771 }
6772 return NULL;
6773 }
6774
6775 static tree
6776 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6777 {
6778 tree op0 = fold_convert (vtype, op[0]);
6779 tree op1 = fold_convert (vtype, op[1]);
6780 tree val = fold_build2 (code, vtype, op0, op1);
6781 return fold_convert (long_integer_type_node, val);
6782 }
6783
6784 static tree
6785 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6786 {
6787 unsigned HOST_WIDE_INT temp = 0;
6788 int i;
6789
6790 if (op_const != 3)
6791 return NULL;
6792
6793 for (i = 0; i < 8; ++i)
6794 {
6795 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6796 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6797 if (a >= b)
6798 temp += a - b;
6799 else
6800 temp += b - a;
6801 }
6802
6803 return build_int_cst (long_integer_type_node, temp);
6804 }
6805
6806 static tree
6807 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6808 {
6809 unsigned HOST_WIDE_INT temp;
6810
6811 if (op_const == 0)
6812 return NULL;
6813
6814 temp = opint[0] & 0xff;
6815 temp |= (opint[0] >> 24) & 0xff00;
6816
6817 return build_int_cst (long_integer_type_node, temp);
6818 }
6819
6820 static tree
6821 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6822 {
6823 unsigned HOST_WIDE_INT temp;
6824
6825 if (op_const == 0)
6826 return NULL;
6827
6828 temp = opint[0] & 0xff;
6829 temp |= (opint[0] >> 8) & 0xff00;
6830 temp |= (opint[0] >> 16) & 0xff0000;
6831 temp |= (opint[0] >> 24) & 0xff000000;
6832
6833 return build_int_cst (long_integer_type_node, temp);
6834 }
6835
6836 static tree
6837 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6838 {
6839 unsigned HOST_WIDE_INT temp;
6840
6841 if (op_const == 0)
6842 return NULL;
6843
6844 temp = opint[0] & 0xff;
6845 temp |= (opint[0] & 0xff00) << 24;
6846
6847 return build_int_cst (long_integer_type_node, temp);
6848 }
6849
6850 static tree
6851 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6852 {
6853 unsigned HOST_WIDE_INT temp;
6854
6855 if (op_const == 0)
6856 return NULL;
6857
6858 temp = opint[0] & 0xff;
6859 temp |= (opint[0] & 0x0000ff00) << 8;
6860 temp |= (opint[0] & 0x00ff0000) << 16;
6861 temp |= (opint[0] & 0xff000000) << 24;
6862
6863 return build_int_cst (long_integer_type_node, temp);
6864 }
6865
6866 static tree
6867 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6868 {
6869 unsigned HOST_WIDE_INT temp;
6870
6871 if (op_const == 0)
6872 return NULL;
6873
6874 if (opint[0] == 0)
6875 temp = 64;
6876 else
6877 temp = exact_log2 (opint[0] & -opint[0]);
6878
6879 return build_int_cst (long_integer_type_node, temp);
6880 }
6881
6882 static tree
6883 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6884 {
6885 unsigned HOST_WIDE_INT temp;
6886
6887 if (op_const == 0)
6888 return NULL;
6889
6890 if (opint[0] == 0)
6891 temp = 64;
6892 else
6893 temp = 64 - floor_log2 (opint[0]) - 1;
6894
6895 return build_int_cst (long_integer_type_node, temp);
6896 }
6897
6898 static tree
6899 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6900 {
6901 unsigned HOST_WIDE_INT temp, op;
6902
6903 if (op_const == 0)
6904 return NULL;
6905
6906 op = opint[0];
6907 temp = 0;
6908 while (op)
6909 temp++, op &= op - 1;
6910
6911 return build_int_cst (long_integer_type_node, temp);
6912 }
6913
6914 /* Fold one of our builtin functions. */
6915
6916 static tree
6917 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6918 {
6919 tree op[MAX_ARGS], t;
6920 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6921 long op_const = 0, arity = 0;
6922
6923 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6924 {
6925 tree arg = TREE_VALUE (t);
6926 if (arg == error_mark_node)
6927 return NULL;
6928 if (arity >= MAX_ARGS)
6929 return NULL;
6930
6931 op[arity] = arg;
6932 opint[arity] = 0;
6933 if (TREE_CODE (arg) == INTEGER_CST)
6934 {
6935 op_const |= 1L << arity;
6936 opint[arity] = int_cst_value (arg);
6937 }
6938 }
6939
6940 switch (DECL_FUNCTION_CODE (fndecl))
6941 {
6942 case ALPHA_BUILTIN_CMPBGE:
6943 return alpha_fold_builtin_cmpbge (opint, op_const);
6944
6945 case ALPHA_BUILTIN_EXTBL:
6946 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6947 case ALPHA_BUILTIN_EXTWL:
6948 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6949 case ALPHA_BUILTIN_EXTLL:
6950 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6951 case ALPHA_BUILTIN_EXTQL:
6952 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6953 case ALPHA_BUILTIN_EXTWH:
6954 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6955 case ALPHA_BUILTIN_EXTLH:
6956 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6957 case ALPHA_BUILTIN_EXTQH:
6958 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6959
6960 case ALPHA_BUILTIN_INSBL:
6961 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6962 case ALPHA_BUILTIN_INSWL:
6963 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6964 case ALPHA_BUILTIN_INSLL:
6965 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6966 case ALPHA_BUILTIN_INSQL:
6967 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6968 case ALPHA_BUILTIN_INSWH:
6969 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6970 case ALPHA_BUILTIN_INSLH:
6971 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6972 case ALPHA_BUILTIN_INSQH:
6973 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6974
6975 case ALPHA_BUILTIN_MSKBL:
6976 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6977 case ALPHA_BUILTIN_MSKWL:
6978 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6979 case ALPHA_BUILTIN_MSKLL:
6980 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6981 case ALPHA_BUILTIN_MSKQL:
6982 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6983 case ALPHA_BUILTIN_MSKWH:
6984 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6985 case ALPHA_BUILTIN_MSKLH:
6986 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6987 case ALPHA_BUILTIN_MSKQH:
6988 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6989
6990 case ALPHA_BUILTIN_UMULH:
6991 return alpha_fold_builtin_umulh (opint, op_const);
6992
6993 case ALPHA_BUILTIN_ZAP:
6994 opint[1] ^= 0xff;
6995 /* FALLTHRU */
6996 case ALPHA_BUILTIN_ZAPNOT:
6997 return alpha_fold_builtin_zapnot (op, opint, op_const);
6998
6999 case ALPHA_BUILTIN_MINUB8:
7000 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7001 case ALPHA_BUILTIN_MINSB8:
7002 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7003 case ALPHA_BUILTIN_MINUW4:
7004 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7005 case ALPHA_BUILTIN_MINSW4:
7006 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7007 case ALPHA_BUILTIN_MAXUB8:
7008 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7009 case ALPHA_BUILTIN_MAXSB8:
7010 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7011 case ALPHA_BUILTIN_MAXUW4:
7012 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7013 case ALPHA_BUILTIN_MAXSW4:
7014 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7015
7016 case ALPHA_BUILTIN_PERR:
7017 return alpha_fold_builtin_perr (opint, op_const);
7018 case ALPHA_BUILTIN_PKLB:
7019 return alpha_fold_builtin_pklb (opint, op_const);
7020 case ALPHA_BUILTIN_PKWB:
7021 return alpha_fold_builtin_pkwb (opint, op_const);
7022 case ALPHA_BUILTIN_UNPKBL:
7023 return alpha_fold_builtin_unpkbl (opint, op_const);
7024 case ALPHA_BUILTIN_UNPKBW:
7025 return alpha_fold_builtin_unpkbw (opint, op_const);
7026
7027 case ALPHA_BUILTIN_CTTZ:
7028 return alpha_fold_builtin_cttz (opint, op_const);
7029 case ALPHA_BUILTIN_CTLZ:
7030 return alpha_fold_builtin_ctlz (opint, op_const);
7031 case ALPHA_BUILTIN_CTPOP:
7032 return alpha_fold_builtin_ctpop (opint, op_const);
7033
7034 case ALPHA_BUILTIN_AMASK:
7035 case ALPHA_BUILTIN_IMPLVER:
7036 case ALPHA_BUILTIN_RPCC:
7037 case ALPHA_BUILTIN_THREAD_POINTER:
7038 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7039 /* None of these are foldable at compile-time. */
7040 default:
7041 return NULL;
7042 }
7043 }
7044 \f
7045 /* This page contains routines that are used to determine what the function
7046 prologue and epilogue code will do and write them out. */
7047
7048 /* Compute the size of the save area in the stack. */
7049
7050 /* These variables are used for communication between the following functions.
7051 They indicate various things about the current function being compiled
7052 that are used to tell what kind of prologue, epilogue and procedure
7053 descriptor to generate. */
7054
7055 /* Nonzero if we need a stack procedure. */
7056 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7057 static enum alpha_procedure_types alpha_procedure_type;
7058
7059 /* Register number (either FP or SP) that is used to unwind the frame. */
7060 static int vms_unwind_regno;
7061
7062 /* Register number used to save FP. We need not have one for RA since
7063 we don't modify it for register procedures. This is only defined
7064 for register frame procedures. */
7065 static int vms_save_fp_regno;
7066
7067 /* Register number used to reference objects off our PV. */
7068 static int vms_base_regno;
7069
7070 /* Compute register masks for saved registers. */
7071
7072 static void
7073 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7074 {
7075 unsigned long imask = 0;
7076 unsigned long fmask = 0;
7077 unsigned int i;
7078
7079 /* When outputting a thunk, we don't have valid register life info,
7080 but assemble_start_function wants to output .frame and .mask
7081 directives. */
7082 if (current_function_is_thunk)
7083 {
7084 *imaskP = 0;
7085 *fmaskP = 0;
7086 return;
7087 }
7088
7089 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7090 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7091
7092 /* One for every register we have to save. */
7093 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7094 if (! fixed_regs[i] && ! call_used_regs[i]
7095 && df_regs_ever_live_p (i) && i != REG_RA
7096 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7097 {
7098 if (i < 32)
7099 imask |= (1UL << i);
7100 else
7101 fmask |= (1UL << (i - 32));
7102 }
7103
7104 /* We need to restore these for the handler. */
7105 if (current_function_calls_eh_return)
7106 {
7107 for (i = 0; ; ++i)
7108 {
7109 unsigned regno = EH_RETURN_DATA_REGNO (i);
7110 if (regno == INVALID_REGNUM)
7111 break;
7112 imask |= 1UL << regno;
7113 }
7114 }
7115
7116 /* If any register spilled, then spill the return address also. */
7117 /* ??? This is required by the Digital stack unwind specification
7118 and isn't needed if we're doing Dwarf2 unwinding. */
7119 if (imask || fmask || alpha_ra_ever_killed ())
7120 imask |= (1UL << REG_RA);
7121
7122 *imaskP = imask;
7123 *fmaskP = fmask;
7124 }
7125
7126 int
7127 alpha_sa_size (void)
7128 {
7129 unsigned long mask[2];
7130 int sa_size = 0;
7131 int i, j;
7132
7133 alpha_sa_mask (&mask[0], &mask[1]);
7134
7135 if (TARGET_ABI_UNICOSMK)
7136 {
7137 if (mask[0] || mask[1])
7138 sa_size = 14;
7139 }
7140 else
7141 {
7142 for (j = 0; j < 2; ++j)
7143 for (i = 0; i < 32; ++i)
7144 if ((mask[j] >> i) & 1)
7145 sa_size++;
7146 }
7147
7148 if (TARGET_ABI_UNICOSMK)
7149 {
7150 /* We might not need to generate a frame if we don't make any calls
7151 (including calls to __T3E_MISMATCH if this is a vararg function),
7152 don't have any local variables which require stack slots, don't
7153 use alloca and have not determined that we need a frame for other
7154 reasons. */
7155
7156 alpha_procedure_type
7157 = (sa_size || get_frame_size() != 0
7158 || current_function_outgoing_args_size
7159 || current_function_stdarg || current_function_calls_alloca
7160 || frame_pointer_needed)
7161 ? PT_STACK : PT_REGISTER;
7162
7163 /* Always reserve space for saving callee-saved registers if we
7164 need a frame as required by the calling convention. */
7165 if (alpha_procedure_type == PT_STACK)
7166 sa_size = 14;
7167 }
7168 else if (TARGET_ABI_OPEN_VMS)
7169 {
7170 /* Start by assuming we can use a register procedure if we don't
7171 make any calls (REG_RA not used) or need to save any
7172 registers and a stack procedure if we do. */
7173 if ((mask[0] >> REG_RA) & 1)
7174 alpha_procedure_type = PT_STACK;
7175 else if (get_frame_size() != 0)
7176 alpha_procedure_type = PT_REGISTER;
7177 else
7178 alpha_procedure_type = PT_NULL;
7179
7180 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7181 made the final decision on stack procedure vs register procedure. */
7182 if (alpha_procedure_type == PT_STACK)
7183 sa_size -= 2;
7184
7185 /* Decide whether to refer to objects off our PV via FP or PV.
7186 If we need FP for something else or if we receive a nonlocal
7187 goto (which expects PV to contain the value), we must use PV.
7188 Otherwise, start by assuming we can use FP. */
7189
7190 vms_base_regno
7191 = (frame_pointer_needed
7192 || current_function_has_nonlocal_label
7193 || alpha_procedure_type == PT_STACK
7194 || current_function_outgoing_args_size)
7195 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7196
7197 /* If we want to copy PV into FP, we need to find some register
7198 in which to save FP. */
7199
7200 vms_save_fp_regno = -1;
7201 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7202 for (i = 0; i < 32; i++)
7203 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7204 vms_save_fp_regno = i;
7205
7206 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7207 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7208 else if (alpha_procedure_type == PT_NULL)
7209 vms_base_regno = REG_PV;
7210
7211 /* Stack unwinding should be done via FP unless we use it for PV. */
7212 vms_unwind_regno = (vms_base_regno == REG_PV
7213 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7214
7215 /* If this is a stack procedure, allow space for saving FP and RA. */
7216 if (alpha_procedure_type == PT_STACK)
7217 sa_size += 2;
7218 }
7219 else
7220 {
7221 /* Our size must be even (multiple of 16 bytes). */
7222 if (sa_size & 1)
7223 sa_size++;
7224 }
7225
7226 return sa_size * 8;
7227 }
7228
7229 /* Define the offset between two registers, one to be eliminated,
7230 and the other its replacement, at the start of a routine. */
7231
7232 HOST_WIDE_INT
7233 alpha_initial_elimination_offset (unsigned int from,
7234 unsigned int to ATTRIBUTE_UNUSED)
7235 {
7236 HOST_WIDE_INT ret;
7237
7238 ret = alpha_sa_size ();
7239 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7240
7241 switch (from)
7242 {
7243 case FRAME_POINTER_REGNUM:
7244 break;
7245
7246 case ARG_POINTER_REGNUM:
7247 ret += (ALPHA_ROUND (get_frame_size ()
7248 + current_function_pretend_args_size)
7249 - current_function_pretend_args_size);
7250 break;
7251
7252 default:
7253 gcc_unreachable ();
7254 }
7255
7256 return ret;
7257 }
7258
7259 int
7260 alpha_pv_save_size (void)
7261 {
7262 alpha_sa_size ();
7263 return alpha_procedure_type == PT_STACK ? 8 : 0;
7264 }
7265
7266 int
7267 alpha_using_fp (void)
7268 {
7269 alpha_sa_size ();
7270 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7271 }
7272
7273 #if TARGET_ABI_OPEN_VMS
7274
7275 const struct attribute_spec vms_attribute_table[] =
7276 {
7277 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7278 { "overlaid", 0, 0, true, false, false, NULL },
7279 { "global", 0, 0, true, false, false, NULL },
7280 { "initialize", 0, 0, true, false, false, NULL },
7281 { NULL, 0, 0, false, false, false, NULL }
7282 };
7283
7284 #endif
7285
7286 static int
7287 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7288 {
7289 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7290 }
7291
7292 int
7293 alpha_find_lo_sum_using_gp (rtx insn)
7294 {
7295 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7296 }
7297
7298 static int
7299 alpha_does_function_need_gp (void)
7300 {
7301 rtx insn;
7302
7303 /* The GP being variable is an OSF abi thing. */
7304 if (! TARGET_ABI_OSF)
7305 return 0;
7306
7307 /* We need the gp to load the address of __mcount. */
7308 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7309 return 1;
7310
7311 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7312 if (current_function_is_thunk)
7313 return 1;
7314
7315 /* The nonlocal receiver pattern assumes that the gp is valid for
7316 the nested function. Reasonable because it's almost always set
7317 correctly already. For the cases where that's wrong, make sure
7318 the nested function loads its gp on entry. */
7319 if (current_function_has_nonlocal_goto)
7320 return 1;
7321
7322 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7323 Even if we are a static function, we still need to do this in case
7324 our address is taken and passed to something like qsort. */
7325
7326 push_topmost_sequence ();
7327 insn = get_insns ();
7328 pop_topmost_sequence ();
7329
7330 for (; insn; insn = NEXT_INSN (insn))
7331 if (INSN_P (insn)
7332 && ! JUMP_TABLE_DATA_P (insn)
7333 && GET_CODE (PATTERN (insn)) != USE
7334 && GET_CODE (PATTERN (insn)) != CLOBBER
7335 && get_attr_usegp (insn))
7336 return 1;
7337
7338 return 0;
7339 }
7340
7341 \f
7342 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7343 sequences. */
7344
7345 static rtx
7346 set_frame_related_p (void)
7347 {
7348 rtx seq = get_insns ();
7349 rtx insn;
7350
7351 end_sequence ();
7352
7353 if (!seq)
7354 return NULL_RTX;
7355
7356 if (INSN_P (seq))
7357 {
7358 insn = seq;
7359 while (insn != NULL_RTX)
7360 {
7361 RTX_FRAME_RELATED_P (insn) = 1;
7362 insn = NEXT_INSN (insn);
7363 }
7364 seq = emit_insn (seq);
7365 }
7366 else
7367 {
7368 seq = emit_insn (seq);
7369 RTX_FRAME_RELATED_P (seq) = 1;
7370 }
7371 return seq;
7372 }
7373
7374 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7375
7376 /* Generates a store with the proper unwind info attached. VALUE is
7377 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7378 contains SP+FRAME_BIAS, and that is the unwind info that should be
7379 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7380 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7381
7382 static void
7383 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7384 HOST_WIDE_INT base_ofs, rtx frame_reg)
7385 {
7386 rtx addr, mem, insn;
7387
7388 addr = plus_constant (base_reg, base_ofs);
7389 mem = gen_rtx_MEM (DImode, addr);
7390 set_mem_alias_set (mem, alpha_sr_alias_set);
7391
7392 insn = emit_move_insn (mem, value);
7393 RTX_FRAME_RELATED_P (insn) = 1;
7394
7395 if (frame_bias || value != frame_reg)
7396 {
7397 if (frame_bias)
7398 {
7399 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7400 mem = gen_rtx_MEM (DImode, addr);
7401 }
7402
7403 REG_NOTES (insn)
7404 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7405 gen_rtx_SET (VOIDmode, mem, frame_reg),
7406 REG_NOTES (insn));
7407 }
7408 }
7409
7410 static void
7411 emit_frame_store (unsigned int regno, rtx base_reg,
7412 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7413 {
7414 rtx reg = gen_rtx_REG (DImode, regno);
7415 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7416 }
7417
7418 /* Write function prologue. */
7419
7420 /* On vms we have two kinds of functions:
7421
7422 - stack frame (PROC_STACK)
7423 these are 'normal' functions with local vars and which are
7424 calling other functions
7425 - register frame (PROC_REGISTER)
7426 keeps all data in registers, needs no stack
7427
7428 We must pass this to the assembler so it can generate the
7429 proper pdsc (procedure descriptor)
7430 This is done with the '.pdesc' command.
7431
7432 On not-vms, we don't really differentiate between the two, as we can
7433 simply allocate stack without saving registers. */
7434
7435 void
7436 alpha_expand_prologue (void)
7437 {
7438 /* Registers to save. */
7439 unsigned long imask = 0;
7440 unsigned long fmask = 0;
7441 /* Stack space needed for pushing registers clobbered by us. */
7442 HOST_WIDE_INT sa_size;
7443 /* Complete stack size needed. */
7444 HOST_WIDE_INT frame_size;
7445 /* Offset from base reg to register save area. */
7446 HOST_WIDE_INT reg_offset;
7447 rtx sa_reg;
7448 int i;
7449
7450 sa_size = alpha_sa_size ();
7451
7452 frame_size = get_frame_size ();
7453 if (TARGET_ABI_OPEN_VMS)
7454 frame_size = ALPHA_ROUND (sa_size
7455 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7456 + frame_size
7457 + current_function_pretend_args_size);
7458 else if (TARGET_ABI_UNICOSMK)
7459 /* We have to allocate space for the DSIB if we generate a frame. */
7460 frame_size = ALPHA_ROUND (sa_size
7461 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7462 + ALPHA_ROUND (frame_size
7463 + current_function_outgoing_args_size);
7464 else
7465 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7466 + sa_size
7467 + ALPHA_ROUND (frame_size
7468 + current_function_pretend_args_size));
7469
7470 if (TARGET_ABI_OPEN_VMS)
7471 reg_offset = 8;
7472 else
7473 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7474
7475 alpha_sa_mask (&imask, &fmask);
7476
7477 /* Emit an insn to reload GP, if needed. */
7478 if (TARGET_ABI_OSF)
7479 {
7480 alpha_function_needs_gp = alpha_does_function_need_gp ();
7481 if (alpha_function_needs_gp)
7482 emit_insn (gen_prologue_ldgp ());
7483 }
7484
7485 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7486 the call to mcount ourselves, rather than having the linker do it
7487 magically in response to -pg. Since _mcount has special linkage,
7488 don't represent the call as a call. */
7489 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7490 emit_insn (gen_prologue_mcount ());
7491
7492 if (TARGET_ABI_UNICOSMK)
7493 unicosmk_gen_dsib (&imask);
7494
7495 /* Adjust the stack by the frame size. If the frame size is > 4096
7496 bytes, we need to be sure we probe somewhere in the first and last
7497 4096 bytes (we can probably get away without the latter test) and
7498 every 8192 bytes in between. If the frame size is > 32768, we
7499 do this in a loop. Otherwise, we generate the explicit probe
7500 instructions.
7501
7502 Note that we are only allowed to adjust sp once in the prologue. */
7503
7504 if (frame_size <= 32768)
7505 {
7506 if (frame_size > 4096)
7507 {
7508 int probed;
7509
7510 for (probed = 4096; probed < frame_size; probed += 8192)
7511 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7512 ? -probed + 64
7513 : -probed)));
7514
7515 /* We only have to do this probe if we aren't saving registers. */
7516 if (sa_size == 0 && frame_size > probed - 4096)
7517 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7518 }
7519
7520 if (frame_size != 0)
7521 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7522 GEN_INT (TARGET_ABI_UNICOSMK
7523 ? -frame_size + 64
7524 : -frame_size))));
7525 }
7526 else
7527 {
7528 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7529 number of 8192 byte blocks to probe. We then probe each block
7530 in the loop and then set SP to the proper location. If the
7531 amount remaining is > 4096, we have to do one more probe if we
7532 are not saving any registers. */
7533
7534 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7535 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7536 rtx ptr = gen_rtx_REG (DImode, 22);
7537 rtx count = gen_rtx_REG (DImode, 23);
7538 rtx seq;
7539
7540 emit_move_insn (count, GEN_INT (blocks));
7541 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7542 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7543
7544 /* Because of the difficulty in emitting a new basic block this
7545 late in the compilation, generate the loop as a single insn. */
7546 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7547
7548 if (leftover > 4096 && sa_size == 0)
7549 {
7550 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7551 MEM_VOLATILE_P (last) = 1;
7552 emit_move_insn (last, const0_rtx);
7553 }
7554
7555 if (TARGET_ABI_WINDOWS_NT)
7556 {
7557 /* For NT stack unwind (done by 'reverse execution'), it's
7558 not OK to take the result of a loop, even though the value
7559 is already in ptr, so we reload it via a single operation
7560 and subtract it to sp.
7561
7562 Yes, that's correct -- we have to reload the whole constant
7563 into a temporary via ldah+lda then subtract from sp. */
7564
7565 HOST_WIDE_INT lo, hi;
7566 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7567 hi = frame_size - lo;
7568
7569 emit_move_insn (ptr, GEN_INT (hi));
7570 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7571 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7572 ptr));
7573 }
7574 else
7575 {
7576 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7577 GEN_INT (-leftover)));
7578 }
7579
7580 /* This alternative is special, because the DWARF code cannot
7581 possibly intuit through the loop above. So we invent this
7582 note it looks at instead. */
7583 RTX_FRAME_RELATED_P (seq) = 1;
7584 REG_NOTES (seq)
7585 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7586 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7587 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7588 GEN_INT (TARGET_ABI_UNICOSMK
7589 ? -frame_size + 64
7590 : -frame_size))),
7591 REG_NOTES (seq));
7592 }
7593
7594 if (!TARGET_ABI_UNICOSMK)
7595 {
7596 HOST_WIDE_INT sa_bias = 0;
7597
7598 /* Cope with very large offsets to the register save area. */
7599 sa_reg = stack_pointer_rtx;
7600 if (reg_offset + sa_size > 0x8000)
7601 {
7602 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7603 rtx sa_bias_rtx;
7604
7605 if (low + sa_size <= 0x8000)
7606 sa_bias = reg_offset - low, reg_offset = low;
7607 else
7608 sa_bias = reg_offset, reg_offset = 0;
7609
7610 sa_reg = gen_rtx_REG (DImode, 24);
7611 sa_bias_rtx = GEN_INT (sa_bias);
7612
7613 if (add_operand (sa_bias_rtx, DImode))
7614 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7615 else
7616 {
7617 emit_move_insn (sa_reg, sa_bias_rtx);
7618 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7619 }
7620 }
7621
7622 /* Save regs in stack order. Beginning with VMS PV. */
7623 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7624 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7625
7626 /* Save register RA next. */
7627 if (imask & (1UL << REG_RA))
7628 {
7629 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7630 imask &= ~(1UL << REG_RA);
7631 reg_offset += 8;
7632 }
7633
7634 /* Now save any other registers required to be saved. */
7635 for (i = 0; i < 31; i++)
7636 if (imask & (1UL << i))
7637 {
7638 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7639 reg_offset += 8;
7640 }
7641
7642 for (i = 0; i < 31; i++)
7643 if (fmask & (1UL << i))
7644 {
7645 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7646 reg_offset += 8;
7647 }
7648 }
7649 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7650 {
7651 /* The standard frame on the T3E includes space for saving registers.
7652 We just have to use it. We don't have to save the return address and
7653 the old frame pointer here - they are saved in the DSIB. */
7654
7655 reg_offset = -56;
7656 for (i = 9; i < 15; i++)
7657 if (imask & (1UL << i))
7658 {
7659 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7660 reg_offset -= 8;
7661 }
7662 for (i = 2; i < 10; i++)
7663 if (fmask & (1UL << i))
7664 {
7665 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7666 reg_offset -= 8;
7667 }
7668 }
7669
7670 if (TARGET_ABI_OPEN_VMS)
7671 {
7672 if (alpha_procedure_type == PT_REGISTER)
7673 /* Register frame procedures save the fp.
7674 ?? Ought to have a dwarf2 save for this. */
7675 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7676 hard_frame_pointer_rtx);
7677
7678 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7679 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7680 gen_rtx_REG (DImode, REG_PV)));
7681
7682 if (alpha_procedure_type != PT_NULL
7683 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7684 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7685
7686 /* If we have to allocate space for outgoing args, do it now. */
7687 if (current_function_outgoing_args_size != 0)
7688 {
7689 rtx seq
7690 = emit_move_insn (stack_pointer_rtx,
7691 plus_constant
7692 (hard_frame_pointer_rtx,
7693 - (ALPHA_ROUND
7694 (current_function_outgoing_args_size))));
7695
7696 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7697 if ! frame_pointer_needed. Setting the bit will change the CFA
7698 computation rule to use sp again, which would be wrong if we had
7699 frame_pointer_needed, as this means sp might move unpredictably
7700 later on.
7701
7702 Also, note that
7703 frame_pointer_needed
7704 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7705 and
7706 current_function_outgoing_args_size != 0
7707 => alpha_procedure_type != PT_NULL,
7708
7709 so when we are not setting the bit here, we are guaranteed to
7710 have emitted an FRP frame pointer update just before. */
7711 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7712 }
7713 }
7714 else if (!TARGET_ABI_UNICOSMK)
7715 {
7716 /* If we need a frame pointer, set it from the stack pointer. */
7717 if (frame_pointer_needed)
7718 {
7719 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7720 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7721 else
7722 /* This must always be the last instruction in the
7723 prologue, thus we emit a special move + clobber. */
7724 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7725 stack_pointer_rtx, sa_reg)));
7726 }
7727 }
7728
7729 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7730 the prologue, for exception handling reasons, we cannot do this for
7731 any insn that might fault. We could prevent this for mems with a
7732 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7733 have to prevent all such scheduling with a blockage.
7734
7735 Linux, on the other hand, never bothered to implement OSF/1's
7736 exception handling, and so doesn't care about such things. Anyone
7737 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7738
7739 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7740 emit_insn (gen_blockage ());
7741 }
7742
7743 /* Count the number of .file directives, so that .loc is up to date. */
7744 int num_source_filenames = 0;
7745
7746 /* Output the textual info surrounding the prologue. */
7747
7748 void
7749 alpha_start_function (FILE *file, const char *fnname,
7750 tree decl ATTRIBUTE_UNUSED)
7751 {
7752 unsigned long imask = 0;
7753 unsigned long fmask = 0;
7754 /* Stack space needed for pushing registers clobbered by us. */
7755 HOST_WIDE_INT sa_size;
7756 /* Complete stack size needed. */
7757 unsigned HOST_WIDE_INT frame_size;
7758 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7759 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7760 ? 524288
7761 : 1UL << 31;
7762 /* Offset from base reg to register save area. */
7763 HOST_WIDE_INT reg_offset;
7764 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7765 int i;
7766
7767 /* Don't emit an extern directive for functions defined in the same file. */
7768 if (TARGET_ABI_UNICOSMK)
7769 {
7770 tree name_tree;
7771 name_tree = get_identifier (fnname);
7772 TREE_ASM_WRITTEN (name_tree) = 1;
7773 }
7774
7775 alpha_fnname = fnname;
7776 sa_size = alpha_sa_size ();
7777
7778 frame_size = get_frame_size ();
7779 if (TARGET_ABI_OPEN_VMS)
7780 frame_size = ALPHA_ROUND (sa_size
7781 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7782 + frame_size
7783 + current_function_pretend_args_size);
7784 else if (TARGET_ABI_UNICOSMK)
7785 frame_size = ALPHA_ROUND (sa_size
7786 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7787 + ALPHA_ROUND (frame_size
7788 + current_function_outgoing_args_size);
7789 else
7790 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7791 + sa_size
7792 + ALPHA_ROUND (frame_size
7793 + current_function_pretend_args_size));
7794
7795 if (TARGET_ABI_OPEN_VMS)
7796 reg_offset = 8;
7797 else
7798 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7799
7800 alpha_sa_mask (&imask, &fmask);
7801
7802 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7803 We have to do that before the .ent directive as we cannot switch
7804 files within procedures with native ecoff because line numbers are
7805 linked to procedure descriptors.
7806 Outputting the lineno helps debugging of one line functions as they
7807 would otherwise get no line number at all. Please note that we would
7808 like to put out last_linenum from final.c, but it is not accessible. */
7809
7810 if (write_symbols == SDB_DEBUG)
7811 {
7812 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7813 ASM_OUTPUT_SOURCE_FILENAME (file,
7814 DECL_SOURCE_FILE (current_function_decl));
7815 #endif
7816 #ifdef SDB_OUTPUT_SOURCE_LINE
7817 if (debug_info_level != DINFO_LEVEL_TERSE)
7818 SDB_OUTPUT_SOURCE_LINE (file,
7819 DECL_SOURCE_LINE (current_function_decl));
7820 #endif
7821 }
7822
7823 /* Issue function start and label. */
7824 if (TARGET_ABI_OPEN_VMS
7825 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7826 {
7827 fputs ("\t.ent ", file);
7828 assemble_name (file, fnname);
7829 putc ('\n', file);
7830
7831 /* If the function needs GP, we'll write the "..ng" label there.
7832 Otherwise, do it here. */
7833 if (TARGET_ABI_OSF
7834 && ! alpha_function_needs_gp
7835 && ! current_function_is_thunk)
7836 {
7837 putc ('$', file);
7838 assemble_name (file, fnname);
7839 fputs ("..ng:\n", file);
7840 }
7841 }
7842
7843 strcpy (entry_label, fnname);
7844 if (TARGET_ABI_OPEN_VMS)
7845 strcat (entry_label, "..en");
7846
7847 /* For public functions, the label must be globalized by appending an
7848 additional colon. */
7849 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7850 strcat (entry_label, ":");
7851
7852 ASM_OUTPUT_LABEL (file, entry_label);
7853 inside_function = TRUE;
7854
7855 if (TARGET_ABI_OPEN_VMS)
7856 fprintf (file, "\t.base $%d\n", vms_base_regno);
7857
7858 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7859 && !flag_inhibit_size_directive)
7860 {
7861 /* Set flags in procedure descriptor to request IEEE-conformant
7862 math-library routines. The value we set it to is PDSC_EXC_IEEE
7863 (/usr/include/pdsc.h). */
7864 fputs ("\t.eflag 48\n", file);
7865 }
7866
7867 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7868 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7869 alpha_arg_offset = -frame_size + 48;
7870
7871 /* Describe our frame. If the frame size is larger than an integer,
7872 print it as zero to avoid an assembler error. We won't be
7873 properly describing such a frame, but that's the best we can do. */
7874 if (TARGET_ABI_UNICOSMK)
7875 ;
7876 else if (TARGET_ABI_OPEN_VMS)
7877 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7878 HOST_WIDE_INT_PRINT_DEC "\n",
7879 vms_unwind_regno,
7880 frame_size >= (1UL << 31) ? 0 : frame_size,
7881 reg_offset);
7882 else if (!flag_inhibit_size_directive)
7883 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7884 (frame_pointer_needed
7885 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7886 frame_size >= max_frame_size ? 0 : frame_size,
7887 current_function_pretend_args_size);
7888
7889 /* Describe which registers were spilled. */
7890 if (TARGET_ABI_UNICOSMK)
7891 ;
7892 else if (TARGET_ABI_OPEN_VMS)
7893 {
7894 if (imask)
7895 /* ??? Does VMS care if mask contains ra? The old code didn't
7896 set it, so I don't here. */
7897 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7898 if (fmask)
7899 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7900 if (alpha_procedure_type == PT_REGISTER)
7901 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7902 }
7903 else if (!flag_inhibit_size_directive)
7904 {
7905 if (imask)
7906 {
7907 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7908 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7909
7910 for (i = 0; i < 32; ++i)
7911 if (imask & (1UL << i))
7912 reg_offset += 8;
7913 }
7914
7915 if (fmask)
7916 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7917 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7918 }
7919
7920 #if TARGET_ABI_OPEN_VMS
7921 /* Ifdef'ed cause link_section are only available then. */
7922 switch_to_section (readonly_data_section);
7923 fprintf (file, "\t.align 3\n");
7924 assemble_name (file, fnname); fputs ("..na:\n", file);
7925 fputs ("\t.ascii \"", file);
7926 assemble_name (file, fnname);
7927 fputs ("\\0\"\n", file);
7928 alpha_need_linkage (fnname, 1);
7929 switch_to_section (text_section);
7930 #endif
7931 }
7932
7933 /* Emit the .prologue note at the scheduled end of the prologue. */
7934
7935 static void
7936 alpha_output_function_end_prologue (FILE *file)
7937 {
7938 if (TARGET_ABI_UNICOSMK)
7939 ;
7940 else if (TARGET_ABI_OPEN_VMS)
7941 fputs ("\t.prologue\n", file);
7942 else if (TARGET_ABI_WINDOWS_NT)
7943 fputs ("\t.prologue 0\n", file);
7944 else if (!flag_inhibit_size_directive)
7945 fprintf (file, "\t.prologue %d\n",
7946 alpha_function_needs_gp || current_function_is_thunk);
7947 }
7948
7949 /* Write function epilogue. */
7950
7951 /* ??? At some point we will want to support full unwind, and so will
7952 need to mark the epilogue as well. At the moment, we just confuse
7953 dwarf2out. */
7954 #undef FRP
7955 #define FRP(exp) exp
7956
7957 void
7958 alpha_expand_epilogue (void)
7959 {
7960 /* Registers to save. */
7961 unsigned long imask = 0;
7962 unsigned long fmask = 0;
7963 /* Stack space needed for pushing registers clobbered by us. */
7964 HOST_WIDE_INT sa_size;
7965 /* Complete stack size needed. */
7966 HOST_WIDE_INT frame_size;
7967 /* Offset from base reg to register save area. */
7968 HOST_WIDE_INT reg_offset;
7969 int fp_is_frame_pointer, fp_offset;
7970 rtx sa_reg, sa_reg_exp = NULL;
7971 rtx sp_adj1, sp_adj2, mem;
7972 rtx eh_ofs;
7973 int i;
7974
7975 sa_size = alpha_sa_size ();
7976
7977 frame_size = get_frame_size ();
7978 if (TARGET_ABI_OPEN_VMS)
7979 frame_size = ALPHA_ROUND (sa_size
7980 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7981 + frame_size
7982 + current_function_pretend_args_size);
7983 else if (TARGET_ABI_UNICOSMK)
7984 frame_size = ALPHA_ROUND (sa_size
7985 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7986 + ALPHA_ROUND (frame_size
7987 + current_function_outgoing_args_size);
7988 else
7989 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7990 + sa_size
7991 + ALPHA_ROUND (frame_size
7992 + current_function_pretend_args_size));
7993
7994 if (TARGET_ABI_OPEN_VMS)
7995 {
7996 if (alpha_procedure_type == PT_STACK)
7997 reg_offset = 8;
7998 else
7999 reg_offset = 0;
8000 }
8001 else
8002 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8003
8004 alpha_sa_mask (&imask, &fmask);
8005
8006 fp_is_frame_pointer
8007 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8008 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8009 fp_offset = 0;
8010 sa_reg = stack_pointer_rtx;
8011
8012 if (current_function_calls_eh_return)
8013 eh_ofs = EH_RETURN_STACKADJ_RTX;
8014 else
8015 eh_ofs = NULL_RTX;
8016
8017 if (!TARGET_ABI_UNICOSMK && sa_size)
8018 {
8019 /* If we have a frame pointer, restore SP from it. */
8020 if ((TARGET_ABI_OPEN_VMS
8021 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8022 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8023 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8024
8025 /* Cope with very large offsets to the register save area. */
8026 if (reg_offset + sa_size > 0x8000)
8027 {
8028 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8029 HOST_WIDE_INT bias;
8030
8031 if (low + sa_size <= 0x8000)
8032 bias = reg_offset - low, reg_offset = low;
8033 else
8034 bias = reg_offset, reg_offset = 0;
8035
8036 sa_reg = gen_rtx_REG (DImode, 22);
8037 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8038
8039 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8040 }
8041
8042 /* Restore registers in order, excepting a true frame pointer. */
8043
8044 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8045 if (! eh_ofs)
8046 set_mem_alias_set (mem, alpha_sr_alias_set);
8047 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8048
8049 reg_offset += 8;
8050 imask &= ~(1UL << REG_RA);
8051
8052 for (i = 0; i < 31; ++i)
8053 if (imask & (1UL << i))
8054 {
8055 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8056 fp_offset = reg_offset;
8057 else
8058 {
8059 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8060 set_mem_alias_set (mem, alpha_sr_alias_set);
8061 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8062 }
8063 reg_offset += 8;
8064 }
8065
8066 for (i = 0; i < 31; ++i)
8067 if (fmask & (1UL << i))
8068 {
8069 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8070 set_mem_alias_set (mem, alpha_sr_alias_set);
8071 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8072 reg_offset += 8;
8073 }
8074 }
8075 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8076 {
8077 /* Restore callee-saved general-purpose registers. */
8078
8079 reg_offset = -56;
8080
8081 for (i = 9; i < 15; i++)
8082 if (imask & (1UL << i))
8083 {
8084 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8085 reg_offset));
8086 set_mem_alias_set (mem, alpha_sr_alias_set);
8087 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8088 reg_offset -= 8;
8089 }
8090
8091 for (i = 2; i < 10; i++)
8092 if (fmask & (1UL << i))
8093 {
8094 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8095 reg_offset));
8096 set_mem_alias_set (mem, alpha_sr_alias_set);
8097 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8098 reg_offset -= 8;
8099 }
8100
8101 /* Restore the return address from the DSIB. */
8102
8103 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8104 set_mem_alias_set (mem, alpha_sr_alias_set);
8105 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8106 }
8107
8108 if (frame_size || eh_ofs)
8109 {
8110 sp_adj1 = stack_pointer_rtx;
8111
8112 if (eh_ofs)
8113 {
8114 sp_adj1 = gen_rtx_REG (DImode, 23);
8115 emit_move_insn (sp_adj1,
8116 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8117 }
8118
8119 /* If the stack size is large, begin computation into a temporary
8120 register so as not to interfere with a potential fp restore,
8121 which must be consecutive with an SP restore. */
8122 if (frame_size < 32768
8123 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8124 sp_adj2 = GEN_INT (frame_size);
8125 else if (TARGET_ABI_UNICOSMK)
8126 {
8127 sp_adj1 = gen_rtx_REG (DImode, 23);
8128 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8129 sp_adj2 = const0_rtx;
8130 }
8131 else if (frame_size < 0x40007fffL)
8132 {
8133 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8134
8135 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8136 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8137 sp_adj1 = sa_reg;
8138 else
8139 {
8140 sp_adj1 = gen_rtx_REG (DImode, 23);
8141 FRP (emit_move_insn (sp_adj1, sp_adj2));
8142 }
8143 sp_adj2 = GEN_INT (low);
8144 }
8145 else
8146 {
8147 rtx tmp = gen_rtx_REG (DImode, 23);
8148 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8149 3, false));
8150 if (!sp_adj2)
8151 {
8152 /* We can't drop new things to memory this late, afaik,
8153 so build it up by pieces. */
8154 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8155 -(frame_size < 0)));
8156 gcc_assert (sp_adj2);
8157 }
8158 }
8159
8160 /* From now on, things must be in order. So emit blockages. */
8161
8162 /* Restore the frame pointer. */
8163 if (TARGET_ABI_UNICOSMK)
8164 {
8165 emit_insn (gen_blockage ());
8166 mem = gen_rtx_MEM (DImode,
8167 plus_constant (hard_frame_pointer_rtx, -16));
8168 set_mem_alias_set (mem, alpha_sr_alias_set);
8169 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8170 }
8171 else if (fp_is_frame_pointer)
8172 {
8173 emit_insn (gen_blockage ());
8174 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8175 set_mem_alias_set (mem, alpha_sr_alias_set);
8176 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8177 }
8178 else if (TARGET_ABI_OPEN_VMS)
8179 {
8180 emit_insn (gen_blockage ());
8181 FRP (emit_move_insn (hard_frame_pointer_rtx,
8182 gen_rtx_REG (DImode, vms_save_fp_regno)));
8183 }
8184
8185 /* Restore the stack pointer. */
8186 emit_insn (gen_blockage ());
8187 if (sp_adj2 == const0_rtx)
8188 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8189 else
8190 FRP (emit_move_insn (stack_pointer_rtx,
8191 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8192 }
8193 else
8194 {
8195 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8196 {
8197 emit_insn (gen_blockage ());
8198 FRP (emit_move_insn (hard_frame_pointer_rtx,
8199 gen_rtx_REG (DImode, vms_save_fp_regno)));
8200 }
8201 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8202 {
8203 /* Decrement the frame pointer if the function does not have a
8204 frame. */
8205
8206 emit_insn (gen_blockage ());
8207 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8208 hard_frame_pointer_rtx, constm1_rtx)));
8209 }
8210 }
8211 }
8212 \f
8213 /* Output the rest of the textual info surrounding the epilogue. */
8214
8215 void
8216 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8217 {
8218 rtx insn;
8219
8220 /* We output a nop after noreturn calls at the very end of the function to
8221 ensure that the return address always remains in the caller's code range,
8222 as not doing so might confuse unwinding engines. */
8223 insn = get_last_insn ();
8224 if (!INSN_P (insn))
8225 insn = prev_active_insn (insn);
8226 if (GET_CODE (insn) == CALL_INSN)
8227 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8228
8229 #if TARGET_ABI_OPEN_VMS
8230 alpha_write_linkage (file, fnname, decl);
8231 #endif
8232
8233 /* End the function. */
8234 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8235 {
8236 fputs ("\t.end ", file);
8237 assemble_name (file, fnname);
8238 putc ('\n', file);
8239 }
8240 inside_function = FALSE;
8241
8242 /* Output jump tables and the static subroutine information block. */
8243 if (TARGET_ABI_UNICOSMK)
8244 {
8245 unicosmk_output_ssib (file, fnname);
8246 unicosmk_output_deferred_case_vectors (file);
8247 }
8248 }
8249
8250 #if TARGET_ABI_OSF
8251 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8252
8253 In order to avoid the hordes of differences between generated code
8254 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8255 lots of code loading up large constants, generate rtl and emit it
8256 instead of going straight to text.
8257
8258 Not sure why this idea hasn't been explored before... */
8259
8260 static void
8261 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8262 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8263 tree function)
8264 {
8265 HOST_WIDE_INT hi, lo;
8266 rtx this, insn, funexp;
8267
8268 /* We always require a valid GP. */
8269 emit_insn (gen_prologue_ldgp ());
8270 emit_note (NOTE_INSN_PROLOGUE_END);
8271
8272 /* Find the "this" pointer. If the function returns a structure,
8273 the structure return pointer is in $16. */
8274 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8275 this = gen_rtx_REG (Pmode, 17);
8276 else
8277 this = gen_rtx_REG (Pmode, 16);
8278
8279 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8280 entire constant for the add. */
8281 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8282 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8283 if (hi + lo == delta)
8284 {
8285 if (hi)
8286 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8287 if (lo)
8288 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8289 }
8290 else
8291 {
8292 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8293 delta, -(delta < 0));
8294 emit_insn (gen_adddi3 (this, this, tmp));
8295 }
8296
8297 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8298 if (vcall_offset)
8299 {
8300 rtx tmp, tmp2;
8301
8302 tmp = gen_rtx_REG (Pmode, 0);
8303 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8304
8305 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8306 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8307 if (hi + lo == vcall_offset)
8308 {
8309 if (hi)
8310 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8311 }
8312 else
8313 {
8314 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8315 vcall_offset, -(vcall_offset < 0));
8316 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8317 lo = 0;
8318 }
8319 if (lo)
8320 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8321 else
8322 tmp2 = tmp;
8323 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8324
8325 emit_insn (gen_adddi3 (this, this, tmp));
8326 }
8327
8328 /* Generate a tail call to the target function. */
8329 if (! TREE_USED (function))
8330 {
8331 assemble_external (function);
8332 TREE_USED (function) = 1;
8333 }
8334 funexp = XEXP (DECL_RTL (function), 0);
8335 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8336 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8337 SIBLING_CALL_P (insn) = 1;
8338
8339 /* Run just enough of rest_of_compilation to get the insns emitted.
8340 There's not really enough bulk here to make other passes such as
8341 instruction scheduling worth while. Note that use_thunk calls
8342 assemble_start_function and assemble_end_function. */
8343 insn = get_insns ();
8344 insn_locators_alloc ();
8345 shorten_branches (insn);
8346 final_start_function (insn, file, 1);
8347 final (insn, file, 1);
8348 final_end_function ();
8349 }
8350 #endif /* TARGET_ABI_OSF */
8351 \f
8352 /* Debugging support. */
8353
8354 #include "gstab.h"
8355
8356 /* Count the number of sdb related labels are generated (to find block
8357 start and end boundaries). */
8358
8359 int sdb_label_count = 0;
8360
8361 /* Name of the file containing the current function. */
8362
8363 static const char *current_function_file = "";
8364
8365 /* Offsets to alpha virtual arg/local debugging pointers. */
8366
8367 long alpha_arg_offset;
8368 long alpha_auto_offset;
8369 \f
8370 /* Emit a new filename to a stream. */
8371
8372 void
8373 alpha_output_filename (FILE *stream, const char *name)
8374 {
8375 static int first_time = TRUE;
8376
8377 if (first_time)
8378 {
8379 first_time = FALSE;
8380 ++num_source_filenames;
8381 current_function_file = name;
8382 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8383 output_quoted_string (stream, name);
8384 fprintf (stream, "\n");
8385 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8386 fprintf (stream, "\t#@stabs\n");
8387 }
8388
8389 else if (write_symbols == DBX_DEBUG)
8390 /* dbxout.c will emit an appropriate .stabs directive. */
8391 return;
8392
8393 else if (name != current_function_file
8394 && strcmp (name, current_function_file) != 0)
8395 {
8396 if (inside_function && ! TARGET_GAS)
8397 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8398 else
8399 {
8400 ++num_source_filenames;
8401 current_function_file = name;
8402 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8403 }
8404
8405 output_quoted_string (stream, name);
8406 fprintf (stream, "\n");
8407 }
8408 }
8409 \f
8410 /* Structure to show the current status of registers and memory. */
8411
8412 struct shadow_summary
8413 {
8414 struct {
8415 unsigned int i : 31; /* Mask of int regs */
8416 unsigned int fp : 31; /* Mask of fp regs */
8417 unsigned int mem : 1; /* mem == imem | fpmem */
8418 } used, defd;
8419 };
8420
8421 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8422 to the summary structure. SET is nonzero if the insn is setting the
8423 object, otherwise zero. */
8424
8425 static void
8426 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8427 {
8428 const char *format_ptr;
8429 int i, j;
8430
8431 if (x == 0)
8432 return;
8433
8434 switch (GET_CODE (x))
8435 {
8436 /* ??? Note that this case would be incorrect if the Alpha had a
8437 ZERO_EXTRACT in SET_DEST. */
8438 case SET:
8439 summarize_insn (SET_SRC (x), sum, 0);
8440 summarize_insn (SET_DEST (x), sum, 1);
8441 break;
8442
8443 case CLOBBER:
8444 summarize_insn (XEXP (x, 0), sum, 1);
8445 break;
8446
8447 case USE:
8448 summarize_insn (XEXP (x, 0), sum, 0);
8449 break;
8450
8451 case ASM_OPERANDS:
8452 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8453 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8454 break;
8455
8456 case PARALLEL:
8457 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8458 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8459 break;
8460
8461 case SUBREG:
8462 summarize_insn (SUBREG_REG (x), sum, 0);
8463 break;
8464
8465 case REG:
8466 {
8467 int regno = REGNO (x);
8468 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8469
8470 if (regno == 31 || regno == 63)
8471 break;
8472
8473 if (set)
8474 {
8475 if (regno < 32)
8476 sum->defd.i |= mask;
8477 else
8478 sum->defd.fp |= mask;
8479 }
8480 else
8481 {
8482 if (regno < 32)
8483 sum->used.i |= mask;
8484 else
8485 sum->used.fp |= mask;
8486 }
8487 }
8488 break;
8489
8490 case MEM:
8491 if (set)
8492 sum->defd.mem = 1;
8493 else
8494 sum->used.mem = 1;
8495
8496 /* Find the regs used in memory address computation: */
8497 summarize_insn (XEXP (x, 0), sum, 0);
8498 break;
8499
8500 case CONST_INT: case CONST_DOUBLE:
8501 case SYMBOL_REF: case LABEL_REF: case CONST:
8502 case SCRATCH: case ASM_INPUT:
8503 break;
8504
8505 /* Handle common unary and binary ops for efficiency. */
8506 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8507 case MOD: case UDIV: case UMOD: case AND: case IOR:
8508 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8509 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8510 case NE: case EQ: case GE: case GT: case LE:
8511 case LT: case GEU: case GTU: case LEU: case LTU:
8512 summarize_insn (XEXP (x, 0), sum, 0);
8513 summarize_insn (XEXP (x, 1), sum, 0);
8514 break;
8515
8516 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8517 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8518 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8519 case SQRT: case FFS:
8520 summarize_insn (XEXP (x, 0), sum, 0);
8521 break;
8522
8523 default:
8524 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8525 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8526 switch (format_ptr[i])
8527 {
8528 case 'e':
8529 summarize_insn (XEXP (x, i), sum, 0);
8530 break;
8531
8532 case 'E':
8533 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8534 summarize_insn (XVECEXP (x, i, j), sum, 0);
8535 break;
8536
8537 case 'i':
8538 break;
8539
8540 default:
8541 gcc_unreachable ();
8542 }
8543 }
8544 }
8545
8546 /* Ensure a sufficient number of `trapb' insns are in the code when
8547 the user requests code with a trap precision of functions or
8548 instructions.
8549
8550 In naive mode, when the user requests a trap-precision of
8551 "instruction", a trapb is needed after every instruction that may
8552 generate a trap. This ensures that the code is resumption safe but
8553 it is also slow.
8554
8555 When optimizations are turned on, we delay issuing a trapb as long
8556 as possible. In this context, a trap shadow is the sequence of
8557 instructions that starts with a (potentially) trap generating
8558 instruction and extends to the next trapb or call_pal instruction
8559 (but GCC never generates call_pal by itself). We can delay (and
8560 therefore sometimes omit) a trapb subject to the following
8561 conditions:
8562
8563 (a) On entry to the trap shadow, if any Alpha register or memory
8564 location contains a value that is used as an operand value by some
8565 instruction in the trap shadow (live on entry), then no instruction
8566 in the trap shadow may modify the register or memory location.
8567
8568 (b) Within the trap shadow, the computation of the base register
8569 for a memory load or store instruction may not involve using the
8570 result of an instruction that might generate an UNPREDICTABLE
8571 result.
8572
8573 (c) Within the trap shadow, no register may be used more than once
8574 as a destination register. (This is to make life easier for the
8575 trap-handler.)
8576
8577 (d) The trap shadow may not include any branch instructions. */
8578
8579 static void
8580 alpha_handle_trap_shadows (void)
8581 {
8582 struct shadow_summary shadow;
8583 int trap_pending, exception_nesting;
8584 rtx i, n;
8585
8586 trap_pending = 0;
8587 exception_nesting = 0;
8588 shadow.used.i = 0;
8589 shadow.used.fp = 0;
8590 shadow.used.mem = 0;
8591 shadow.defd = shadow.used;
8592
8593 for (i = get_insns (); i ; i = NEXT_INSN (i))
8594 {
8595 if (GET_CODE (i) == NOTE)
8596 {
8597 switch (NOTE_KIND (i))
8598 {
8599 case NOTE_INSN_EH_REGION_BEG:
8600 exception_nesting++;
8601 if (trap_pending)
8602 goto close_shadow;
8603 break;
8604
8605 case NOTE_INSN_EH_REGION_END:
8606 exception_nesting--;
8607 if (trap_pending)
8608 goto close_shadow;
8609 break;
8610
8611 case NOTE_INSN_EPILOGUE_BEG:
8612 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8613 goto close_shadow;
8614 break;
8615 }
8616 }
8617 else if (trap_pending)
8618 {
8619 if (alpha_tp == ALPHA_TP_FUNC)
8620 {
8621 if (GET_CODE (i) == JUMP_INSN
8622 && GET_CODE (PATTERN (i)) == RETURN)
8623 goto close_shadow;
8624 }
8625 else if (alpha_tp == ALPHA_TP_INSN)
8626 {
8627 if (optimize > 0)
8628 {
8629 struct shadow_summary sum;
8630
8631 sum.used.i = 0;
8632 sum.used.fp = 0;
8633 sum.used.mem = 0;
8634 sum.defd = sum.used;
8635
8636 switch (GET_CODE (i))
8637 {
8638 case INSN:
8639 /* Annoyingly, get_attr_trap will die on these. */
8640 if (GET_CODE (PATTERN (i)) == USE
8641 || GET_CODE (PATTERN (i)) == CLOBBER)
8642 break;
8643
8644 summarize_insn (PATTERN (i), &sum, 0);
8645
8646 if ((sum.defd.i & shadow.defd.i)
8647 || (sum.defd.fp & shadow.defd.fp))
8648 {
8649 /* (c) would be violated */
8650 goto close_shadow;
8651 }
8652
8653 /* Combine shadow with summary of current insn: */
8654 shadow.used.i |= sum.used.i;
8655 shadow.used.fp |= sum.used.fp;
8656 shadow.used.mem |= sum.used.mem;
8657 shadow.defd.i |= sum.defd.i;
8658 shadow.defd.fp |= sum.defd.fp;
8659 shadow.defd.mem |= sum.defd.mem;
8660
8661 if ((sum.defd.i & shadow.used.i)
8662 || (sum.defd.fp & shadow.used.fp)
8663 || (sum.defd.mem & shadow.used.mem))
8664 {
8665 /* (a) would be violated (also takes care of (b)) */
8666 gcc_assert (get_attr_trap (i) != TRAP_YES
8667 || (!(sum.defd.i & sum.used.i)
8668 && !(sum.defd.fp & sum.used.fp)));
8669
8670 goto close_shadow;
8671 }
8672 break;
8673
8674 case JUMP_INSN:
8675 case CALL_INSN:
8676 case CODE_LABEL:
8677 goto close_shadow;
8678
8679 default:
8680 gcc_unreachable ();
8681 }
8682 }
8683 else
8684 {
8685 close_shadow:
8686 n = emit_insn_before (gen_trapb (), i);
8687 PUT_MODE (n, TImode);
8688 PUT_MODE (i, TImode);
8689 trap_pending = 0;
8690 shadow.used.i = 0;
8691 shadow.used.fp = 0;
8692 shadow.used.mem = 0;
8693 shadow.defd = shadow.used;
8694 }
8695 }
8696 }
8697
8698 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8699 && GET_CODE (i) == INSN
8700 && GET_CODE (PATTERN (i)) != USE
8701 && GET_CODE (PATTERN (i)) != CLOBBER
8702 && get_attr_trap (i) == TRAP_YES)
8703 {
8704 if (optimize && !trap_pending)
8705 summarize_insn (PATTERN (i), &shadow, 0);
8706 trap_pending = 1;
8707 }
8708 }
8709 }
8710 \f
8711 /* Alpha can only issue instruction groups simultaneously if they are
8712 suitably aligned. This is very processor-specific. */
8713 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8714 that are marked "fake". These instructions do not exist on that target,
8715 but it is possible to see these insns with deranged combinations of
8716 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8717 choose a result at random. */
8718
8719 enum alphaev4_pipe {
8720 EV4_STOP = 0,
8721 EV4_IB0 = 1,
8722 EV4_IB1 = 2,
8723 EV4_IBX = 4
8724 };
8725
8726 enum alphaev5_pipe {
8727 EV5_STOP = 0,
8728 EV5_NONE = 1,
8729 EV5_E01 = 2,
8730 EV5_E0 = 4,
8731 EV5_E1 = 8,
8732 EV5_FAM = 16,
8733 EV5_FA = 32,
8734 EV5_FM = 64
8735 };
8736
8737 static enum alphaev4_pipe
8738 alphaev4_insn_pipe (rtx insn)
8739 {
8740 if (recog_memoized (insn) < 0)
8741 return EV4_STOP;
8742 if (get_attr_length (insn) != 4)
8743 return EV4_STOP;
8744
8745 switch (get_attr_type (insn))
8746 {
8747 case TYPE_ILD:
8748 case TYPE_LDSYM:
8749 case TYPE_FLD:
8750 case TYPE_LD_L:
8751 return EV4_IBX;
8752
8753 case TYPE_IADD:
8754 case TYPE_ILOG:
8755 case TYPE_ICMOV:
8756 case TYPE_ICMP:
8757 case TYPE_FST:
8758 case TYPE_SHIFT:
8759 case TYPE_IMUL:
8760 case TYPE_FBR:
8761 case TYPE_MVI: /* fake */
8762 return EV4_IB0;
8763
8764 case TYPE_IST:
8765 case TYPE_MISC:
8766 case TYPE_IBR:
8767 case TYPE_JSR:
8768 case TYPE_CALLPAL:
8769 case TYPE_FCPYS:
8770 case TYPE_FCMOV:
8771 case TYPE_FADD:
8772 case TYPE_FDIV:
8773 case TYPE_FMUL:
8774 case TYPE_ST_C:
8775 case TYPE_MB:
8776 case TYPE_FSQRT: /* fake */
8777 case TYPE_FTOI: /* fake */
8778 case TYPE_ITOF: /* fake */
8779 return EV4_IB1;
8780
8781 default:
8782 gcc_unreachable ();
8783 }
8784 }
8785
8786 static enum alphaev5_pipe
8787 alphaev5_insn_pipe (rtx insn)
8788 {
8789 if (recog_memoized (insn) < 0)
8790 return EV5_STOP;
8791 if (get_attr_length (insn) != 4)
8792 return EV5_STOP;
8793
8794 switch (get_attr_type (insn))
8795 {
8796 case TYPE_ILD:
8797 case TYPE_FLD:
8798 case TYPE_LDSYM:
8799 case TYPE_IADD:
8800 case TYPE_ILOG:
8801 case TYPE_ICMOV:
8802 case TYPE_ICMP:
8803 return EV5_E01;
8804
8805 case TYPE_IST:
8806 case TYPE_FST:
8807 case TYPE_SHIFT:
8808 case TYPE_IMUL:
8809 case TYPE_MISC:
8810 case TYPE_MVI:
8811 case TYPE_LD_L:
8812 case TYPE_ST_C:
8813 case TYPE_MB:
8814 case TYPE_FTOI: /* fake */
8815 case TYPE_ITOF: /* fake */
8816 return EV5_E0;
8817
8818 case TYPE_IBR:
8819 case TYPE_JSR:
8820 case TYPE_CALLPAL:
8821 return EV5_E1;
8822
8823 case TYPE_FCPYS:
8824 return EV5_FAM;
8825
8826 case TYPE_FBR:
8827 case TYPE_FCMOV:
8828 case TYPE_FADD:
8829 case TYPE_FDIV:
8830 case TYPE_FSQRT: /* fake */
8831 return EV5_FA;
8832
8833 case TYPE_FMUL:
8834 return EV5_FM;
8835
8836 default:
8837 gcc_unreachable ();
8838 }
8839 }
8840
8841 /* IN_USE is a mask of the slots currently filled within the insn group.
8842 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8843 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8844
8845 LEN is, of course, the length of the group in bytes. */
8846
8847 static rtx
8848 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8849 {
8850 int len, in_use;
8851
8852 len = in_use = 0;
8853
8854 if (! INSN_P (insn)
8855 || GET_CODE (PATTERN (insn)) == CLOBBER
8856 || GET_CODE (PATTERN (insn)) == USE)
8857 goto next_and_done;
8858
8859 while (1)
8860 {
8861 enum alphaev4_pipe pipe;
8862
8863 pipe = alphaev4_insn_pipe (insn);
8864 switch (pipe)
8865 {
8866 case EV4_STOP:
8867 /* Force complex instructions to start new groups. */
8868 if (in_use)
8869 goto done;
8870
8871 /* If this is a completely unrecognized insn, it's an asm.
8872 We don't know how long it is, so record length as -1 to
8873 signal a needed realignment. */
8874 if (recog_memoized (insn) < 0)
8875 len = -1;
8876 else
8877 len = get_attr_length (insn);
8878 goto next_and_done;
8879
8880 case EV4_IBX:
8881 if (in_use & EV4_IB0)
8882 {
8883 if (in_use & EV4_IB1)
8884 goto done;
8885 in_use |= EV4_IB1;
8886 }
8887 else
8888 in_use |= EV4_IB0 | EV4_IBX;
8889 break;
8890
8891 case EV4_IB0:
8892 if (in_use & EV4_IB0)
8893 {
8894 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8895 goto done;
8896 in_use |= EV4_IB1;
8897 }
8898 in_use |= EV4_IB0;
8899 break;
8900
8901 case EV4_IB1:
8902 if (in_use & EV4_IB1)
8903 goto done;
8904 in_use |= EV4_IB1;
8905 break;
8906
8907 default:
8908 gcc_unreachable ();
8909 }
8910 len += 4;
8911
8912 /* Haifa doesn't do well scheduling branches. */
8913 if (GET_CODE (insn) == JUMP_INSN)
8914 goto next_and_done;
8915
8916 next:
8917 insn = next_nonnote_insn (insn);
8918
8919 if (!insn || ! INSN_P (insn))
8920 goto done;
8921
8922 /* Let Haifa tell us where it thinks insn group boundaries are. */
8923 if (GET_MODE (insn) == TImode)
8924 goto done;
8925
8926 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8927 goto next;
8928 }
8929
8930 next_and_done:
8931 insn = next_nonnote_insn (insn);
8932
8933 done:
8934 *plen = len;
8935 *pin_use = in_use;
8936 return insn;
8937 }
8938
8939 /* IN_USE is a mask of the slots currently filled within the insn group.
8940 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8941 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8942
8943 LEN is, of course, the length of the group in bytes. */
8944
8945 static rtx
8946 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8947 {
8948 int len, in_use;
8949
8950 len = in_use = 0;
8951
8952 if (! INSN_P (insn)
8953 || GET_CODE (PATTERN (insn)) == CLOBBER
8954 || GET_CODE (PATTERN (insn)) == USE)
8955 goto next_and_done;
8956
8957 while (1)
8958 {
8959 enum alphaev5_pipe pipe;
8960
8961 pipe = alphaev5_insn_pipe (insn);
8962 switch (pipe)
8963 {
8964 case EV5_STOP:
8965 /* Force complex instructions to start new groups. */
8966 if (in_use)
8967 goto done;
8968
8969 /* If this is a completely unrecognized insn, it's an asm.
8970 We don't know how long it is, so record length as -1 to
8971 signal a needed realignment. */
8972 if (recog_memoized (insn) < 0)
8973 len = -1;
8974 else
8975 len = get_attr_length (insn);
8976 goto next_and_done;
8977
8978 /* ??? Most of the places below, we would like to assert never
8979 happen, as it would indicate an error either in Haifa, or
8980 in the scheduling description. Unfortunately, Haifa never
8981 schedules the last instruction of the BB, so we don't have
8982 an accurate TI bit to go off. */
8983 case EV5_E01:
8984 if (in_use & EV5_E0)
8985 {
8986 if (in_use & EV5_E1)
8987 goto done;
8988 in_use |= EV5_E1;
8989 }
8990 else
8991 in_use |= EV5_E0 | EV5_E01;
8992 break;
8993
8994 case EV5_E0:
8995 if (in_use & EV5_E0)
8996 {
8997 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8998 goto done;
8999 in_use |= EV5_E1;
9000 }
9001 in_use |= EV5_E0;
9002 break;
9003
9004 case EV5_E1:
9005 if (in_use & EV5_E1)
9006 goto done;
9007 in_use |= EV5_E1;
9008 break;
9009
9010 case EV5_FAM:
9011 if (in_use & EV5_FA)
9012 {
9013 if (in_use & EV5_FM)
9014 goto done;
9015 in_use |= EV5_FM;
9016 }
9017 else
9018 in_use |= EV5_FA | EV5_FAM;
9019 break;
9020
9021 case EV5_FA:
9022 if (in_use & EV5_FA)
9023 goto done;
9024 in_use |= EV5_FA;
9025 break;
9026
9027 case EV5_FM:
9028 if (in_use & EV5_FM)
9029 goto done;
9030 in_use |= EV5_FM;
9031 break;
9032
9033 case EV5_NONE:
9034 break;
9035
9036 default:
9037 gcc_unreachable ();
9038 }
9039 len += 4;
9040
9041 /* Haifa doesn't do well scheduling branches. */
9042 /* ??? If this is predicted not-taken, slotting continues, except
9043 that no more IBR, FBR, or JSR insns may be slotted. */
9044 if (GET_CODE (insn) == JUMP_INSN)
9045 goto next_and_done;
9046
9047 next:
9048 insn = next_nonnote_insn (insn);
9049
9050 if (!insn || ! INSN_P (insn))
9051 goto done;
9052
9053 /* Let Haifa tell us where it thinks insn group boundaries are. */
9054 if (GET_MODE (insn) == TImode)
9055 goto done;
9056
9057 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9058 goto next;
9059 }
9060
9061 next_and_done:
9062 insn = next_nonnote_insn (insn);
9063
9064 done:
9065 *plen = len;
9066 *pin_use = in_use;
9067 return insn;
9068 }
9069
9070 static rtx
9071 alphaev4_next_nop (int *pin_use)
9072 {
9073 int in_use = *pin_use;
9074 rtx nop;
9075
9076 if (!(in_use & EV4_IB0))
9077 {
9078 in_use |= EV4_IB0;
9079 nop = gen_nop ();
9080 }
9081 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9082 {
9083 in_use |= EV4_IB1;
9084 nop = gen_nop ();
9085 }
9086 else if (TARGET_FP && !(in_use & EV4_IB1))
9087 {
9088 in_use |= EV4_IB1;
9089 nop = gen_fnop ();
9090 }
9091 else
9092 nop = gen_unop ();
9093
9094 *pin_use = in_use;
9095 return nop;
9096 }
9097
9098 static rtx
9099 alphaev5_next_nop (int *pin_use)
9100 {
9101 int in_use = *pin_use;
9102 rtx nop;
9103
9104 if (!(in_use & EV5_E1))
9105 {
9106 in_use |= EV5_E1;
9107 nop = gen_nop ();
9108 }
9109 else if (TARGET_FP && !(in_use & EV5_FA))
9110 {
9111 in_use |= EV5_FA;
9112 nop = gen_fnop ();
9113 }
9114 else if (TARGET_FP && !(in_use & EV5_FM))
9115 {
9116 in_use |= EV5_FM;
9117 nop = gen_fnop ();
9118 }
9119 else
9120 nop = gen_unop ();
9121
9122 *pin_use = in_use;
9123 return nop;
9124 }
9125
9126 /* The instruction group alignment main loop. */
9127
9128 static void
9129 alpha_align_insns (unsigned int max_align,
9130 rtx (*next_group) (rtx, int *, int *),
9131 rtx (*next_nop) (int *))
9132 {
9133 /* ALIGN is the known alignment for the insn group. */
9134 unsigned int align;
9135 /* OFS is the offset of the current insn in the insn group. */
9136 int ofs;
9137 int prev_in_use, in_use, len, ldgp;
9138 rtx i, next;
9139
9140 /* Let shorten branches care for assigning alignments to code labels. */
9141 shorten_branches (get_insns ());
9142
9143 if (align_functions < 4)
9144 align = 4;
9145 else if ((unsigned int) align_functions < max_align)
9146 align = align_functions;
9147 else
9148 align = max_align;
9149
9150 ofs = prev_in_use = 0;
9151 i = get_insns ();
9152 if (GET_CODE (i) == NOTE)
9153 i = next_nonnote_insn (i);
9154
9155 ldgp = alpha_function_needs_gp ? 8 : 0;
9156
9157 while (i)
9158 {
9159 next = (*next_group) (i, &in_use, &len);
9160
9161 /* When we see a label, resync alignment etc. */
9162 if (GET_CODE (i) == CODE_LABEL)
9163 {
9164 unsigned int new_align = 1 << label_to_alignment (i);
9165
9166 if (new_align >= align)
9167 {
9168 align = new_align < max_align ? new_align : max_align;
9169 ofs = 0;
9170 }
9171
9172 else if (ofs & (new_align-1))
9173 ofs = (ofs | (new_align-1)) + 1;
9174 gcc_assert (!len);
9175 }
9176
9177 /* Handle complex instructions special. */
9178 else if (in_use == 0)
9179 {
9180 /* Asms will have length < 0. This is a signal that we have
9181 lost alignment knowledge. Assume, however, that the asm
9182 will not mis-align instructions. */
9183 if (len < 0)
9184 {
9185 ofs = 0;
9186 align = 4;
9187 len = 0;
9188 }
9189 }
9190
9191 /* If the known alignment is smaller than the recognized insn group,
9192 realign the output. */
9193 else if ((int) align < len)
9194 {
9195 unsigned int new_log_align = len > 8 ? 4 : 3;
9196 rtx prev, where;
9197
9198 where = prev = prev_nonnote_insn (i);
9199 if (!where || GET_CODE (where) != CODE_LABEL)
9200 where = i;
9201
9202 /* Can't realign between a call and its gp reload. */
9203 if (! (TARGET_EXPLICIT_RELOCS
9204 && prev && GET_CODE (prev) == CALL_INSN))
9205 {
9206 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9207 align = 1 << new_log_align;
9208 ofs = 0;
9209 }
9210 }
9211
9212 /* We may not insert padding inside the initial ldgp sequence. */
9213 else if (ldgp > 0)
9214 ldgp -= len;
9215
9216 /* If the group won't fit in the same INT16 as the previous,
9217 we need to add padding to keep the group together. Rather
9218 than simply leaving the insn filling to the assembler, we
9219 can make use of the knowledge of what sorts of instructions
9220 were issued in the previous group to make sure that all of
9221 the added nops are really free. */
9222 else if (ofs + len > (int) align)
9223 {
9224 int nop_count = (align - ofs) / 4;
9225 rtx where;
9226
9227 /* Insert nops before labels, branches, and calls to truly merge
9228 the execution of the nops with the previous instruction group. */
9229 where = prev_nonnote_insn (i);
9230 if (where)
9231 {
9232 if (GET_CODE (where) == CODE_LABEL)
9233 {
9234 rtx where2 = prev_nonnote_insn (where);
9235 if (where2 && GET_CODE (where2) == JUMP_INSN)
9236 where = where2;
9237 }
9238 else if (GET_CODE (where) == INSN)
9239 where = i;
9240 }
9241 else
9242 where = i;
9243
9244 do
9245 emit_insn_before ((*next_nop)(&prev_in_use), where);
9246 while (--nop_count);
9247 ofs = 0;
9248 }
9249
9250 ofs = (ofs + len) & (align - 1);
9251 prev_in_use = in_use;
9252 i = next;
9253 }
9254 }
9255 \f
9256 /* Machine dependent reorg pass. */
9257
9258 static void
9259 alpha_reorg (void)
9260 {
9261 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9262 alpha_handle_trap_shadows ();
9263
9264 /* Due to the number of extra trapb insns, don't bother fixing up
9265 alignment when trap precision is instruction. Moreover, we can
9266 only do our job when sched2 is run. */
9267 if (optimize && !optimize_size
9268 && alpha_tp != ALPHA_TP_INSN
9269 && flag_schedule_insns_after_reload)
9270 {
9271 if (alpha_tune == PROCESSOR_EV4)
9272 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9273 else if (alpha_tune == PROCESSOR_EV5)
9274 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9275 }
9276 }
9277 \f
9278 #if !TARGET_ABI_UNICOSMK
9279
9280 #ifdef HAVE_STAMP_H
9281 #include <stamp.h>
9282 #endif
9283
9284 static void
9285 alpha_file_start (void)
9286 {
9287 #ifdef OBJECT_FORMAT_ELF
9288 /* If emitting dwarf2 debug information, we cannot generate a .file
9289 directive to start the file, as it will conflict with dwarf2out
9290 file numbers. So it's only useful when emitting mdebug output. */
9291 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9292 #endif
9293
9294 default_file_start ();
9295 #ifdef MS_STAMP
9296 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9297 #endif
9298
9299 fputs ("\t.set noreorder\n", asm_out_file);
9300 fputs ("\t.set volatile\n", asm_out_file);
9301 if (!TARGET_ABI_OPEN_VMS)
9302 fputs ("\t.set noat\n", asm_out_file);
9303 if (TARGET_EXPLICIT_RELOCS)
9304 fputs ("\t.set nomacro\n", asm_out_file);
9305 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9306 {
9307 const char *arch;
9308
9309 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9310 arch = "ev6";
9311 else if (TARGET_MAX)
9312 arch = "pca56";
9313 else if (TARGET_BWX)
9314 arch = "ev56";
9315 else if (alpha_cpu == PROCESSOR_EV5)
9316 arch = "ev5";
9317 else
9318 arch = "ev4";
9319
9320 fprintf (asm_out_file, "\t.arch %s\n", arch);
9321 }
9322 }
9323 #endif
9324
9325 #ifdef OBJECT_FORMAT_ELF
9326 /* Since we don't have a .dynbss section, we should not allow global
9327 relocations in the .rodata section. */
9328
9329 static int
9330 alpha_elf_reloc_rw_mask (void)
9331 {
9332 return flag_pic ? 3 : 2;
9333 }
9334
9335 /* Return a section for X. The only special thing we do here is to
9336 honor small data. */
9337
9338 static section *
9339 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9340 unsigned HOST_WIDE_INT align)
9341 {
9342 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9343 /* ??? Consider using mergeable sdata sections. */
9344 return sdata_section;
9345 else
9346 return default_elf_select_rtx_section (mode, x, align);
9347 }
9348
9349 static unsigned int
9350 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9351 {
9352 unsigned int flags = 0;
9353
9354 if (strcmp (name, ".sdata") == 0
9355 || strncmp (name, ".sdata.", 7) == 0
9356 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9357 || strcmp (name, ".sbss") == 0
9358 || strncmp (name, ".sbss.", 6) == 0
9359 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9360 flags = SECTION_SMALL;
9361
9362 flags |= default_section_type_flags (decl, name, reloc);
9363 return flags;
9364 }
9365 #endif /* OBJECT_FORMAT_ELF */
9366 \f
9367 /* Structure to collect function names for final output in link section. */
9368 /* Note that items marked with GTY can't be ifdef'ed out. */
9369
9370 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9371 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9372
9373 struct alpha_links GTY(())
9374 {
9375 int num;
9376 rtx linkage;
9377 enum links_kind lkind;
9378 enum reloc_kind rkind;
9379 };
9380
9381 struct alpha_funcs GTY(())
9382 {
9383 int num;
9384 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9385 links;
9386 };
9387
9388 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9389 splay_tree alpha_links_tree;
9390 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9391 splay_tree alpha_funcs_tree;
9392
9393 static GTY(()) int alpha_funcs_num;
9394
9395 #if TARGET_ABI_OPEN_VMS
9396
9397 /* Return the VMS argument type corresponding to MODE. */
9398
9399 enum avms_arg_type
9400 alpha_arg_type (enum machine_mode mode)
9401 {
9402 switch (mode)
9403 {
9404 case SFmode:
9405 return TARGET_FLOAT_VAX ? FF : FS;
9406 case DFmode:
9407 return TARGET_FLOAT_VAX ? FD : FT;
9408 default:
9409 return I64;
9410 }
9411 }
9412
9413 /* Return an rtx for an integer representing the VMS Argument Information
9414 register value. */
9415
9416 rtx
9417 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9418 {
9419 unsigned HOST_WIDE_INT regval = cum.num_args;
9420 int i;
9421
9422 for (i = 0; i < 6; i++)
9423 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9424
9425 return GEN_INT (regval);
9426 }
9427 \f
9428 /* Make (or fake) .linkage entry for function call.
9429
9430 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9431
9432 Return an SYMBOL_REF rtx for the linkage. */
9433
9434 rtx
9435 alpha_need_linkage (const char *name, int is_local)
9436 {
9437 splay_tree_node node;
9438 struct alpha_links *al;
9439
9440 if (name[0] == '*')
9441 name++;
9442
9443 if (is_local)
9444 {
9445 struct alpha_funcs *cfaf;
9446
9447 if (!alpha_funcs_tree)
9448 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9449 splay_tree_compare_pointers);
9450
9451 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9452
9453 cfaf->links = 0;
9454 cfaf->num = ++alpha_funcs_num;
9455
9456 splay_tree_insert (alpha_funcs_tree,
9457 (splay_tree_key) current_function_decl,
9458 (splay_tree_value) cfaf);
9459 }
9460
9461 if (alpha_links_tree)
9462 {
9463 /* Is this name already defined? */
9464
9465 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9466 if (node)
9467 {
9468 al = (struct alpha_links *) node->value;
9469 if (is_local)
9470 {
9471 /* Defined here but external assumed. */
9472 if (al->lkind == KIND_EXTERN)
9473 al->lkind = KIND_LOCAL;
9474 }
9475 else
9476 {
9477 /* Used here but unused assumed. */
9478 if (al->lkind == KIND_UNUSED)
9479 al->lkind = KIND_LOCAL;
9480 }
9481 return al->linkage;
9482 }
9483 }
9484 else
9485 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9486
9487 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9488 name = ggc_strdup (name);
9489
9490 /* Assume external if no definition. */
9491 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9492
9493 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9494 get_identifier (name);
9495
9496 /* Construct a SYMBOL_REF for us to call. */
9497 {
9498 size_t name_len = strlen (name);
9499 char *linksym = alloca (name_len + 6);
9500 linksym[0] = '$';
9501 memcpy (linksym + 1, name, name_len);
9502 memcpy (linksym + 1 + name_len, "..lk", 5);
9503 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9504 ggc_alloc_string (linksym, name_len + 5));
9505 }
9506
9507 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9508 (splay_tree_value) al);
9509
9510 return al->linkage;
9511 }
9512
9513 rtx
9514 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9515 {
9516 splay_tree_node cfunnode;
9517 struct alpha_funcs *cfaf;
9518 struct alpha_links *al;
9519 const char *name = XSTR (linkage, 0);
9520
9521 cfaf = (struct alpha_funcs *) 0;
9522 al = (struct alpha_links *) 0;
9523
9524 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9525 cfaf = (struct alpha_funcs *) cfunnode->value;
9526
9527 if (cfaf->links)
9528 {
9529 splay_tree_node lnode;
9530
9531 /* Is this name already defined? */
9532
9533 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9534 if (lnode)
9535 al = (struct alpha_links *) lnode->value;
9536 }
9537 else
9538 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9539
9540 if (!al)
9541 {
9542 size_t name_len;
9543 size_t buflen;
9544 char buf [512];
9545 char *linksym;
9546 splay_tree_node node = 0;
9547 struct alpha_links *anl;
9548
9549 if (name[0] == '*')
9550 name++;
9551
9552 name_len = strlen (name);
9553
9554 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9555 al->num = cfaf->num;
9556
9557 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9558 if (node)
9559 {
9560 anl = (struct alpha_links *) node->value;
9561 al->lkind = anl->lkind;
9562 }
9563
9564 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9565 buflen = strlen (buf);
9566 linksym = alloca (buflen + 1);
9567 memcpy (linksym, buf, buflen + 1);
9568
9569 al->linkage = gen_rtx_SYMBOL_REF
9570 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9571
9572 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9573 (splay_tree_value) al);
9574 }
9575
9576 if (rflag)
9577 al->rkind = KIND_CODEADDR;
9578 else
9579 al->rkind = KIND_LINKAGE;
9580
9581 if (lflag)
9582 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9583 else
9584 return al->linkage;
9585 }
9586
9587 static int
9588 alpha_write_one_linkage (splay_tree_node node, void *data)
9589 {
9590 const char *const name = (const char *) node->key;
9591 struct alpha_links *link = (struct alpha_links *) node->value;
9592 FILE *stream = (FILE *) data;
9593
9594 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9595 if (link->rkind == KIND_CODEADDR)
9596 {
9597 if (link->lkind == KIND_LOCAL)
9598 {
9599 /* Local and used */
9600 fprintf (stream, "\t.quad %s..en\n", name);
9601 }
9602 else
9603 {
9604 /* External and used, request code address. */
9605 fprintf (stream, "\t.code_address %s\n", name);
9606 }
9607 }
9608 else
9609 {
9610 if (link->lkind == KIND_LOCAL)
9611 {
9612 /* Local and used, build linkage pair. */
9613 fprintf (stream, "\t.quad %s..en\n", name);
9614 fprintf (stream, "\t.quad %s\n", name);
9615 }
9616 else
9617 {
9618 /* External and used, request linkage pair. */
9619 fprintf (stream, "\t.linkage %s\n", name);
9620 }
9621 }
9622
9623 return 0;
9624 }
9625
9626 static void
9627 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9628 {
9629 splay_tree_node node;
9630 struct alpha_funcs *func;
9631
9632 fprintf (stream, "\t.link\n");
9633 fprintf (stream, "\t.align 3\n");
9634 in_section = NULL;
9635
9636 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9637 func = (struct alpha_funcs *) node->value;
9638
9639 fputs ("\t.name ", stream);
9640 assemble_name (stream, funname);
9641 fputs ("..na\n", stream);
9642 ASM_OUTPUT_LABEL (stream, funname);
9643 fprintf (stream, "\t.pdesc ");
9644 assemble_name (stream, funname);
9645 fprintf (stream, "..en,%s\n",
9646 alpha_procedure_type == PT_STACK ? "stack"
9647 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9648
9649 if (func->links)
9650 {
9651 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9652 /* splay_tree_delete (func->links); */
9653 }
9654 }
9655
9656 /* Given a decl, a section name, and whether the decl initializer
9657 has relocs, choose attributes for the section. */
9658
9659 #define SECTION_VMS_OVERLAY SECTION_FORGET
9660 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9661 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9662
9663 static unsigned int
9664 vms_section_type_flags (tree decl, const char *name, int reloc)
9665 {
9666 unsigned int flags = default_section_type_flags (decl, name, reloc);
9667
9668 if (decl && DECL_ATTRIBUTES (decl)
9669 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9670 flags |= SECTION_VMS_OVERLAY;
9671 if (decl && DECL_ATTRIBUTES (decl)
9672 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9673 flags |= SECTION_VMS_GLOBAL;
9674 if (decl && DECL_ATTRIBUTES (decl)
9675 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9676 flags |= SECTION_VMS_INITIALIZE;
9677
9678 return flags;
9679 }
9680
9681 /* Switch to an arbitrary section NAME with attributes as specified
9682 by FLAGS. ALIGN specifies any known alignment requirements for
9683 the section; 0 if the default should be used. */
9684
9685 static void
9686 vms_asm_named_section (const char *name, unsigned int flags,
9687 tree decl ATTRIBUTE_UNUSED)
9688 {
9689 fputc ('\n', asm_out_file);
9690 fprintf (asm_out_file, ".section\t%s", name);
9691
9692 if (flags & SECTION_VMS_OVERLAY)
9693 fprintf (asm_out_file, ",OVR");
9694 if (flags & SECTION_VMS_GLOBAL)
9695 fprintf (asm_out_file, ",GBL");
9696 if (flags & SECTION_VMS_INITIALIZE)
9697 fprintf (asm_out_file, ",NOMOD");
9698 if (flags & SECTION_DEBUG)
9699 fprintf (asm_out_file, ",NOWRT");
9700
9701 fputc ('\n', asm_out_file);
9702 }
9703
9704 /* Record an element in the table of global constructors. SYMBOL is
9705 a SYMBOL_REF of the function to be called; PRIORITY is a number
9706 between 0 and MAX_INIT_PRIORITY.
9707
9708 Differs from default_ctors_section_asm_out_constructor in that the
9709 width of the .ctors entry is always 64 bits, rather than the 32 bits
9710 used by a normal pointer. */
9711
9712 static void
9713 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9714 {
9715 switch_to_section (ctors_section);
9716 assemble_align (BITS_PER_WORD);
9717 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9718 }
9719
9720 static void
9721 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9722 {
9723 switch_to_section (dtors_section);
9724 assemble_align (BITS_PER_WORD);
9725 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9726 }
9727 #else
9728
9729 rtx
9730 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9731 int is_local ATTRIBUTE_UNUSED)
9732 {
9733 return NULL_RTX;
9734 }
9735
9736 rtx
9737 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9738 tree cfundecl ATTRIBUTE_UNUSED,
9739 int lflag ATTRIBUTE_UNUSED,
9740 int rflag ATTRIBUTE_UNUSED)
9741 {
9742 return NULL_RTX;
9743 }
9744
9745 #endif /* TARGET_ABI_OPEN_VMS */
9746 \f
9747 #if TARGET_ABI_UNICOSMK
9748
9749 /* This evaluates to true if we do not know how to pass TYPE solely in
9750 registers. This is the case for all arguments that do not fit in two
9751 registers. */
9752
9753 static bool
9754 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9755 {
9756 if (type == NULL)
9757 return false;
9758
9759 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9760 return true;
9761 if (TREE_ADDRESSABLE (type))
9762 return true;
9763
9764 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9765 }
9766
9767 /* Define the offset between two registers, one to be eliminated, and the
9768 other its replacement, at the start of a routine. */
9769
9770 int
9771 unicosmk_initial_elimination_offset (int from, int to)
9772 {
9773 int fixed_size;
9774
9775 fixed_size = alpha_sa_size();
9776 if (fixed_size != 0)
9777 fixed_size += 48;
9778
9779 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9780 return -fixed_size;
9781 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9782 return 0;
9783 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9784 return (ALPHA_ROUND (current_function_outgoing_args_size)
9785 + ALPHA_ROUND (get_frame_size()));
9786 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9787 return (ALPHA_ROUND (fixed_size)
9788 + ALPHA_ROUND (get_frame_size()
9789 + current_function_outgoing_args_size));
9790 else
9791 gcc_unreachable ();
9792 }
9793
9794 /* Output the module name for .ident and .end directives. We have to strip
9795 directories and add make sure that the module name starts with a letter
9796 or '$'. */
9797
9798 static void
9799 unicosmk_output_module_name (FILE *file)
9800 {
9801 const char *name = lbasename (main_input_filename);
9802 unsigned len = strlen (name);
9803 char *clean_name = alloca (len + 2);
9804 char *ptr = clean_name;
9805
9806 /* CAM only accepts module names that start with a letter or '$'. We
9807 prefix the module name with a '$' if necessary. */
9808
9809 if (!ISALPHA (*name))
9810 *ptr++ = '$';
9811 memcpy (ptr, name, len + 1);
9812 clean_symbol_name (clean_name);
9813 fputs (clean_name, file);
9814 }
9815
9816 /* Output the definition of a common variable. */
9817
9818 void
9819 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9820 {
9821 tree name_tree;
9822 printf ("T3E__: common %s\n", name);
9823
9824 in_section = NULL;
9825 fputs("\t.endp\n\n\t.psect ", file);
9826 assemble_name(file, name);
9827 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9828 fprintf(file, "\t.byte\t0:%d\n", size);
9829
9830 /* Mark the symbol as defined in this module. */
9831 name_tree = get_identifier (name);
9832 TREE_ASM_WRITTEN (name_tree) = 1;
9833 }
9834
9835 #define SECTION_PUBLIC SECTION_MACH_DEP
9836 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9837 static int current_section_align;
9838
9839 /* A get_unnamed_section callback for switching to the text section. */
9840
9841 static void
9842 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9843 {
9844 static int count = 0;
9845 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9846 }
9847
9848 /* A get_unnamed_section callback for switching to the data section. */
9849
9850 static void
9851 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9852 {
9853 static int count = 1;
9854 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9855 }
9856
9857 /* Implement TARGET_ASM_INIT_SECTIONS.
9858
9859 The Cray assembler is really weird with respect to sections. It has only
9860 named sections and you can't reopen a section once it has been closed.
9861 This means that we have to generate unique names whenever we want to
9862 reenter the text or the data section. */
9863
9864 static void
9865 unicosmk_init_sections (void)
9866 {
9867 text_section = get_unnamed_section (SECTION_CODE,
9868 unicosmk_output_text_section_asm_op,
9869 NULL);
9870 data_section = get_unnamed_section (SECTION_WRITE,
9871 unicosmk_output_data_section_asm_op,
9872 NULL);
9873 readonly_data_section = data_section;
9874 }
9875
9876 static unsigned int
9877 unicosmk_section_type_flags (tree decl, const char *name,
9878 int reloc ATTRIBUTE_UNUSED)
9879 {
9880 unsigned int flags = default_section_type_flags (decl, name, reloc);
9881
9882 if (!decl)
9883 return flags;
9884
9885 if (TREE_CODE (decl) == FUNCTION_DECL)
9886 {
9887 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9888 if (align_functions_log > current_section_align)
9889 current_section_align = align_functions_log;
9890
9891 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9892 flags |= SECTION_MAIN;
9893 }
9894 else
9895 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9896
9897 if (TREE_PUBLIC (decl))
9898 flags |= SECTION_PUBLIC;
9899
9900 return flags;
9901 }
9902
9903 /* Generate a section name for decl and associate it with the
9904 declaration. */
9905
9906 static void
9907 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9908 {
9909 const char *name;
9910 int len;
9911
9912 gcc_assert (decl);
9913
9914 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9915 name = default_strip_name_encoding (name);
9916 len = strlen (name);
9917
9918 if (TREE_CODE (decl) == FUNCTION_DECL)
9919 {
9920 char *string;
9921
9922 /* It is essential that we prefix the section name here because
9923 otherwise the section names generated for constructors and
9924 destructors confuse collect2. */
9925
9926 string = alloca (len + 6);
9927 sprintf (string, "code@%s", name);
9928 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9929 }
9930 else if (TREE_PUBLIC (decl))
9931 DECL_SECTION_NAME (decl) = build_string (len, name);
9932 else
9933 {
9934 char *string;
9935
9936 string = alloca (len + 6);
9937 sprintf (string, "data@%s", name);
9938 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9939 }
9940 }
9941
9942 /* Switch to an arbitrary section NAME with attributes as specified
9943 by FLAGS. ALIGN specifies any known alignment requirements for
9944 the section; 0 if the default should be used. */
9945
9946 static void
9947 unicosmk_asm_named_section (const char *name, unsigned int flags,
9948 tree decl ATTRIBUTE_UNUSED)
9949 {
9950 const char *kind;
9951
9952 /* Close the previous section. */
9953
9954 fputs ("\t.endp\n\n", asm_out_file);
9955
9956 /* Find out what kind of section we are opening. */
9957
9958 if (flags & SECTION_MAIN)
9959 fputs ("\t.start\tmain\n", asm_out_file);
9960
9961 if (flags & SECTION_CODE)
9962 kind = "code";
9963 else if (flags & SECTION_PUBLIC)
9964 kind = "common";
9965 else
9966 kind = "data";
9967
9968 if (current_section_align != 0)
9969 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9970 current_section_align, kind);
9971 else
9972 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9973 }
9974
9975 static void
9976 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9977 {
9978 if (DECL_P (decl)
9979 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9980 unicosmk_unique_section (decl, 0);
9981 }
9982
9983 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9984 in code sections because .align fill unused space with zeroes. */
9985
9986 void
9987 unicosmk_output_align (FILE *file, int align)
9988 {
9989 if (inside_function)
9990 fprintf (file, "\tgcc@code@align\t%d\n", align);
9991 else
9992 fprintf (file, "\t.align\t%d\n", align);
9993 }
9994
9995 /* Add a case vector to the current function's list of deferred case
9996 vectors. Case vectors have to be put into a separate section because CAM
9997 does not allow data definitions in code sections. */
9998
9999 void
10000 unicosmk_defer_case_vector (rtx lab, rtx vec)
10001 {
10002 struct machine_function *machine = cfun->machine;
10003
10004 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10005 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10006 machine->addr_list);
10007 }
10008
10009 /* Output a case vector. */
10010
10011 static void
10012 unicosmk_output_addr_vec (FILE *file, rtx vec)
10013 {
10014 rtx lab = XEXP (vec, 0);
10015 rtx body = XEXP (vec, 1);
10016 int vlen = XVECLEN (body, 0);
10017 int idx;
10018
10019 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10020
10021 for (idx = 0; idx < vlen; idx++)
10022 {
10023 ASM_OUTPUT_ADDR_VEC_ELT
10024 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10025 }
10026 }
10027
10028 /* Output current function's deferred case vectors. */
10029
10030 static void
10031 unicosmk_output_deferred_case_vectors (FILE *file)
10032 {
10033 struct machine_function *machine = cfun->machine;
10034 rtx t;
10035
10036 if (machine->addr_list == NULL_RTX)
10037 return;
10038
10039 switch_to_section (data_section);
10040 for (t = machine->addr_list; t; t = XEXP (t, 1))
10041 unicosmk_output_addr_vec (file, XEXP (t, 0));
10042 }
10043
10044 /* Generate the name of the SSIB section for the current function. */
10045
10046 #define SSIB_PREFIX "__SSIB_"
10047 #define SSIB_PREFIX_LEN 7
10048
10049 static const char *
10050 unicosmk_ssib_name (void)
10051 {
10052 /* This is ok since CAM won't be able to deal with names longer than that
10053 anyway. */
10054
10055 static char name[256];
10056
10057 rtx x;
10058 const char *fnname;
10059 int len;
10060
10061 x = DECL_RTL (cfun->decl);
10062 gcc_assert (GET_CODE (x) == MEM);
10063 x = XEXP (x, 0);
10064 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10065 fnname = XSTR (x, 0);
10066
10067 len = strlen (fnname);
10068 if (len + SSIB_PREFIX_LEN > 255)
10069 len = 255 - SSIB_PREFIX_LEN;
10070
10071 strcpy (name, SSIB_PREFIX);
10072 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10073 name[len + SSIB_PREFIX_LEN] = 0;
10074
10075 return name;
10076 }
10077
10078 /* Set up the dynamic subprogram information block (DSIB) and update the
10079 frame pointer register ($15) for subroutines which have a frame. If the
10080 subroutine doesn't have a frame, simply increment $15. */
10081
10082 static void
10083 unicosmk_gen_dsib (unsigned long *imaskP)
10084 {
10085 if (alpha_procedure_type == PT_STACK)
10086 {
10087 const char *ssib_name;
10088 rtx mem;
10089
10090 /* Allocate 64 bytes for the DSIB. */
10091
10092 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10093 GEN_INT (-64))));
10094 emit_insn (gen_blockage ());
10095
10096 /* Save the return address. */
10097
10098 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10099 set_mem_alias_set (mem, alpha_sr_alias_set);
10100 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10101 (*imaskP) &= ~(1UL << REG_RA);
10102
10103 /* Save the old frame pointer. */
10104
10105 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10106 set_mem_alias_set (mem, alpha_sr_alias_set);
10107 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10108 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10109
10110 emit_insn (gen_blockage ());
10111
10112 /* Store the SSIB pointer. */
10113
10114 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10115 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10116 set_mem_alias_set (mem, alpha_sr_alias_set);
10117
10118 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10119 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10120 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10121
10122 /* Save the CIW index. */
10123
10124 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10125 set_mem_alias_set (mem, alpha_sr_alias_set);
10126 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10127
10128 emit_insn (gen_blockage ());
10129
10130 /* Set the new frame pointer. */
10131
10132 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10133 stack_pointer_rtx, GEN_INT (64))));
10134
10135 }
10136 else
10137 {
10138 /* Increment the frame pointer register to indicate that we do not
10139 have a frame. */
10140
10141 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10142 hard_frame_pointer_rtx, const1_rtx)));
10143 }
10144 }
10145
10146 /* Output the static subroutine information block for the current
10147 function. */
10148
10149 static void
10150 unicosmk_output_ssib (FILE *file, const char *fnname)
10151 {
10152 int len;
10153 int i;
10154 rtx x;
10155 rtx ciw;
10156 struct machine_function *machine = cfun->machine;
10157
10158 in_section = NULL;
10159 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10160 unicosmk_ssib_name ());
10161
10162 /* Some required stuff and the function name length. */
10163
10164 len = strlen (fnname);
10165 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10166
10167 /* Saved registers
10168 ??? We don't do that yet. */
10169
10170 fputs ("\t.quad\t0\n", file);
10171
10172 /* Function address. */
10173
10174 fputs ("\t.quad\t", file);
10175 assemble_name (file, fnname);
10176 putc ('\n', file);
10177
10178 fputs ("\t.quad\t0\n", file);
10179 fputs ("\t.quad\t0\n", file);
10180
10181 /* Function name.
10182 ??? We do it the same way Cray CC does it but this could be
10183 simplified. */
10184
10185 for( i = 0; i < len; i++ )
10186 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10187 if( (len % 8) == 0 )
10188 fputs ("\t.quad\t0\n", file);
10189 else
10190 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10191
10192 /* All call information words used in the function. */
10193
10194 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10195 {
10196 ciw = XEXP (x, 0);
10197 #if HOST_BITS_PER_WIDE_INT == 32
10198 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10199 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10200 #else
10201 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10202 #endif
10203 }
10204 }
10205
10206 /* Add a call information word (CIW) to the list of the current function's
10207 CIWs and return its index.
10208
10209 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10210
10211 rtx
10212 unicosmk_add_call_info_word (rtx x)
10213 {
10214 rtx node;
10215 struct machine_function *machine = cfun->machine;
10216
10217 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10218 if (machine->first_ciw == NULL_RTX)
10219 machine->first_ciw = node;
10220 else
10221 XEXP (machine->last_ciw, 1) = node;
10222
10223 machine->last_ciw = node;
10224 ++machine->ciw_count;
10225
10226 return GEN_INT (machine->ciw_count
10227 + strlen (current_function_name ())/8 + 5);
10228 }
10229
10230 /* The Cray assembler doesn't accept extern declarations for symbols which
10231 are defined in the same file. We have to keep track of all global
10232 symbols which are referenced and/or defined in a source file and output
10233 extern declarations for those which are referenced but not defined at
10234 the end of file. */
10235
10236 /* List of identifiers for which an extern declaration might have to be
10237 emitted. */
10238 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10239
10240 struct unicosmk_extern_list
10241 {
10242 struct unicosmk_extern_list *next;
10243 const char *name;
10244 };
10245
10246 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10247
10248 /* Output extern declarations which are required for every asm file. */
10249
10250 static void
10251 unicosmk_output_default_externs (FILE *file)
10252 {
10253 static const char *const externs[] =
10254 { "__T3E_MISMATCH" };
10255
10256 int i;
10257 int n;
10258
10259 n = ARRAY_SIZE (externs);
10260
10261 for (i = 0; i < n; i++)
10262 fprintf (file, "\t.extern\t%s\n", externs[i]);
10263 }
10264
10265 /* Output extern declarations for global symbols which are have been
10266 referenced but not defined. */
10267
10268 static void
10269 unicosmk_output_externs (FILE *file)
10270 {
10271 struct unicosmk_extern_list *p;
10272 const char *real_name;
10273 int len;
10274 tree name_tree;
10275
10276 len = strlen (user_label_prefix);
10277 for (p = unicosmk_extern_head; p != 0; p = p->next)
10278 {
10279 /* We have to strip the encoding and possibly remove user_label_prefix
10280 from the identifier in order to handle -fleading-underscore and
10281 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10282 real_name = default_strip_name_encoding (p->name);
10283 if (len && p->name[0] == '*'
10284 && !memcmp (real_name, user_label_prefix, len))
10285 real_name += len;
10286
10287 name_tree = get_identifier (real_name);
10288 if (! TREE_ASM_WRITTEN (name_tree))
10289 {
10290 TREE_ASM_WRITTEN (name_tree) = 1;
10291 fputs ("\t.extern\t", file);
10292 assemble_name (file, p->name);
10293 putc ('\n', file);
10294 }
10295 }
10296 }
10297
10298 /* Record an extern. */
10299
10300 void
10301 unicosmk_add_extern (const char *name)
10302 {
10303 struct unicosmk_extern_list *p;
10304
10305 p = (struct unicosmk_extern_list *)
10306 xmalloc (sizeof (struct unicosmk_extern_list));
10307 p->next = unicosmk_extern_head;
10308 p->name = name;
10309 unicosmk_extern_head = p;
10310 }
10311
10312 /* The Cray assembler generates incorrect code if identifiers which
10313 conflict with register names are used as instruction operands. We have
10314 to replace such identifiers with DEX expressions. */
10315
10316 /* Structure to collect identifiers which have been replaced by DEX
10317 expressions. */
10318 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10319
10320 struct unicosmk_dex {
10321 struct unicosmk_dex *next;
10322 const char *name;
10323 };
10324
10325 /* List of identifiers which have been replaced by DEX expressions. The DEX
10326 number is determined by the position in the list. */
10327
10328 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10329
10330 /* The number of elements in the DEX list. */
10331
10332 static int unicosmk_dex_count = 0;
10333
10334 /* Check if NAME must be replaced by a DEX expression. */
10335
10336 static int
10337 unicosmk_special_name (const char *name)
10338 {
10339 if (name[0] == '*')
10340 ++name;
10341
10342 if (name[0] == '$')
10343 ++name;
10344
10345 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10346 return 0;
10347
10348 switch (name[1])
10349 {
10350 case '1': case '2':
10351 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10352
10353 case '3':
10354 return (name[2] == '\0'
10355 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10356
10357 default:
10358 return (ISDIGIT (name[1]) && name[2] == '\0');
10359 }
10360 }
10361
10362 /* Return the DEX number if X must be replaced by a DEX expression and 0
10363 otherwise. */
10364
10365 static int
10366 unicosmk_need_dex (rtx x)
10367 {
10368 struct unicosmk_dex *dex;
10369 const char *name;
10370 int i;
10371
10372 if (GET_CODE (x) != SYMBOL_REF)
10373 return 0;
10374
10375 name = XSTR (x,0);
10376 if (! unicosmk_special_name (name))
10377 return 0;
10378
10379 i = unicosmk_dex_count;
10380 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10381 {
10382 if (! strcmp (name, dex->name))
10383 return i;
10384 --i;
10385 }
10386
10387 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10388 dex->name = name;
10389 dex->next = unicosmk_dex_list;
10390 unicosmk_dex_list = dex;
10391
10392 ++unicosmk_dex_count;
10393 return unicosmk_dex_count;
10394 }
10395
10396 /* Output the DEX definitions for this file. */
10397
10398 static void
10399 unicosmk_output_dex (FILE *file)
10400 {
10401 struct unicosmk_dex *dex;
10402 int i;
10403
10404 if (unicosmk_dex_list == NULL)
10405 return;
10406
10407 fprintf (file, "\t.dexstart\n");
10408
10409 i = unicosmk_dex_count;
10410 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10411 {
10412 fprintf (file, "\tDEX (%d) = ", i);
10413 assemble_name (file, dex->name);
10414 putc ('\n', file);
10415 --i;
10416 }
10417
10418 fprintf (file, "\t.dexend\n");
10419 }
10420
10421 /* Output text that to appear at the beginning of an assembler file. */
10422
10423 static void
10424 unicosmk_file_start (void)
10425 {
10426 int i;
10427
10428 fputs ("\t.ident\t", asm_out_file);
10429 unicosmk_output_module_name (asm_out_file);
10430 fputs ("\n\n", asm_out_file);
10431
10432 /* The Unicos/Mk assembler uses different register names. Instead of trying
10433 to support them, we simply use micro definitions. */
10434
10435 /* CAM has different register names: rN for the integer register N and fN
10436 for the floating-point register N. Instead of trying to use these in
10437 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10438 register. */
10439
10440 for (i = 0; i < 32; ++i)
10441 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10442
10443 for (i = 0; i < 32; ++i)
10444 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10445
10446 putc ('\n', asm_out_file);
10447
10448 /* The .align directive fill unused space with zeroes which does not work
10449 in code sections. We define the macro 'gcc@code@align' which uses nops
10450 instead. Note that it assumes that code sections always have the
10451 biggest possible alignment since . refers to the current offset from
10452 the beginning of the section. */
10453
10454 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10455 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10456 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10457 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10458 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10459 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10460 fputs ("\t.endr\n", asm_out_file);
10461 fputs ("\t.endif\n", asm_out_file);
10462 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10463
10464 /* Output extern declarations which should always be visible. */
10465 unicosmk_output_default_externs (asm_out_file);
10466
10467 /* Open a dummy section. We always need to be inside a section for the
10468 section-switching code to work correctly.
10469 ??? This should be a module id or something like that. I still have to
10470 figure out what the rules for those are. */
10471 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10472 }
10473
10474 /* Output text to appear at the end of an assembler file. This includes all
10475 pending extern declarations and DEX expressions. */
10476
10477 static void
10478 unicosmk_file_end (void)
10479 {
10480 fputs ("\t.endp\n\n", asm_out_file);
10481
10482 /* Output all pending externs. */
10483
10484 unicosmk_output_externs (asm_out_file);
10485
10486 /* Output dex definitions used for functions whose names conflict with
10487 register names. */
10488
10489 unicosmk_output_dex (asm_out_file);
10490
10491 fputs ("\t.end\t", asm_out_file);
10492 unicosmk_output_module_name (asm_out_file);
10493 putc ('\n', asm_out_file);
10494 }
10495
10496 #else
10497
10498 static void
10499 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10500 {}
10501
10502 static void
10503 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10504 {}
10505
10506 static void
10507 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10508 const char * fnname ATTRIBUTE_UNUSED)
10509 {}
10510
10511 rtx
10512 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10513 {
10514 return NULL_RTX;
10515 }
10516
10517 static int
10518 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10519 {
10520 return 0;
10521 }
10522
10523 #endif /* TARGET_ABI_UNICOSMK */
10524
10525 static void
10526 alpha_init_libfuncs (void)
10527 {
10528 if (TARGET_ABI_UNICOSMK)
10529 {
10530 /* Prevent gcc from generating calls to __divsi3. */
10531 set_optab_libfunc (sdiv_optab, SImode, 0);
10532 set_optab_libfunc (udiv_optab, SImode, 0);
10533
10534 /* Use the functions provided by the system library
10535 for DImode integer division. */
10536 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10537 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10538 }
10539 else if (TARGET_ABI_OPEN_VMS)
10540 {
10541 /* Use the VMS runtime library functions for division and
10542 remainder. */
10543 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10544 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10545 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10546 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10547 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10548 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10549 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10550 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10551 }
10552 }
10553
10554 \f
10555 /* Initialize the GCC target structure. */
10556 #if TARGET_ABI_OPEN_VMS
10557 # undef TARGET_ATTRIBUTE_TABLE
10558 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10559 # undef TARGET_SECTION_TYPE_FLAGS
10560 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10561 #endif
10562
10563 #undef TARGET_IN_SMALL_DATA_P
10564 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10565
10566 #if TARGET_ABI_UNICOSMK
10567 # undef TARGET_INSERT_ATTRIBUTES
10568 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10569 # undef TARGET_SECTION_TYPE_FLAGS
10570 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10571 # undef TARGET_ASM_UNIQUE_SECTION
10572 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10573 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10574 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10575 # undef TARGET_ASM_GLOBALIZE_LABEL
10576 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10577 # undef TARGET_MUST_PASS_IN_STACK
10578 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10579 #endif
10580
10581 #undef TARGET_ASM_ALIGNED_HI_OP
10582 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10583 #undef TARGET_ASM_ALIGNED_DI_OP
10584 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10585
10586 /* Default unaligned ops are provided for ELF systems. To get unaligned
10587 data for non-ELF systems, we have to turn off auto alignment. */
10588 #ifndef OBJECT_FORMAT_ELF
10589 #undef TARGET_ASM_UNALIGNED_HI_OP
10590 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10591 #undef TARGET_ASM_UNALIGNED_SI_OP
10592 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10593 #undef TARGET_ASM_UNALIGNED_DI_OP
10594 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10595 #endif
10596
10597 #ifdef OBJECT_FORMAT_ELF
10598 #undef TARGET_ASM_RELOC_RW_MASK
10599 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10600 #undef TARGET_ASM_SELECT_RTX_SECTION
10601 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10602 #undef TARGET_SECTION_TYPE_FLAGS
10603 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10604 #endif
10605
10606 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10607 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10608
10609 #undef TARGET_INIT_LIBFUNCS
10610 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10611
10612 #if TARGET_ABI_UNICOSMK
10613 #undef TARGET_ASM_FILE_START
10614 #define TARGET_ASM_FILE_START unicosmk_file_start
10615 #undef TARGET_ASM_FILE_END
10616 #define TARGET_ASM_FILE_END unicosmk_file_end
10617 #else
10618 #undef TARGET_ASM_FILE_START
10619 #define TARGET_ASM_FILE_START alpha_file_start
10620 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10621 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10622 #endif
10623
10624 #undef TARGET_SCHED_ADJUST_COST
10625 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10626 #undef TARGET_SCHED_ISSUE_RATE
10627 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10628 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10629 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10630 alpha_multipass_dfa_lookahead
10631
10632 #undef TARGET_HAVE_TLS
10633 #define TARGET_HAVE_TLS HAVE_AS_TLS
10634
10635 #undef TARGET_INIT_BUILTINS
10636 #define TARGET_INIT_BUILTINS alpha_init_builtins
10637 #undef TARGET_EXPAND_BUILTIN
10638 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10639 #undef TARGET_FOLD_BUILTIN
10640 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10641
10642 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10643 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10644 #undef TARGET_CANNOT_COPY_INSN_P
10645 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10646 #undef TARGET_CANNOT_FORCE_CONST_MEM
10647 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10648
10649 #if TARGET_ABI_OSF
10650 #undef TARGET_ASM_OUTPUT_MI_THUNK
10651 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10652 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10653 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10654 #undef TARGET_STDARG_OPTIMIZE_HOOK
10655 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10656 #endif
10657
10658 #undef TARGET_RTX_COSTS
10659 #define TARGET_RTX_COSTS alpha_rtx_costs
10660 #undef TARGET_ADDRESS_COST
10661 #define TARGET_ADDRESS_COST hook_int_rtx_0
10662
10663 #undef TARGET_MACHINE_DEPENDENT_REORG
10664 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10665
10666 #undef TARGET_PROMOTE_FUNCTION_ARGS
10667 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10668 #undef TARGET_PROMOTE_FUNCTION_RETURN
10669 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10670 #undef TARGET_PROMOTE_PROTOTYPES
10671 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10672 #undef TARGET_RETURN_IN_MEMORY
10673 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10674 #undef TARGET_PASS_BY_REFERENCE
10675 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10676 #undef TARGET_SETUP_INCOMING_VARARGS
10677 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10678 #undef TARGET_STRICT_ARGUMENT_NAMING
10679 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10680 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10681 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10682 #undef TARGET_SPLIT_COMPLEX_ARG
10683 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10684 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10685 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10686 #undef TARGET_ARG_PARTIAL_BYTES
10687 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10688
10689 #undef TARGET_SECONDARY_RELOAD
10690 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10691
10692 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10693 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10694 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10695 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10696
10697 #undef TARGET_BUILD_BUILTIN_VA_LIST
10698 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10699
10700 /* The Alpha architecture does not require sequential consistency. See
10701 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10702 for an example of how it can be violated in practice. */
10703 #undef TARGET_RELAXED_ORDERING
10704 #define TARGET_RELAXED_ORDERING true
10705
10706 #undef TARGET_DEFAULT_TARGET_FLAGS
10707 #define TARGET_DEFAULT_TARGET_FLAGS \
10708 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10709 #undef TARGET_HANDLE_OPTION
10710 #define TARGET_HANDLE_OPTION alpha_handle_option
10711
10712 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10713 #undef TARGET_MANGLE_TYPE
10714 #define TARGET_MANGLE_TYPE alpha_mangle_type
10715 #endif
10716
10717 struct gcc_target targetm = TARGET_INITIALIZER;
10718
10719 \f
10720 #include "gt-alpha.h"