1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
59
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
62
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
65
66 static const char * const alpha_cpu_name[] =
67 {
68 "ev4", "ev5", "ev6"
69 };
70
71 /* Specify how accurate floating-point traps need to be. */
72
73 enum alpha_trap_precision alpha_tp;
74
75 /* Specify the floating-point rounding mode. */
76
77 enum alpha_fp_rounding_mode alpha_fprm;
78
79 /* Specify which things cause traps. */
80
81 enum alpha_fp_trap_mode alpha_fptm;
82
83 /* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
85
86 struct alpha_compare alpha_compare;
87
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
90
91 static int inside_function = FALSE;
92
93 /* The number of cycles of latency we should assume on memory reads. */
94
95 int alpha_memory_latency = 3;
96
97 /* Whether the function needs the GP. */
98
99 static int alpha_function_needs_gp;
100
101 /* The alias set for prologue/epilogue register save/restore. */
102
103 static GTY(()) alias_set_type alpha_sr_alias_set;
104
105 /* The assembler name of the current function. */
106
107 static const char *alpha_fnname;
108
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
112
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
119
120 /* Costs of various operations on the different architectures. */
121
122 struct alpha_rtx_cost_data
123 {
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
133 };
134
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 {
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
147 },
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
158 },
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
169 },
170 };
171
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
176
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 {
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
188 };
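
/* For reference (added for illustration): COSTS_N_INSNS (N) expands to
   N * 4 cost units, so an entry such as COSTS_N_INSNS (1) + 1 charges a
   quarter of an insn more than a plain single-insn operation.  One such
   insn still looks cheap, but two of them already exceed COSTS_N_INSNS (2),
   which is how the fractional tuning described above steers the size
   heuristics toward using at most one of the cheaper insns.  */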
189
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS crtl->args.info.num_args
193 #else
194 #define NUM_ARGS crtl->args.info
195 #endif
196
197 #define REG_PV 27
198 #define REG_RA 26
199
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
206 #endif
207
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
212 \f
213 /* Implement TARGET_HANDLE_OPTION. */
214
215 static bool
216 alpha_handle_option (size_t code, const char *arg, int value)
217 {
218 switch (code)
219 {
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
224
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
229
230 case OPT_mtls_size_:
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
234 }
235
236 return true;
237 }
238
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
241
242 static const char *
243 alpha_mangle_type (const_tree type)
244 {
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
248
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
251 }
252 #endif
253
254 /* Parse target option strings. */
255
256 void
257 override_options (void)
258 {
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
263 } cpu_table[] = {
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { 0, 0, 0 }
279 };
280
281 int i;
282
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
285 {
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
289 }
290
291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
297
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
300
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
306
307 if (TARGET_IEEE)
308 {
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
311 else
312 {
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
315 }
316 }
317
318 if (TARGET_IEEE_WITH_INEXACT)
319 {
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
322 else
323 {
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
326 }
327 }
328
329 if (alpha_tp_string)
330 {
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
337 else
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
339 }
340
341 if (alpha_fprm_string)
342 {
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string,"d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
351 else
352 error ("bad value %qs for -mfp-rounding-mode switch",
353 alpha_fprm_string);
354 }
355
356 if (alpha_fptm_string)
357 {
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
368 }
369
370 if (alpha_cpu_string)
371 {
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
374 {
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
378 break;
379 }
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
382 }
383
384 if (alpha_tune_string)
385 {
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
388 {
389 alpha_tune = cpu_table [i].processor;
390 break;
391 }
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
394 }
395
396 /* Do some sanity checks on the above options. */
397
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
399 {
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
402 }
403
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
406 {
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
409 }
410
411 if (alpha_cpu == PROCESSOR_EV6)
412 {
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps, which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
417 }
418
419 if (TARGET_FLOAT_VAX)
420 {
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
422 {
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
425 }
426 if (alpha_fptm == ALPHA_FPTM_SUI)
427 {
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
430 }
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
434 }
435
436 {
437 char *end;
438 int lat;
439
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
442
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
445 ;
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
449 {
450 static int const cache_latency[][4] =
451 {
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
455 };
456
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
459 {
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
462 lat = 3;
463 }
464 else
465 lat = cache_latency[alpha_tune][lat-1];
466 }
467 else if (! strcmp (alpha_mlat_string, "main"))
468 {
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
472 }
473 else
474 {
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
476 lat = 3;
477 }
478
479 alpha_memory_latency = lat;
480 }
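
  /* Worked example, added for illustration: with the latency table above,
     -mmemory-latency=L2 on an ev6 target yields alpha_memory_latency = 12,
     while the plain numeric form, e.g. -mmemory-latency=5, is used as-is.  */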
481
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
485
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
491
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
496 {
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
501 }
502 if (align_functions <= 0)
503 align_functions = 16;
504
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
507
508 /* Register variables and functions with the garbage collector. */
509
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
512
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
515 {
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
519 }
520
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524 #endif
525
526 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
527 can be optimized to ap = __builtin_next_arg (0). */
528 if (TARGET_ABI_UNICOSMK)
529 targetm.expand_builtin_va_start = NULL;
530 }
531 \f
532 /* Return 1 if VALUE is a mask in which every byte is either 0x00 or 0xff. */
533
534 int
535 zap_mask (HOST_WIDE_INT value)
536 {
537 int i;
538
539 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
540 i++, value >>= 8)
541 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
542 return 0;
543
544 return 1;
545 }
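
/* Illustrative examples (added; assuming a 64-bit HOST_WIDE_INT):

     zap_mask (0xffffffff00000000)  => 1   every byte is 0x00 or 0xff
     zap_mask (0x00000000000000ff)  => 1   a single 0xff byte
     zap_mask (0x0000000000001234)  => 0   0x34 is neither 0x00 nor 0xff

   which is exactly the set of masks a ZAP/ZAPNOT byte mask can describe.  */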
546
547 /* Return true if OP is valid for a particular TLS relocation.
548 We are already guaranteed that OP is a CONST. */
549
550 int
551 tls_symbolic_operand_1 (rtx op, int size, int unspec)
552 {
553 op = XEXP (op, 0);
554
555 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
556 return 0;
557 op = XVECEXP (op, 0, 0);
558
559 if (GET_CODE (op) != SYMBOL_REF)
560 return 0;
561
562 switch (SYMBOL_REF_TLS_MODEL (op))
563 {
564 case TLS_MODEL_LOCAL_DYNAMIC:
565 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
566 case TLS_MODEL_INITIAL_EXEC:
567 return unspec == UNSPEC_TPREL && size == 64;
568 case TLS_MODEL_LOCAL_EXEC:
569 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
570 default:
571 gcc_unreachable ();
572 }
573 }
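
/* For illustration: the operand checked above is expected to look like

     (const (unspec [(symbol_ref "foo")] UNSPEC_DTPREL))

   or the UNSPEC_TPREL equivalent; the switch then verifies that the
   symbol's TLS model is consistent with the relocation size requested.  */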
574
575 /* Used by aligned_memory_operand and unaligned_memory_operand to
576 resolve what reload is going to do with OP if it's a register. */
577
578 rtx
579 resolve_reload_operand (rtx op)
580 {
581 if (reload_in_progress)
582 {
583 rtx tmp = op;
584 if (GET_CODE (tmp) == SUBREG)
585 tmp = SUBREG_REG (tmp);
586 if (GET_CODE (tmp) == REG
587 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
588 {
589 op = reg_equiv_memory_loc[REGNO (tmp)];
590 if (op == 0)
591 return 0;
592 }
593 }
594 return op;
595 }
596
597 /* The set of scalar modes supported differs from the default check-what-c-supports
598 version in that sometimes TFmode is available even when long double
599 indicates only DFmode. On unicosmk, we have the situation that HImode
600 doesn't map to any C type, but of course we still support that. */
601
602 static bool
603 alpha_scalar_mode_supported_p (enum machine_mode mode)
604 {
605 switch (mode)
606 {
607 case QImode:
608 case HImode:
609 case SImode:
610 case DImode:
611 case TImode: /* via optabs.c */
612 return true;
613
614 case SFmode:
615 case DFmode:
616 return true;
617
618 case TFmode:
619 return TARGET_HAS_XFLOATING_LIBS;
620
621 default:
622 return false;
623 }
624 }
625
626 /* Alpha implements a couple of integer vector mode operations when
627 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
628 which allows the vectorizer to operate on e.g. move instructions,
629 or when expand_vector_operations can do something useful. */
630
631 static bool
632 alpha_vector_mode_supported_p (enum machine_mode mode)
633 {
634 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
635 }
636
637 /* Return 1 if this function can directly return via $26. */
638
639 int
640 direct_return (void)
641 {
642 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
643 && reload_completed
644 && alpha_sa_size () == 0
645 && get_frame_size () == 0
646 && crtl->outgoing_args_size == 0
647 && crtl->args.pretend_args_size == 0);
648 }
649
650 /* Return the ADDR_VEC associated with a tablejump insn. */
651
652 rtx
653 alpha_tablejump_addr_vec (rtx insn)
654 {
655 rtx tmp;
656
657 tmp = JUMP_LABEL (insn);
658 if (!tmp)
659 return NULL_RTX;
660 tmp = NEXT_INSN (tmp);
661 if (!tmp)
662 return NULL_RTX;
663 if (GET_CODE (tmp) == JUMP_INSN
664 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
665 return PATTERN (tmp);
666 return NULL_RTX;
667 }
668
669 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
670
671 rtx
672 alpha_tablejump_best_label (rtx insn)
673 {
674 rtx jump_table = alpha_tablejump_addr_vec (insn);
675 rtx best_label = NULL_RTX;
676
677 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
678 there for edge frequency counts from profile data. */
679
680 if (jump_table)
681 {
682 int n_labels = XVECLEN (jump_table, 1);
683 int best_count = -1;
684 int i, j;
685
686 for (i = 0; i < n_labels; i++)
687 {
688 int count = 1;
689
690 for (j = i + 1; j < n_labels; j++)
691 if (XEXP (XVECEXP (jump_table, 1, i), 0)
692 == XEXP (XVECEXP (jump_table, 1, j), 0))
693 count++;
694
695 if (count > best_count)
696 best_count = count, best_label = XVECEXP (jump_table, 1, i);
697 }
698 }
699
700 return best_label ? best_label : const0_rtx;
701 }
702
703 /* Return the TLS model to use for SYMBOL. */
704
705 static enum tls_model
706 tls_symbolic_operand_type (rtx symbol)
707 {
708 enum tls_model model;
709
710 if (GET_CODE (symbol) != SYMBOL_REF)
711 return 0;
712 model = SYMBOL_REF_TLS_MODEL (symbol);
713
714 /* Local-exec with a 64-bit size is the same code as initial-exec. */
715 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
716 model = TLS_MODEL_INITIAL_EXEC;
717
718 return model;
719 }
720 \f
721 /* Return true if the function DECL will share the same GP as any
722 function in the current unit of translation. */
723
724 static bool
725 decl_has_samegp (const_tree decl)
726 {
727 /* Functions that are not local can be overridden, and thus may
728 not share the same gp. */
729 if (!(*targetm.binds_local_p) (decl))
730 return false;
731
732 /* If -msmall-data is in effect, assume that there is only one GP
733 for the module, and so any local symbol has this property. We
734 need explicit relocations to be able to enforce this for symbols
735 not defined in this unit of translation, however. */
736 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
737 return true;
738
739 /* Functions that are not external are defined in this UoT. */
740 /* ??? Irritatingly, static functions not yet emitted are still
741 marked "external". Apply this to non-static functions only. */
742 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
743 }
744
745 /* Return true if EXP should be placed in the small data section. */
746
747 static bool
748 alpha_in_small_data_p (const_tree exp)
749 {
750 /* We want to merge strings, so we never consider them small data. */
751 if (TREE_CODE (exp) == STRING_CST)
752 return false;
753
754 /* Functions are never in the small data area. Duh. */
755 if (TREE_CODE (exp) == FUNCTION_DECL)
756 return false;
757
758 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
759 {
760 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
761 if (strcmp (section, ".sdata") == 0
762 || strcmp (section, ".sbss") == 0)
763 return true;
764 }
765 else
766 {
767 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
768
769 /* If this is an incomplete type with size 0, then we can't put it
770 in sdata because it might be too big when completed. */
771 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
772 return true;
773 }
774
775 return false;
776 }
777
778 #if TARGET_ABI_OPEN_VMS
779 static bool
780 alpha_linkage_symbol_p (const char *symname)
781 {
782 int symlen = strlen (symname);
783
784 if (symlen > 4)
785 return strcmp (&symname [symlen - 4], "..lk") == 0;
786
787 return false;
788 }
789
790 #define LINKAGE_SYMBOL_REF_P(X) \
791 ((GET_CODE (X) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (X, 0))) \
793 || (GET_CODE (X) == CONST \
794 && GET_CODE (XEXP (X, 0)) == PLUS \
795 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
796 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
797 #endif
798
799 /* legitimate_address_p recognizes an RTL expression that is a valid
800 memory address for an instruction. The MODE argument is the
801 machine mode for the MEM expression that wants to use this address.
802
803 For Alpha, we have either a constant address or the sum of a
804 register and a constant address, or just a register. For DImode,
805 any of those forms can be surrounded with an AND that clears the
806 low-order three bits; this is an "unaligned" access. */
807
808 bool
809 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
810 {
811 /* If this is an ldq_u type address, discard the outer AND. */
812 if (mode == DImode
813 && GET_CODE (x) == AND
814 && GET_CODE (XEXP (x, 1)) == CONST_INT
815 && INTVAL (XEXP (x, 1)) == -8)
816 x = XEXP (x, 0);
817
818 /* Discard non-paradoxical subregs. */
819 if (GET_CODE (x) == SUBREG
820 && (GET_MODE_SIZE (GET_MODE (x))
821 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
822 x = SUBREG_REG (x);
823
824 /* Unadorned general registers are valid. */
825 if (REG_P (x)
826 && (strict
827 ? STRICT_REG_OK_FOR_BASE_P (x)
828 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
829 return true;
830
831 /* Constant addresses (i.e. +/- 32k) are valid. */
832 if (CONSTANT_ADDRESS_P (x))
833 return true;
834
835 #if TARGET_ABI_OPEN_VMS
836 if (LINKAGE_SYMBOL_REF_P (x))
837 return true;
838 #endif
839
840 /* Register plus a small constant offset is valid. */
841 if (GET_CODE (x) == PLUS)
842 {
843 rtx ofs = XEXP (x, 1);
844 x = XEXP (x, 0);
845
846 /* Discard non-paradoxical subregs. */
847 if (GET_CODE (x) == SUBREG
848 && (GET_MODE_SIZE (GET_MODE (x))
849 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
850 x = SUBREG_REG (x);
851
852 if (REG_P (x))
853 {
854 if (! strict
855 && NONSTRICT_REG_OK_FP_BASE_P (x)
856 && GET_CODE (ofs) == CONST_INT)
857 return true;
858 if ((strict
859 ? STRICT_REG_OK_FOR_BASE_P (x)
860 : NONSTRICT_REG_OK_FOR_BASE_P (x))
861 && CONSTANT_ADDRESS_P (ofs))
862 return true;
863 }
864 }
865
866 /* If we're managing explicit relocations, LO_SUM is valid, as
867 are small data symbols. */
868 else if (TARGET_EXPLICIT_RELOCS)
869 {
870 if (small_symbolic_operand (x, Pmode))
871 return true;
872
873 if (GET_CODE (x) == LO_SUM)
874 {
875 rtx ofs = XEXP (x, 1);
876 x = XEXP (x, 0);
877
878 /* Discard non-paradoxical subregs. */
879 if (GET_CODE (x) == SUBREG
880 && (GET_MODE_SIZE (GET_MODE (x))
881 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
882 x = SUBREG_REG (x);
883
884 /* Must have a valid base register. */
885 if (! (REG_P (x)
886 && (strict
887 ? STRICT_REG_OK_FOR_BASE_P (x)
888 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
889 return false;
890
891 /* The symbol must be local. */
892 if (local_symbolic_operand (ofs, Pmode)
893 || dtp32_symbolic_operand (ofs, Pmode)
894 || tp32_symbolic_operand (ofs, Pmode))
895 return true;
896 }
897 }
898
899 return false;
900 }
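
/* Summary of the address forms accepted above, added for illustration:

     (reg)                               plain base register
     (plus (reg) (const_int))            base plus +/-32K displacement
     constant addresses                  anything CONSTANT_ADDRESS_P allows
     (and <base+disp> (const_int -8))    DImode only, ldq_u-style unaligned
     small-data symbols and (lo_sum (reg) <local symbol>)
                                         only with TARGET_EXPLICIT_RELOCS.  */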
901
902 /* Build the SYMBOL_REF for __tls_get_addr. */
903
904 static GTY(()) rtx tls_get_addr_libfunc;
905
906 static rtx
907 get_tls_get_addr (void)
908 {
909 if (!tls_get_addr_libfunc)
910 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
911 return tls_get_addr_libfunc;
912 }
913
914 /* Try machine-dependent ways of modifying an illegitimate address
915 to be legitimate. If we find one, return the new, valid address. */
916
917 rtx
918 alpha_legitimize_address (rtx x, rtx scratch,
919 enum machine_mode mode ATTRIBUTE_UNUSED)
920 {
921 HOST_WIDE_INT addend;
922
923 /* If the address is (plus reg const_int) and the CONST_INT is not a
924 valid offset, compute the high part of the constant and add it to
925 the register. Then our address is (plus temp low-part-const). */
926 if (GET_CODE (x) == PLUS
927 && GET_CODE (XEXP (x, 0)) == REG
928 && GET_CODE (XEXP (x, 1)) == CONST_INT
929 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
930 {
931 addend = INTVAL (XEXP (x, 1));
932 x = XEXP (x, 0);
933 goto split_addend;
934 }
935
936 /* If the address is (const (plus FOO const_int)), find the low-order
937 part of the CONST_INT. Then load FOO plus any high-order part of the
938 CONST_INT into a register. Our address is (plus reg low-part-const).
939 This is done to reduce the number of GOT entries. */
940 if (can_create_pseudo_p ()
941 && GET_CODE (x) == CONST
942 && GET_CODE (XEXP (x, 0)) == PLUS
943 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
944 {
945 addend = INTVAL (XEXP (XEXP (x, 0), 1));
946 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
947 goto split_addend;
948 }
949
950 /* If we have a (plus reg const), emit the load as in (2), then add
951 the two registers, and finally generate (plus reg low-part-const) as
952 our address. */
953 if (can_create_pseudo_p ()
954 && GET_CODE (x) == PLUS
955 && GET_CODE (XEXP (x, 0)) == REG
956 && GET_CODE (XEXP (x, 1)) == CONST
957 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
958 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
959 {
960 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
961 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
962 XEXP (XEXP (XEXP (x, 1), 0), 0),
963 NULL_RTX, 1, OPTAB_LIB_WIDEN);
964 goto split_addend;
965 }
966
967 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
968 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
969 {
970 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
971
972 switch (tls_symbolic_operand_type (x))
973 {
974 case TLS_MODEL_NONE:
975 break;
976
977 case TLS_MODEL_GLOBAL_DYNAMIC:
978 start_sequence ();
979
980 r0 = gen_rtx_REG (Pmode, 0);
981 r16 = gen_rtx_REG (Pmode, 16);
982 tga = get_tls_get_addr ();
983 dest = gen_reg_rtx (Pmode);
984 seq = GEN_INT (alpha_next_sequence_number++);
985
986 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
987 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
988 insn = emit_call_insn (insn);
989 RTL_CONST_CALL_P (insn) = 1;
990 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
991
992 insn = get_insns ();
993 end_sequence ();
994
995 emit_libcall_block (insn, dest, r0, x);
996 return dest;
997
998 case TLS_MODEL_LOCAL_DYNAMIC:
999 start_sequence ();
1000
1001 r0 = gen_rtx_REG (Pmode, 0);
1002 r16 = gen_rtx_REG (Pmode, 16);
1003 tga = get_tls_get_addr ();
1004 scratch = gen_reg_rtx (Pmode);
1005 seq = GEN_INT (alpha_next_sequence_number++);
1006
1007 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1008 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1009 insn = emit_call_insn (insn);
1010 RTL_CONST_CALL_P (insn) = 1;
1011 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1012
1013 insn = get_insns ();
1014 end_sequence ();
1015
1016 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1017 UNSPEC_TLSLDM_CALL);
1018 emit_libcall_block (insn, scratch, r0, eqv);
1019
1020 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1021 eqv = gen_rtx_CONST (Pmode, eqv);
1022
1023 if (alpha_tls_size == 64)
1024 {
1025 dest = gen_reg_rtx (Pmode);
1026 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1027 emit_insn (gen_adddi3 (dest, dest, scratch));
1028 return dest;
1029 }
1030 if (alpha_tls_size == 32)
1031 {
1032 insn = gen_rtx_HIGH (Pmode, eqv);
1033 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1034 scratch = gen_reg_rtx (Pmode);
1035 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1036 }
1037 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1038
1039 case TLS_MODEL_INITIAL_EXEC:
1040 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1041 eqv = gen_rtx_CONST (Pmode, eqv);
1042 tp = gen_reg_rtx (Pmode);
1043 scratch = gen_reg_rtx (Pmode);
1044 dest = gen_reg_rtx (Pmode);
1045
1046 emit_insn (gen_load_tp (tp));
1047 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1048 emit_insn (gen_adddi3 (dest, tp, scratch));
1049 return dest;
1050
1051 case TLS_MODEL_LOCAL_EXEC:
1052 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1053 eqv = gen_rtx_CONST (Pmode, eqv);
1054 tp = gen_reg_rtx (Pmode);
1055
1056 emit_insn (gen_load_tp (tp));
1057 if (alpha_tls_size == 32)
1058 {
1059 insn = gen_rtx_HIGH (Pmode, eqv);
1060 insn = gen_rtx_PLUS (Pmode, tp, insn);
1061 tp = gen_reg_rtx (Pmode);
1062 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1063 }
1064 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1065
1066 default:
1067 gcc_unreachable ();
1068 }
1069
1070 if (local_symbolic_operand (x, Pmode))
1071 {
1072 if (small_symbolic_operand (x, Pmode))
1073 return x;
1074 else
1075 {
1076 if (can_create_pseudo_p ())
1077 scratch = gen_reg_rtx (Pmode);
1078 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1079 gen_rtx_HIGH (Pmode, x)));
1080 return gen_rtx_LO_SUM (Pmode, scratch, x);
1081 }
1082 }
1083 }
1084
1085 return NULL;
1086
1087 split_addend:
1088 {
1089 HOST_WIDE_INT low, high;
1090
1091 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1092 addend -= low;
1093 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1094 addend -= high;
1095
1096 if (addend)
1097 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1098 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1099 1, OPTAB_LIB_WIDEN);
1100 if (high)
1101 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1102 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1103 1, OPTAB_LIB_WIDEN);
1104
1105 return plus_constant (x, low);
1106 }
1107 }
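
/* Worked example, added for illustration: how the split_addend logic above
   decomposes an out-of-range addend, assuming a 64-bit HOST_WIDE_INT.  The
   snippet is illustrative only and is not compiled.  */
#if 0
{
  HOST_WIDE_INT addend = 0x18000;
  HOST_WIDE_INT low  = ((addend & 0xffff) ^ 0x8000) - 0x8000;      /* -0x8000 */
  HOST_WIDE_INT high = (((addend - low) & 0xffffffff) ^ 0x80000000)
                       - 0x80000000;                                /*  0x20000 */
  /* The address becomes (plus (plus reg 0x20000) -0x8000): an ldah with
     displacement 2 followed by a 16-bit displacement of -32768.  */
}
#endif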
1108
1109 /* Primarily this is required for TLS symbols, but given that our move
1110 patterns *ought* to be able to handle any symbol at any time, we
1111 should never be spilling symbolic operands to the constant pool, ever. */
1112
1113 static bool
1114 alpha_cannot_force_const_mem (rtx x)
1115 {
1116 enum rtx_code code = GET_CODE (x);
1117 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1118 }
1119
1120 /* We do not allow indirect calls to be optimized into sibling calls, nor
1121 can we allow a call to a function with a different GP to be optimized
1122 into a sibcall. */
1123
1124 static bool
1125 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1126 {
1127 /* Can't do indirect tail calls, since we don't know if the target
1128 uses the same GP. */
1129 if (!decl)
1130 return false;
1131
1132 /* Otherwise, we can make a tail call if the target function shares
1133 the same GP. */
1134 return decl_has_samegp (decl);
1135 }
1136
1137 int
1138 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1139 {
1140 rtx x = *px;
1141
1142 /* Don't re-split. */
1143 if (GET_CODE (x) == LO_SUM)
1144 return -1;
1145
1146 return small_symbolic_operand (x, Pmode) != 0;
1147 }
1148
1149 static int
1150 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1151 {
1152 rtx x = *px;
1153
1154 /* Don't re-split. */
1155 if (GET_CODE (x) == LO_SUM)
1156 return -1;
1157
1158 if (small_symbolic_operand (x, Pmode))
1159 {
1160 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1161 *px = x;
1162 return -1;
1163 }
1164
1165 return 0;
1166 }
1167
1168 rtx
1169 split_small_symbolic_operand (rtx x)
1170 {
1171 x = copy_insn (x);
1172 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1173 return x;
1174 }
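
/* For illustration: after this transformation a small-data reference such
   as (mem (symbol_ref "x")) becomes (mem (lo_sum (reg 29) (symbol_ref "x"))),
   i.e. a gp-relative access through the GP register held in
   pic_offset_table_rtx.  */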
1175
1176 /* Indicate that INSN cannot be duplicated. This is true for any insn
1177 that we've marked with gpdisp relocs, since those have to stay in
1178 1-1 correspondence with one another.
1179
1180 Technically we could copy them if we could set up a mapping from one
1181 sequence number to another, across the set of insns to be duplicated.
1182 This seems overly complicated and error-prone since interblock motion
1183 from sched-ebb could move one of the pair of insns to a different block.
1184
1185 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1186 then they'll be in a different block from their ldgp. Which could lead
1187 the bb reorder code to think that it would be ok to copy just the block
1188 containing the call and branch to the block containing the ldgp. */
1189
1190 static bool
1191 alpha_cannot_copy_insn_p (rtx insn)
1192 {
1193 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1194 return false;
1195 if (recog_memoized (insn) >= 0)
1196 return get_attr_cannot_copy (insn);
1197 else
1198 return false;
1199 }
1200
1201
1202 /* Try a machine-dependent way of reloading an illegitimate address
1203 operand. If we find one, push the reload and return the new rtx. */
1204
1205 rtx
1206 alpha_legitimize_reload_address (rtx x,
1207 enum machine_mode mode ATTRIBUTE_UNUSED,
1208 int opnum, int type,
1209 int ind_levels ATTRIBUTE_UNUSED)
1210 {
1211 /* We must recognize output that we have already generated ourselves. */
1212 if (GET_CODE (x) == PLUS
1213 && GET_CODE (XEXP (x, 0)) == PLUS
1214 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1215 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1216 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1217 {
1218 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1219 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1220 opnum, type);
1221 return x;
1222 }
1223
1224 /* We wish to handle large displacements off a base register by
1225 splitting the addend across an ldah and the mem insn. This
1226 cuts the number of extra insns needed from 3 to 1. */
1227 if (GET_CODE (x) == PLUS
1228 && GET_CODE (XEXP (x, 0)) == REG
1229 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1230 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1231 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1232 {
1233 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1234 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1235 HOST_WIDE_INT high
1236 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1237
1238 /* Check for 32-bit overflow. */
1239 if (high + low != val)
1240 return NULL_RTX;
1241
1242 /* Reload the high part into a base reg; leave the low part
1243 in the mem directly. */
1244 x = gen_rtx_PLUS (GET_MODE (x),
1245 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1246 GEN_INT (high)),
1247 GEN_INT (low));
1248
1249 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1250 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1251 opnum, type);
1252 return x;
1253 }
1254
1255 return NULL_RTX;
1256 }
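
/* Worked example, added for illustration: for a frame access at
   (plus (reg fp) (const_int 0x12348)) the code above computes

     high = 0x10000   reloaded into a base reg via an ldah (displacement 1)
     low  = 0x2348    left as the 16-bit displacement in the mem

   giving (plus (plus (reg fp) 0x10000) 0x2348) instead of materializing the
   whole constant separately.  */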
1257 \f
1258 /* Compute a (partial) cost for rtx X. Return true if the complete
1259 cost has been computed, and false if subexpressions should be
1260 scanned. In either case, *TOTAL contains the cost result. */
1261
1262 static bool
1263 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1264 {
1265 enum machine_mode mode = GET_MODE (x);
1266 bool float_mode_p = FLOAT_MODE_P (mode);
1267 const struct alpha_rtx_cost_data *cost_data;
1268
1269 if (optimize_size)
1270 cost_data = &alpha_rtx_cost_size;
1271 else
1272 cost_data = &alpha_rtx_cost_data[alpha_tune];
1273
1274 switch (code)
1275 {
1276 case CONST_INT:
1277 /* If this is an 8-bit constant, return zero since it can be used
1278 nearly anywhere with no cost. If it is a valid operand for an
1279 ADD or AND, likewise return 0 if we know it will be used in that
1280 context. Otherwise, return 2 since it might be used there later.
1281 All other constants take at least two insns. */
1282 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1283 {
1284 *total = 0;
1285 return true;
1286 }
1287 /* FALLTHRU */
1288
1289 case CONST_DOUBLE:
1290 if (x == CONST0_RTX (mode))
1291 *total = 0;
1292 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1293 || (outer_code == AND && and_operand (x, VOIDmode)))
1294 *total = 0;
1295 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1296 *total = 2;
1297 else
1298 *total = COSTS_N_INSNS (2);
1299 return true;
1300
1301 case CONST:
1302 case SYMBOL_REF:
1303 case LABEL_REF:
1304 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1305 *total = COSTS_N_INSNS (outer_code != MEM);
1306 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1307 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1308 else if (tls_symbolic_operand_type (x))
1309 /* Estimate of cost for call_pal rduniq. */
1310 /* ??? How many insns do we emit here? More than one... */
1311 *total = COSTS_N_INSNS (15);
1312 else
1313 /* Otherwise we do a load from the GOT. */
1314 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1315 return true;
1316
1317 case HIGH:
1318 /* This is effectively an add_operand. */
1319 *total = 2;
1320 return true;
1321
1322 case PLUS:
1323 case MINUS:
1324 if (float_mode_p)
1325 *total = cost_data->fp_add;
1326 else if (GET_CODE (XEXP (x, 0)) == MULT
1327 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1328 {
1329 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1330 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1331 return true;
1332 }
1333 return false;
1334
1335 case MULT:
1336 if (float_mode_p)
1337 *total = cost_data->fp_mult;
1338 else if (mode == DImode)
1339 *total = cost_data->int_mult_di;
1340 else
1341 *total = cost_data->int_mult_si;
1342 return false;
1343
1344 case ASHIFT:
1345 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1346 && INTVAL (XEXP (x, 1)) <= 3)
1347 {
1348 *total = COSTS_N_INSNS (1);
1349 return false;
1350 }
1351 /* FALLTHRU */
1352
1353 case ASHIFTRT:
1354 case LSHIFTRT:
1355 *total = cost_data->int_shift;
1356 return false;
1357
1358 case IF_THEN_ELSE:
1359 if (float_mode_p)
1360 *total = cost_data->fp_add;
1361 else
1362 *total = cost_data->int_cmov;
1363 return false;
1364
1365 case DIV:
1366 case UDIV:
1367 case MOD:
1368 case UMOD:
1369 if (!float_mode_p)
1370 *total = cost_data->int_div;
1371 else if (mode == SFmode)
1372 *total = cost_data->fp_div_sf;
1373 else
1374 *total = cost_data->fp_div_df;
1375 return false;
1376
1377 case MEM:
1378 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1379 return true;
1380
1381 case NEG:
1382 if (! float_mode_p)
1383 {
1384 *total = COSTS_N_INSNS (1);
1385 return false;
1386 }
1387 /* FALLTHRU */
1388
1389 case ABS:
1390 if (! float_mode_p)
1391 {
1392 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1393 return false;
1394 }
1395 /* FALLTHRU */
1396
1397 case FLOAT:
1398 case UNSIGNED_FLOAT:
1399 case FIX:
1400 case UNSIGNED_FIX:
1401 case FLOAT_TRUNCATE:
1402 *total = cost_data->fp_add;
1403 return false;
1404
1405 case FLOAT_EXTEND:
1406 if (GET_CODE (XEXP (x, 0)) == MEM)
1407 *total = 0;
1408 else
1409 *total = cost_data->fp_add;
1410 return false;
1411
1412 default:
1413 return false;
1414 }
1415 }
1416 \f
1417 /* REF is an alignable memory location. Place an aligned SImode
1418 reference into *PALIGNED_MEM and the number of bits to shift into
1419 *PBITNUM. */
1421
1422 void
1423 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1424 {
1425 rtx base;
1426 HOST_WIDE_INT disp, offset;
1427
1428 gcc_assert (GET_CODE (ref) == MEM);
1429
1430 if (reload_in_progress
1431 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1432 {
1433 base = find_replacement (&XEXP (ref, 0));
1434 gcc_assert (memory_address_p (GET_MODE (ref), base));
1435 }
1436 else
1437 base = XEXP (ref, 0);
1438
1439 if (GET_CODE (base) == PLUS)
1440 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1441 else
1442 disp = 0;
1443
1444 /* Find the byte offset within an aligned word. If the memory itself is
1445 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1446 will have examined the base register and determined it is aligned, and
1447 thus displacements from it are naturally alignable. */
1448 if (MEM_ALIGN (ref) >= 32)
1449 offset = 0;
1450 else
1451 offset = disp & 3;
1452
1453 /* Access the entire aligned word. */
1454 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1455
1456 /* Convert the byte offset within the word to a bit offset. */
1457 if (WORDS_BIG_ENDIAN)
1458 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1459 else
1460 offset *= 8;
1461 *pbitnum = GEN_INT (offset);
1462 }
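
/* Worked example, added for illustration: for a QImode reference at
   (plus (reg base) (const_int 6)) whose MEM_ALIGN is below 32, disp is 6,
   so offset = 6 & 3 = 2; *PALIGNED_MEM becomes the SImode word at
   displacement 4, and *PBITNUM is 16 on a little-endian target (byte 2 of
   the word, expressed in bits).  */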
1463
1464 /* Similar, but just get the address. Handle the two reload
1465 cases. */
1466
1467 rtx
1468 get_unaligned_address (rtx ref)
1469 {
1470 rtx base;
1471 HOST_WIDE_INT offset = 0;
1472
1473 gcc_assert (GET_CODE (ref) == MEM);
1474
1475 if (reload_in_progress
1476 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1477 {
1478 base = find_replacement (&XEXP (ref, 0));
1479
1480 gcc_assert (memory_address_p (GET_MODE (ref), base));
1481 }
1482 else
1483 base = XEXP (ref, 0);
1484
1485 if (GET_CODE (base) == PLUS)
1486 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1487
1488 return plus_constant (base, offset);
1489 }
1490
1491 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1492 X is always returned in a register. */
1493
1494 rtx
1495 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1496 {
1497 if (GET_CODE (addr) == PLUS)
1498 {
1499 ofs += INTVAL (XEXP (addr, 1));
1500 addr = XEXP (addr, 0);
1501 }
1502
1503 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1504 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1505 }
1506
1507 /* On the Alpha, all (non-symbolic) constants except zero go into
1508 a floating-point register via memory. Note that we cannot
1509 return anything that is not a subset of CLASS, and that some
1510 symbolic constants cannot be dropped to memory. */
1511
1512 enum reg_class
1513 alpha_preferred_reload_class(rtx x, enum reg_class class)
1514 {
1515 /* Zero is present in any register class. */
1516 if (x == CONST0_RTX (GET_MODE (x)))
1517 return class;
1518
1519 /* These sorts of constants we can easily drop to memory. */
1520 if (GET_CODE (x) == CONST_INT
1521 || GET_CODE (x) == CONST_DOUBLE
1522 || GET_CODE (x) == CONST_VECTOR)
1523 {
1524 if (class == FLOAT_REGS)
1525 return NO_REGS;
1526 if (class == ALL_REGS)
1527 return GENERAL_REGS;
1528 return class;
1529 }
1530
1531 /* All other kinds of constants should not (and in the case of HIGH
1532 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1533 secondary reload. */
1534 if (CONSTANT_P (x))
1535 return (class == ALL_REGS ? GENERAL_REGS : class);
1536
1537 return class;
1538 }
1539
1540 /* Inform reload about cases where moving X with a mode MODE to a register in
1541 CLASS requires an extra scratch or immediate register. Return the class
1542 needed for the immediate register. */
1543
1544 static enum reg_class
1545 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1546 enum machine_mode mode, secondary_reload_info *sri)
1547 {
1548 /* Loading and storing HImode or QImode values to and from memory
1549 usually requires a scratch register. */
1550 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1551 {
1552 if (any_memory_operand (x, mode))
1553 {
1554 if (in_p)
1555 {
1556 if (!aligned_memory_operand (x, mode))
1557 sri->icode = reload_in_optab[mode];
1558 }
1559 else
1560 sri->icode = reload_out_optab[mode];
1561 return NO_REGS;
1562 }
1563 }
1564
1565 /* We also cannot do integral arithmetic into FP regs, as might result
1566 from register elimination into a DImode fp register. */
1567 if (class == FLOAT_REGS)
1568 {
1569 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1570 return GENERAL_REGS;
1571 if (in_p && INTEGRAL_MODE_P (mode)
1572 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1573 return GENERAL_REGS;
1574 }
1575
1576 return NO_REGS;
1577 }
1578 \f
1579 /* Subfunction of the following function. Update the flags of any MEM
1580 found in part of X. */
1581
1582 static int
1583 alpha_set_memflags_1 (rtx *xp, void *data)
1584 {
1585 rtx x = *xp, orig = (rtx) data;
1586
1587 if (GET_CODE (x) != MEM)
1588 return 0;
1589
1590 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1591 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1592 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1593 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1594 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1595
1596 /* Sadly, we cannot use alias sets because the extra aliasing
1597 produced by the AND interferes. Given that two-byte quantities
1598 are the only thing we would be able to differentiate anyway,
1599 there does not seem to be any point in convoluting the early
1600 out of the alias check. */
1601
1602 return -1;
1603 }
1604
1605 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1606 generated to perform a memory operation, look for any MEMs in either
1607 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1608 volatile flags from REF into each of the MEMs found. If REF is not
1609 a MEM, don't do anything. */
1610
1611 void
1612 alpha_set_memflags (rtx insn, rtx ref)
1613 {
1614 rtx *base_ptr;
1615
1616 if (GET_CODE (ref) != MEM)
1617 return;
1618
1619 /* This is only called from alpha.md, after having had something
1620 generated from one of the insn patterns. So if everything is
1621 zero, the pattern is already up-to-date. */
1622 if (!MEM_VOLATILE_P (ref)
1623 && !MEM_IN_STRUCT_P (ref)
1624 && !MEM_SCALAR_P (ref)
1625 && !MEM_NOTRAP_P (ref)
1626 && !MEM_READONLY_P (ref))
1627 return;
1628
1629 if (INSN_P (insn))
1630 base_ptr = &PATTERN (insn);
1631 else
1632 base_ptr = &insn;
1633 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1634 }
1635 \f
1636 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1637 int, bool);
1638
1639 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1640 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1641 and return pc_rtx if successful. */
1642
1643 static rtx
1644 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1645 HOST_WIDE_INT c, int n, bool no_output)
1646 {
1647 HOST_WIDE_INT new;
1648 int i, bits;
1649 /* Use a pseudo if highly optimizing and still generating RTL. */
1650 rtx subtarget
1651 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1652 rtx temp, insn;
1653
1654 /* If this is a sign-extended 32-bit constant, we can do this in at most
1655 three insns, so do it if we have enough insns left. We always have
1656 a sign-extended 32-bit constant when compiling on a narrow machine. */
1657
1658 if (HOST_BITS_PER_WIDE_INT != 64
1659 || c >> 31 == -1 || c >> 31 == 0)
1660 {
1661 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1662 HOST_WIDE_INT tmp1 = c - low;
1663 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1664 HOST_WIDE_INT extra = 0;
1665
1666 /* If HIGH will be interpreted as negative but the constant is
1667 positive, we must adjust it to do two ldha insns. */
1668
1669 if ((high & 0x8000) != 0 && c >= 0)
1670 {
1671 extra = 0x4000;
1672 tmp1 -= 0x40000000;
1673 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1674 }
1675
1676 if (c == low || (low == 0 && extra == 0))
1677 {
1678 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1679 but that meant that we can't handle INT_MIN on 32-bit machines
1680 (like NT/Alpha), because we recurse indefinitely through
1681 emit_move_insn to gen_movdi. So instead, since we know exactly
1682 what we want, create it explicitly. */
1683
1684 if (no_output)
1685 return pc_rtx;
1686 if (target == NULL)
1687 target = gen_reg_rtx (mode);
1688 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1689 return target;
1690 }
1691 else if (n >= 2 + (extra != 0))
1692 {
1693 if (no_output)
1694 return pc_rtx;
1695 if (!can_create_pseudo_p ())
1696 {
1697 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1698 temp = target;
1699 }
1700 else
1701 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1702 subtarget, mode);
1703
1704 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1705 This means that if we go through expand_binop, we'll try to
1706 generate extensions, etc, which will require new pseudos, which
1707 will fail during some split phases. The SImode add patterns
1708 still exist, but are not named. So build the insns by hand. */
1709
1710 if (extra != 0)
1711 {
1712 if (! subtarget)
1713 subtarget = gen_reg_rtx (mode);
1714 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1715 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1716 emit_insn (insn);
1717 temp = subtarget;
1718 }
1719
1720 if (target == NULL)
1721 target = gen_reg_rtx (mode);
1722 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1723 insn = gen_rtx_SET (VOIDmode, target, insn);
1724 emit_insn (insn);
1725 return target;
1726 }
1727 }
1728
1729 /* If we couldn't do it that way, try some other methods. But if we have
1730 no instructions left, don't bother. Likewise, if this is SImode and
1731 we can't make pseudos, we can't do anything since the expand_binop
1732 and expand_unop calls will widen and try to make pseudos. */
1733
1734 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1735 return 0;
1736
1737 /* Next, see if we can load a related constant and then shift and possibly
1738 negate it to get the constant we want. Try this once each increasing
1739 numbers of insns. */
1740
1741 for (i = 1; i < n; i++)
1742 {
1743 /* First, see if, after subtracting some low bits, we have an easy
1744 load of the high bits. */
1745
1746 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1747 if (new != 0)
1748 {
1749 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1750 if (temp)
1751 {
1752 if (no_output)
1753 return temp;
1754 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1755 target, 0, OPTAB_WIDEN);
1756 }
1757 }
1758
1759 /* Next try complementing. */
1760 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1761 if (temp)
1762 {
1763 if (no_output)
1764 return temp;
1765 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1766 }
1767
1768 /* Next try to form a constant and do a left shift. We can do this
1769 if some low-order bits are zero; the exact_log2 call below tells
1770 us that information. The bits we are shifting out could be any
1771 value, but here we'll just try the 0- and sign-extended forms of
1772 the constant. To try to increase the chance of having the same
1773 constant in more than one insn, start at the highest number of
1774 bits to shift, but try all possibilities in case a ZAPNOT will
1775 be useful. */
1776
1777 bits = exact_log2 (c & -c);
1778 if (bits > 0)
1779 for (; bits > 0; bits--)
1780 {
1781 new = c >> bits;
1782 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1783 if (!temp && c < 0)
1784 {
1785 new = (unsigned HOST_WIDE_INT)c >> bits;
1786 temp = alpha_emit_set_const (subtarget, mode, new,
1787 i, no_output);
1788 }
1789 if (temp)
1790 {
1791 if (no_output)
1792 return temp;
1793 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1794 target, 0, OPTAB_WIDEN);
1795 }
1796 }
1797
1798 /* Now try high-order zero bits. Here we try the shifted-in bits as
1799 all zero and all ones. Be careful to avoid shifting outside the
1800 mode and to avoid shifting outside the host wide int size. */
1801 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1802 confuse the recursive call and set all of the high 32 bits. */
1803
1804 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1805 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1806 if (bits > 0)
1807 for (; bits > 0; bits--)
1808 {
1809 new = c << bits;
1810 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1811 if (!temp)
1812 {
1813 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1814 temp = alpha_emit_set_const (subtarget, mode, new,
1815 i, no_output);
1816 }
1817 if (temp)
1818 {
1819 if (no_output)
1820 return temp;
1821 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1822 target, 1, OPTAB_WIDEN);
1823 }
1824 }
1825
1826 /* Now try high-order 1 bits. We get that with a sign-extension.
1827 But one bit isn't enough here. Be careful to avoid shifting outside
1828 the mode and to avoid shifting outside the host wide int size. */
1829
1830 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1831 - floor_log2 (~ c) - 2);
1832 if (bits > 0)
1833 for (; bits > 0; bits--)
1834 {
1835 new = c << bits;
1836 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1837 if (!temp)
1838 {
1839 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1840 temp = alpha_emit_set_const (subtarget, mode, new,
1841 i, no_output);
1842 }
1843 if (temp)
1844 {
1845 if (no_output)
1846 return temp;
1847 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1848 target, 0, OPTAB_WIDEN);
1849 }
1850 }
1851 }
1852
1853 #if HOST_BITS_PER_WIDE_INT == 64
1854 /* Finally, see if we can load a value into the target that is the same as the
1855 constant except that all bytes that are 0 are changed to be 0xff. If we
1856 can, then we can do a ZAPNOT to obtain the desired constant. */
1857
1858 new = c;
1859 for (i = 0; i < 64; i += 8)
1860 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1861 new |= (HOST_WIDE_INT) 0xff << i;
1862
1863 /* We are only called for SImode and DImode. If this is SImode, ensure that
1864 we are sign extended to a full word. */
1865
1866 if (mode == SImode)
1867 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1868
1869 if (new != c)
1870 {
1871 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1872 if (temp)
1873 {
1874 if (no_output)
1875 return temp;
1876 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1877 target, 0, OPTAB_WIDEN);
1878 }
1879 }
1880 #endif
1881
1882 return 0;
1883 }
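
/* Worked example, added for illustration: for c = 0x7fff8000 the
   sign-extended 32-bit path above first computes

     low = -0x8000, high = -0x8000  (which would read back as negative)

   so the "extra" adjustment kicks in: extra = 0x4000, high = 0x4000, and
   the constant is built as 0x40000000 + 0x40000000 - 0x8000, i.e. an
   ldah/ldah/lda sequence, the three-insn case mentioned above.  */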
1884
1885 /* Try to output insns to set TARGET equal to the constant C if it can be
1886 done in at most N insns. Do all computations in MODE. Returns the place
1887 where the output has been placed if it can be done and the insns have been
1888 emitted. If it would take more than N insns, zero is returned and no
1889 insns are emitted. */
1890
1891 static rtx
1892 alpha_emit_set_const (rtx target, enum machine_mode mode,
1893 HOST_WIDE_INT c, int n, bool no_output)
1894 {
1895 enum machine_mode orig_mode = mode;
1896 rtx orig_target = target;
1897 rtx result = 0;
1898 int i;
1899
1900 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1901 can't load this constant in one insn, do this in DImode. */
1902 if (!can_create_pseudo_p () && mode == SImode
1903 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1904 {
1905 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1906 if (result)
1907 return result;
1908
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1910 mode = DImode;
1911 }
1912 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1913 {
1914 target = no_output ? NULL : gen_lowpart (DImode, target);
1915 mode = DImode;
1916 }
1917
1918 /* Try 1 insn, then 2, then up to N. */
1919 for (i = 1; i <= n; i++)
1920 {
1921 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1922 if (result)
1923 {
1924 rtx insn, set;
1925
1926 if (no_output)
1927 return result;
1928
1929 insn = get_last_insn ();
1930 set = single_set (insn);
1931 if (! CONSTANT_P (SET_SRC (set)))
1932 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1933 break;
1934 }
1935 }
1936
1937 /* Allow for the case where we changed the mode of TARGET. */
1938 if (result)
1939 {
1940 if (result == target)
1941 result = orig_target;
1942 else if (mode != orig_mode)
1943 result = gen_lowpart (orig_mode, result);
1944 }
1945
1946 return result;
1947 }
1948
1949 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1950 fall back to a straightforward decomposition. We do this to avoid
1951 exponential run times encountered when looking for longer sequences
1952 with alpha_emit_set_const. */
1953
1954 static rtx
1955 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1956 {
1957 HOST_WIDE_INT d1, d2, d3, d4;
1958
1959 /* Decompose the entire word */
1960 #if HOST_BITS_PER_WIDE_INT >= 64
1961 gcc_assert (c2 == -(c1 < 0));
1962 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1963 c1 -= d1;
1964 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1965 c1 = (c1 - d2) >> 32;
1966 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d3;
1968 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d4);
1970 #else
1971 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1972 c1 -= d1;
1973 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c1 == d2);
1975 c2 += (d2 < 0);
1976 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1977 c2 -= d3;
1978 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1979 gcc_assert (c2 == d4);
1980 #endif
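/* As an illustration (the constant is arbitrary): C = 0x123456789ab
   decomposes into d1 = -0x7655, d2 = 0x45680000, d3 = 0x123, d4 = 0,
   and the code below rebuilds it as ((0x123 << 32) + 0x45680000) - 0x7655,
   roughly an lda for d3, an sll by 32, then an ldah and an lda for the
   low half.  */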
1981
1982 /* Construct the high word */
1983 if (d4)
1984 {
1985 emit_move_insn (target, GEN_INT (d4));
1986 if (d3)
1987 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1988 }
1989 else
1990 emit_move_insn (target, GEN_INT (d3));
1991
1992 /* Shift it into place */
1993 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1994
1995 /* Add in the low bits. */
1996 if (d2)
1997 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1998 if (d1)
1999 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2000
2001 return target;
2002 }
2003
2004 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2005 the low 64 bits. */
2006
2007 static void
2008 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2009 {
2010 HOST_WIDE_INT i0, i1;
2011
2012 if (GET_CODE (x) == CONST_VECTOR)
2013 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2014
2015
2016 if (GET_CODE (x) == CONST_INT)
2017 {
2018 i0 = INTVAL (x);
2019 i1 = -(i0 < 0);
2020 }
2021 else if (HOST_BITS_PER_WIDE_INT >= 64)
2022 {
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = -(i0 < 0);
2025 }
2026 else
2027 {
2028 i0 = CONST_DOUBLE_LOW (x);
2029 i1 = CONST_DOUBLE_HIGH (x);
2030 }
2031
2032 *p0 = i0;
2033 *p1 = i1;
2034 }
2035
2036 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2037 are willing to load the value into a register via a move pattern.
2038 Normally this is all symbolic constants, integral constants that
2039 take three or fewer instructions, and floating-point zero. */
2040
2041 bool
2042 alpha_legitimate_constant_p (rtx x)
2043 {
2044 enum machine_mode mode = GET_MODE (x);
2045 HOST_WIDE_INT i0, i1;
2046
2047 switch (GET_CODE (x))
2048 {
2049 case CONST:
2050 case LABEL_REF:
2051 case HIGH:
2052 return true;
2053
2054 case SYMBOL_REF:
2055 /* TLS symbols are never valid. */
2056 return SYMBOL_REF_TLS_MODEL (x) == 0;
2057
2058 case CONST_DOUBLE:
2059 if (x == CONST0_RTX (mode))
2060 return true;
2061 if (FLOAT_MODE_P (mode))
2062 return false;
2063 goto do_integer;
2064
2065 case CONST_VECTOR:
2066 if (x == CONST0_RTX (mode))
2067 return true;
2068 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2069 return false;
2070 if (GET_MODE_SIZE (mode) != 8)
2071 return false;
2072 goto do_integer;
2073
2074 case CONST_INT:
2075 do_integer:
2076 if (TARGET_BUILD_CONSTANTS)
2077 return true;
2078 alpha_extract_integer (x, &i0, &i1);
2079 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2080 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2081 return false;
2082
2083 default:
2084 return false;
2085 }
2086 }
2087
2088 /* Operand 1 is known to be a constant, and should require more than one
2089 instruction to load. Emit that multi-part load. */
2090
2091 bool
2092 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2093 {
2094 HOST_WIDE_INT i0, i1;
2095 rtx temp = NULL_RTX;
2096
2097 alpha_extract_integer (operands[1], &i0, &i1);
2098
2099 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2100 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2101
2102 if (!temp && TARGET_BUILD_CONSTANTS)
2103 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2104
2105 if (temp)
2106 {
2107 if (!rtx_equal_p (operands[0], temp))
2108 emit_move_insn (operands[0], temp);
2109 return true;
2110 }
2111
2112 return false;
2113 }
2114
2115 /* Expand a move instruction; return true if all work is done.
2116 We don't handle non-bwx subword loads here. */
2117
2118 bool
2119 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2120 {
2121 /* If the output is not a register, the input must be. */
2122 if (GET_CODE (operands[0]) == MEM
2123 && ! reg_or_0_operand (operands[1], mode))
2124 operands[1] = force_reg (mode, operands[1]);
2125
2126 /* Allow legitimize_address to perform some simplifications. */
2127 if (mode == Pmode && symbolic_operand (operands[1], mode))
2128 {
2129 rtx tmp;
2130
2131 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2132 if (tmp)
2133 {
2134 if (tmp == operands[0])
2135 return true;
2136 operands[1] = tmp;
2137 return false;
2138 }
2139 }
2140
2141 /* Early out for non-constants and valid constants. */
2142 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2143 return false;
2144
2145 /* Split large integers. */
2146 if (GET_CODE (operands[1]) == CONST_INT
2147 || GET_CODE (operands[1]) == CONST_DOUBLE
2148 || GET_CODE (operands[1]) == CONST_VECTOR)
2149 {
2150 if (alpha_split_const_mov (mode, operands))
2151 return true;
2152 }
2153
2154 /* Otherwise we've nothing left but to drop the thing to memory. */
2155 operands[1] = force_const_mem (mode, operands[1]);
2156 if (reload_in_progress)
2157 {
2158 emit_move_insn (operands[0], XEXP (operands[1], 0));
2159 operands[1] = replace_equiv_address (operands[1], operands[0]);
2160 }
2161 else
2162 operands[1] = validize_mem (operands[1]);
2163 return false;
2164 }
2165
2166 /* Expand a non-bwx QImode or HImode move instruction;
2167 return true if all work is done. */
2168
2169 bool
2170 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2171 {
2172 rtx seq;
2173
2174 /* If the output is not a register, the input must be. */
2175 if (MEM_P (operands[0]))
2176 operands[1] = force_reg (mode, operands[1]);
2177
2178 /* Handle four memory cases, unaligned and aligned for either the input
2179 or the output. The only case where we can be called during reload is
2180 for aligned loads; all other cases require temporaries. */
2181
2182 if (any_memory_operand (operands[1], mode))
2183 {
2184 if (aligned_memory_operand (operands[1], mode))
2185 {
2186 if (reload_in_progress)
2187 {
2188 if (mode == QImode)
2189 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2190 else
2191 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2192 emit_insn (seq);
2193 }
2194 else
2195 {
2196 rtx aligned_mem, bitnum;
2197 rtx scratch = gen_reg_rtx (SImode);
2198 rtx subtarget;
2199 bool copyout;
2200
2201 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2202
2203 subtarget = operands[0];
2204 if (GET_CODE (subtarget) == REG)
2205 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2206 else
2207 subtarget = gen_reg_rtx (DImode), copyout = true;
2208
2209 if (mode == QImode)
2210 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2211 bitnum, scratch);
2212 else
2213 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2214 bitnum, scratch);
2215 emit_insn (seq);
2216
2217 if (copyout)
2218 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2219 }
2220 }
2221 else
2222 {
2223 /* Don't pass these as parameters since that makes the generated
2224 code depend on parameter evaluation order which will cause
2225 bootstrap failures. */
2226
2227 rtx temp1, temp2, subtarget, ua;
2228 bool copyout;
2229
2230 temp1 = gen_reg_rtx (DImode);
2231 temp2 = gen_reg_rtx (DImode);
2232
2233 subtarget = operands[0];
2234 if (GET_CODE (subtarget) == REG)
2235 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2236 else
2237 subtarget = gen_reg_rtx (DImode), copyout = true;
2238
2239 ua = get_unaligned_address (operands[1]);
2240 if (mode == QImode)
2241 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2242 else
2243 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2244
2245 alpha_set_memflags (seq, operands[1]);
2246 emit_insn (seq);
2247
2248 if (copyout)
2249 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2250 }
2251 return true;
2252 }
2253
2254 if (any_memory_operand (operands[0], mode))
2255 {
2256 if (aligned_memory_operand (operands[0], mode))
2257 {
2258 rtx aligned_mem, bitnum;
2259 rtx temp1 = gen_reg_rtx (SImode);
2260 rtx temp2 = gen_reg_rtx (SImode);
2261
2262 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2263
2264 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2265 temp1, temp2));
2266 }
2267 else
2268 {
2269 rtx temp1 = gen_reg_rtx (DImode);
2270 rtx temp2 = gen_reg_rtx (DImode);
2271 rtx temp3 = gen_reg_rtx (DImode);
2272 rtx ua = get_unaligned_address (operands[0]);
2273
2274 if (mode == QImode)
2275 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2276 else
2277 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2278
2279 alpha_set_memflags (seq, operands[0]);
2280 emit_insn (seq);
2281 }
2282 return true;
2283 }
2284
2285 return false;
2286 }
2287
2288 /* Implement the movmisalign patterns. One of the operands is a memory
2289 that is not naturally aligned. Emit instructions to load it. */
2290
2291 void
2292 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2293 {
2294 /* Honor misaligned loads, as we promised to do. */
2295 if (MEM_P (operands[1]))
2296 {
2297 rtx tmp;
2298
2299 if (register_operand (operands[0], mode))
2300 tmp = operands[0];
2301 else
2302 tmp = gen_reg_rtx (mode);
2303
2304 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2305 if (tmp != operands[0])
2306 emit_move_insn (operands[0], tmp);
2307 }
2308 else if (MEM_P (operands[0]))
2309 {
2310 if (!reg_or_0_operand (operands[1], mode))
2311 operands[1] = force_reg (mode, operands[1]);
2312 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2313 }
2314 else
2315 gcc_unreachable ();
2316 }
2317
2318 /* Generate an unsigned DImode to FP conversion. This is the same code
2319 optabs would emit if we didn't have TFmode patterns.
2320
2321 For SFmode, this is the only construction I've found that can pass
2322 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2323 intermediates will work, because you'll get intermediate rounding
2324 that ruins the end result. Some of this could be fixed by turning
2325 on round-to-positive-infinity, but that requires diddling the fpsr,
2326 which kills performance. I tried turning this around and converting
2327 to a negative number, so that I could turn on /m, but either I did
2328 it wrong or there's something else, because I wound up with the exact
2329 same single-bit error. There is a branch-less form of this same code:
2330
2331 srl $16,1,$1
2332 and $16,1,$2
2333 cmplt $16,0,$3
2334 or $1,$2,$2
2335 cmovge $16,$16,$2
2336 itoft $3,$f10
2337 itoft $2,$f11
2338 cvtqs $f11,$f11
2339 adds $f11,$f11,$f0
2340 fcmoveq $f10,$f11,$f0
2341
2342 I'm not using it because it's the same number of instructions as
2343 this branch-full form, and it has more serialized long latency
2344 instructions on the critical path.
2345
2346 For DFmode, we can avoid rounding errors by breaking up the word
2347 into two pieces, converting them separately, and adding them back:
2348
2349 LC0: .long 0,0x5f800000
2350
2351 itoft $16,$f11
2352 lda $2,LC0
2353 cmplt $16,0,$1
2354 cpyse $f11,$f31,$f10
2355 cpyse $f31,$f11,$f11
2356 s4addq $1,$2,$1
2357 lds $f12,0($1)
2358 cvtqt $f10,$f10
2359 cvtqt $f11,$f11
2360 addt $f12,$f10,$f0
2361 addt $f0,$f11,$f0
2362
2363 This doesn't seem to be a clear-cut win over the optabs form.
2364 It probably all depends on the distribution of numbers being
2365 converted -- in the optabs form, all but high-bit-set has a
2366 much lower minimum execution time. */
2367
2368 void
2369 alpha_emit_floatuns (rtx operands[2])
2370 {
2371 rtx neglab, donelab, i0, i1, f0, in, out;
2372 enum machine_mode mode;
2373
2374 out = operands[0];
2375 in = force_reg (DImode, operands[1]);
2376 mode = GET_MODE (out);
2377 neglab = gen_label_rtx ();
2378 donelab = gen_label_rtx ();
2379 i0 = gen_reg_rtx (DImode);
2380 i1 = gen_reg_rtx (DImode);
2381 f0 = gen_reg_rtx (mode);
2382
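/* The code emitted below is, in pseudo-C, roughly
     if (in >= 0)
       out = (FP) in;
     else
       out = (FP) (((uint64_t) in >> 1) | (in & 1)) * 2;
   where OR-ing the dropped low bit back in keeps the final rounding
   correct.  */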
2383 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2384
2385 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2386 emit_jump_insn (gen_jump (donelab));
2387 emit_barrier ();
2388
2389 emit_label (neglab);
2390
2391 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2392 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2393 emit_insn (gen_iordi3 (i0, i0, i1));
2394 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2395 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2396
2397 emit_label (donelab);
2398 }
2399
2400 /* Generate the comparison for a conditional branch. */
2401
2402 rtx
2403 alpha_emit_conditional_branch (enum rtx_code code)
2404 {
2405 enum rtx_code cmp_code, branch_code;
2406 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2407 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2408 rtx tem;
2409
2410 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2411 {
2412 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2413 op1 = const0_rtx;
2414 alpha_compare.fp_p = 0;
2415 }
2416
2417 /* The general case: fold the comparison code to the types of compares
2418 that we have, choosing the branch as necessary. */
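/* As an illustration only: integer "a > b" has no direct compare, so it
   becomes "cmple a,b" followed by beq, while the floating-point version
   swaps the operands instead, giving "cmptlt b,a" followed by fbne.  */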
2419 switch (code)
2420 {
2421 case EQ: case LE: case LT: case LEU: case LTU:
2422 case UNORDERED:
2423 /* We have these compares: */
2424 cmp_code = code, branch_code = NE;
2425 break;
2426
2427 case NE:
2428 case ORDERED:
2429 /* These must be reversed. */
2430 cmp_code = reverse_condition (code), branch_code = EQ;
2431 break;
2432
2433 case GE: case GT: case GEU: case GTU:
2434 /* For FP, we swap them; for INT, we reverse them. */
2435 if (alpha_compare.fp_p)
2436 {
2437 cmp_code = swap_condition (code);
2438 branch_code = NE;
2439 tem = op0, op0 = op1, op1 = tem;
2440 }
2441 else
2442 {
2443 cmp_code = reverse_condition (code);
2444 branch_code = EQ;
2445 }
2446 break;
2447
2448 default:
2449 gcc_unreachable ();
2450 }
2451
2452 if (alpha_compare.fp_p)
2453 {
2454 cmp_mode = DFmode;
2455 if (flag_unsafe_math_optimizations)
2456 {
2457 /* When we are not as concerned about non-finite values, and we
2458 are comparing against zero, we can branch directly. */
2459 if (op1 == CONST0_RTX (DFmode))
2460 cmp_code = UNKNOWN, branch_code = code;
2461 else if (op0 == CONST0_RTX (DFmode))
2462 {
2463 /* Undo the swap we probably did just above. */
2464 tem = op0, op0 = op1, op1 = tem;
2465 branch_code = swap_condition (cmp_code);
2466 cmp_code = UNKNOWN;
2467 }
2468 }
2469 else
2470 {
2471 /* ??? We mark the branch mode to be CCmode to prevent the
2472 compare and branch from being combined, since the compare
2473 insn follows IEEE rules that the branch does not. */
2474 branch_mode = CCmode;
2475 }
2476 }
2477 else
2478 {
2479 cmp_mode = DImode;
2480
2481 /* The following optimizations are only for signed compares. */
2482 if (code != LEU && code != LTU && code != GEU && code != GTU)
2483 {
2484 /* Whee. Compare and branch against 0 directly. */
2485 if (op1 == const0_rtx)
2486 cmp_code = UNKNOWN, branch_code = code;
2487
2488 /* If the constant doesn't fit into an immediate, but can
2489 be generated by lda/ldah, we adjust the argument and
2490 compare against zero, so we can use beq/bne directly. */
2491 /* ??? Don't do this when comparing against symbols, otherwise
2492 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2493 be declared false out of hand (at least for non-weak). */
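/* For example, testing x == 0x4000: 0x4000 does not fit the 8-bit
   operand field, but -0x4000 fits lda's signed 16-bit field, so we can
   emit roughly "lda t,-0x4000(x); beq t".  The constant is illustrative
   only.  */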
2494 else if (GET_CODE (op1) == CONST_INT
2495 && (code == EQ || code == NE)
2496 && !(symbolic_operand (op0, VOIDmode)
2497 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2498 {
2499 rtx n_op1 = GEN_INT (-INTVAL (op1));
2500
2501 if (! satisfies_constraint_I (op1)
2502 && (satisfies_constraint_K (n_op1)
2503 || satisfies_constraint_L (n_op1)))
2504 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2505 }
2506 }
2507
2508 if (!reg_or_0_operand (op0, DImode))
2509 op0 = force_reg (DImode, op0);
2510 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2511 op1 = force_reg (DImode, op1);
2512 }
2513
2514 /* Emit an initial compare instruction, if necessary. */
2515 tem = op0;
2516 if (cmp_code != UNKNOWN)
2517 {
2518 tem = gen_reg_rtx (cmp_mode);
2519 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2520 }
2521
2522 /* Zero the operands. */
2523 memset (&alpha_compare, 0, sizeof (alpha_compare));
2524
2525 /* Return the branch comparison. */
2526 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2527 }
2528
2529 /* Certain simplifications can be done to make invalid setcc operations
2530 valid. Return the final comparison, or NULL if we can't work. */
2531
2532 rtx
2533 alpha_emit_setcc (enum rtx_code code)
2534 {
2535 enum rtx_code cmp_code;
2536 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2537 int fp_p = alpha_compare.fp_p;
2538 rtx tmp;
2539
2540 /* Zero the operands. */
2541 memset (&alpha_compare, 0, sizeof (alpha_compare));
2542
2543 if (fp_p && GET_MODE (op0) == TFmode)
2544 {
2545 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2546 op1 = const0_rtx;
2547 fp_p = 0;
2548 }
2549
2550 if (fp_p && !TARGET_FIX)
2551 return NULL_RTX;
2552
2553 /* The general case: fold the comparison code to the types of compares
2554 that we have, choosing the branch as necessary. */
2555
2556 cmp_code = UNKNOWN;
2557 switch (code)
2558 {
2559 case EQ: case LE: case LT: case LEU: case LTU:
2560 case UNORDERED:
2561 /* We have these compares. */
2562 if (fp_p)
2563 cmp_code = code, code = NE;
2564 break;
2565
2566 case NE:
2567 if (!fp_p && op1 == const0_rtx)
2568 break;
2569 /* FALLTHRU */
2570
2571 case ORDERED:
2572 cmp_code = reverse_condition (code);
2573 code = EQ;
2574 break;
2575
2576 case GE: case GT: case GEU: case GTU:
2577 /* These normally need swapping, but for integer zero we have
2578 special patterns that recognize swapped operands. */
2579 if (!fp_p && op1 == const0_rtx)
2580 break;
2581 code = swap_condition (code);
2582 if (fp_p)
2583 cmp_code = code, code = NE;
2584 tmp = op0, op0 = op1, op1 = tmp;
2585 break;
2586
2587 default:
2588 gcc_unreachable ();
2589 }
2590
2591 if (!fp_p)
2592 {
2593 if (!register_operand (op0, DImode))
2594 op0 = force_reg (DImode, op0);
2595 if (!reg_or_8bit_operand (op1, DImode))
2596 op1 = force_reg (DImode, op1);
2597 }
2598
2599 /* Emit an initial compare instruction, if necessary. */
2600 if (cmp_code != UNKNOWN)
2601 {
2602 enum machine_mode mode = fp_p ? DFmode : DImode;
2603
2604 tmp = gen_reg_rtx (mode);
2605 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2606 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2607
2608 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2609 op1 = const0_rtx;
2610 }
2611
2612 /* Return the setcc comparison. */
2613 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2614 }
2615
2616
2617 /* Rewrite a comparison against zero CMP of the form
2618 (CODE (cc0) (const_int 0)) so it can be written validly in
2619 a conditional move (if_then_else CMP ...).
2620 If both of the operands that set cc0 are nonzero we must emit
2621 an insn to perform the compare (it can't be done within
2622 the conditional move). */
2623
2624 rtx
2625 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2626 {
2627 enum rtx_code code = GET_CODE (cmp);
2628 enum rtx_code cmov_code = NE;
2629 rtx op0 = alpha_compare.op0;
2630 rtx op1 = alpha_compare.op1;
2631 int fp_p = alpha_compare.fp_p;
2632 enum machine_mode cmp_mode
2633 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2634 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2635 enum machine_mode cmov_mode = VOIDmode;
2636 int local_fast_math = flag_unsafe_math_optimizations;
2637 rtx tem;
2638
2639 /* Zero the operands. */
2640 memset (&alpha_compare, 0, sizeof (alpha_compare));
2641
2642 if (fp_p != FLOAT_MODE_P (mode))
2643 {
2644 enum rtx_code cmp_code;
2645
2646 if (! TARGET_FIX)
2647 return 0;
2648
2649 /* If we have fp<->int register move instructions, do a cmov by
2650 performing the comparison in fp registers, and move the
2651 zero/nonzero value to integer registers, where we can then
2652 use a normal cmov, or vice-versa. */
2653
2654 switch (code)
2655 {
2656 case EQ: case LE: case LT: case LEU: case LTU:
2657 /* We have these compares. */
2658 cmp_code = code, code = NE;
2659 break;
2660
2661 case NE:
2662 /* This must be reversed. */
2663 cmp_code = EQ, code = EQ;
2664 break;
2665
2666 case GE: case GT: case GEU: case GTU:
2667 /* These normally need swapping, but for integer zero we have
2668 special patterns that recognize swapped operands. */
2669 if (!fp_p && op1 == const0_rtx)
2670 cmp_code = code, code = NE;
2671 else
2672 {
2673 cmp_code = swap_condition (code);
2674 code = NE;
2675 tem = op0, op0 = op1, op1 = tem;
2676 }
2677 break;
2678
2679 default:
2680 gcc_unreachable ();
2681 }
2682
2683 tem = gen_reg_rtx (cmp_op_mode);
2684 emit_insn (gen_rtx_SET (VOIDmode, tem,
2685 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2686 op0, op1)));
2687
2688 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2689 op0 = gen_lowpart (cmp_op_mode, tem);
2690 op1 = CONST0_RTX (cmp_op_mode);
2691 fp_p = !fp_p;
2692 local_fast_math = 1;
2693 }
2694
2695 /* We may be able to use a conditional move directly.
2696 This avoids emitting spurious compares. */
2697 if (signed_comparison_operator (cmp, VOIDmode)
2698 && (!fp_p || local_fast_math)
2699 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2700 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2701
2702 /* We can't put the comparison inside the conditional move;
2703 emit a compare instruction and put that inside the
2704 conditional move. Make sure we emit only comparisons we have;
2705 swap or reverse as necessary. */
2706
2707 if (!can_create_pseudo_p ())
2708 return NULL_RTX;
2709
2710 switch (code)
2711 {
2712 case EQ: case LE: case LT: case LEU: case LTU:
2713 /* We have these compares: */
2714 break;
2715
2716 case NE:
2717 /* This must be reversed. */
2718 code = reverse_condition (code);
2719 cmov_code = EQ;
2720 break;
2721
2722 case GE: case GT: case GEU: case GTU:
2723 /* These must be swapped. */
2724 if (op1 != CONST0_RTX (cmp_mode))
2725 {
2726 code = swap_condition (code);
2727 tem = op0, op0 = op1, op1 = tem;
2728 }
2729 break;
2730
2731 default:
2732 gcc_unreachable ();
2733 }
2734
2735 if (!fp_p)
2736 {
2737 if (!reg_or_0_operand (op0, DImode))
2738 op0 = force_reg (DImode, op0);
2739 if (!reg_or_8bit_operand (op1, DImode))
2740 op1 = force_reg (DImode, op1);
2741 }
2742
2743 /* ??? We mark the branch mode to be CCmode to prevent the compare
2744 and cmov from being combined, since the compare insn follows IEEE
2745 rules that the cmov does not. */
2746 if (fp_p && !local_fast_math)
2747 cmov_mode = CCmode;
2748
2749 tem = gen_reg_rtx (cmp_op_mode);
2750 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2751 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2752 }
2753
2754 /* Simplify a conditional move of two constants into a setcc with
2755 arithmetic. This is done with a splitter since combine would
2756 just undo the work if done during code generation. It also catches
2757 cases we wouldn't have before cse. */
2758
2759 int
2760 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2761 rtx t_rtx, rtx f_rtx)
2762 {
2763 HOST_WIDE_INT t, f, diff;
2764 enum machine_mode mode;
2765 rtx target, subtarget, tmp;
2766
2767 mode = GET_MODE (dest);
2768 t = INTVAL (t_rtx);
2769 f = INTVAL (f_rtx);
2770 diff = t - f;
2771
2772 if (((code == NE || code == EQ) && diff < 0)
2773 || (code == GE || code == GT))
2774 {
2775 code = reverse_condition (code);
2776 diff = t, t = f, f = diff;
2777 diff = t - f;
2778 }
2779
2780 subtarget = target = dest;
2781 if (mode != DImode)
2782 {
2783 target = gen_lowpart (DImode, dest);
2784 if (can_create_pseudo_p ())
2785 subtarget = gen_reg_rtx (DImode);
2786 else
2787 subtarget = target;
2788 }
2789 /* Below, we must be careful to use copy_rtx on target and subtarget
2790 in intermediate insns, as they may be a subreg rtx, which may not
2791 be shared. */
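/* A few shapes this produces, with illustrative constants:
   (cond ? 8 : 0) becomes a setcc followed by an sll by 3;
   (cond ? -1 : 0) becomes a setcc followed by a negate;
   (cond ? 5 : 1) becomes a setcc followed by s4addq, i.e. cc * 4 + 1.  */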
2792
2793 if (f == 0 && exact_log2 (diff) > 0
2794 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2795 viable over a longer latency cmove. On EV5, the E0 slot is a
2796 scarce resource, and on EV4 shift has the same latency as a cmove. */
2797 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2798 {
2799 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2800 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2801
2802 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2803 GEN_INT (exact_log2 (t)));
2804 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2805 }
2806 else if (f == 0 && t == -1)
2807 {
2808 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2809 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2810
2811 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2812 }
2813 else if (diff == 1 || diff == 4 || diff == 8)
2814 {
2815 rtx add_op;
2816
2817 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2818 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2819
2820 if (diff == 1)
2821 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2822 else
2823 {
2824 add_op = GEN_INT (f);
2825 if (sext_add_operand (add_op, mode))
2826 {
2827 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2828 GEN_INT (diff));
2829 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2830 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2831 }
2832 else
2833 return 0;
2834 }
2835 }
2836 else
2837 return 0;
2838
2839 return 1;
2840 }
2841 \f
2842 /* Look up the function X_floating library function name for the
2843 given operation. */
2844
2845 struct xfloating_op GTY(())
2846 {
2847 const enum rtx_code code;
2848 const char *const GTY((skip)) osf_func;
2849 const char *const GTY((skip)) vms_func;
2850 rtx libcall;
2851 };
2852
2853 static GTY(()) struct xfloating_op xfloating_ops[] =
2854 {
2855 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2856 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2857 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2858 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2859 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2860 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2861 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2862 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2863 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2864 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2865 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2866 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2867 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2868 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2869 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2870 };
2871
2872 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2873 {
2874 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2875 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2876 };
2877
2878 static rtx
2879 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2880 {
2881 struct xfloating_op *ops = xfloating_ops;
2882 long n = ARRAY_SIZE (xfloating_ops);
2883 long i;
2884
2885 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2886
2887 /* How irritating. Nothing to key off for the main table. */
2888 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2889 {
2890 ops = vax_cvt_ops;
2891 n = ARRAY_SIZE (vax_cvt_ops);
2892 }
2893
2894 for (i = 0; i < n; ++i, ++ops)
2895 if (ops->code == code)
2896 {
2897 rtx func = ops->libcall;
2898 if (!func)
2899 {
2900 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2901 ? ops->vms_func : ops->osf_func);
2902 ops->libcall = func;
2903 }
2904 return func;
2905 }
2906
2907 gcc_unreachable ();
2908 }
2909
2910 /* Most X_floating operations take the rounding mode as an argument.
2911 Compute that here. */
2912
2913 static int
2914 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2915 enum alpha_fp_rounding_mode round)
2916 {
2917 int mode;
2918
2919 switch (round)
2920 {
2921 case ALPHA_FPRM_NORM:
2922 mode = 2;
2923 break;
2924 case ALPHA_FPRM_MINF:
2925 mode = 1;
2926 break;
2927 case ALPHA_FPRM_CHOP:
2928 mode = 0;
2929 break;
2930 case ALPHA_FPRM_DYN:
2931 mode = 4;
2932 break;
2933 default:
2934 gcc_unreachable ();
2935
2936 /* XXX For reference, round to +inf is mode = 3. */
2937 }
2938
2939 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2940 mode |= 0x10000;
2941
2942 return mode;
2943 }
2944
2945 /* Emit an X_floating library function call.
2946
2947 Note that these functions do not follow normal calling conventions:
2948 TFmode arguments are passed in two integer registers (as opposed to
2949 indirect); TFmode return values appear in R16+R17.
2950
2951 FUNC is the function to call.
2952 TARGET is where the output belongs.
2953 OPERANDS are the inputs.
2954 NOPERANDS is the count of inputs.
2955 EQUIV is the expression equivalent for the function.
2956 */
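/* For example, a call to _OtsAddX (x, y, round) passes X in $16/$17,
   Y in $18/$19 and the rounding-mode literal in $20, and the TFmode
   result comes back in $16/$17.  */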
2957
2958 static void
2959 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2960 int noperands, rtx equiv)
2961 {
2962 rtx usage = NULL_RTX, tmp, reg;
2963 int regno = 16, i;
2964
2965 start_sequence ();
2966
2967 for (i = 0; i < noperands; ++i)
2968 {
2969 switch (GET_MODE (operands[i]))
2970 {
2971 case TFmode:
2972 reg = gen_rtx_REG (TFmode, regno);
2973 regno += 2;
2974 break;
2975
2976 case DFmode:
2977 reg = gen_rtx_REG (DFmode, regno + 32);
2978 regno += 1;
2979 break;
2980
2981 case VOIDmode:
2982 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2983 /* FALLTHRU */
2984 case DImode:
2985 reg = gen_rtx_REG (DImode, regno);
2986 regno += 1;
2987 break;
2988
2989 default:
2990 gcc_unreachable ();
2991 }
2992
2993 emit_move_insn (reg, operands[i]);
2994 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2995 }
2996
2997 switch (GET_MODE (target))
2998 {
2999 case TFmode:
3000 reg = gen_rtx_REG (TFmode, 16);
3001 break;
3002 case DFmode:
3003 reg = gen_rtx_REG (DFmode, 32);
3004 break;
3005 case DImode:
3006 reg = gen_rtx_REG (DImode, 0);
3007 break;
3008 default:
3009 gcc_unreachable ();
3010 }
3011
3012 tmp = gen_rtx_MEM (QImode, func);
3013 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3014 const0_rtx, const0_rtx));
3015 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3016 RTL_CONST_CALL_P (tmp) = 1;
3017
3018 tmp = get_insns ();
3019 end_sequence ();
3020
3021 emit_libcall_block (tmp, target, reg, equiv);
3022 }
3023
3024 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3025
3026 void
3027 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3028 {
3029 rtx func;
3030 int mode;
3031 rtx out_operands[3];
3032
3033 func = alpha_lookup_xfloating_lib_func (code);
3034 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3035
3036 out_operands[0] = operands[1];
3037 out_operands[1] = operands[2];
3038 out_operands[2] = GEN_INT (mode);
3039 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3040 gen_rtx_fmt_ee (code, TFmode, operands[1],
3041 operands[2]));
3042 }
3043
3044 /* Emit an X_floating library function call for a comparison. */
3045
3046 static rtx
3047 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3048 {
3049 enum rtx_code cmp_code, res_code;
3050 rtx func, out, operands[2], note;
3051
3052 /* X_floating library comparison functions return
3053 -1 unordered
3054 0 false
3055 1 true
3056 Convert the compare against the raw return value. */
3057
3058 cmp_code = *pcode;
3059 switch (cmp_code)
3060 {
3061 case UNORDERED:
3062 cmp_code = EQ;
3063 res_code = LT;
3064 break;
3065 case ORDERED:
3066 cmp_code = EQ;
3067 res_code = GE;
3068 break;
3069 case NE:
3070 res_code = NE;
3071 break;
3072 case EQ:
3073 case LT:
3074 case GT:
3075 case LE:
3076 case GE:
3077 res_code = GT;
3078 break;
3079 default:
3080 gcc_unreachable ();
3081 }
3082 *pcode = res_code;
3083
3084 func = alpha_lookup_xfloating_lib_func (cmp_code);
3085
3086 operands[0] = op0;
3087 operands[1] = op1;
3088 out = gen_reg_rtx (DImode);
3089
3090 /* What's actually returned is -1,0,1, not a proper boolean value,
3091 so use an EXPR_LIST as with a generic libcall instead of a
3092 comparison type expression. */
3093 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3094 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3095 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3096 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3097
3098 return out;
3099 }
3100
3101 /* Emit an X_floating library function call for a conversion. */
3102
3103 void
3104 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3105 {
3106 int noperands = 1, mode;
3107 rtx out_operands[2];
3108 rtx func;
3109 enum rtx_code code = orig_code;
3110
3111 if (code == UNSIGNED_FIX)
3112 code = FIX;
3113
3114 func = alpha_lookup_xfloating_lib_func (code);
3115
3116 out_operands[0] = operands[1];
3117
3118 switch (code)
3119 {
3120 case FIX:
3121 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3122 out_operands[1] = GEN_INT (mode);
3123 noperands = 2;
3124 break;
3125 case FLOAT_TRUNCATE:
3126 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3127 out_operands[1] = GEN_INT (mode);
3128 noperands = 2;
3129 break;
3130 default:
3131 break;
3132 }
3133
3134 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3135 gen_rtx_fmt_e (orig_code,
3136 GET_MODE (operands[0]),
3137 operands[1]));
3138 }
3139
3140 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3141 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3142 guarantee that the sequence
3143 set (OP[0] OP[2])
3144 set (OP[1] OP[3])
3145 is valid. Naturally, output operand ordering is little-endian.
3146 This is used by *movtf_internal and *movti_internal. */
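/* For instance, a register-to-register TImode copy from $22 into $23
   splits into $23 := $22 followed by $24 := $23; since the first set
   clobbers the source of the second, FIXUP_OVERLAP swaps the pair so
   that $24 := $23 is emitted first.  Register numbers are illustrative
   only.  */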
3147
3148 void
3149 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3150 bool fixup_overlap)
3151 {
3152 switch (GET_CODE (operands[1]))
3153 {
3154 case REG:
3155 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3156 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3157 break;
3158
3159 case MEM:
3160 operands[3] = adjust_address (operands[1], DImode, 8);
3161 operands[2] = adjust_address (operands[1], DImode, 0);
3162 break;
3163
3164 case CONST_INT:
3165 case CONST_DOUBLE:
3166 gcc_assert (operands[1] == CONST0_RTX (mode));
3167 operands[2] = operands[3] = const0_rtx;
3168 break;
3169
3170 default:
3171 gcc_unreachable ();
3172 }
3173
3174 switch (GET_CODE (operands[0]))
3175 {
3176 case REG:
3177 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3178 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3179 break;
3180
3181 case MEM:
3182 operands[1] = adjust_address (operands[0], DImode, 8);
3183 operands[0] = adjust_address (operands[0], DImode, 0);
3184 break;
3185
3186 default:
3187 gcc_unreachable ();
3188 }
3189
3190 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3191 {
3192 rtx tmp;
3193 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3194 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3195 }
3196 }
3197
3198 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3199 op2 is a register containing the sign bit, operation is the
3200 logical operation to be performed. */
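/* For negtf2 the operation flips the sign bit (bit 63 of the high
   quadword of the IEEE quad value) with an XOR, while abstf2 clears it
   with an AND-NOT; OP2 holds that single-bit constant in a register.  */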
3201
3202 void
3203 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3204 {
3205 rtx high_bit = operands[2];
3206 rtx scratch;
3207 int move;
3208
3209 alpha_split_tmode_pair (operands, TFmode, false);
3210
3211 /* Detect three flavors of operand overlap. */
3212 move = 1;
3213 if (rtx_equal_p (operands[0], operands[2]))
3214 move = 0;
3215 else if (rtx_equal_p (operands[1], operands[2]))
3216 {
3217 if (rtx_equal_p (operands[0], high_bit))
3218 move = 2;
3219 else
3220 move = -1;
3221 }
3222
3223 if (move < 0)
3224 emit_move_insn (operands[0], operands[2]);
3225
3226 /* ??? If the destination overlaps both source tf and high_bit, then
3227 assume source tf is dead in its entirety and use the other half
3228 for a scratch register. Otherwise "scratch" is just the proper
3229 destination register. */
3230 scratch = operands[move < 2 ? 1 : 3];
3231
3232 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3233
3234 if (move > 0)
3235 {
3236 emit_move_insn (operands[0], operands[2]);
3237 if (move > 1)
3238 emit_move_insn (operands[1], scratch);
3239 }
3240 }
3241 \f
3242 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3243 unaligned data:
3244
3245 unsigned: signed:
3246 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3248 lda r3,X(r11) lda r3,X+2(r11)
3249 extwl r1,r3,r1 extql r1,r3,r1
3250 extwh r2,r3,r2 extqh r2,r3,r2
3251 or r1,r2,r1 or r1,r2,r1
3252 sra r1,48,r1
3253
3254 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3255 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3256 lda r3,X(r11) lda r3,X(r11)
3257 extll r1,r3,r1 extll r1,r3,r1
3258 extlh r2,r3,r2 extlh r2,r3,r2
3259 or r1,r2,r1 addl r1,r2,r1
3260
3261 quad: ldq_u r1,X(r11)
3262 ldq_u r2,X+7(r11)
3263 lda r3,X(r11)
3264 extql r1,r3,r1
3265 extqh r2,r3,r2
3266 or r1,r2,r1
3267 */
3268
3269 void
3270 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3271 HOST_WIDE_INT ofs, int sign)
3272 {
3273 rtx meml, memh, addr, extl, exth, tmp, mema;
3274 enum machine_mode mode;
3275
3276 if (TARGET_BWX && size == 2)
3277 {
3278 meml = adjust_address (mem, QImode, ofs);
3279 memh = adjust_address (mem, QImode, ofs+1);
3280 if (BYTES_BIG_ENDIAN)
3281 tmp = meml, meml = memh, memh = tmp;
3282 extl = gen_reg_rtx (DImode);
3283 exth = gen_reg_rtx (DImode);
3284 emit_insn (gen_zero_extendqidi2 (extl, meml));
3285 emit_insn (gen_zero_extendqidi2 (exth, memh));
3286 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3287 NULL, 1, OPTAB_LIB_WIDEN);
3288 addr = expand_simple_binop (DImode, IOR, extl, exth,
3289 NULL, 1, OPTAB_LIB_WIDEN);
3290
3291 if (sign && GET_MODE (tgt) != HImode)
3292 {
3293 addr = gen_lowpart (HImode, addr);
3294 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3295 }
3296 else
3297 {
3298 if (GET_MODE (tgt) != DImode)
3299 addr = gen_lowpart (GET_MODE (tgt), addr);
3300 emit_move_insn (tgt, addr);
3301 }
3302 return;
3303 }
3304
3305 meml = gen_reg_rtx (DImode);
3306 memh = gen_reg_rtx (DImode);
3307 addr = gen_reg_rtx (DImode);
3308 extl = gen_reg_rtx (DImode);
3309 exth = gen_reg_rtx (DImode);
3310
3311 mema = XEXP (mem, 0);
3312 if (GET_CODE (mema) == LO_SUM)
3313 mema = force_reg (Pmode, mema);
3314
3315 /* AND addresses cannot be in any alias set, since they may implicitly
3316 alias surrounding code. Ideally we'd have some alias set that
3317 covered all types except those with alignment 8 or higher. */
3318
3319 tmp = change_address (mem, DImode,
3320 gen_rtx_AND (DImode,
3321 plus_constant (mema, ofs),
3322 GEN_INT (-8)));
3323 set_mem_alias_set (tmp, 0);
3324 emit_move_insn (meml, tmp);
3325
3326 tmp = change_address (mem, DImode,
3327 gen_rtx_AND (DImode,
3328 plus_constant (mema, ofs + size - 1),
3329 GEN_INT (-8)));
3330 set_mem_alias_set (tmp, 0);
3331 emit_move_insn (memh, tmp);
3332
3333 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3334 {
3335 emit_move_insn (addr, plus_constant (mema, -1));
3336
3337 emit_insn (gen_extqh_be (extl, meml, addr));
3338 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3339
3340 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3341 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3342 addr, 1, OPTAB_WIDEN);
3343 }
3344 else if (sign && size == 2)
3345 {
3346 emit_move_insn (addr, plus_constant (mema, ofs+2));
3347
3348 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3349 emit_insn (gen_extqh_le (exth, memh, addr));
3350
3351 /* We must use tgt here for the target. Alpha-vms port fails if we use
3352 addr for the target, because addr is marked as a pointer and combine
3353 knows that pointers are always sign-extended 32-bit values. */
3354 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3355 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3356 addr, 1, OPTAB_WIDEN);
3357 }
3358 else
3359 {
3360 if (WORDS_BIG_ENDIAN)
3361 {
3362 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3363 switch ((int) size)
3364 {
3365 case 2:
3366 emit_insn (gen_extwh_be (extl, meml, addr));
3367 mode = HImode;
3368 break;
3369
3370 case 4:
3371 emit_insn (gen_extlh_be (extl, meml, addr));
3372 mode = SImode;
3373 break;
3374
3375 case 8:
3376 emit_insn (gen_extqh_be (extl, meml, addr));
3377 mode = DImode;
3378 break;
3379
3380 default:
3381 gcc_unreachable ();
3382 }
3383 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3384 }
3385 else
3386 {
3387 emit_move_insn (addr, plus_constant (mema, ofs));
3388 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3389 switch ((int) size)
3390 {
3391 case 2:
3392 emit_insn (gen_extwh_le (exth, memh, addr));
3393 mode = HImode;
3394 break;
3395
3396 case 4:
3397 emit_insn (gen_extlh_le (exth, memh, addr));
3398 mode = SImode;
3399 break;
3400
3401 case 8:
3402 emit_insn (gen_extqh_le (exth, memh, addr));
3403 mode = DImode;
3404 break;
3405
3406 default:
3407 gcc_unreachable ();
3408 }
3409 }
3410
3411 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3412 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3413 sign, OPTAB_WIDEN);
3414 }
3415
3416 if (addr != tgt)
3417 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3418 }
3419
3420 /* Similarly, use ins and msk instructions to perform unaligned stores. */
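/* For a full quadword, the Architecture Handbook store sequence is
   roughly:

	quad:	ldq_u r1,X(r11)
		ldq_u r2,X+7(r11)
		lda r3,X(r11)
		insql r4,r3,r5
		insqh r4,r3,r6
		mskql r1,r3,r1
		mskqh r2,r3,r2
		or r1,r5,r1
		or r2,r6,r2
		stq_u r2,X+7(r11)
		stq_u r1,X(r11)

   i.e. load both destination quadwords, shift the new data into place,
   clear the bytes being replaced, merge, and store the high quadword
   first so the degenerate aligned case still works.  */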
3421
3422 void
3423 alpha_expand_unaligned_store (rtx dst, rtx src,
3424 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3425 {
3426 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3427
3428 if (TARGET_BWX && size == 2)
3429 {
3430 if (src != const0_rtx)
3431 {
3432 dstl = gen_lowpart (QImode, src);
3433 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3434 NULL, 1, OPTAB_LIB_WIDEN);
3435 dsth = gen_lowpart (QImode, dsth);
3436 }
3437 else
3438 dstl = dsth = const0_rtx;
3439
3440 meml = adjust_address (dst, QImode, ofs);
3441 memh = adjust_address (dst, QImode, ofs+1);
3442 if (BYTES_BIG_ENDIAN)
3443 addr = meml, meml = memh, memh = addr;
3444
3445 emit_move_insn (meml, dstl);
3446 emit_move_insn (memh, dsth);
3447 return;
3448 }
3449
3450 dstl = gen_reg_rtx (DImode);
3451 dsth = gen_reg_rtx (DImode);
3452 insl = gen_reg_rtx (DImode);
3453 insh = gen_reg_rtx (DImode);
3454
3455 dsta = XEXP (dst, 0);
3456 if (GET_CODE (dsta) == LO_SUM)
3457 dsta = force_reg (Pmode, dsta);
3458
3459 /* AND addresses cannot be in any alias set, since they may implicitly
3460 alias surrounding code. Ideally we'd have some alias set that
3461 covered all types except those with alignment 8 or higher. */
3462
3463 meml = change_address (dst, DImode,
3464 gen_rtx_AND (DImode,
3465 plus_constant (dsta, ofs),
3466 GEN_INT (-8)));
3467 set_mem_alias_set (meml, 0);
3468
3469 memh = change_address (dst, DImode,
3470 gen_rtx_AND (DImode,
3471 plus_constant (dsta, ofs + size - 1),
3472 GEN_INT (-8)));
3473 set_mem_alias_set (memh, 0);
3474
3475 emit_move_insn (dsth, memh);
3476 emit_move_insn (dstl, meml);
3477 if (WORDS_BIG_ENDIAN)
3478 {
3479 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3480
3481 if (src != const0_rtx)
3482 {
3483 switch ((int) size)
3484 {
3485 case 2:
3486 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3487 break;
3488 case 4:
3489 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3490 break;
3491 case 8:
3492 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3493 break;
3494 }
3495 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3496 GEN_INT (size*8), addr));
3497 }
3498
3499 switch ((int) size)
3500 {
3501 case 2:
3502 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3503 break;
3504 case 4:
3505 {
3506 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3507 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3508 break;
3509 }
3510 case 8:
3511 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3512 break;
3513 }
3514
3515 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3516 }
3517 else
3518 {
3519 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3520
3521 if (src != CONST0_RTX (GET_MODE (src)))
3522 {
3523 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3524 GEN_INT (size*8), addr));
3525
3526 switch ((int) size)
3527 {
3528 case 2:
3529 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3530 break;
3531 case 4:
3532 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3533 break;
3534 case 8:
3535 emit_insn (gen_insql_le (insl, src, addr));
3536 break;
3537 }
3538 }
3539
3540 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3541
3542 switch ((int) size)
3543 {
3544 case 2:
3545 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3546 break;
3547 case 4:
3548 {
3549 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3550 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3551 break;
3552 }
3553 case 8:
3554 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3555 break;
3556 }
3557 }
3558
3559 if (src != CONST0_RTX (GET_MODE (src)))
3560 {
3561 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3562 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3563 }
3564
3565 if (WORDS_BIG_ENDIAN)
3566 {
3567 emit_move_insn (meml, dstl);
3568 emit_move_insn (memh, dsth);
3569 }
3570 else
3571 {
3572 /* Must store high before low for degenerate case of aligned. */
3573 emit_move_insn (memh, dsth);
3574 emit_move_insn (meml, dstl);
3575 }
3576 }
3577
3578 /* The block move code tries to maximize speed by separating loads and
3579 stores at the expense of register pressure: we load all of the data
3580 before we store it back out. There are two secondary effects worth
3581 mentioning: this speeds copying to/from aligned and unaligned
3582 buffers, and it makes the code significantly easier to write. */
3583
3584 #define MAX_MOVE_WORDS 8
3585
3586 /* Load an integral number of consecutive unaligned quadwords. */
3587
3588 static void
3589 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3590 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3591 {
3592 rtx const im8 = GEN_INT (-8);
3593 rtx const i64 = GEN_INT (64);
3594 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3595 rtx sreg, areg, tmp, smema;
3596 HOST_WIDE_INT i;
3597
3598 smema = XEXP (smem, 0);
3599 if (GET_CODE (smema) == LO_SUM)
3600 smema = force_reg (Pmode, smema);
3601
3602 /* Generate all the tmp registers we need. */
3603 for (i = 0; i < words; ++i)
3604 {
3605 data_regs[i] = out_regs[i];
3606 ext_tmps[i] = gen_reg_rtx (DImode);
3607 }
3608 data_regs[words] = gen_reg_rtx (DImode);
3609
3610 if (ofs != 0)
3611 smem = adjust_address (smem, GET_MODE (smem), ofs);
3612
3613 /* Load up all of the source data. */
3614 for (i = 0; i < words; ++i)
3615 {
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*i),
3619 im8));
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[i], tmp);
3622 }
3623
3624 tmp = change_address (smem, DImode,
3625 gen_rtx_AND (DImode,
3626 plus_constant (smema, 8*words - 1),
3627 im8));
3628 set_mem_alias_set (tmp, 0);
3629 emit_move_insn (data_regs[words], tmp);
3630
3631 /* Extract the half-word fragments. Unfortunately DEC decided to make
3632 extxh with offset zero a noop instead of zeroing the register, so
3633 we must take care of that edge condition ourselves with cmov. */
3634
3635 sreg = copy_addr_to_reg (smema);
3636 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3637 1, OPTAB_WIDEN);
3638 if (WORDS_BIG_ENDIAN)
3639 emit_move_insn (sreg, plus_constant (sreg, 7));
3640 for (i = 0; i < words; ++i)
3641 {
3642 if (WORDS_BIG_ENDIAN)
3643 {
3644 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3645 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3646 }
3647 else
3648 {
3649 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3650 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3651 }
3652 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3653 gen_rtx_IF_THEN_ELSE (DImode,
3654 gen_rtx_EQ (DImode, areg,
3655 const0_rtx),
3656 const0_rtx, ext_tmps[i])));
3657 }
3658
3659 /* Merge the half-words into whole words. */
3660 for (i = 0; i < words; ++i)
3661 {
3662 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3663 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3664 }
3665 }
3666
3667 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3668 may be NULL to store zeros. */
3669
3670 static void
3671 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3672 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3673 {
3674 rtx const im8 = GEN_INT (-8);
3675 rtx const i64 = GEN_INT (64);
3676 rtx ins_tmps[MAX_MOVE_WORDS];
3677 rtx st_tmp_1, st_tmp_2, dreg;
3678 rtx st_addr_1, st_addr_2, dmema;
3679 HOST_WIDE_INT i;
3680
3681 dmema = XEXP (dmem, 0);
3682 if (GET_CODE (dmema) == LO_SUM)
3683 dmema = force_reg (Pmode, dmema);
3684
3685 /* Generate all the tmp registers we need. */
3686 if (data_regs != NULL)
3687 for (i = 0; i < words; ++i)
3688 ins_tmps[i] = gen_reg_rtx(DImode);
3689 st_tmp_1 = gen_reg_rtx(DImode);
3690 st_tmp_2 = gen_reg_rtx(DImode);
3691
3692 if (ofs != 0)
3693 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3694
3695 st_addr_2 = change_address (dmem, DImode,
3696 gen_rtx_AND (DImode,
3697 plus_constant (dmema, words*8 - 1),
3698 im8));
3699 set_mem_alias_set (st_addr_2, 0);
3700
3701 st_addr_1 = change_address (dmem, DImode,
3702 gen_rtx_AND (DImode, dmema, im8));
3703 set_mem_alias_set (st_addr_1, 0);
3704
3705 /* Load up the destination end bits. */
3706 emit_move_insn (st_tmp_2, st_addr_2);
3707 emit_move_insn (st_tmp_1, st_addr_1);
3708
3709 /* Shift the input data into place. */
3710 dreg = copy_addr_to_reg (dmema);
3711 if (WORDS_BIG_ENDIAN)
3712 emit_move_insn (dreg, plus_constant (dreg, 7));
3713 if (data_regs != NULL)
3714 {
3715 for (i = words-1; i >= 0; --i)
3716 {
3717 if (WORDS_BIG_ENDIAN)
3718 {
3719 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3720 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3721 }
3722 else
3723 {
3724 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3725 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3726 }
3727 }
3728 for (i = words-1; i > 0; --i)
3729 {
3730 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3731 ins_tmps[i-1], ins_tmps[i-1], 1,
3732 OPTAB_WIDEN);
3733 }
3734 }
3735
3736 /* Split and merge the ends with the destination data. */
3737 if (WORDS_BIG_ENDIAN)
3738 {
3739 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3740 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3741 }
3742 else
3743 {
3744 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3745 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3746 }
3747
3748 if (data_regs != NULL)
3749 {
3750 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3751 st_tmp_2, 1, OPTAB_WIDEN);
3752 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3753 st_tmp_1, 1, OPTAB_WIDEN);
3754 }
3755
3756 /* Store it all. */
3757 if (WORDS_BIG_ENDIAN)
3758 emit_move_insn (st_addr_1, st_tmp_1);
3759 else
3760 emit_move_insn (st_addr_2, st_tmp_2);
3761 for (i = words-1; i > 0; --i)
3762 {
3763 rtx tmp = change_address (dmem, DImode,
3764 gen_rtx_AND (DImode,
3765 plus_constant(dmema,
3766 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3767 im8));
3768 set_mem_alias_set (tmp, 0);
3769 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3770 }
3771 if (WORDS_BIG_ENDIAN)
3772 emit_move_insn (st_addr_2, st_tmp_2);
3773 else
3774 emit_move_insn (st_addr_1, st_tmp_1);
3775 }
3776
3777
3778 /* Expand string/block move operations.
3779
3780 operands[0] is the pointer to the destination.
3781 operands[1] is the pointer to the source.
3782 operands[2] is the number of bytes to move.
3783 operands[3] is the alignment. */
3784
3785 int
3786 alpha_expand_block_move (rtx operands[])
3787 {
3788 rtx bytes_rtx = operands[2];
3789 rtx align_rtx = operands[3];
3790 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3791 HOST_WIDE_INT bytes = orig_bytes;
3792 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3793 HOST_WIDE_INT dst_align = src_align;
3794 rtx orig_src = operands[1];
3795 rtx orig_dst = operands[0];
3796 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3797 rtx tmp;
3798 unsigned int i, words, ofs, nregs = 0;
3799
3800 if (orig_bytes <= 0)
3801 return 1;
3802 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3803 return 0;
3804
3805 /* Look for additional alignment information from recorded register info. */
3806
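/* E.g. a base register known to be 8-byte aligned plus a constant
   offset of 4 only guarantees 4-byte alignment for the access itself.  */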
3807 tmp = XEXP (orig_src, 0);
3808 if (GET_CODE (tmp) == REG)
3809 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3810 else if (GET_CODE (tmp) == PLUS
3811 && GET_CODE (XEXP (tmp, 0)) == REG
3812 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3813 {
3814 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3815 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3816
3817 if (a > src_align)
3818 {
3819 if (a >= 64 && c % 8 == 0)
3820 src_align = 64;
3821 else if (a >= 32 && c % 4 == 0)
3822 src_align = 32;
3823 else if (a >= 16 && c % 2 == 0)
3824 src_align = 16;
3825 }
3826 }
3827
3828 tmp = XEXP (orig_dst, 0);
3829 if (GET_CODE (tmp) == REG)
3830 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3831 else if (GET_CODE (tmp) == PLUS
3832 && GET_CODE (XEXP (tmp, 0)) == REG
3833 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3834 {
3835 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3836 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3837
3838 if (a > dst_align)
3839 {
3840 if (a >= 64 && c % 8 == 0)
3841 dst_align = 64;
3842 else if (a >= 32 && c % 4 == 0)
3843 dst_align = 32;
3844 else if (a >= 16 && c % 2 == 0)
3845 dst_align = 16;
3846 }
3847 }
3848
3849 ofs = 0;
3850 if (src_align >= 64 && bytes >= 8)
3851 {
3852 words = bytes / 8;
3853
3854 for (i = 0; i < words; ++i)
3855 data_regs[nregs + i] = gen_reg_rtx (DImode);
3856
3857 for (i = 0; i < words; ++i)
3858 emit_move_insn (data_regs[nregs + i],
3859 adjust_address (orig_src, DImode, ofs + i * 8));
3860
3861 nregs += words;
3862 bytes -= words * 8;
3863 ofs += words * 8;
3864 }
3865
3866 if (src_align >= 32 && bytes >= 4)
3867 {
3868 words = bytes / 4;
3869
3870 for (i = 0; i < words; ++i)
3871 data_regs[nregs + i] = gen_reg_rtx (SImode);
3872
3873 for (i = 0; i < words; ++i)
3874 emit_move_insn (data_regs[nregs + i],
3875 adjust_address (orig_src, SImode, ofs + i * 4));
3876
3877 nregs += words;
3878 bytes -= words * 4;
3879 ofs += words * 4;
3880 }
3881
3882 if (bytes >= 8)
3883 {
3884 words = bytes / 8;
3885
3886 for (i = 0; i < words+1; ++i)
3887 data_regs[nregs + i] = gen_reg_rtx (DImode);
3888
3889 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3890 words, ofs);
3891
3892 nregs += words;
3893 bytes -= words * 8;
3894 ofs += words * 8;
3895 }
3896
3897 if (! TARGET_BWX && bytes >= 4)
3898 {
3899 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3900 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3901 bytes -= 4;
3902 ofs += 4;
3903 }
3904
3905 if (bytes >= 2)
3906 {
3907 if (src_align >= 16)
3908 {
3909 do {
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3912 bytes -= 2;
3913 ofs += 2;
3914 } while (bytes >= 2);
3915 }
3916 else if (! TARGET_BWX)
3917 {
3918 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3919 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3920 bytes -= 2;
3921 ofs += 2;
3922 }
3923 }
3924
3925 while (bytes > 0)
3926 {
3927 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3928 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3929 bytes -= 1;
3930 ofs += 1;
3931 }
3932
3933 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3934
3935 /* Now save it back out again. */
3936
3937 i = 0, ofs = 0;
3938
3939 /* Write out the data in whatever chunks reading the source allowed. */
3940 if (dst_align >= 64)
3941 {
3942 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3943 {
3944 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3945 data_regs[i]);
3946 ofs += 8;
3947 i++;
3948 }
3949 }
3950
3951 if (dst_align >= 32)
3952 {
3953 /* If the source has remaining DImode regs, write them out in
3954 two pieces. */
3955 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3956 {
3957 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3958 NULL_RTX, 1, OPTAB_WIDEN);
3959
3960 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3961 gen_lowpart (SImode, data_regs[i]));
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3963 gen_lowpart (SImode, tmp));
3964 ofs += 8;
3965 i++;
3966 }
3967
3968 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3969 {
3970 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3971 data_regs[i]);
3972 ofs += 4;
3973 i++;
3974 }
3975 }
3976
3977 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3978 {
3979 /* Write out a remaining block of words using unaligned methods. */
3980
3981 for (words = 1; i + words < nregs; words++)
3982 if (GET_MODE (data_regs[i + words]) != DImode)
3983 break;
3984
3985 if (words == 1)
3986 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3987 else
3988 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3989 words, ofs);
3990
3991 i += words;
3992 ofs += words * 8;
3993 }
3994
3995 /* Due to the above, this won't be aligned. */
3996 /* ??? If we have more than one of these, consider constructing full
3997 words in registers and using alpha_expand_unaligned_store_words. */
3998 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3999 {
4000 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4001 ofs += 4;
4002 i++;
4003 }
4004
4005 if (dst_align >= 16)
4006 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4007 {
4008 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4009 i++;
4010 ofs += 2;
4011 }
4012 else
4013 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4014 {
4015 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4016 i++;
4017 ofs += 2;
4018 }
4019
4020 /* The remainder must be byte copies. */
4021 while (i < nregs)
4022 {
4023 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4024 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4025 i++;
4026 ofs += 1;
4027 }
4028
4029 return 1;
4030 }
4031
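/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */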
4032 int
4033 alpha_expand_block_clear (rtx operands[])
4034 {
4035 rtx bytes_rtx = operands[1];
4036 rtx align_rtx = operands[3];
4037 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4038 HOST_WIDE_INT bytes = orig_bytes;
4039 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4040 HOST_WIDE_INT alignofs = 0;
4041 rtx orig_dst = operands[0];
4042 rtx tmp;
4043 int i, words, ofs = 0;
4044
4045 if (orig_bytes <= 0)
4046 return 1;
4047 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4048 return 0;
4049
4050 /* Look for stricter alignment. */
4051 tmp = XEXP (orig_dst, 0);
4052 if (GET_CODE (tmp) == REG)
4053 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4054 else if (GET_CODE (tmp) == PLUS
4055 && GET_CODE (XEXP (tmp, 0)) == REG
4056 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4057 {
4058 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4059 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4060
4061 if (a > align)
4062 {
4063 if (a >= 64)
4064 align = a, alignofs = 8 - c % 8;
4065 else if (a >= 32)
4066 align = a, alignofs = 4 - c % 4;
4067 else if (a >= 16)
4068 align = a, alignofs = 2 - c % 2;
4069 }
4070 }
4071
4072 /* Handle an unaligned prefix first. */
4073
4074 if (alignofs > 0)
4075 {
4076 #if HOST_BITS_PER_WIDE_INT >= 64
4077 /* Given that alignofs is bounded by align, the only time BWX could
4078 generate three stores is for a 7 byte fill. Prefer two individual
4079 stores over a load/mask/store sequence. */
4080 if ((!TARGET_BWX || alignofs == 7)
4081 && align >= 32
4082 && !(alignofs == 4 && bytes >= 4))
4083 {
4084 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4085 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4086 rtx mem, tmp;
4087 HOST_WIDE_INT mask;
4088
4089 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4090 set_mem_alias_set (mem, 0);
4091
4092 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4093 if (bytes < alignofs)
4094 {
4095 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4096 ofs += bytes;
4097 bytes = 0;
4098 }
4099 else
4100 {
4101 bytes -= alignofs;
4102 ofs += alignofs;
4103 }
4104 alignofs = 0;
4105
4106 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4107 NULL_RTX, 1, OPTAB_WIDEN);
4108
4109 emit_move_insn (mem, tmp);
4110 }
4111 #endif
4112
4113 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4114 {
4115 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4116 bytes -= 1;
4117 ofs += 1;
4118 alignofs -= 1;
4119 }
4120 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4121 {
4122 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4123 bytes -= 2;
4124 ofs += 2;
4125 alignofs -= 2;
4126 }
4127 if (alignofs == 4 && bytes >= 4)
4128 {
4129 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4130 bytes -= 4;
4131 ofs += 4;
4132 alignofs = 0;
4133 }
4134
4135 /* If we've not used the extra lead alignment information by now,
4136 we won't be able to. Downgrade align to match what's left over. */
4137 if (alignofs > 0)
4138 {
4139 alignofs = alignofs & -alignofs;
4140 align = MIN (align, alignofs * BITS_PER_UNIT);
4141 }
4142 }
4143
4144 /* Handle a block of contiguous long-words. */
4145
4146 if (align >= 64 && bytes >= 8)
4147 {
4148 words = bytes / 8;
4149
4150 for (i = 0; i < words; ++i)
4151 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4152 const0_rtx);
4153
4154 bytes -= words * 8;
4155 ofs += words * 8;
4156 }
4157
4158 /* If the block is large and appropriately aligned, emit a single
4159 store followed by a sequence of stq_u insns. */
4160
4161 if (align >= 32 && bytes > 16)
4162 {
4163 rtx orig_dsta;
4164
4165 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4166 bytes -= 4;
4167 ofs += 4;
4168
4169 orig_dsta = XEXP (orig_dst, 0);
4170 if (GET_CODE (orig_dsta) == LO_SUM)
4171 orig_dsta = force_reg (Pmode, orig_dsta);
4172
4173 words = bytes / 8;
4174 for (i = 0; i < words; ++i)
4175 {
4176 rtx mem
4177 = change_address (orig_dst, DImode,
4178 gen_rtx_AND (DImode,
4179 plus_constant (orig_dsta, ofs + i*8),
4180 GEN_INT (-8)));
4181 set_mem_alias_set (mem, 0);
4182 emit_move_insn (mem, const0_rtx);
4183 }
4184
4185 /* Depending on the alignment, the first stq_u may have overlapped
4186 with the initial stl, which means that the last stq_u didn't
4187 write as much as it would appear. Leave those questionable bytes
4188 unaccounted for. */
4189 bytes -= words * 8 - 4;
4190 ofs += words * 8 - 4;
4191 }
4192
4193 /* Handle a smaller block of aligned words. */
4194
4195 if ((align >= 64 && bytes == 4)
4196 || (align == 32 && bytes >= 4))
4197 {
4198 words = bytes / 4;
4199
4200 for (i = 0; i < words; ++i)
4201 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4202 const0_rtx);
4203
4204 bytes -= words * 4;
4205 ofs += words * 4;
4206 }
4207
4208   /* An unaligned block uses stq_u stores for as many quadwords as possible.  */
4209
4210 if (bytes >= 8)
4211 {
4212 words = bytes / 8;
4213
4214 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4215
4216 bytes -= words * 8;
4217 ofs += words * 8;
4218 }
4219
4220 /* Next clean up any trailing pieces. */
4221
4222 #if HOST_BITS_PER_WIDE_INT >= 64
4223 /* Count the number of bits in BYTES for which aligned stores could
4224 be emitted. */
4225 words = 0;
4226 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4227 if (bytes & i)
4228 words += 1;
4229
4230 /* If we have appropriate alignment (and it wouldn't take too many
4231 instructions otherwise), mask out the bytes we need. */
4232 if (TARGET_BWX ? words > 2 : bytes > 0)
4233 {
4234 if (align >= 64)
4235 {
4236 rtx mem, tmp;
4237 HOST_WIDE_INT mask;
4238
4239 mem = adjust_address (orig_dst, DImode, ofs);
4240 set_mem_alias_set (mem, 0);
4241
4242 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4243
4244 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4245 NULL_RTX, 1, OPTAB_WIDEN);
4246
4247 emit_move_insn (mem, tmp);
4248 return 1;
4249 }
4250 else if (align >= 32 && bytes < 4)
4251 {
4252 rtx mem, tmp;
4253 HOST_WIDE_INT mask;
4254
4255 mem = adjust_address (orig_dst, SImode, ofs);
4256 set_mem_alias_set (mem, 0);
4257
4258 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4259
4260 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4261 NULL_RTX, 1, OPTAB_WIDEN);
4262
4263 emit_move_insn (mem, tmp);
4264 return 1;
4265 }
4266 }
4267 #endif
4268
4269 if (!TARGET_BWX && bytes >= 4)
4270 {
4271 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4272 bytes -= 4;
4273 ofs += 4;
4274 }
4275
4276 if (bytes >= 2)
4277 {
4278 if (align >= 16)
4279 {
4280 do {
4281 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4282 const0_rtx);
4283 bytes -= 2;
4284 ofs += 2;
4285 } while (bytes >= 2);
4286 }
4287 else if (! TARGET_BWX)
4288 {
4289 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4290 bytes -= 2;
4291 ofs += 2;
4292 }
4293 }
4294
4295 while (bytes > 0)
4296 {
4297 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4298 bytes -= 1;
4299 ofs += 1;
4300 }
4301
4302 return 1;
4303 }
4304
4305 /* Returns a mask so that zap(x, value) == x & mask. */
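/* For example, VALUE == 0x0f marks bytes 0 through 3 for zapping, so the
   mask built below is 0xffffffff00000000 and zap(x, 0x0f) == x & mask
   clears exactly the low four bytes.  */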
4306
4307 rtx
4308 alpha_expand_zap_mask (HOST_WIDE_INT value)
4309 {
4310 rtx result;
4311 int i;
4312
4313 if (HOST_BITS_PER_WIDE_INT >= 64)
4314 {
4315 HOST_WIDE_INT mask = 0;
4316
4317 for (i = 7; i >= 0; --i)
4318 {
4319 mask <<= 8;
4320 if (!((value >> i) & 1))
4321 mask |= 0xff;
4322 }
4323
4324 result = gen_int_mode (mask, DImode);
4325 }
4326 else
4327 {
4328 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4329
4330 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4331
4332 for (i = 7; i >= 4; --i)
4333 {
4334 mask_hi <<= 8;
4335 if (!((value >> i) & 1))
4336 mask_hi |= 0xff;
4337 }
4338
4339 for (i = 3; i >= 0; --i)
4340 {
4341 mask_lo <<= 8;
4342 if (!((value >> i) & 1))
4343 mask_lo |= 0xff;
4344 }
4345
4346 result = immed_double_const (mask_lo, mask_hi, DImode);
4347 }
4348
4349 return result;
4350 }
4351
4352 void
4353 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4354 enum machine_mode mode,
4355 rtx op0, rtx op1, rtx op2)
4356 {
4357 op0 = gen_lowpart (mode, op0);
4358
4359 if (op1 == const0_rtx)
4360 op1 = CONST0_RTX (mode);
4361 else
4362 op1 = gen_lowpart (mode, op1);
4363
4364 if (op2 == const0_rtx)
4365 op2 = CONST0_RTX (mode);
4366 else
4367 op2 = gen_lowpart (mode, op2);
4368
4369 emit_insn ((*gen) (op0, op1, op2));
4370 }
4371
4372 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4373 COND is true. Mark the jump as unlikely to be taken. */
4374
4375 static void
4376 emit_unlikely_jump (rtx cond, rtx label)
4377 {
4378 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4379 rtx x;
4380
4381 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4382 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
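  /* Attach a branch-probability note of REG_BR_PROB_BASE / 100 - 1, i.e.
     just under 1%, so the optimizers treat the branch to LABEL as very
     unlikely to be taken.  */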
4383 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4384 }
4385
4386 /* A subroutine of the atomic operation splitters. Emit a load-locked
4387 instruction in MODE. */
4388
4389 static void
4390 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4391 {
4392 rtx (*fn) (rtx, rtx) = NULL;
4393 if (mode == SImode)
4394 fn = gen_load_locked_si;
4395 else if (mode == DImode)
4396 fn = gen_load_locked_di;
4397 emit_insn (fn (reg, mem));
4398 }
4399
4400 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4401 instruction in MODE. */
4402
4403 static void
4404 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4405 {
4406 rtx (*fn) (rtx, rtx, rtx) = NULL;
4407 if (mode == SImode)
4408 fn = gen_store_conditional_si;
4409 else if (mode == DImode)
4410 fn = gen_store_conditional_di;
4411 emit_insn (fn (res, mem, val));
4412 }
4413
4414 /* A subroutine of the atomic operation splitters. Emit an insxl
4415 instruction in MODE. */
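/* For example, in the little-endian case with MODE == QImode this emits
   insbl, which places the low byte of OP1 within an otherwise zero quadword
   at the byte position selected by the low three bits of OP2.  */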
4416
4417 static rtx
4418 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4419 {
4420 rtx ret = gen_reg_rtx (DImode);
4421 rtx (*fn) (rtx, rtx, rtx);
4422
4423 if (WORDS_BIG_ENDIAN)
4424 {
4425 if (mode == QImode)
4426 fn = gen_insbl_be;
4427 else
4428 fn = gen_inswl_be;
4429 }
4430 else
4431 {
4432 if (mode == QImode)
4433 fn = gen_insbl_le;
4434 else
4435 fn = gen_inswl_le;
4436 }
4437 /* The insbl and inswl patterns require a register operand. */
4438 op1 = force_reg (mode, op1);
4439 emit_insn (fn (ret, op1, op2));
4440
4441 return ret;
4442 }
4443
4444 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4445 to perform. MEM is the memory on which to operate. VAL is the second
4446 operand of the binary operator. BEFORE and AFTER are optional locations to
4447    return the value of MEM either before or after the operation.  SCRATCH is
4448 a scratch register. */
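/* The emitted sequence is roughly

	mb
   1:	ld<mode>_l	BEFORE,MEM
	<op>		BEFORE,VAL,SCRATCH
	st<mode>_c	SCRATCH,MEM
	beq		SCRATCH,1b
	mb

   with the result also copied to AFTER when requested, and the branch
   marked as very unlikely to be taken.  */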
4449
4450 void
4451 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4452 rtx before, rtx after, rtx scratch)
4453 {
4454 enum machine_mode mode = GET_MODE (mem);
4455 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4456
4457 emit_insn (gen_memory_barrier ());
4458
4459 label = gen_label_rtx ();
4460 emit_label (label);
4461 label = gen_rtx_LABEL_REF (DImode, label);
4462
4463 if (before == NULL)
4464 before = scratch;
4465 emit_load_locked (mode, before, mem);
4466
4467 if (code == NOT)
4468 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4469 else
4470 x = gen_rtx_fmt_ee (code, mode, before, val);
4471 if (after)
4472 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4473 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4474
4475 emit_store_conditional (mode, cond, mem, scratch);
4476
4477 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4478 emit_unlikely_jump (x, label);
4479
4480 emit_insn (gen_memory_barrier ());
4481 }
4482
4483 /* Expand a compare and swap operation. */
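/* The emitted sequence is roughly

	mb
   1:	ld<mode>_l	RETVAL,MEM
	cmpeq		RETVAL,OLDVAL,SCRATCH
	beq		SCRATCH,2f
	mov		NEWVAL,SCRATCH
	st<mode>_c	SCRATCH,MEM
	beq		SCRATCH,1b
	mb
   2:

   so RETVAL always receives the memory's prior contents.  */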
4484
4485 void
4486 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4487 rtx scratch)
4488 {
4489 enum machine_mode mode = GET_MODE (mem);
4490 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4491
4492 emit_insn (gen_memory_barrier ());
4493
4494 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4495 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4496 emit_label (XEXP (label1, 0));
4497
4498 emit_load_locked (mode, retval, mem);
4499
4500 x = gen_lowpart (DImode, retval);
4501 if (oldval == const0_rtx)
4502 x = gen_rtx_NE (DImode, x, const0_rtx);
4503 else
4504 {
4505 x = gen_rtx_EQ (DImode, x, oldval);
4506 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4507 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4508 }
4509 emit_unlikely_jump (x, label2);
4510
4511 emit_move_insn (scratch, newval);
4512 emit_store_conditional (mode, cond, mem, scratch);
4513
4514 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4515 emit_unlikely_jump (x, label1);
4516
4517 emit_insn (gen_memory_barrier ());
4518 emit_label (XEXP (label2, 0));
4519 }
4520
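/* Expand a compare-and-swap of a QImode or HImode quantity.  Alpha only
   provides longword and quadword load-locked/store-conditional insns, so
   the operation is performed on the aligned DImode word containing the
   location.  */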
4521 void
4522 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4523 {
4524 enum machine_mode mode = GET_MODE (mem);
4525 rtx addr, align, wdst;
4526 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4527
4528 addr = force_reg (DImode, XEXP (mem, 0));
4529 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4530 NULL_RTX, 1, OPTAB_DIRECT);
4531
4532 oldval = convert_modes (DImode, mode, oldval, 1);
4533 newval = emit_insxl (mode, newval, addr);
4534
4535 wdst = gen_reg_rtx (DImode);
4536 if (mode == QImode)
4537 fn5 = gen_sync_compare_and_swapqi_1;
4538 else
4539 fn5 = gen_sync_compare_and_swaphi_1;
4540 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4541
4542 emit_move_insn (dst, gen_lowpart (mode, wdst));
4543 }
4544
4545 void
4546 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4547 rtx oldval, rtx newval, rtx align,
4548 rtx scratch, rtx cond)
4549 {
4550 rtx label1, label2, mem, width, mask, x;
4551
4552 mem = gen_rtx_MEM (DImode, align);
4553 MEM_VOLATILE_P (mem) = 1;
4554
4555 emit_insn (gen_memory_barrier ());
4556 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4557 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4558 emit_label (XEXP (label1, 0));
4559
4560 emit_load_locked (DImode, scratch, mem);
4561
4562 width = GEN_INT (GET_MODE_BITSIZE (mode));
4563 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4564 if (WORDS_BIG_ENDIAN)
4565 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4566 else
4567 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4568
4569 if (oldval == const0_rtx)
4570 x = gen_rtx_NE (DImode, dest, const0_rtx);
4571 else
4572 {
4573 x = gen_rtx_EQ (DImode, dest, oldval);
4574 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4575 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4576 }
4577 emit_unlikely_jump (x, label2);
4578
4579 if (WORDS_BIG_ENDIAN)
4580 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4581 else
4582 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4583 emit_insn (gen_iordi3 (scratch, scratch, newval));
4584
4585 emit_store_conditional (DImode, scratch, mem, scratch);
4586
4587 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4588 emit_unlikely_jump (x, label1);
4589
4590 emit_insn (gen_memory_barrier ());
4591 emit_label (XEXP (label2, 0));
4592 }
4593
4594 /* Expand an atomic exchange operation. */
4595
4596 void
4597 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4598 {
4599 enum machine_mode mode = GET_MODE (mem);
4600 rtx label, x, cond = gen_lowpart (DImode, scratch);
4601
4602 emit_insn (gen_memory_barrier ());
4603
4604 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4605 emit_label (XEXP (label, 0));
4606
4607 emit_load_locked (mode, retval, mem);
4608 emit_move_insn (scratch, val);
4609 emit_store_conditional (mode, cond, mem, scratch);
4610
4611 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4612 emit_unlikely_jump (x, label);
4613 }
4614
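/* Expand an atomic exchange of a QImode or HImode quantity, again by
   operating on the aligned DImode word containing the location.  */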
4615 void
4616 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4617 {
4618 enum machine_mode mode = GET_MODE (mem);
4619 rtx addr, align, wdst;
4620 rtx (*fn4) (rtx, rtx, rtx, rtx);
4621
4622 /* Force the address into a register. */
4623 addr = force_reg (DImode, XEXP (mem, 0));
4624
4625 /* Align it to a multiple of 8. */
4626 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4627 NULL_RTX, 1, OPTAB_DIRECT);
4628
4629 /* Insert val into the correct byte location within the word. */
4630 val = emit_insxl (mode, val, addr);
4631
4632 wdst = gen_reg_rtx (DImode);
4633 if (mode == QImode)
4634 fn4 = gen_sync_lock_test_and_setqi_1;
4635 else
4636 fn4 = gen_sync_lock_test_and_sethi_1;
4637 emit_insn (fn4 (wdst, addr, val, align));
4638
4639 emit_move_insn (dst, gen_lowpart (mode, wdst));
4640 }
4641
4642 void
4643 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4644 rtx val, rtx align, rtx scratch)
4645 {
4646 rtx label, mem, width, mask, x;
4647
4648 mem = gen_rtx_MEM (DImode, align);
4649 MEM_VOLATILE_P (mem) = 1;
4650
4651 emit_insn (gen_memory_barrier ());
4652 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4653 emit_label (XEXP (label, 0));
4654
4655 emit_load_locked (DImode, scratch, mem);
4656
4657 width = GEN_INT (GET_MODE_BITSIZE (mode));
4658 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4659 if (WORDS_BIG_ENDIAN)
4660 {
4661 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4662 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4663 }
4664 else
4665 {
4666 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4667 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4668 }
4669 emit_insn (gen_iordi3 (scratch, scratch, val));
4670
4671 emit_store_conditional (DImode, scratch, mem, scratch);
4672
4673 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4674 emit_unlikely_jump (x, label);
4675 }
4676 \f
4677 /* Adjust the cost of a scheduling dependency. Return the new cost of
4678 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4679
4680 static int
4681 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4682 {
4683 enum attr_type insn_type, dep_insn_type;
4684
4685 /* If the dependence is an anti-dependence, there is no cost. For an
4686 output dependence, there is sometimes a cost, but it doesn't seem
4687 worth handling those few cases. */
4688 if (REG_NOTE_KIND (link) != 0)
4689 return cost;
4690
4691 /* If we can't recognize the insns, we can't really do anything. */
4692 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4693 return cost;
4694
4695 insn_type = get_attr_type (insn);
4696 dep_insn_type = get_attr_type (dep_insn);
4697
4698 /* Bring in the user-defined memory latency. */
4699 if (dep_insn_type == TYPE_ILD
4700 || dep_insn_type == TYPE_FLD
4701 || dep_insn_type == TYPE_LDSYM)
4702 cost += alpha_memory_latency-1;
4703
4704 /* Everything else handled in DFA bypasses now. */
4705
4706 return cost;
4707 }
4708
4709 /* The number of instructions that can be issued per cycle. */
4710
4711 static int
4712 alpha_issue_rate (void)
4713 {
4714 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4715 }
4716
4717 /* How many alternative schedules to try. This should be as wide as the
4718 scheduling freedom in the DFA, but no wider. Making this value too
4719    large results in extra work for the scheduler.
4720
4721 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4722 alternative schedules. For EV5, we can choose between E0/E1 and
4723 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4724
4725 static int
4726 alpha_multipass_dfa_lookahead (void)
4727 {
4728 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4729 }
4730 \f
4731 /* Machine-specific function data. */
4732
4733 struct machine_function GTY(())
4734 {
4735 /* For unicosmk. */
4736 /* List of call information words for calls from this function. */
4737 struct rtx_def *first_ciw;
4738 struct rtx_def *last_ciw;
4739 int ciw_count;
4740
4741 /* List of deferred case vectors. */
4742 struct rtx_def *addr_list;
4743
4744 /* For OSF. */
4745 const char *some_ld_name;
4746
4747 /* For TARGET_LD_BUGGY_LDGP. */
4748 struct rtx_def *gp_save_rtx;
4749 };
4750
4751 /* How to allocate a 'struct machine_function'. */
4752
4753 static struct machine_function *
4754 alpha_init_machine_status (void)
4755 {
4756 return ((struct machine_function *)
4757 ggc_alloc_cleared (sizeof (struct machine_function)));
4758 }
4759
4760 /* Functions to save and restore alpha_return_addr_rtx. */
4761
4762 /* Start the ball rolling with RETURN_ADDR_RTX. */
4763
4764 rtx
4765 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4766 {
4767 if (count != 0)
4768 return const0_rtx;
4769
4770 return get_hard_reg_initial_val (Pmode, REG_RA);
4771 }
4772
4773 /* Return or create a memory slot containing the gp value for the current
4774 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4775
4776 rtx
4777 alpha_gp_save_rtx (void)
4778 {
4779 rtx seq, m = cfun->machine->gp_save_rtx;
4780
4781 if (m == NULL)
4782 {
4783 start_sequence ();
4784
4785 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4786 m = validize_mem (m);
4787 emit_move_insn (m, pic_offset_table_rtx);
4788
4789 seq = get_insns ();
4790 end_sequence ();
4791
4792 /* We used to simply emit the sequence after entry_of_function.
4793 However this breaks the CFG if the first instruction in the
4794 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4795 label. Emit the sequence properly on the edge. We are only
4796 invoked from dw2_build_landing_pads and finish_eh_generation
4797 will call commit_edge_insertions thanks to a kludge. */
4798 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4799
4800 cfun->machine->gp_save_rtx = m;
4801 }
4802
4803 return m;
4804 }
4805
4806 static int
4807 alpha_ra_ever_killed (void)
4808 {
4809 rtx top;
4810
4811 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4812 return (int)df_regs_ever_live_p (REG_RA);
4813
4814 push_topmost_sequence ();
4815 top = get_insns ();
4816 pop_topmost_sequence ();
4817
4818 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4819 }
4820
4821 \f
4822 /* Return the trap mode suffix applicable to the current
4823 instruction, or NULL. */
4824
4825 static const char *
4826 get_trap_mode_suffix (void)
4827 {
4828 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4829
4830 switch (s)
4831 {
4832 case TRAP_SUFFIX_NONE:
4833 return NULL;
4834
4835 case TRAP_SUFFIX_SU:
4836 if (alpha_fptm >= ALPHA_FPTM_SU)
4837 return "su";
4838 return NULL;
4839
4840 case TRAP_SUFFIX_SUI:
4841 if (alpha_fptm >= ALPHA_FPTM_SUI)
4842 return "sui";
4843 return NULL;
4844
4845 case TRAP_SUFFIX_V_SV:
4846 switch (alpha_fptm)
4847 {
4848 case ALPHA_FPTM_N:
4849 return NULL;
4850 case ALPHA_FPTM_U:
4851 return "v";
4852 case ALPHA_FPTM_SU:
4853 case ALPHA_FPTM_SUI:
4854 return "sv";
4855 default:
4856 gcc_unreachable ();
4857 }
4858
4859 case TRAP_SUFFIX_V_SV_SVI:
4860 switch (alpha_fptm)
4861 {
4862 case ALPHA_FPTM_N:
4863 return NULL;
4864 case ALPHA_FPTM_U:
4865 return "v";
4866 case ALPHA_FPTM_SU:
4867 return "sv";
4868 case ALPHA_FPTM_SUI:
4869 return "svi";
4870 default:
4871 gcc_unreachable ();
4872 }
4873 break;
4874
4875 case TRAP_SUFFIX_U_SU_SUI:
4876 switch (alpha_fptm)
4877 {
4878 case ALPHA_FPTM_N:
4879 return NULL;
4880 case ALPHA_FPTM_U:
4881 return "u";
4882 case ALPHA_FPTM_SU:
4883 return "su";
4884 case ALPHA_FPTM_SUI:
4885 return "sui";
4886 default:
4887 gcc_unreachable ();
4888 }
4889 break;
4890
4891 default:
4892 gcc_unreachable ();
4893 }
4894 gcc_unreachable ();
4895 }
4896
4897 /* Return the rounding mode suffix applicable to the current
4898 instruction, or NULL. */
4899
4900 static const char *
4901 get_round_mode_suffix (void)
4902 {
4903 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4904
4905 switch (s)
4906 {
4907 case ROUND_SUFFIX_NONE:
4908 return NULL;
4909 case ROUND_SUFFIX_NORMAL:
4910 switch (alpha_fprm)
4911 {
4912 case ALPHA_FPRM_NORM:
4913 return NULL;
4914 case ALPHA_FPRM_MINF:
4915 return "m";
4916 case ALPHA_FPRM_CHOP:
4917 return "c";
4918 case ALPHA_FPRM_DYN:
4919 return "d";
4920 default:
4921 gcc_unreachable ();
4922 }
4923 break;
4924
4925 case ROUND_SUFFIX_C:
4926 return "c";
4927
4928 default:
4929 gcc_unreachable ();
4930 }
4931 gcc_unreachable ();
4932 }
4933
4934 /* Locate some local-dynamic symbol still in use by this function
4935 so that we can print its name in some movdi_er_tlsldm pattern. */
4936
4937 static int
4938 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4939 {
4940 rtx x = *px;
4941
4942 if (GET_CODE (x) == SYMBOL_REF
4943 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4944 {
4945 cfun->machine->some_ld_name = XSTR (x, 0);
4946 return 1;
4947 }
4948
4949 return 0;
4950 }
4951
4952 static const char *
4953 get_some_local_dynamic_name (void)
4954 {
4955 rtx insn;
4956
4957 if (cfun->machine->some_ld_name)
4958 return cfun->machine->some_ld_name;
4959
4960 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4961 if (INSN_P (insn)
4962 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4963 return cfun->machine->some_ld_name;
4964
4965 gcc_unreachable ();
4966 }
4967
4968 /* Print an operand. Recognize special options, documented below. */
4969
4970 void
4971 print_operand (FILE *file, rtx x, int code)
4972 {
4973 int i;
4974
4975 switch (code)
4976 {
4977 case '~':
4978 /* Print the assembler name of the current function. */
4979 assemble_name (file, alpha_fnname);
4980 break;
4981
4982 case '&':
4983 assemble_name (file, get_some_local_dynamic_name ());
4984 break;
4985
4986 case '/':
4987 {
4988 const char *trap = get_trap_mode_suffix ();
4989 const char *round = get_round_mode_suffix ();
4990
4991 if (trap || round)
4992 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4993 (trap ? trap : ""), (round ? round : ""));
4994 break;
4995 }
4996
4997 case ',':
4998 /* Generates single precision instruction suffix. */
4999 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5000 break;
5001
5002 case '-':
5003 /* Generates double precision instruction suffix. */
5004 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5005 break;
5006
5007 case '#':
5008 if (alpha_this_literal_sequence_number == 0)
5009 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5010 fprintf (file, "%d", alpha_this_literal_sequence_number);
5011 break;
5012
5013 case '*':
5014 if (alpha_this_gpdisp_sequence_number == 0)
5015 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5016 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5017 break;
5018
5019 case 'H':
5020 if (GET_CODE (x) == HIGH)
5021 output_addr_const (file, XEXP (x, 0));
5022 else
5023 output_operand_lossage ("invalid %%H value");
5024 break;
5025
5026 case 'J':
5027 {
5028 const char *lituse;
5029
5030 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5031 {
5032 x = XVECEXP (x, 0, 0);
5033 lituse = "lituse_tlsgd";
5034 }
5035 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5036 {
5037 x = XVECEXP (x, 0, 0);
5038 lituse = "lituse_tlsldm";
5039 }
5040 else if (GET_CODE (x) == CONST_INT)
5041 lituse = "lituse_jsr";
5042 else
5043 {
5044 output_operand_lossage ("invalid %%J value");
5045 break;
5046 }
5047
5048 if (x != const0_rtx)
5049 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5050 }
5051 break;
5052
5053 case 'j':
5054 {
5055 const char *lituse;
5056
5057 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5058 lituse = "lituse_jsrdirect";
5059 #else
5060 lituse = "lituse_jsr";
5061 #endif
5062
5063 gcc_assert (INTVAL (x) != 0);
5064 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5065 }
5066 break;
5067 case 'r':
5068 /* If this operand is the constant zero, write it as "$31". */
5069 if (GET_CODE (x) == REG)
5070 fprintf (file, "%s", reg_names[REGNO (x)]);
5071 else if (x == CONST0_RTX (GET_MODE (x)))
5072 fprintf (file, "$31");
5073 else
5074 output_operand_lossage ("invalid %%r value");
5075 break;
5076
5077 case 'R':
5078 /* Similar, but for floating-point. */
5079 if (GET_CODE (x) == REG)
5080 fprintf (file, "%s", reg_names[REGNO (x)]);
5081 else if (x == CONST0_RTX (GET_MODE (x)))
5082 fprintf (file, "$f31");
5083 else
5084 output_operand_lossage ("invalid %%R value");
5085 break;
5086
5087 case 'N':
5088 /* Write the 1's complement of a constant. */
5089 if (GET_CODE (x) != CONST_INT)
5090 output_operand_lossage ("invalid %%N value");
5091
5092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5093 break;
5094
5095 case 'P':
5096 /* Write 1 << C, for a constant C. */
5097 if (GET_CODE (x) != CONST_INT)
5098 output_operand_lossage ("invalid %%P value");
5099
5100 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5101 break;
5102
5103 case 'h':
5104 /* Write the high-order 16 bits of a constant, sign-extended. */
5105 if (GET_CODE (x) != CONST_INT)
5106 output_operand_lossage ("invalid %%h value");
5107
5108 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5109 break;
5110
5111 case 'L':
5112 /* Write the low-order 16 bits of a constant, sign-extended. */
5113 if (GET_CODE (x) != CONST_INT)
5114 output_operand_lossage ("invalid %%L value");
5115
5116 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5117 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5118 break;
5119
5120 case 'm':
5121 /* Write mask for ZAP insn. */
5122 if (GET_CODE (x) == CONST_DOUBLE)
5123 {
5124 HOST_WIDE_INT mask = 0;
5125 HOST_WIDE_INT value;
5126
5127 value = CONST_DOUBLE_LOW (x);
5128 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5129 i++, value >>= 8)
5130 if (value & 0xff)
5131 mask |= (1 << i);
5132
5133 value = CONST_DOUBLE_HIGH (x);
5134 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5135 i++, value >>= 8)
5136 if (value & 0xff)
5137 mask |= (1 << (i + sizeof (int)));
5138
5139 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5140 }
5141
5142 else if (GET_CODE (x) == CONST_INT)
5143 {
5144 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5145
5146 for (i = 0; i < 8; i++, value >>= 8)
5147 if (value & 0xff)
5148 mask |= (1 << i);
5149
5150 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5151 }
5152 else
5153 output_operand_lossage ("invalid %%m value");
5154 break;
5155
5156 case 'M':
5157 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5158 if (GET_CODE (x) != CONST_INT
5159 || (INTVAL (x) != 8 && INTVAL (x) != 16
5160 && INTVAL (x) != 32 && INTVAL (x) != 64))
5161 output_operand_lossage ("invalid %%M value");
5162
5163 fprintf (file, "%s",
5164 (INTVAL (x) == 8 ? "b"
5165 : INTVAL (x) == 16 ? "w"
5166 : INTVAL (x) == 32 ? "l"
5167 : "q"));
5168 break;
5169
5170 case 'U':
5171 /* Similar, except do it from the mask. */
5172 if (GET_CODE (x) == CONST_INT)
5173 {
5174 HOST_WIDE_INT value = INTVAL (x);
5175
5176 if (value == 0xff)
5177 {
5178 fputc ('b', file);
5179 break;
5180 }
5181 if (value == 0xffff)
5182 {
5183 fputc ('w', file);
5184 break;
5185 }
5186 if (value == 0xffffffff)
5187 {
5188 fputc ('l', file);
5189 break;
5190 }
5191 if (value == -1)
5192 {
5193 fputc ('q', file);
5194 break;
5195 }
5196 }
5197 else if (HOST_BITS_PER_WIDE_INT == 32
5198 && GET_CODE (x) == CONST_DOUBLE
5199 && CONST_DOUBLE_LOW (x) == 0xffffffff
5200 && CONST_DOUBLE_HIGH (x) == 0)
5201 {
5202 fputc ('l', file);
5203 break;
5204 }
5205 output_operand_lossage ("invalid %%U value");
5206 break;
5207
5208 case 's':
5209 /* Write the constant value divided by 8 for little-endian mode or
5210 (56 - value) / 8 for big-endian mode. */
5211
5212 if (GET_CODE (x) != CONST_INT
5213 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5214 ? 56
5215 : 64)
5216 || (INTVAL (x) & 7) != 0)
5217 output_operand_lossage ("invalid %%s value");
5218
5219 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5220 WORDS_BIG_ENDIAN
5221 ? (56 - INTVAL (x)) / 8
5222 : INTVAL (x) / 8);
5223 break;
5224
5225 case 'S':
5226 /* Same, except compute (64 - c) / 8 */
5227
5228       if (GET_CODE (x) != CONST_INT
5229 	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5230 	  || (INTVAL (x) & 7) != 0)
5231 output_operand_lossage ("invalid %%s value");
5232
5233 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5234 break;
5235
5236 case 't':
5237 {
5238 /* On Unicos/Mk systems: use a DEX expression if the symbol
5239 clashes with a register name. */
5240 int dex = unicosmk_need_dex (x);
5241 if (dex)
5242 fprintf (file, "DEX(%d)", dex);
5243 else
5244 output_addr_const (file, x);
5245 }
5246 break;
5247
5248 case 'C': case 'D': case 'c': case 'd':
5249 /* Write out comparison name. */
5250 {
5251 enum rtx_code c = GET_CODE (x);
5252
5253 if (!COMPARISON_P (x))
5254 output_operand_lossage ("invalid %%C value");
5255
5256 else if (code == 'D')
5257 c = reverse_condition (c);
5258 else if (code == 'c')
5259 c = swap_condition (c);
5260 else if (code == 'd')
5261 c = swap_condition (reverse_condition (c));
5262
5263 if (c == LEU)
5264 fprintf (file, "ule");
5265 else if (c == LTU)
5266 fprintf (file, "ult");
5267 else if (c == UNORDERED)
5268 fprintf (file, "un");
5269 else
5270 fprintf (file, "%s", GET_RTX_NAME (c));
5271 }
5272 break;
5273
5274 case 'E':
5275 /* Write the divide or modulus operator. */
5276 switch (GET_CODE (x))
5277 {
5278 case DIV:
5279 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5280 break;
5281 case UDIV:
5282 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5283 break;
5284 case MOD:
5285 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5286 break;
5287 case UMOD:
5288 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5289 break;
5290 default:
5291 output_operand_lossage ("invalid %%E value");
5292 break;
5293 }
5294 break;
5295
5296 case 'A':
5297 /* Write "_u" for unaligned access. */
5298 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5299 fprintf (file, "_u");
5300 break;
5301
5302 case 0:
5303 if (GET_CODE (x) == REG)
5304 fprintf (file, "%s", reg_names[REGNO (x)]);
5305 else if (GET_CODE (x) == MEM)
5306 output_address (XEXP (x, 0));
5307 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5308 {
5309 switch (XINT (XEXP (x, 0), 1))
5310 {
5311 case UNSPEC_DTPREL:
5312 case UNSPEC_TPREL:
5313 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5314 break;
5315 default:
5316 output_operand_lossage ("unknown relocation unspec");
5317 break;
5318 }
5319 }
5320 else
5321 output_addr_const (file, x);
5322 break;
5323
5324 default:
5325 output_operand_lossage ("invalid %%xn code");
5326 }
5327 }
5328
5329 void
5330 print_operand_address (FILE *file, rtx addr)
5331 {
5332 int basereg = 31;
5333 HOST_WIDE_INT offset = 0;
5334
5335 if (GET_CODE (addr) == AND)
5336 addr = XEXP (addr, 0);
5337
5338 if (GET_CODE (addr) == PLUS
5339 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5340 {
5341 offset = INTVAL (XEXP (addr, 1));
5342 addr = XEXP (addr, 0);
5343 }
5344
5345 if (GET_CODE (addr) == LO_SUM)
5346 {
5347 const char *reloc16, *reloclo;
5348 rtx op1 = XEXP (addr, 1);
5349
5350 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5351 {
5352 op1 = XEXP (op1, 0);
5353 switch (XINT (op1, 1))
5354 {
5355 case UNSPEC_DTPREL:
5356 reloc16 = NULL;
5357 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5358 break;
5359 case UNSPEC_TPREL:
5360 reloc16 = NULL;
5361 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5362 break;
5363 default:
5364 output_operand_lossage ("unknown relocation unspec");
5365 return;
5366 }
5367
5368 output_addr_const (file, XVECEXP (op1, 0, 0));
5369 }
5370 else
5371 {
5372 reloc16 = "gprel";
5373 reloclo = "gprellow";
5374 output_addr_const (file, op1);
5375 }
5376
5377 if (offset)
5378 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5379
5380 addr = XEXP (addr, 0);
5381 switch (GET_CODE (addr))
5382 {
5383 case REG:
5384 basereg = REGNO (addr);
5385 break;
5386
5387 case SUBREG:
5388 basereg = subreg_regno (addr);
5389 break;
5390
5391 default:
5392 gcc_unreachable ();
5393 }
5394
5395 fprintf (file, "($%d)\t\t!%s", basereg,
5396 (basereg == 29 ? reloc16 : reloclo));
5397 return;
5398 }
5399
5400 switch (GET_CODE (addr))
5401 {
5402 case REG:
5403 basereg = REGNO (addr);
5404 break;
5405
5406 case SUBREG:
5407 basereg = subreg_regno (addr);
5408 break;
5409
5410 case CONST_INT:
5411 offset = INTVAL (addr);
5412 break;
5413
5414 #if TARGET_ABI_OPEN_VMS
5415 case SYMBOL_REF:
5416 fprintf (file, "%s", XSTR (addr, 0));
5417 return;
5418
5419 case CONST:
5420 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5421 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5422 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5423 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5424 INTVAL (XEXP (XEXP (addr, 0), 1)));
5425 return;
5426
5427 #endif
5428 default:
5429 gcc_unreachable ();
5430 }
5431
5432 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5433 }
5434 \f
5435 /* Emit RTL insns to initialize the variable parts of a trampoline at
5436 TRAMP. FNADDR is an RTX for the address of the function's pure
5437 code. CXT is an RTX for the static chain value for the function.
5438
5439 The three offset parameters are for the individual template's
5440 layout. A JMPOFS < 0 indicates that the trampoline does not
5441 contain instructions at all.
5442
5443 We assume here that a function will be called many more times than
5444 its address is taken (e.g., it might be passed to qsort), so we
5445 take the trouble to initialize the "hint" field in the JMP insn.
5446 Note that the hint field is PC (new) + 4 * bits 13:0. */
5447
5448 void
5449 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5450 int fnofs, int cxtofs, int jmpofs)
5451 {
5452 rtx addr;
5453 /* VMS really uses DImode pointers in memory at this point. */
5454 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5455
5456 #ifdef POINTERS_EXTEND_UNSIGNED
5457 fnaddr = convert_memory_address (mode, fnaddr);
5458 cxt = convert_memory_address (mode, cxt);
5459 #endif
5460
5461 /* Store function address and CXT. */
5462 addr = memory_address (mode, plus_constant (tramp, fnofs));
5463 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5464 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5465 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5466
5467 #ifdef ENABLE_EXECUTE_STACK
5468 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5469 0, VOIDmode, 1, tramp, Pmode);
5470 #endif
5471
5472 if (jmpofs >= 0)
5473 emit_insn (gen_imb ());
5474 }
5475 \f
5476 /* Determine where to put an argument to a function.
5477 Value is zero to push the argument on the stack,
5478 or a hard register in which to store the argument.
5479
5480 MODE is the argument's machine mode.
5481 TYPE is the data type of the argument (as a tree).
5482 This is null for libcalls where that information may
5483 not be available.
5484 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5485 the preceding args and about the function being called.
5486 NAMED is nonzero if this argument is a named parameter
5487 (otherwise it is an extra parameter matching an ellipsis).
5488
5489 On Alpha the first 6 words of args are normally in registers
5490 and the rest are pushed. */
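/* For example, under the OSF ABI the third named integer argument
   (CUM == 2) of a scalar integral type is passed in $18 (basereg 16 + 2),
   while a double in the same slot goes in $f18 (basereg 48 + 2) when FP
   registers are available.  */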
5491
5492 rtx
5493 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5494 int named ATTRIBUTE_UNUSED)
5495 {
5496 int basereg;
5497 int num_args;
5498
5499 /* Don't get confused and pass small structures in FP registers. */
5500 if (type && AGGREGATE_TYPE_P (type))
5501 basereg = 16;
5502 else
5503 {
5504 #ifdef ENABLE_CHECKING
5505 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5506 values here. */
5507 gcc_assert (!COMPLEX_MODE_P (mode));
5508 #endif
5509
5510 /* Set up defaults for FP operands passed in FP registers, and
5511 integral operands passed in integer registers. */
5512 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5513 basereg = 32 + 16;
5514 else
5515 basereg = 16;
5516 }
5517
5518 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5519 the three platforms, so we can't avoid conditional compilation. */
5520 #if TARGET_ABI_OPEN_VMS
5521 {
5522 if (mode == VOIDmode)
5523 return alpha_arg_info_reg_val (cum);
5524
5525 num_args = cum.num_args;
5526 if (num_args >= 6
5527 || targetm.calls.must_pass_in_stack (mode, type))
5528 return NULL_RTX;
5529 }
5530 #elif TARGET_ABI_UNICOSMK
5531 {
5532 int size;
5533
5534 /* If this is the last argument, generate the call info word (CIW). */
5535 /* ??? We don't include the caller's line number in the CIW because
5536       I don't know how to determine it if debug info is turned off.  */
5537 if (mode == VOIDmode)
5538 {
5539 int i;
5540 HOST_WIDE_INT lo;
5541 HOST_WIDE_INT hi;
5542 rtx ciw;
5543
5544 lo = 0;
5545
5546 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5547 if (cum.reg_args_type[i])
5548 lo |= (1 << (7 - i));
5549
5550 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5551 lo |= 7;
5552 else
5553 lo |= cum.num_reg_words;
5554
5555 #if HOST_BITS_PER_WIDE_INT == 32
5556 hi = (cum.num_args << 20) | cum.num_arg_words;
5557 #else
5558 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5559 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5560 hi = 0;
5561 #endif
5562 ciw = immed_double_const (lo, hi, DImode);
5563
5564 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5565 UNSPEC_UMK_LOAD_CIW);
5566 }
5567
5568 size = ALPHA_ARG_SIZE (mode, type, named);
5569 num_args = cum.num_reg_words;
5570 if (cum.force_stack
5571 || cum.num_reg_words + size > 6
5572 || targetm.calls.must_pass_in_stack (mode, type))
5573 return NULL_RTX;
5574 else if (type && TYPE_MODE (type) == BLKmode)
5575 {
5576 rtx reg1, reg2;
5577
5578 reg1 = gen_rtx_REG (DImode, num_args + 16);
5579 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5580
5581 /* The argument fits in two registers. Note that we still need to
5582 reserve a register for empty structures. */
5583 if (size == 0)
5584 return NULL_RTX;
5585 else if (size == 1)
5586 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5587 else
5588 {
5589 reg2 = gen_rtx_REG (DImode, num_args + 17);
5590 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5591 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5592 }
5593 }
5594 }
5595 #elif TARGET_ABI_OSF
5596 {
5597 if (cum >= 6)
5598 return NULL_RTX;
5599 num_args = cum;
5600
5601 /* VOID is passed as a special flag for "last argument". */
5602 if (type == void_type_node)
5603 basereg = 16;
5604 else if (targetm.calls.must_pass_in_stack (mode, type))
5605 return NULL_RTX;
5606 }
5607 #else
5608 #error Unhandled ABI
5609 #endif
5610
5611 return gen_rtx_REG (mode, num_args + basereg);
5612 }
5613
5614 static int
5615 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5616 enum machine_mode mode ATTRIBUTE_UNUSED,
5617 tree type ATTRIBUTE_UNUSED,
5618 bool named ATTRIBUTE_UNUSED)
5619 {
5620 int words = 0;
5621
5622 #if TARGET_ABI_OPEN_VMS
5623 if (cum->num_args < 6
5624 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5625 words = 6 - cum->num_args;
5626 #elif TARGET_ABI_UNICOSMK
5627 /* Never any split arguments. */
5628 #elif TARGET_ABI_OSF
5629 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5630 words = 6 - *cum;
5631 #else
5632 #error Unhandled ABI
5633 #endif
5634
5635 return words * UNITS_PER_WORD;
5636 }
5637
5638
5639 /* Return true if TYPE must be returned in memory, instead of in registers. */
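/* For example, every aggregate and every float vector is returned in memory,
   while a complex double is judged by the size of one of its parts and is
   therefore still returned in registers.  */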
5640
5641 static bool
5642 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5643 {
5644 enum machine_mode mode = VOIDmode;
5645 int size;
5646
5647 if (type)
5648 {
5649 mode = TYPE_MODE (type);
5650
5651 /* All aggregates are returned in memory. */
5652 if (AGGREGATE_TYPE_P (type))
5653 return true;
5654 }
5655
5656 size = GET_MODE_SIZE (mode);
5657 switch (GET_MODE_CLASS (mode))
5658 {
5659 case MODE_VECTOR_FLOAT:
5660 /* Pass all float vectors in memory, like an aggregate. */
5661 return true;
5662
5663 case MODE_COMPLEX_FLOAT:
5664 /* We judge complex floats on the size of their element,
5665 not the size of the whole type. */
5666 size = GET_MODE_UNIT_SIZE (mode);
5667 break;
5668
5669 case MODE_INT:
5670 case MODE_FLOAT:
5671 case MODE_COMPLEX_INT:
5672 case MODE_VECTOR_INT:
5673 break;
5674
5675 default:
5676 /* ??? We get called on all sorts of random stuff from
5677 aggregate_value_p. We must return something, but it's not
5678 clear what's safe to return. Pretend it's a struct I
5679 guess. */
5680 return true;
5681 }
5682
5683 /* Otherwise types must fit in one register. */
5684 return size > UNITS_PER_WORD;
5685 }
5686
5687 /* Return true if TYPE should be passed by invisible reference. */
5688
5689 static bool
5690 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5691 enum machine_mode mode,
5692 const_tree type ATTRIBUTE_UNUSED,
5693 bool named ATTRIBUTE_UNUSED)
5694 {
5695 return mode == TFmode || mode == TCmode;
5696 }
5697
5698 /* Define how to find the value returned by a function. VALTYPE is the
5699 data type of the value (as a tree). If the precise function being
5700 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5701 MODE is set instead of VALTYPE for libcalls.
5702
5703 On Alpha the value is found in $0 for integer functions and
5704 $f0 for floating-point functions. */
5705
5706 rtx
5707 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5708 enum machine_mode mode)
5709 {
5710 unsigned int regnum, dummy;
5711 enum mode_class class;
5712
5713 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5714
5715 if (valtype)
5716 mode = TYPE_MODE (valtype);
5717
5718 class = GET_MODE_CLASS (mode);
5719 switch (class)
5720 {
5721 case MODE_INT:
5722 PROMOTE_MODE (mode, dummy, valtype);
5723 /* FALLTHRU */
5724
5725 case MODE_COMPLEX_INT:
5726 case MODE_VECTOR_INT:
5727 regnum = 0;
5728 break;
5729
5730 case MODE_FLOAT:
5731 regnum = 32;
5732 break;
5733
5734 case MODE_COMPLEX_FLOAT:
5735 {
5736 enum machine_mode cmode = GET_MODE_INNER (mode);
5737
5738 return gen_rtx_PARALLEL
5739 (VOIDmode,
5740 gen_rtvec (2,
5741 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5742 const0_rtx),
5743 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5744 GEN_INT (GET_MODE_SIZE (cmode)))));
5745 }
5746
5747 default:
5748 gcc_unreachable ();
5749 }
5750
5751 return gen_rtx_REG (mode, regnum);
5752 }
5753
5754 /* TCmode complex values are passed by invisible reference. We
5755 should not split these values. */
5756
5757 static bool
5758 alpha_split_complex_arg (const_tree type)
5759 {
5760 return TYPE_MODE (type) != TCmode;
5761 }
5762
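/* On OSF the va_list is a record with a __base pointer and an integer
   __offset, plus a dummy integer field kept only to silence alignment
   warnings; VMS and Unicos/Mk simply use a plain pointer.  */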
5763 static tree
5764 alpha_build_builtin_va_list (void)
5765 {
5766 tree base, ofs, space, record, type_decl;
5767
5768 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5769 return ptr_type_node;
5770
5771 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5772 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5773 TREE_CHAIN (record) = type_decl;
5774 TYPE_NAME (record) = type_decl;
5775
5776 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5777
5778 /* Dummy field to prevent alignment warnings. */
5779 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5780 DECL_FIELD_CONTEXT (space) = record;
5781 DECL_ARTIFICIAL (space) = 1;
5782 DECL_IGNORED_P (space) = 1;
5783
5784 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5785 integer_type_node);
5786 DECL_FIELD_CONTEXT (ofs) = record;
5787 TREE_CHAIN (ofs) = space;
5788
5789 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5790 ptr_type_node);
5791 DECL_FIELD_CONTEXT (base) = record;
5792 TREE_CHAIN (base) = ofs;
5793
5794 TYPE_FIELDS (record) = base;
5795 layout_type (record);
5796
5797 va_list_gpr_counter_field = ofs;
5798 return record;
5799 }
5800
5801 #if TARGET_ABI_OSF
5802 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5803 and constant additions. */
5804
5805 static tree
5806 va_list_skip_additions (tree lhs)
5807 {
5808 tree rhs, stmt;
5809
5810 if (TREE_CODE (lhs) != SSA_NAME)
5811 return lhs;
5812
5813 for (;;)
5814 {
5815 stmt = SSA_NAME_DEF_STMT (lhs);
5816
5817 if (TREE_CODE (stmt) == PHI_NODE)
5818 return stmt;
5819
5820 if (TREE_CODE (stmt) != MODIFY_EXPR
5821 || TREE_OPERAND (stmt, 0) != lhs)
5822 return lhs;
5823
5824 rhs = TREE_OPERAND (stmt, 1);
5825 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5826 rhs = TREE_OPERAND (rhs, 0);
5827
5828 if (((!CONVERT_EXPR_P (rhs))
5829 && ((TREE_CODE (rhs) != PLUS_EXPR
5830 && TREE_CODE (rhs) != POINTER_PLUS_EXPR)
5831 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5832 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5833 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5834 return rhs;
5835
5836 lhs = TREE_OPERAND (rhs, 0);
5837 }
5838 }
5839
5840 /* Check if LHS = RHS statement is
5841 LHS = *(ap.__base + ap.__offset + cst)
5842 or
5843 LHS = *(ap.__base
5844 + ((ap.__offset + cst <= 47)
5845 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5846 If the former, indicate that GPR registers are needed,
5847 if the latter, indicate that FPR registers are needed.
5848
5849 Also look for LHS = (*ptr).field, where ptr is one of the forms
5850 listed above.
5851
5852    On Alpha, cfun->va_list_gpr_size is used as the size of the needed
5853    regs, and cfun->va_list_fpr_size is a bitmask: bit 0 is set if GPR
5854    registers are needed and bit 1 is set if FPR registers are needed.
5855 Return true if va_list references should not be scanned for the
5856 current statement. */
5857
5858 static bool
5859 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5860 {
5861 tree base, offset, arg1, arg2;
5862 int offset_arg = 1;
5863
5864 #if 1
5865 /* FIXME tuples. */
5866 (void) si;
5867 (void) stmt;
5868 return false;
5869 #else
5870 while (handled_component_p (rhs))
5871 rhs = TREE_OPERAND (rhs, 0);
5872 if (TREE_CODE (rhs) != INDIRECT_REF
5873 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5874 return false;
5875
5876 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5877 if (lhs == NULL_TREE
5878 || TREE_CODE (lhs) != POINTER_PLUS_EXPR)
5879 return false;
5880
5881 base = TREE_OPERAND (lhs, 0);
5882 if (TREE_CODE (base) == SSA_NAME)
5883 base = va_list_skip_additions (base);
5884
5885 if (TREE_CODE (base) != COMPONENT_REF
5886 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5887 {
5888 base = TREE_OPERAND (lhs, 0);
5889 if (TREE_CODE (base) == SSA_NAME)
5890 base = va_list_skip_additions (base);
5891
5892 if (TREE_CODE (base) != COMPONENT_REF
5893 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5894 return false;
5895
5896 offset_arg = 0;
5897 }
5898
5899 base = get_base_address (base);
5900 if (TREE_CODE (base) != VAR_DECL
5901 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5902 return false;
5903
5904 offset = TREE_OPERAND (lhs, offset_arg);
5905 if (TREE_CODE (offset) == SSA_NAME)
5906 offset = va_list_skip_additions (offset);
5907
5908 if (TREE_CODE (offset) == PHI_NODE)
5909 {
5910 HOST_WIDE_INT sub;
5911
5912 if (PHI_NUM_ARGS (offset) != 2)
5913 goto escapes;
5914
5915 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5916 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5917 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5918 {
5919 tree tem = arg1;
5920 arg1 = arg2;
5921 arg2 = tem;
5922
5923 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5924 goto escapes;
5925 }
5926 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5927 goto escapes;
5928
5929 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5930 if (TREE_CODE (arg2) == MINUS_EXPR)
5931 sub = -sub;
5932 if (sub < -48 || sub > -32)
5933 goto escapes;
5934
5935 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5936 if (arg1 != arg2)
5937 goto escapes;
5938
5939 if (TREE_CODE (arg1) == SSA_NAME)
5940 arg1 = va_list_skip_additions (arg1);
5941
5942 if (TREE_CODE (arg1) != COMPONENT_REF
5943 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5944 || get_base_address (arg1) != base)
5945 goto escapes;
5946
5947 /* Need floating point regs. */
5948 cfun->va_list_fpr_size |= 2;
5949 }
5950 else if (TREE_CODE (offset) != COMPONENT_REF
5951 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5952 || get_base_address (offset) != base)
5953 goto escapes;
5954 else
5955 /* Need general regs. */
5956 cfun->va_list_fpr_size |= 1;
5957 return false;
5958
5959 escapes:
5960 si->va_list_escapes = true;
5961 return false;
5962 #endif
5963 }
5964 #endif
5965
5966 /* Perform any actions needed for a function that is receiving a
5967 variable number of arguments. */
5968
5969 static void
5970 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5971 tree type, int *pretend_size, int no_rtl)
5972 {
5973 CUMULATIVE_ARGS cum = *pcum;
5974
5975 /* Skip the current argument. */
5976 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5977
5978 #if TARGET_ABI_UNICOSMK
5979 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5980 arguments on the stack. Unfortunately, it doesn't always store the first
5981 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5982 with stdargs as we always have at least one named argument there. */
5983 if (cum.num_reg_words < 6)
5984 {
5985 if (!no_rtl)
5986 {
5987 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5988 emit_insn (gen_arg_home_umk ());
5989 }
5990 *pretend_size = 0;
5991 }
5992 #elif TARGET_ABI_OPEN_VMS
5993 /* For VMS, we allocate space for all 6 arg registers plus a count.
5994
5995 However, if NO registers need to be saved, don't allocate any space.
5996 This is not only because we won't need the space, but because AP
5997 includes the current_pretend_args_size and we don't want to mess up
5998 any ap-relative addresses already made. */
5999 if (cum.num_args < 6)
6000 {
6001 if (!no_rtl)
6002 {
6003 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6004 emit_insn (gen_arg_home ());
6005 }
6006 *pretend_size = 7 * UNITS_PER_WORD;
6007 }
6008 #else
6009 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6010 only push those that are remaining. However, if NO registers need to
6011 be saved, don't allocate any space. This is not only because we won't
6012 need the space, but because AP includes the current_pretend_args_size
6013 and we don't want to mess up any ap-relative addresses already made.
6014
6015 If we are not to use the floating-point registers, save the integer
6016 registers where we would put the floating-point registers. This is
6017 not the most efficient way to implement varargs with just one register
6018 class, but it isn't worth doing anything more efficient in this rare
6019 case. */
6020 if (cum >= 6)
6021 return;
6022
6023 if (!no_rtl)
6024 {
6025 int count;
6026 alias_set_type set = get_varargs_alias_set ();
6027 rtx tmp;
6028
6029 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6030 if (count > 6 - cum)
6031 count = 6 - cum;
6032
6033 /* Detect whether integer registers or floating-point registers
6034 are needed by the detected va_arg statements. See above for
6035 how these values are computed. Note that the "escape" value
6036 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6037 these bits set. */
6038 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6039
6040 if (cfun->va_list_fpr_size & 1)
6041 {
6042 tmp = gen_rtx_MEM (BLKmode,
6043 plus_constant (virtual_incoming_args_rtx,
6044 (cum + 6) * UNITS_PER_WORD));
6045 MEM_NOTRAP_P (tmp) = 1;
6046 set_mem_alias_set (tmp, set);
6047 move_block_from_reg (16 + cum, tmp, count);
6048 }
6049
6050 if (cfun->va_list_fpr_size & 2)
6051 {
6052 tmp = gen_rtx_MEM (BLKmode,
6053 plus_constant (virtual_incoming_args_rtx,
6054 cum * UNITS_PER_WORD));
6055 MEM_NOTRAP_P (tmp) = 1;
6056 set_mem_alias_set (tmp, set);
6057 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6058 }
6059 }
6060 *pretend_size = 12 * UNITS_PER_WORD;
6061 #endif
6062 }
6063
6064 static void
6065 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6066 {
6067 HOST_WIDE_INT offset;
6068 tree t, offset_field, base_field;
6069
6070 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6071 return;
6072
6073 if (TARGET_ABI_UNICOSMK)
6074 std_expand_builtin_va_start (valist, nextarg);
6075
6076 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6077 up by 48, storing fp arg registers in the first 48 bytes, and the
6078 integer arg registers in the next 48 bytes. This is only done,
6079 however, if any integer registers need to be stored.
6080
6081 If no integer registers need be stored, then we must subtract 48
6082 in order to account for the integer arg registers which are counted
6083 in argsize above, but which are not actually stored on the stack.
6084 Must further be careful here about structures straddling the last
6085 integer argument register; that futzes with pretend_args_size,
6086 which changes the meaning of AP. */
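
/* A worked sketch (OSF, UNITS_PER_WORD == 8): with two named integer
   arguments, NUM_ARGS is 2, so OFFSET below is 48 and the offset field
   starts at 16. The first integer va_arg then reads incoming + 48 + 16,
   which is where TARGET_SETUP_INCOMING_VARARGS stored $18, while an FP
   va_arg whose offset is still below 48 is biased down by 48 in
   alpha_gimplify_va_arg_1 to reach the $f18 slot. */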
6087
6088 if (NUM_ARGS < 6)
6089 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6090 else
6091 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6092
6093 if (TARGET_ABI_OPEN_VMS)
6094 {
6095 nextarg = plus_constant (nextarg, offset);
6096 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6097 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6098 make_tree (ptr_type_node, nextarg));
6099 TREE_SIDE_EFFECTS (t) = 1;
6100
6101 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6102 }
6103 else
6104 {
6105 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6106 offset_field = TREE_CHAIN (base_field);
6107
6108 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6109 valist, base_field, NULL_TREE);
6110 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6111 valist, offset_field, NULL_TREE);
6112
6113 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6114 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6115 size_int (offset));
6116 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6117 TREE_SIDE_EFFECTS (t) = 1;
6118 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6119
6120 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6121 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6122 TREE_SIDE_EFFECTS (t) = 1;
6123 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6124 }
6125 }
6126
6127 static tree
6128 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6129 gimple_seq *pre_p)
6130 {
6131 tree type_size, ptr_type, addend, t, addr; gimple_seq internal_post;
6132
6133 /* If the type could not be passed in registers, skip the block
6134 reserved for the registers. */
6135 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6136 {
6137 t = build_int_cst (TREE_TYPE (offset), 6*8);
6138 gimplify_assign (offset,
6139 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6140 pre_p);
6141 }
6142
6143 addend = offset;
6144 ptr_type = build_pointer_type (type);
6145
6146 if (TREE_CODE (type) == COMPLEX_TYPE)
6147 {
6148 tree real_part, imag_part, real_temp;
6149
6150 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6151 offset, pre_p);
6152
6153 /* Copy the value into a new temporary, lest the formal temporary
6154 be reused out from under us. */
6155 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6156
6157 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6158 offset, pre_p);
6159
6160 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6161 }
6162 else if (TREE_CODE (type) == REAL_TYPE)
6163 {
6164 tree fpaddend, cond, fourtyeight;
6165
6166 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6167 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6168 addend, fourtyeight);
6169 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6170 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6171 fpaddend, addend);
6172 }
6173
6174 /* Build the final address and force that value into a temporary. */
6175 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6176 fold_convert (sizetype, addend));
6177 internal_post = NULL;
6178 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6179 gimple_seq_add_seq (pre_p, internal_post);
6180
6181 /* Update the offset field. */
6182 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6183 if (type_size == NULL || TREE_OVERFLOW (type_size))
6184 t = size_zero_node;
6185 else
6186 {
6187 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6188 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6189 t = size_binop (MULT_EXPR, t, size_int (8));
6190 }
6191 t = fold_convert (TREE_TYPE (offset), t);
6192 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6193 pre_p);
6194
6195 return build_va_arg_indirect_ref (addr);
6196 }
6197
6198 static tree
6199 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6200 gimple_seq *post_p)
6201 {
6202 tree offset_field, base_field, offset, base, t, r;
6203 bool indirect;
6204
6205 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6206 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6207
6208 base_field = TYPE_FIELDS (va_list_type_node);
6209 offset_field = TREE_CHAIN (base_field);
6210 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6211 valist, base_field, NULL_TREE);
6212 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6213 valist, offset_field, NULL_TREE);
6214
6215 /* Pull the fields of the structure out into temporaries. Since we never
6216 modify the base field, we can use a formal temporary. Sign-extend the
6217 offset field so that it's the proper width for pointer arithmetic. */
6218 base = get_formal_tmp_var (base_field, pre_p);
6219
6220 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6221 offset = get_initialized_tmp_var (t, pre_p, NULL);
6222
6223 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6224 if (indirect)
6225 type = build_pointer_type (type);
6226
6227 /* Find the value. Note that this will be a stable indirection, or
6228 a composite of stable indirections in the case of complex. */
6229 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6230
6231 /* Stuff the offset temporary back into its field. */
6232 gimplify_assign (offset_field,
6233 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6234
6235 if (indirect)
6236 r = build_va_arg_indirect_ref (r);
6237
6238 return r;
6239 }
6240 \f
6241 /* Builtins. */
6242
6243 enum alpha_builtin
6244 {
6245 ALPHA_BUILTIN_CMPBGE,
6246 ALPHA_BUILTIN_EXTBL,
6247 ALPHA_BUILTIN_EXTWL,
6248 ALPHA_BUILTIN_EXTLL,
6249 ALPHA_BUILTIN_EXTQL,
6250 ALPHA_BUILTIN_EXTWH,
6251 ALPHA_BUILTIN_EXTLH,
6252 ALPHA_BUILTIN_EXTQH,
6253 ALPHA_BUILTIN_INSBL,
6254 ALPHA_BUILTIN_INSWL,
6255 ALPHA_BUILTIN_INSLL,
6256 ALPHA_BUILTIN_INSQL,
6257 ALPHA_BUILTIN_INSWH,
6258 ALPHA_BUILTIN_INSLH,
6259 ALPHA_BUILTIN_INSQH,
6260 ALPHA_BUILTIN_MSKBL,
6261 ALPHA_BUILTIN_MSKWL,
6262 ALPHA_BUILTIN_MSKLL,
6263 ALPHA_BUILTIN_MSKQL,
6264 ALPHA_BUILTIN_MSKWH,
6265 ALPHA_BUILTIN_MSKLH,
6266 ALPHA_BUILTIN_MSKQH,
6267 ALPHA_BUILTIN_UMULH,
6268 ALPHA_BUILTIN_ZAP,
6269 ALPHA_BUILTIN_ZAPNOT,
6270 ALPHA_BUILTIN_AMASK,
6271 ALPHA_BUILTIN_IMPLVER,
6272 ALPHA_BUILTIN_RPCC,
6273 ALPHA_BUILTIN_THREAD_POINTER,
6274 ALPHA_BUILTIN_SET_THREAD_POINTER,
6275
6276 /* TARGET_MAX */
6277 ALPHA_BUILTIN_MINUB8,
6278 ALPHA_BUILTIN_MINSB8,
6279 ALPHA_BUILTIN_MINUW4,
6280 ALPHA_BUILTIN_MINSW4,
6281 ALPHA_BUILTIN_MAXUB8,
6282 ALPHA_BUILTIN_MAXSB8,
6283 ALPHA_BUILTIN_MAXUW4,
6284 ALPHA_BUILTIN_MAXSW4,
6285 ALPHA_BUILTIN_PERR,
6286 ALPHA_BUILTIN_PKLB,
6287 ALPHA_BUILTIN_PKWB,
6288 ALPHA_BUILTIN_UNPKBL,
6289 ALPHA_BUILTIN_UNPKBW,
6290
6291 /* TARGET_CIX */
6292 ALPHA_BUILTIN_CTTZ,
6293 ALPHA_BUILTIN_CTLZ,
6294 ALPHA_BUILTIN_CTPOP,
6295
6296 ALPHA_BUILTIN_max
6297 };
6298
6299 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6300 CODE_FOR_builtin_cmpbge,
6301 CODE_FOR_builtin_extbl,
6302 CODE_FOR_builtin_extwl,
6303 CODE_FOR_builtin_extll,
6304 CODE_FOR_builtin_extql,
6305 CODE_FOR_builtin_extwh,
6306 CODE_FOR_builtin_extlh,
6307 CODE_FOR_builtin_extqh,
6308 CODE_FOR_builtin_insbl,
6309 CODE_FOR_builtin_inswl,
6310 CODE_FOR_builtin_insll,
6311 CODE_FOR_builtin_insql,
6312 CODE_FOR_builtin_inswh,
6313 CODE_FOR_builtin_inslh,
6314 CODE_FOR_builtin_insqh,
6315 CODE_FOR_builtin_mskbl,
6316 CODE_FOR_builtin_mskwl,
6317 CODE_FOR_builtin_mskll,
6318 CODE_FOR_builtin_mskql,
6319 CODE_FOR_builtin_mskwh,
6320 CODE_FOR_builtin_msklh,
6321 CODE_FOR_builtin_mskqh,
6322 CODE_FOR_umuldi3_highpart,
6323 CODE_FOR_builtin_zap,
6324 CODE_FOR_builtin_zapnot,
6325 CODE_FOR_builtin_amask,
6326 CODE_FOR_builtin_implver,
6327 CODE_FOR_builtin_rpcc,
6328 CODE_FOR_load_tp,
6329 CODE_FOR_set_tp,
6330
6331 /* TARGET_MAX */
6332 CODE_FOR_builtin_minub8,
6333 CODE_FOR_builtin_minsb8,
6334 CODE_FOR_builtin_minuw4,
6335 CODE_FOR_builtin_minsw4,
6336 CODE_FOR_builtin_maxub8,
6337 CODE_FOR_builtin_maxsb8,
6338 CODE_FOR_builtin_maxuw4,
6339 CODE_FOR_builtin_maxsw4,
6340 CODE_FOR_builtin_perr,
6341 CODE_FOR_builtin_pklb,
6342 CODE_FOR_builtin_pkwb,
6343 CODE_FOR_builtin_unpkbl,
6344 CODE_FOR_builtin_unpkbw,
6345
6346 /* TARGET_CIX */
6347 CODE_FOR_ctzdi2,
6348 CODE_FOR_clzdi2,
6349 CODE_FOR_popcountdi2
6350 };
6351
6352 struct alpha_builtin_def
6353 {
6354 const char *name;
6355 enum alpha_builtin code;
6356 unsigned int target_mask;
6357 bool is_const;
6358 };
6359
6360 static struct alpha_builtin_def const zero_arg_builtins[] = {
6361 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6362 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6363 };
6364
6365 static struct alpha_builtin_def const one_arg_builtins[] = {
6366 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6367 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6368 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6369 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6370 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6371 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6372 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6373 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6374 };
6375
6376 static struct alpha_builtin_def const two_arg_builtins[] = {
6377 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6378 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6379 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6380 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6381 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6382 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6383 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6384 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6385 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6386 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6387 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6388 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6389 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6390 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6391 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6392 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6393 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6394 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6395 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6396 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6397 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6398 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6399 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6400 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6401 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6402 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6403 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6404 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6405 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6406 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6407 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6408 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6409 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6410 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6411 };
6412
6413 static GTY(()) tree alpha_v8qi_u;
6414 static GTY(()) tree alpha_v8qi_s;
6415 static GTY(()) tree alpha_v4hi_u;
6416 static GTY(()) tree alpha_v4hi_s;
6417
6418 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6419 functions pointed to by P, with function type FTYPE. */
6420
6421 static void
6422 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6423 tree ftype)
6424 {
6425 tree decl;
6426 size_t i;
6427
6428 for (i = 0; i < count; ++i, ++p)
6429 if ((target_flags & p->target_mask) == p->target_mask)
6430 {
6431 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6432 NULL, NULL);
6433 if (p->is_const)
6434 TREE_READONLY (decl) = 1;
6435 TREE_NOTHROW (decl) = 1;
6436 }
6437 }
6438
6439
6440 static void
6441 alpha_init_builtins (void)
6442 {
6443 tree dimode_integer_type_node;
6444 tree ftype, decl;
6445
6446 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6447
6448 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6449 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6450 ftype);
6451
6452 ftype = build_function_type_list (dimode_integer_type_node,
6453 dimode_integer_type_node, NULL_TREE);
6454 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6455 ftype);
6456
6457 ftype = build_function_type_list (dimode_integer_type_node,
6458 dimode_integer_type_node,
6459 dimode_integer_type_node, NULL_TREE);
6460 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6461 ftype);
6462
6463 ftype = build_function_type (ptr_type_node, void_list_node);
6464 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6465 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6466 NULL, NULL);
6467 TREE_NOTHROW (decl) = 1;
6468
6469 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6470 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6471 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6472 NULL, NULL);
6473 TREE_NOTHROW (decl) = 1;
6474
6475 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6476 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6477 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6478 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6479 }
6480
6481 /* Expand an expression EXP that calls a built-in function,
6482 with result going to TARGET if that's convenient
6483 (and in mode MODE if that's convenient).
6484 SUBTARGET may be used as the target for computing one of EXP's operands.
6485 IGNORE is nonzero if the value is to be ignored. */
6486
6487 static rtx
6488 alpha_expand_builtin (tree exp, rtx target,
6489 rtx subtarget ATTRIBUTE_UNUSED,
6490 enum machine_mode mode ATTRIBUTE_UNUSED,
6491 int ignore ATTRIBUTE_UNUSED)
6492 {
6493 #define MAX_ARGS 2
6494
6495 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6496 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6497 tree arg;
6498 call_expr_arg_iterator iter;
6499 enum insn_code icode;
6500 rtx op[MAX_ARGS], pat;
6501 int arity;
6502 bool nonvoid;
6503
6504 if (fcode >= ALPHA_BUILTIN_max)
6505 internal_error ("bad builtin fcode");
6506 icode = code_for_builtin[fcode];
6507 if (icode == 0)
6508 internal_error ("bad builtin fcode");
6509
6510 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6511
6512 arity = 0;
6513 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6514 {
6515 const struct insn_operand_data *insn_op;
6516
6517 if (arg == error_mark_node)
6518 return NULL_RTX;
6519 if (arity >= MAX_ARGS)
6520 return NULL_RTX;
6521
6522 insn_op = &insn_data[icode].operand[arity + nonvoid];
6523
6524 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6525
6526 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6527 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6528 arity++;
6529 }
6530
6531 if (nonvoid)
6532 {
6533 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6534 if (!target
6535 || GET_MODE (target) != tmode
6536 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6537 target = gen_reg_rtx (tmode);
6538 }
6539
6540 switch (arity)
6541 {
6542 case 0:
6543 pat = GEN_FCN (icode) (target);
6544 break;
6545 case 1:
6546 if (nonvoid)
6547 pat = GEN_FCN (icode) (target, op[0]);
6548 else
6549 pat = GEN_FCN (icode) (op[0]);
6550 break;
6551 case 2:
6552 pat = GEN_FCN (icode) (target, op[0], op[1]);
6553 break;
6554 default:
6555 gcc_unreachable ();
6556 }
6557 if (!pat)
6558 return NULL_RTX;
6559 emit_insn (pat);
6560
6561 if (nonvoid)
6562 return target;
6563 else
6564 return const0_rtx;
6565 }
6566
6567
6568 /* Several bits below assume HWI >= 64 bits. This should be enforced
6569 by config.gcc. */
6570 #if HOST_BITS_PER_WIDE_INT < 64
6571 # error "HOST_WIDE_INT too small"
6572 #endif
6573
6574 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6575 with an 8-bit output vector. OPINT contains the integer operands; bit N
6576 of OP_CONST is set if OPINT[N] is valid. */
6577
6578 static tree
6579 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6580 {
6581 if (op_const == 3)
6582 {
6583 int i, val;
6584 for (i = 0, val = 0; i < 8; ++i)
6585 {
6586 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6587 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6588 if (c0 >= c1)
6589 val |= 1 << i;
6590 }
6591 return build_int_cst (long_integer_type_node, val);
6592 }
6593 else if (op_const == 2 && opint[1] == 0)
6594 return build_int_cst (long_integer_type_node, 0xff);
6595 return NULL;
6596 }
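
/* A hand-checked example of the fold above: __builtin_alpha_cmpbge
   (0x1003, 0x0310) compares the eight byte pairs; byte 0 fails
   (0x03 >= 0x10 is false), byte 1 succeeds (0x10 >= 0x03), and bytes
   2-7 compare 0 >= 0, so the folded result is 0xfe. */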
6597
6598 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6599 specialized form of an AND operation. Other byte manipulation instructions
6600 are defined in terms of this instruction, so this is also used as a
6601 subroutine for other builtins.
6602
6603 OP contains the tree operands; OPINT contains the extracted integer values.
6604 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6605 OPINT is to be considered. */
6606
6607 static tree
6608 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6609 long op_const)
6610 {
6611 if (op_const & 2)
6612 {
6613 unsigned HOST_WIDE_INT mask = 0;
6614 int i;
6615
6616 for (i = 0; i < 8; ++i)
6617 if ((opint[1] >> i) & 1)
6618 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6619
6620 if (op_const & 1)
6621 return build_int_cst (long_integer_type_node, opint[0] & mask);
6622
6623 if (op)
6624 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6625 build_int_cst (long_integer_type_node, mask));
6626 }
6627 else if ((op_const & 1) && opint[0] == 0)
6628 return build_int_cst (long_integer_type_node, 0);
6629 return NULL;
6630 }
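
/* For example, with a constant byte mask, __builtin_alpha_zapnot (x, 0x0f)
   folds to x & 0xffffffff (keep the low four bytes), while
   __builtin_alpha_zap (x, 0x0f) folds to x & 0xffffffff00000000, since
   ZAP inverts the mask before reaching this routine. */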
6631
6632 /* Fold the builtins for the EXT family of instructions. */
6633
6634 static tree
6635 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6636 long op_const, unsigned HOST_WIDE_INT bytemask,
6637 bool is_high)
6638 {
6639 long zap_const = 2;
6640 tree *zap_op = NULL;
6641
6642 if (op_const & 2)
6643 {
6644 unsigned HOST_WIDE_INT loc;
6645
6646 loc = opint[1] & 7;
6647 if (BYTES_BIG_ENDIAN)
6648 loc ^= 7;
6649 loc *= 8;
6650
6651 if (loc != 0)
6652 {
6653 if (op_const & 1)
6654 {
6655 unsigned HOST_WIDE_INT temp = opint[0];
6656 if (is_high)
6657 temp <<= loc;
6658 else
6659 temp >>= loc;
6660 opint[0] = temp;
6661 zap_const = 3;
6662 }
6663 }
6664 else
6665 zap_op = op;
6666 }
6667
6668 opint[1] = bytemask;
6669 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6670 }
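
/* For example, with constant operands, __builtin_alpha_extwl (x, 2)
   folds to (x >> 16) & 0xffff -- the word starting at byte 2 of X in
   little-endian byte numbering -- via a shift followed by the ZAPNOT
   fold with byte mask 0x03. */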
6671
6672 /* Fold the builtins for the INS family of instructions. */
6673
6674 static tree
6675 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6676 long op_const, unsigned HOST_WIDE_INT bytemask,
6677 bool is_high)
6678 {
6679 if ((op_const & 1) && opint[0] == 0)
6680 return build_int_cst (long_integer_type_node, 0);
6681
6682 if (op_const & 2)
6683 {
6684 unsigned HOST_WIDE_INT temp, loc, byteloc;
6685 tree *zap_op = NULL;
6686
6687 loc = opint[1] & 7;
6688 if (BYTES_BIG_ENDIAN)
6689 loc ^= 7;
6690 bytemask <<= loc;
6691
6692 temp = opint[0];
6693 if (is_high)
6694 {
6695 byteloc = (64 - (loc * 8)) & 0x3f;
6696 if (byteloc == 0)
6697 zap_op = op;
6698 else
6699 temp >>= byteloc;
6700 bytemask >>= 8;
6701 }
6702 else
6703 {
6704 byteloc = loc * 8;
6705 if (byteloc == 0)
6706 zap_op = op;
6707 else
6708 temp <<= byteloc;
6709 }
6710
6711 opint[0] = temp;
6712 opint[1] = bytemask;
6713 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6714 }
6715
6716 return NULL;
6717 }
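
/* For example, with constant operands, __builtin_alpha_insbl (x, 3)
   folds to (x & 0xff) << 24: the low byte of X shifted into byte
   position 3, with the other bytes cleared by the ZAPNOT fold. */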
6718
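/* Fold the builtins for the MSK family of instructions, which clear the
   addressed bytes; e.g. with a constant byte position,
   __builtin_alpha_mskbl (x, 3) becomes X with byte 3 zeroed, via a
   ZAPNOT with the complementary byte mask. */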
6719 static tree
6720 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6721 long op_const, unsigned HOST_WIDE_INT bytemask,
6722 bool is_high)
6723 {
6724 if (op_const & 2)
6725 {
6726 unsigned HOST_WIDE_INT loc;
6727
6728 loc = opint[1] & 7;
6729 if (BYTES_BIG_ENDIAN)
6730 loc ^= 7;
6731 bytemask <<= loc;
6732
6733 if (is_high)
6734 bytemask >>= 8;
6735
6736 opint[1] = bytemask ^ 0xff;
6737 }
6738
6739 return alpha_fold_builtin_zapnot (op, opint, op_const);
6740 }
6741
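/* Fold the builtin for the UMULH instruction: the high 64 bits of the
   unsigned 128-bit product of the operands; e.g. __builtin_alpha_umulh
   (1UL << 63, 4) is 2. */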
6742 static tree
6743 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6744 {
6745 switch (op_const)
6746 {
6747 case 3:
6748 {
6749 unsigned HOST_WIDE_INT l;
6750 HOST_WIDE_INT h;
6751
6752 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6753
6754 #if HOST_BITS_PER_WIDE_INT > 64
6755 # error fixme
6756 #endif
6757
6758 return build_int_cst (long_integer_type_node, h);
6759 }
6760
6761 case 1:
6762 opint[1] = opint[0];
6763 /* FALLTHRU */
6764 case 2:
6765 /* Note that (X*1) >> 64 == 0. */
6766 if (opint[1] == 0 || opint[1] == 1)
6767 return build_int_cst (long_integer_type_node, 0);
6768 break;
6769 }
6770 return NULL;
6771 }
6772
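/* Fold one of the MIN/MAX builtins (TARGET_MAX) by viewing the operands
   as the given vector type and folding the element-wise MIN_EXPR or
   MAX_EXPR. */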
6773 static tree
6774 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6775 {
6776 tree op0 = fold_convert (vtype, op[0]);
6777 tree op1 = fold_convert (vtype, op[1]);
6778 tree val = fold_build2 (code, vtype, op0, op1);
6779 return fold_convert (long_integer_type_node, val);
6780 }
6781
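/* Fold the builtin for the PERR instruction: the sum of the absolute
   differences of the eight byte pairs of the operands. */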
6782 static tree
6783 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6784 {
6785 unsigned HOST_WIDE_INT temp = 0;
6786 int i;
6787
6788 if (op_const != 3)
6789 return NULL;
6790
6791 for (i = 0; i < 8; ++i)
6792 {
6793 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6794 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6795 if (a >= b)
6796 temp += a - b;
6797 else
6798 temp += b - a;
6799 }
6800
6801 return build_int_cst (long_integer_type_node, temp);
6802 }
6803
6804 static tree
6805 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6806 {
6807 unsigned HOST_WIDE_INT temp;
6808
6809 if (op_const == 0)
6810 return NULL;
6811
6812 temp = opint[0] & 0xff;
6813 temp |= (opint[0] >> 24) & 0xff00;
6814
6815 return build_int_cst (long_integer_type_node, temp);
6816 }
6817
6818 static tree
6819 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6820 {
6821 unsigned HOST_WIDE_INT temp;
6822
6823 if (op_const == 0)
6824 return NULL;
6825
6826 temp = opint[0] & 0xff;
6827 temp |= (opint[0] >> 8) & 0xff00;
6828 temp |= (opint[0] >> 16) & 0xff0000;
6829 temp |= (opint[0] >> 24) & 0xff000000;
6830
6831 return build_int_cst (long_integer_type_node, temp);
6832 }
6833
6834 static tree
6835 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6836 {
6837 unsigned HOST_WIDE_INT temp;
6838
6839 if (op_const == 0)
6840 return NULL;
6841
6842 temp = opint[0] & 0xff;
6843 temp |= (opint[0] & 0xff00) << 24;
6844
6845 return build_int_cst (long_integer_type_node, temp);
6846 }
6847
6848 static tree
6849 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6850 {
6851 unsigned HOST_WIDE_INT temp;
6852
6853 if (op_const == 0)
6854 return NULL;
6855
6856 temp = opint[0] & 0xff;
6857 temp |= (opint[0] & 0x0000ff00) << 8;
6858 temp |= (opint[0] & 0x00ff0000) << 16;
6859 temp |= (opint[0] & 0xff000000) << 24;
6860
6861 return build_int_cst (long_integer_type_node, temp);
6862 }
6863
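/* Fold the builtins for the CIX bit-count instructions; e.g.
   __builtin_alpha_cttz (0x8) folds to 3, and both cttz and ctlz of
   zero fold to 64. */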
6864 static tree
6865 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6866 {
6867 unsigned HOST_WIDE_INT temp;
6868
6869 if (op_const == 0)
6870 return NULL;
6871
6872 if (opint[0] == 0)
6873 temp = 64;
6874 else
6875 temp = exact_log2 (opint[0] & -opint[0]);
6876
6877 return build_int_cst (long_integer_type_node, temp);
6878 }
6879
6880 static tree
6881 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6882 {
6883 unsigned HOST_WIDE_INT temp;
6884
6885 if (op_const == 0)
6886 return NULL;
6887
6888 if (opint[0] == 0)
6889 temp = 64;
6890 else
6891 temp = 64 - floor_log2 (opint[0]) - 1;
6892
6893 return build_int_cst (long_integer_type_node, temp);
6894 }
6895
6896 static tree
6897 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6898 {
6899 unsigned HOST_WIDE_INT temp, op;
6900
6901 if (op_const == 0)
6902 return NULL;
6903
6904 op = opint[0];
6905 temp = 0;
6906 while (op)
6907 temp++, op &= op - 1;
6908
6909 return build_int_cst (long_integer_type_node, temp);
6910 }
6911
6912 /* Fold one of our builtin functions. */
6913
6914 static tree
6915 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6916 {
6917 tree op[MAX_ARGS], t;
6918 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6919 long op_const = 0, arity = 0;
6920
6921 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6922 {
6923 tree arg = TREE_VALUE (t);
6924 if (arg == error_mark_node)
6925 return NULL;
6926 if (arity >= MAX_ARGS)
6927 return NULL;
6928
6929 op[arity] = arg;
6930 opint[arity] = 0;
6931 if (TREE_CODE (arg) == INTEGER_CST)
6932 {
6933 op_const |= 1L << arity;
6934 opint[arity] = int_cst_value (arg);
6935 }
6936 }
6937
6938 switch (DECL_FUNCTION_CODE (fndecl))
6939 {
6940 case ALPHA_BUILTIN_CMPBGE:
6941 return alpha_fold_builtin_cmpbge (opint, op_const);
6942
6943 case ALPHA_BUILTIN_EXTBL:
6944 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6945 case ALPHA_BUILTIN_EXTWL:
6946 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6947 case ALPHA_BUILTIN_EXTLL:
6948 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6949 case ALPHA_BUILTIN_EXTQL:
6950 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6951 case ALPHA_BUILTIN_EXTWH:
6952 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6953 case ALPHA_BUILTIN_EXTLH:
6954 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6955 case ALPHA_BUILTIN_EXTQH:
6956 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6957
6958 case ALPHA_BUILTIN_INSBL:
6959 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6960 case ALPHA_BUILTIN_INSWL:
6961 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6962 case ALPHA_BUILTIN_INSLL:
6963 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6964 case ALPHA_BUILTIN_INSQL:
6965 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6966 case ALPHA_BUILTIN_INSWH:
6967 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6968 case ALPHA_BUILTIN_INSLH:
6969 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6970 case ALPHA_BUILTIN_INSQH:
6971 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6972
6973 case ALPHA_BUILTIN_MSKBL:
6974 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6975 case ALPHA_BUILTIN_MSKWL:
6976 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6977 case ALPHA_BUILTIN_MSKLL:
6978 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6979 case ALPHA_BUILTIN_MSKQL:
6980 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6981 case ALPHA_BUILTIN_MSKWH:
6982 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6983 case ALPHA_BUILTIN_MSKLH:
6984 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6985 case ALPHA_BUILTIN_MSKQH:
6986 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6987
6988 case ALPHA_BUILTIN_UMULH:
6989 return alpha_fold_builtin_umulh (opint, op_const);
6990
6991 case ALPHA_BUILTIN_ZAP:
6992 opint[1] ^= 0xff;
6993 /* FALLTHRU */
6994 case ALPHA_BUILTIN_ZAPNOT:
6995 return alpha_fold_builtin_zapnot (op, opint, op_const);
6996
6997 case ALPHA_BUILTIN_MINUB8:
6998 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
6999 case ALPHA_BUILTIN_MINSB8:
7000 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7001 case ALPHA_BUILTIN_MINUW4:
7002 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7003 case ALPHA_BUILTIN_MINSW4:
7004 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7005 case ALPHA_BUILTIN_MAXUB8:
7006 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7007 case ALPHA_BUILTIN_MAXSB8:
7008 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7009 case ALPHA_BUILTIN_MAXUW4:
7010 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7011 case ALPHA_BUILTIN_MAXSW4:
7012 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7013
7014 case ALPHA_BUILTIN_PERR:
7015 return alpha_fold_builtin_perr (opint, op_const);
7016 case ALPHA_BUILTIN_PKLB:
7017 return alpha_fold_builtin_pklb (opint, op_const);
7018 case ALPHA_BUILTIN_PKWB:
7019 return alpha_fold_builtin_pkwb (opint, op_const);
7020 case ALPHA_BUILTIN_UNPKBL:
7021 return alpha_fold_builtin_unpkbl (opint, op_const);
7022 case ALPHA_BUILTIN_UNPKBW:
7023 return alpha_fold_builtin_unpkbw (opint, op_const);
7024
7025 case ALPHA_BUILTIN_CTTZ:
7026 return alpha_fold_builtin_cttz (opint, op_const);
7027 case ALPHA_BUILTIN_CTLZ:
7028 return alpha_fold_builtin_ctlz (opint, op_const);
7029 case ALPHA_BUILTIN_CTPOP:
7030 return alpha_fold_builtin_ctpop (opint, op_const);
7031
7032 case ALPHA_BUILTIN_AMASK:
7033 case ALPHA_BUILTIN_IMPLVER:
7034 case ALPHA_BUILTIN_RPCC:
7035 case ALPHA_BUILTIN_THREAD_POINTER:
7036 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7037 /* None of these are foldable at compile-time. */
7038 default:
7039 return NULL;
7040 }
7041 }
7042 \f
7043 /* This page contains routines that are used to determine what the function
7044 prologue and epilogue code will do and write them out. */
7045
7046 /* Compute the size of the save area in the stack. */
7047
7048 /* These variables are used for communication between the following functions.
7049 They indicate various things about the current function being compiled
7050 that are used to tell what kind of prologue, epilogue and procedure
7051 descriptor to generate. */
7052
7053 /* The kind of procedure (null, register frame or stack frame) required by the current function. */
7054 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7055 static enum alpha_procedure_types alpha_procedure_type;
7056
7057 /* Register number (either FP or SP) that is used to unwind the frame. */
7058 static int vms_unwind_regno;
7059
7060 /* Register number used to save FP. We need not have one for RA since
7061 we don't modify it for register procedures. This is only defined
7062 for register frame procedures. */
7063 static int vms_save_fp_regno;
7064
7065 /* Register number used to reference objects off our PV. */
7066 static int vms_base_regno;
7067
7068 /* Compute register masks for saved registers. */
7069
7070 static void
7071 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7072 {
7073 unsigned long imask = 0;
7074 unsigned long fmask = 0;
7075 unsigned int i;
7076
7077 /* When outputting a thunk, we don't have valid register life info,
7078 but assemble_start_function wants to output .frame and .mask
7079 directives. */
7080 if (crtl->is_thunk)
7081 {
7082 *imaskP = 0;
7083 *fmaskP = 0;
7084 return;
7085 }
7086
7087 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7088 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7089
7090 /* One for every register we have to save. */
7091 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7092 if (! fixed_regs[i] && ! call_used_regs[i]
7093 && df_regs_ever_live_p (i) && i != REG_RA
7094 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7095 {
7096 if (i < 32)
7097 imask |= (1UL << i);
7098 else
7099 fmask |= (1UL << (i - 32));
7100 }
7101
7102 /* We need to restore these for the handler. */
7103 if (crtl->calls_eh_return)
7104 {
7105 for (i = 0; ; ++i)
7106 {
7107 unsigned regno = EH_RETURN_DATA_REGNO (i);
7108 if (regno == INVALID_REGNUM)
7109 break;
7110 imask |= 1UL << regno;
7111 }
7112 }
7113
7114 /* If any register spilled, then spill the return address also. */
7115 /* ??? This is required by the Digital stack unwind specification
7116 and isn't needed if we're doing Dwarf2 unwinding. */
7117 if (imask || fmask || alpha_ra_ever_killed ())
7118 imask |= (1UL << REG_RA);
7119
7120 *imaskP = imask;
7121 *fmaskP = fmask;
7122 }
7123
7124 int
7125 alpha_sa_size (void)
7126 {
7127 unsigned long mask[2];
7128 int sa_size = 0;
7129 int i, j;
7130
7131 alpha_sa_mask (&mask[0], &mask[1]);
7132
7133 if (TARGET_ABI_UNICOSMK)
7134 {
7135 if (mask[0] || mask[1])
7136 sa_size = 14;
7137 }
7138 else
7139 {
7140 for (j = 0; j < 2; ++j)
7141 for (i = 0; i < 32; ++i)
7142 if ((mask[j] >> i) & 1)
7143 sa_size++;
7144 }
7145
7146 if (TARGET_ABI_UNICOSMK)
7147 {
7148 /* We might not need to generate a frame if we don't make any calls
7149 (including calls to __T3E_MISMATCH if this is a vararg function),
7150 don't have any local variables which require stack slots, don't
7151 use alloca and have not determined that we need a frame for other
7152 reasons. */
7153
7154 alpha_procedure_type
7155 = (sa_size || get_frame_size() != 0
7156 || crtl->outgoing_args_size
7157 || cfun->stdarg || cfun->calls_alloca
7158 || frame_pointer_needed)
7159 ? PT_STACK : PT_REGISTER;
7160
7161 /* Always reserve space for saving callee-saved registers if we
7162 need a frame, as required by the calling convention. */
7163 if (alpha_procedure_type == PT_STACK)
7164 sa_size = 14;
7165 }
7166 else if (TARGET_ABI_OPEN_VMS)
7167 {
7168 /* Start by assuming we can use a register procedure if we don't
7169 make any calls (REG_RA not used) or need to save any
7170 registers, and a stack procedure if we do. */
7171 if ((mask[0] >> REG_RA) & 1)
7172 alpha_procedure_type = PT_STACK;
7173 else if (get_frame_size() != 0)
7174 alpha_procedure_type = PT_REGISTER;
7175 else
7176 alpha_procedure_type = PT_NULL;
7177
7178 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7179 made the final decision on stack procedure vs register procedure. */
7180 if (alpha_procedure_type == PT_STACK)
7181 sa_size -= 2;
7182
7183 /* Decide whether to refer to objects off our PV via FP or PV.
7184 If we need FP for something else or if we receive a nonlocal
7185 goto (which expects PV to contain the value), we must use PV.
7186 Otherwise, start by assuming we can use FP. */
7187
7188 vms_base_regno
7189 = (frame_pointer_needed
7190 || cfun->has_nonlocal_label
7191 || alpha_procedure_type == PT_STACK
7192 || crtl->outgoing_args_size)
7193 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7194
7195 /* If we want to copy PV into FP, we need to find some register
7196 in which to save FP. */
7197
7198 vms_save_fp_regno = -1;
7199 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7200 for (i = 0; i < 32; i++)
7201 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7202 vms_save_fp_regno = i;
7203
7204 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7205 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7206 else if (alpha_procedure_type == PT_NULL)
7207 vms_base_regno = REG_PV;
7208
7209 /* Stack unwinding should be done via FP unless we use it for PV. */
7210 vms_unwind_regno = (vms_base_regno == REG_PV
7211 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7212
7213 /* If this is a stack procedure, allow space for saving FP and RA. */
7214 if (alpha_procedure_type == PT_STACK)
7215 sa_size += 2;
7216 }
7217 else
7218 {
7219 /* Our size must be even (multiple of 16 bytes). */
7220 if (sa_size & 1)
7221 sa_size++;
7222 }
7223
7224 return sa_size * 8;
7225 }
7226
7227 /* Define the offset between two registers, one to be eliminated,
7228 and the other its replacement, at the start of a routine. */
7229
7230 HOST_WIDE_INT
7231 alpha_initial_elimination_offset (unsigned int from,
7232 unsigned int to ATTRIBUTE_UNUSED)
7233 {
7234 HOST_WIDE_INT ret;
7235
7236 ret = alpha_sa_size ();
7237 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7238
7239 switch (from)
7240 {
7241 case FRAME_POINTER_REGNUM:
7242 break;
7243
7244 case ARG_POINTER_REGNUM:
7245 ret += (ALPHA_ROUND (get_frame_size ()
7246 + crtl->args.pretend_args_size)
7247 - crtl->args.pretend_args_size);
7248 break;
7249
7250 default:
7251 gcc_unreachable ();
7252 }
7253
7254 return ret;
7255 }
7256
7257 int
7258 alpha_pv_save_size (void)
7259 {
7260 alpha_sa_size ();
7261 return alpha_procedure_type == PT_STACK ? 8 : 0;
7262 }
7263
7264 int
7265 alpha_using_fp (void)
7266 {
7267 alpha_sa_size ();
7268 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7269 }
7270
7271 #if TARGET_ABI_OPEN_VMS
7272
7273 const struct attribute_spec vms_attribute_table[] =
7274 {
7275 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7276 { "overlaid", 0, 0, true, false, false, NULL },
7277 { "global", 0, 0, true, false, false, NULL },
7278 { "initialize", 0, 0, true, false, false, NULL },
7279 { NULL, 0, 0, false, false, false, NULL }
7280 };
7281
7282 #endif
7283
7284 static int
7285 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7286 {
7287 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7288 }
7289
7290 int
7291 alpha_find_lo_sum_using_gp (rtx insn)
7292 {
7293 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7294 }
7295
7296 static int
7297 alpha_does_function_need_gp (void)
7298 {
7299 rtx insn;
7300
7301 /* The GP being variable is an OSF abi thing. */
7302 if (! TARGET_ABI_OSF)
7303 return 0;
7304
7305 /* We need the gp to load the address of __mcount. */
7306 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7307 return 1;
7308
7309 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7310 if (crtl->is_thunk)
7311 return 1;
7312
7313 /* The nonlocal receiver pattern assumes that the gp is valid for
7314 the nested function. Reasonable because it's almost always set
7315 correctly already. For the cases where that's wrong, make sure
7316 the nested function loads its gp on entry. */
7317 if (crtl->has_nonlocal_goto)
7318 return 1;
7319
7320 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7321 Even if we are a static function, we still need to do this in case
7322 our address is taken and passed to something like qsort. */
7323
7324 push_topmost_sequence ();
7325 insn = get_insns ();
7326 pop_topmost_sequence ();
7327
7328 for (; insn; insn = NEXT_INSN (insn))
7329 if (INSN_P (insn)
7330 && ! JUMP_TABLE_DATA_P (insn)
7331 && GET_CODE (PATTERN (insn)) != USE
7332 && GET_CODE (PATTERN (insn)) != CLOBBER
7333 && get_attr_usegp (insn))
7334 return 1;
7335
7336 return 0;
7337 }
7338
7339 \f
7340 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7341 sequences. */
7342
7343 static rtx
7344 set_frame_related_p (void)
7345 {
7346 rtx seq = get_insns ();
7347 rtx insn;
7348
7349 end_sequence ();
7350
7351 if (!seq)
7352 return NULL_RTX;
7353
7354 if (INSN_P (seq))
7355 {
7356 insn = seq;
7357 while (insn != NULL_RTX)
7358 {
7359 RTX_FRAME_RELATED_P (insn) = 1;
7360 insn = NEXT_INSN (insn);
7361 }
7362 seq = emit_insn (seq);
7363 }
7364 else
7365 {
7366 seq = emit_insn (seq);
7367 RTX_FRAME_RELATED_P (seq) = 1;
7368 }
7369 return seq;
7370 }
7371
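/* Wrap the insns emitted by EXP in a temporary sequence, mark each one as
   RTX_FRAME_RELATED_P, and emit them into the enclosing insn stream. */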
7372 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7373
7374 /* Generates a store with the proper unwind info attached. VALUE is
7375 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7376 contains SP+FRAME_BIAS, and that is the unwind info that should be
7377 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7378 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7379
7380 static void
7381 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7382 HOST_WIDE_INT base_ofs, rtx frame_reg)
7383 {
7384 rtx addr, mem, insn;
7385
7386 addr = plus_constant (base_reg, base_ofs);
7387 mem = gen_rtx_MEM (DImode, addr);
7388 set_mem_alias_set (mem, alpha_sr_alias_set);
7389
7390 insn = emit_move_insn (mem, value);
7391 RTX_FRAME_RELATED_P (insn) = 1;
7392
7393 if (frame_bias || value != frame_reg)
7394 {
7395 if (frame_bias)
7396 {
7397 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7398 mem = gen_rtx_MEM (DImode, addr);
7399 }
7400
7401 REG_NOTES (insn)
7402 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7403 gen_rtx_SET (VOIDmode, mem, frame_reg),
7404 REG_NOTES (insn));
7405 }
7406 }
7407
7408 static void
7409 emit_frame_store (unsigned int regno, rtx base_reg,
7410 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7411 {
7412 rtx reg = gen_rtx_REG (DImode, regno);
7413 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7414 }
7415
7416 /* Write function prologue. */
7417
7418 /* On VMS we have two kinds of functions:
7419 
7420 - stack frame (PROC_STACK)
7421 these are 'normal' functions with local vars which call
7422 other functions
7423 - register frame (PROC_REGISTER)
7424 keeps all data in registers, needs no stack
7425 
7426 We must pass this information to the assembler so it can generate the
7427 proper pdsc (procedure descriptor).
7428 This is done with the '.pdesc' directive.
7429 
7430 On non-VMS targets, we don't really differentiate between the two, as
7431 we can simply allocate stack without saving registers. */
7432
7433 void
7434 alpha_expand_prologue (void)
7435 {
7436 /* Registers to save. */
7437 unsigned long imask = 0;
7438 unsigned long fmask = 0;
7439 /* Stack space needed for pushing registers clobbered by us. */
7440 HOST_WIDE_INT sa_size;
7441 /* Complete stack size needed. */
7442 HOST_WIDE_INT frame_size;
7443 /* Offset from base reg to register save area. */
7444 HOST_WIDE_INT reg_offset;
7445 rtx sa_reg;
7446 int i;
7447
7448 sa_size = alpha_sa_size ();
7449
7450 frame_size = get_frame_size ();
7451 if (TARGET_ABI_OPEN_VMS)
7452 frame_size = ALPHA_ROUND (sa_size
7453 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7454 + frame_size
7455 + crtl->args.pretend_args_size);
7456 else if (TARGET_ABI_UNICOSMK)
7457 /* We have to allocate space for the DSIB if we generate a frame. */
7458 frame_size = ALPHA_ROUND (sa_size
7459 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7460 + ALPHA_ROUND (frame_size
7461 + crtl->outgoing_args_size);
7462 else
7463 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7464 + sa_size
7465 + ALPHA_ROUND (frame_size
7466 + crtl->args.pretend_args_size));
7467
7468 if (TARGET_ABI_OPEN_VMS)
7469 reg_offset = 8;
7470 else
7471 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7472
7473 alpha_sa_mask (&imask, &fmask);
7474
7475 /* Emit an insn to reload GP, if needed. */
7476 if (TARGET_ABI_OSF)
7477 {
7478 alpha_function_needs_gp = alpha_does_function_need_gp ();
7479 if (alpha_function_needs_gp)
7480 emit_insn (gen_prologue_ldgp ());
7481 }
7482
7483 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7484 the call to mcount ourselves, rather than having the linker do it
7485 magically in response to -pg. Since _mcount has special linkage,
7486 don't represent the call as a call. */
7487 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7488 emit_insn (gen_prologue_mcount ());
7489
7490 if (TARGET_ABI_UNICOSMK)
7491 unicosmk_gen_dsib (&imask);
7492
7493 /* Adjust the stack by the frame size. If the frame size is > 4096
7494 bytes, we need to be sure we probe somewhere in the first and last
7495 4096 bytes (we can probably get away without the latter test) and
7496 every 8192 bytes in between. If the frame size is > 32768, we
7497 do this in a loop. Otherwise, we generate the explicit probe
7498 instructions.
7499
7500 Note that we are only allowed to adjust sp once in the prologue. */
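
/* For illustration (OSF): with frame_size == 20000 and no saved
   registers, the probes below land at sp-4096 and sp-12288, the extra
   probe covers sp-20000, and a single sp adjustment of -20000 follows.
   Frames larger than 32768 bytes use the probe loop in the else arm
   instead. */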
7501
7502 if (frame_size <= 32768)
7503 {
7504 if (frame_size > 4096)
7505 {
7506 int probed;
7507
7508 for (probed = 4096; probed < frame_size; probed += 8192)
7509 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7510 ? -probed + 64
7511 : -probed)));
7512
7513 /* We only have to do this probe if we aren't saving registers. */
7514 if (sa_size == 0 && frame_size > probed - 4096)
7515 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7516 }
7517
7518 if (frame_size != 0)
7519 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7520 GEN_INT (TARGET_ABI_UNICOSMK
7521 ? -frame_size + 64
7522 : -frame_size))));
7523 }
7524 else
7525 {
7526 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7527 number of 8192 byte blocks to probe. We then probe each block
7528 in the loop and then set SP to the proper location. If the
7529 amount remaining is > 4096, we have to do one more probe if we
7530 are not saving any registers. */
7531
7532 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7533 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7534 rtx ptr = gen_rtx_REG (DImode, 22);
7535 rtx count = gen_rtx_REG (DImode, 23);
7536 rtx seq;
7537
7538 emit_move_insn (count, GEN_INT (blocks));
7539 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7540 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7541
7542 /* Because of the difficulty in emitting a new basic block this
7543 late in the compilation, generate the loop as a single insn. */
7544 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7545
7546 if (leftover > 4096 && sa_size == 0)
7547 {
7548 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7549 MEM_VOLATILE_P (last) = 1;
7550 emit_move_insn (last, const0_rtx);
7551 }
7552
7553 if (TARGET_ABI_WINDOWS_NT)
7554 {
7555 /* For NT stack unwind (done by 'reverse execution'), it's
7556 not OK to take the result of a loop, even though the value
7557 is already in ptr, so we reload it via a single operation
7558 and subtract it from sp.
7559
7560 Yes, that's correct -- we have to reload the whole constant
7561 into a temporary via ldah+lda then subtract from sp. */
7562
7563 HOST_WIDE_INT lo, hi;
7564 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7565 hi = frame_size - lo;
7566
7567 emit_move_insn (ptr, GEN_INT (hi));
7568 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7569 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7570 ptr));
7571 }
7572 else
7573 {
7574 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7575 GEN_INT (-leftover)));
7576 }
7577
7578 /* This alternative is special, because the DWARF code cannot
7579 possibly intuit through the loop above. So we invent this
7580 note for it to look at instead.
7581 RTX_FRAME_RELATED_P (seq) = 1;
7582 REG_NOTES (seq)
7583 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7584 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7585 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7586 GEN_INT (TARGET_ABI_UNICOSMK
7587 ? -frame_size + 64
7588 : -frame_size))),
7589 REG_NOTES (seq));
7590 }
7591
7592 if (!TARGET_ABI_UNICOSMK)
7593 {
7594 HOST_WIDE_INT sa_bias = 0;
7595
7596 /* Cope with very large offsets to the register save area. */
7597 sa_reg = stack_pointer_rtx;
7598 if (reg_offset + sa_size > 0x8000)
7599 {
7600 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7601 rtx sa_bias_rtx;
7602
7603 if (low + sa_size <= 0x8000)
7604 sa_bias = reg_offset - low, reg_offset = low;
7605 else
7606 sa_bias = reg_offset, reg_offset = 0;
7607
7608 sa_reg = gen_rtx_REG (DImode, 24);
7609 sa_bias_rtx = GEN_INT (sa_bias);
7610
7611 if (add_operand (sa_bias_rtx, DImode))
7612 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7613 else
7614 {
7615 emit_move_insn (sa_reg, sa_bias_rtx);
7616 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7617 }
7618 }
7619
7620 /* Save regs in stack order. Beginning with VMS PV. */
7621 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7622 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7623
7624 /* Save register RA next. */
7625 if (imask & (1UL << REG_RA))
7626 {
7627 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7628 imask &= ~(1UL << REG_RA);
7629 reg_offset += 8;
7630 }
7631
7632 /* Now save any other registers required to be saved. */
7633 for (i = 0; i < 31; i++)
7634 if (imask & (1UL << i))
7635 {
7636 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7637 reg_offset += 8;
7638 }
7639
7640 for (i = 0; i < 31; i++)
7641 if (fmask & (1UL << i))
7642 {
7643 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7644 reg_offset += 8;
7645 }
7646 }
7647 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7648 {
7649 /* The standard frame on the T3E includes space for saving registers.
7650 We just have to use it. We don't have to save the return address and
7651 the old frame pointer here - they are saved in the DSIB. */
7652
7653 reg_offset = -56;
7654 for (i = 9; i < 15; i++)
7655 if (imask & (1UL << i))
7656 {
7657 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7658 reg_offset -= 8;
7659 }
7660 for (i = 2; i < 10; i++)
7661 if (fmask & (1UL << i))
7662 {
7663 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7664 reg_offset -= 8;
7665 }
7666 }
7667
7668 if (TARGET_ABI_OPEN_VMS)
7669 {
7670 if (alpha_procedure_type == PT_REGISTER)
7671 /* Register frame procedures save the fp.
7672 ?? Ought to have a dwarf2 save for this. */
7673 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7674 hard_frame_pointer_rtx);
7675
7676 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7677 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7678 gen_rtx_REG (DImode, REG_PV)));
7679
7680 if (alpha_procedure_type != PT_NULL
7681 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7682 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7683
7684 /* If we have to allocate space for outgoing args, do it now. */
7685 if (crtl->outgoing_args_size != 0)
7686 {
7687 rtx seq
7688 = emit_move_insn (stack_pointer_rtx,
7689 plus_constant
7690 (hard_frame_pointer_rtx,
7691 - (ALPHA_ROUND
7692 (crtl->outgoing_args_size))));
7693
7694 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7695 if ! frame_pointer_needed. Setting the bit will change the CFA
7696 computation rule to use sp again, which would be wrong if we had
7697 frame_pointer_needed, as this means sp might move unpredictably
7698 later on.
7699
7700 Also, note that
7701 frame_pointer_needed
7702 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7703 and
7704 crtl->outgoing_args_size != 0
7705 => alpha_procedure_type != PT_NULL,
7706
7707 so when we are not setting the bit here, we are guaranteed to
7708 have emitted an FRP frame pointer update just before. */
7709 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7710 }
7711 }
7712 else if (!TARGET_ABI_UNICOSMK)
7713 {
7714 /* If we need a frame pointer, set it from the stack pointer. */
7715 if (frame_pointer_needed)
7716 {
7717 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7718 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7719 else
7720 /* This must always be the last instruction in the
7721 prologue, thus we emit a special move + clobber. */
7722 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7723 stack_pointer_rtx, sa_reg)));
7724 }
7725 }
7726
7727 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7728 the prologue, for exception handling reasons, we cannot do this for
7729 any insn that might fault. We could prevent this for mems with a
7730 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7731 have to prevent all such scheduling with a blockage.
7732
7733 Linux, on the other hand, never bothered to implement OSF/1's
7734 exception handling, and so doesn't care about such things. Anyone
7735 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7736
7737 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7738 emit_insn (gen_blockage ());
7739 }
7740
7741 /* Count the number of .file directives, so that .loc is up to date. */
7742 int num_source_filenames = 0;
7743
7744 /* Output the textual info surrounding the prologue. */
7745
7746 void
7747 alpha_start_function (FILE *file, const char *fnname,
7748 tree decl ATTRIBUTE_UNUSED)
7749 {
7750 unsigned long imask = 0;
7751 unsigned long fmask = 0;
7752 /* Stack space needed for pushing registers clobbered by us. */
7753 HOST_WIDE_INT sa_size;
7754 /* Complete stack size needed. */
7755 unsigned HOST_WIDE_INT frame_size;
7756 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7757 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7758 ? 524288
7759 : 1UL << 31;
7760 /* Offset from base reg to register save area. */
7761 HOST_WIDE_INT reg_offset;
7762 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7763 int i;
7764
7765 /* Don't emit an extern directive for functions defined in the same file. */
7766 if (TARGET_ABI_UNICOSMK)
7767 {
7768 tree name_tree;
7769 name_tree = get_identifier (fnname);
7770 TREE_ASM_WRITTEN (name_tree) = 1;
7771 }
7772
7773 alpha_fnname = fnname;
7774 sa_size = alpha_sa_size ();
7775
7776 frame_size = get_frame_size ();
7777 if (TARGET_ABI_OPEN_VMS)
7778 frame_size = ALPHA_ROUND (sa_size
7779 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7780 + frame_size
7781 + crtl->args.pretend_args_size);
7782 else if (TARGET_ABI_UNICOSMK)
7783 frame_size = ALPHA_ROUND (sa_size
7784 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7785 + ALPHA_ROUND (frame_size
7786 + crtl->outgoing_args_size);
7787 else
7788 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7789 + sa_size
7790 + ALPHA_ROUND (frame_size
7791 + crtl->args.pretend_args_size));
7792
7793 if (TARGET_ABI_OPEN_VMS)
7794 reg_offset = 8;
7795 else
7796 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7797
7798 alpha_sa_mask (&imask, &fmask);
7799
7800 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7801 We have to do that before the .ent directive as we cannot switch
7802 files within procedures with native ecoff because line numbers are
7803 linked to procedure descriptors.
7804 Outputting the lineno helps debugging of one line functions as they
7805 would otherwise get no line number at all. Please note that we would
7806 like to put out last_linenum from final.c, but it is not accessible. */
7807
7808 if (write_symbols == SDB_DEBUG)
7809 {
7810 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7811 ASM_OUTPUT_SOURCE_FILENAME (file,
7812 DECL_SOURCE_FILE (current_function_decl));
7813 #endif
7814 #ifdef SDB_OUTPUT_SOURCE_LINE
7815 if (debug_info_level != DINFO_LEVEL_TERSE)
7816 SDB_OUTPUT_SOURCE_LINE (file,
7817 DECL_SOURCE_LINE (current_function_decl));
7818 #endif
7819 }
7820
7821 /* Issue function start and label. */
7822 if (TARGET_ABI_OPEN_VMS
7823 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7824 {
7825 fputs ("\t.ent ", file);
7826 assemble_name (file, fnname);
7827 putc ('\n', file);
7828
7829 /* If the function needs GP, we'll write the "..ng" label there.
7830 Otherwise, do it here. */
7831 if (TARGET_ABI_OSF
7832 && ! alpha_function_needs_gp
7833 && ! crtl->is_thunk)
7834 {
7835 putc ('$', file);
7836 assemble_name (file, fnname);
7837 fputs ("..ng:\n", file);
7838 }
7839 }
7840
7841 strcpy (entry_label, fnname);
7842 if (TARGET_ABI_OPEN_VMS)
7843 strcat (entry_label, "..en");
7844
7845 /* For public functions, the label must be globalized by appending an
7846 additional colon. */
7847 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7848 strcat (entry_label, ":");
7849
7850 ASM_OUTPUT_LABEL (file, entry_label);
7851 inside_function = TRUE;
7852
7853 if (TARGET_ABI_OPEN_VMS)
7854 fprintf (file, "\t.base $%d\n", vms_base_regno);
7855
7856 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7857 && !flag_inhibit_size_directive)
7858 {
7859 /* Set flags in procedure descriptor to request IEEE-conformant
7860 math-library routines. The value we set it to is PDSC_EXC_IEEE
7861 (/usr/include/pdsc.h). */
7862 fputs ("\t.eflag 48\n", file);
7863 }
7864
7865 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7866 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7867 alpha_arg_offset = -frame_size + 48;
7868
7869 /* Describe our frame. If the frame size is larger than fits in an integer,
7870 print it as zero to avoid an assembler error. We won't be
7871 properly describing such a frame, but that's the best we can do. */
7872 if (TARGET_ABI_UNICOSMK)
7873 ;
7874 else if (TARGET_ABI_OPEN_VMS)
7875 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7876 HOST_WIDE_INT_PRINT_DEC "\n",
7877 vms_unwind_regno,
7878 frame_size >= (1UL << 31) ? 0 : frame_size,
7879 reg_offset);
7880 else if (!flag_inhibit_size_directive)
7881 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7882 (frame_pointer_needed
7883 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7884 frame_size >= max_frame_size ? 0 : frame_size,
7885 crtl->args.pretend_args_size);
7886
7887 /* Describe which registers were spilled. */
7888 if (TARGET_ABI_UNICOSMK)
7889 ;
7890 else if (TARGET_ABI_OPEN_VMS)
7891 {
7892 if (imask)
7893 /* ??? Does VMS care if mask contains ra? The old code didn't
7894 set it, so I don't here. */
7895 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7896 if (fmask)
7897 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7898 if (alpha_procedure_type == PT_REGISTER)
7899 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7900 }
7901 else if (!flag_inhibit_size_directive)
7902 {
7903 if (imask)
7904 {
7905 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7906 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7907
7908 for (i = 0; i < 32; ++i)
7909 if (imask & (1UL << i))
7910 reg_offset += 8;
7911 }
7912
7913 if (fmask)
7914 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7915 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7916 }
7917
7918 #if TARGET_ABI_OPEN_VMS
7919 /* Ifdef'ed because link_section is only available then. */
7920 switch_to_section (readonly_data_section);
7921 fprintf (file, "\t.align 3\n");
7922 assemble_name (file, fnname); fputs ("..na:\n", file);
7923 fputs ("\t.ascii \"", file);
7924 assemble_name (file, fnname);
7925 fputs ("\\0\"\n", file);
7926 alpha_need_linkage (fnname, 1);
7927 switch_to_section (text_section);
7928 #endif
7929 }
7930
7931 /* Emit the .prologue note at the scheduled end of the prologue. */
7932
7933 static void
7934 alpha_output_function_end_prologue (FILE *file)
7935 {
7936 if (TARGET_ABI_UNICOSMK)
7937 ;
7938 else if (TARGET_ABI_OPEN_VMS)
7939 fputs ("\t.prologue\n", file);
7940 else if (TARGET_ABI_WINDOWS_NT)
7941 fputs ("\t.prologue 0\n", file);
7942 else if (!flag_inhibit_size_directive)
7943 fprintf (file, "\t.prologue %d\n",
7944 alpha_function_needs_gp || crtl->is_thunk);
7945 }
7946
7947 /* Write function epilogue. */
7948
7949 /* ??? At some point we will want to support full unwind, and so will
7950 need to mark the epilogue as well. At the moment, we just confuse
7951 dwarf2out. */
7952 #undef FRP
7953 #define FRP(exp) exp
7954
7955 void
7956 alpha_expand_epilogue (void)
7957 {
7958 /* Registers to save. */
7959 unsigned long imask = 0;
7960 unsigned long fmask = 0;
7961 /* Stack space needed for pushing registers clobbered by us. */
7962 HOST_WIDE_INT sa_size;
7963 /* Complete stack size needed. */
7964 HOST_WIDE_INT frame_size;
7965 /* Offset from base reg to register save area. */
7966 HOST_WIDE_INT reg_offset;
7967 int fp_is_frame_pointer, fp_offset;
7968 rtx sa_reg, sa_reg_exp = NULL;
7969 rtx sp_adj1, sp_adj2, mem;
7970 rtx eh_ofs;
7971 int i;
7972
7973 sa_size = alpha_sa_size ();
7974
7975 frame_size = get_frame_size ();
7976 if (TARGET_ABI_OPEN_VMS)
7977 frame_size = ALPHA_ROUND (sa_size
7978 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7979 + frame_size
7980 + crtl->args.pretend_args_size);
7981 else if (TARGET_ABI_UNICOSMK)
7982 frame_size = ALPHA_ROUND (sa_size
7983 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7984 + ALPHA_ROUND (frame_size
7985 + crtl->outgoing_args_size);
7986 else
7987 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7988 + sa_size
7989 + ALPHA_ROUND (frame_size
7990 + crtl->args.pretend_args_size));
7991
7992 if (TARGET_ABI_OPEN_VMS)
7993 {
7994 if (alpha_procedure_type == PT_STACK)
7995 reg_offset = 8;
7996 else
7997 reg_offset = 0;
7998 }
7999 else
8000 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8001
8002 alpha_sa_mask (&imask, &fmask);
8003
8004 fp_is_frame_pointer
8005 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8006 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8007 fp_offset = 0;
8008 sa_reg = stack_pointer_rtx;
8009
8010 if (crtl->calls_eh_return)
8011 eh_ofs = EH_RETURN_STACKADJ_RTX;
8012 else
8013 eh_ofs = NULL_RTX;
8014
8015 if (!TARGET_ABI_UNICOSMK && sa_size)
8016 {
8017 /* If we have a frame pointer, restore SP from it. */
8018 if ((TARGET_ABI_OPEN_VMS
8019 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8020 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8021 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8022
8023 /* Cope with very large offsets to the register save area. */
8024 if (reg_offset + sa_size > 0x8000)
8025 {
8026 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8027 HOST_WIDE_INT bias;
8028
8029 if (low + sa_size <= 0x8000)
8030 bias = reg_offset - low, reg_offset = low;
8031 else
8032 bias = reg_offset, reg_offset = 0;
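/* A worked example of the bias split, with illustrative values only:
   if reg_offset were 0x9000 and sa_size 0x100, then
   low = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000, low + sa_size
   still fits below 0x8000, so bias = 0x9000 - (-0x7000) = 0x10000 and
   reg_offset becomes -0x7000.  Each save-area slot is then addressed
   from sa_reg with a displacement that fits a signed 16-bit field.  */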
8033
8034 sa_reg = gen_rtx_REG (DImode, 22);
8035 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8036
8037 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8038 }
8039
8040 /* Restore registers in order, excepting a true frame pointer. */
8041
8042 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8043 if (! eh_ofs)
8044 set_mem_alias_set (mem, alpha_sr_alias_set);
8045 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8046
8047 reg_offset += 8;
8048 imask &= ~(1UL << REG_RA);
8049
8050 for (i = 0; i < 31; ++i)
8051 if (imask & (1UL << i))
8052 {
8053 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8054 fp_offset = reg_offset;
8055 else
8056 {
8057 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8058 set_mem_alias_set (mem, alpha_sr_alias_set);
8059 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8060 }
8061 reg_offset += 8;
8062 }
8063
8064 for (i = 0; i < 31; ++i)
8065 if (fmask & (1UL << i))
8066 {
8067 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8068 set_mem_alias_set (mem, alpha_sr_alias_set);
8069 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8070 reg_offset += 8;
8071 }
8072 }
8073 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8074 {
8075 /* Restore callee-saved general-purpose registers. */
8076
8077 reg_offset = -56;
8078
8079 for (i = 9; i < 15; i++)
8080 if (imask & (1UL << i))
8081 {
8082 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8083 reg_offset));
8084 set_mem_alias_set (mem, alpha_sr_alias_set);
8085 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8086 reg_offset -= 8;
8087 }
8088
8089 for (i = 2; i < 10; i++)
8090 if (fmask & (1UL << i))
8091 {
8092 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8093 reg_offset));
8094 set_mem_alias_set (mem, alpha_sr_alias_set);
8095 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8096 reg_offset -= 8;
8097 }
8098
8099 /* Restore the return address from the DSIB. */
8100
8101 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8102 set_mem_alias_set (mem, alpha_sr_alias_set);
8103 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8104 }
8105
8106 if (frame_size || eh_ofs)
8107 {
8108 sp_adj1 = stack_pointer_rtx;
8109
8110 if (eh_ofs)
8111 {
8112 sp_adj1 = gen_rtx_REG (DImode, 23);
8113 emit_move_insn (sp_adj1,
8114 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8115 }
8116
8117 /* If the stack size is large, begin computation into a temporary
8118 register so as not to interfere with a potential fp restore,
8119 which must be consecutive with an SP restore. */
8120 if (frame_size < 32768
8121 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8122 sp_adj2 = GEN_INT (frame_size);
8123 else if (TARGET_ABI_UNICOSMK)
8124 {
8125 sp_adj1 = gen_rtx_REG (DImode, 23);
8126 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8127 sp_adj2 = const0_rtx;
8128 }
8129 else if (frame_size < 0x40007fffL)
8130 {
8131 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8132
8133 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8134 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8135 sp_adj1 = sa_reg;
8136 else
8137 {
8138 sp_adj1 = gen_rtx_REG (DImode, 23);
8139 FRP (emit_move_insn (sp_adj1, sp_adj2));
8140 }
8141 sp_adj2 = GEN_INT (low);
8142 }
8143 else
8144 {
8145 rtx tmp = gen_rtx_REG (DImode, 23);
8146 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8147 3, false));
8148 if (!sp_adj2)
8149 {
8150 /* We can't drop new things to memory this late, afaik,
8151 so build it up by pieces. */
8152 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8153 -(frame_size < 0)));
8154 gcc_assert (sp_adj2);
8155 }
8156 }
8157
8158 /* From now on, things must be in order. So emit blockages. */
8159
8160 /* Restore the frame pointer. */
8161 if (TARGET_ABI_UNICOSMK)
8162 {
8163 emit_insn (gen_blockage ());
8164 mem = gen_rtx_MEM (DImode,
8165 plus_constant (hard_frame_pointer_rtx, -16));
8166 set_mem_alias_set (mem, alpha_sr_alias_set);
8167 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8168 }
8169 else if (fp_is_frame_pointer)
8170 {
8171 emit_insn (gen_blockage ());
8172 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8173 set_mem_alias_set (mem, alpha_sr_alias_set);
8174 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8175 }
8176 else if (TARGET_ABI_OPEN_VMS)
8177 {
8178 emit_insn (gen_blockage ());
8179 FRP (emit_move_insn (hard_frame_pointer_rtx,
8180 gen_rtx_REG (DImode, vms_save_fp_regno)));
8181 }
8182
8183 /* Restore the stack pointer. */
8184 emit_insn (gen_blockage ());
8185 if (sp_adj2 == const0_rtx)
8186 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8187 else
8188 FRP (emit_move_insn (stack_pointer_rtx,
8189 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8190 }
8191 else
8192 {
8193 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8194 {
8195 emit_insn (gen_blockage ());
8196 FRP (emit_move_insn (hard_frame_pointer_rtx,
8197 gen_rtx_REG (DImode, vms_save_fp_regno)));
8198 }
8199 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8200 {
8201 /* Decrement the frame pointer if the function does not have a
8202 frame. */
8203
8204 emit_insn (gen_blockage ());
8205 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8206 hard_frame_pointer_rtx, constm1_rtx)));
8207 }
8208 }
8209 }
8210 \f
8211 /* Output the rest of the textual info surrounding the epilogue. */
8212
8213 void
8214 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8215 {
8216 rtx insn;
8217
8218 /* We output a nop after noreturn calls at the very end of the function to
8219 ensure that the return address always remains in the caller's code range,
8220 as not doing so might confuse unwinding engines. */
8221 insn = get_last_insn ();
8222 if (!INSN_P (insn))
8223 insn = prev_active_insn (insn);
8224 if (GET_CODE (insn) == CALL_INSN)
8225 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8226
8227 #if TARGET_ABI_OPEN_VMS
8228 alpha_write_linkage (file, fnname, decl);
8229 #endif
8230
8231 /* End the function. */
8232 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8233 {
8234 fputs ("\t.end ", file);
8235 assemble_name (file, fnname);
8236 putc ('\n', file);
8237 }
8238 inside_function = FALSE;
8239
8240 /* Output jump tables and the static subroutine information block. */
8241 if (TARGET_ABI_UNICOSMK)
8242 {
8243 unicosmk_output_ssib (file, fnname);
8244 unicosmk_output_deferred_case_vectors (file);
8245 }
8246 }
8247
8248 #if TARGET_ABI_OSF
8249 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8250
8251 In order to avoid the hordes of differences between generated code
8252 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8253 lots of code loading up large constants, generate rtl and emit it
8254 instead of going straight to text.
8255
8256 Not sure why this idea hasn't been explored before... */
8257
8258 static void
8259 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8260 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8261 tree function)
8262 {
8263 HOST_WIDE_INT hi, lo;
8264 rtx this, insn, funexp;
8265
8266 /* We always require a valid GP. */
8267 emit_insn (gen_prologue_ldgp ());
8268 emit_note (NOTE_INSN_PROLOGUE_END);
8269
8270 /* Find the "this" pointer. If the function returns a structure,
8271 the structure return pointer is in $16. */
8272 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8273 this = gen_rtx_REG (Pmode, 17);
8274 else
8275 this = gen_rtx_REG (Pmode, 16);
8276
8277 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8278 entire constant for the add. */
8279 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8280 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
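/* A worked example of the ldah+lda split, with an illustrative delta
   only: for delta = 0x9000, lo = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000
   = -0x7000 and hi = ((0x9000 - (-0x7000)) ^ 0x80000000) - 0x80000000
   = 0x10000, so hi + lo == delta and the adjustment becomes an ldah of
   1 (adding 0x10000) followed by an lda of -0x7000.  */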
8281 if (hi + lo == delta)
8282 {
8283 if (hi)
8284 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8285 if (lo)
8286 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8287 }
8288 else
8289 {
8290 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8291 delta, -(delta < 0));
8292 emit_insn (gen_adddi3 (this, this, tmp));
8293 }
8294
8295 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8296 if (vcall_offset)
8297 {
8298 rtx tmp, tmp2;
8299
8300 tmp = gen_rtx_REG (Pmode, 0);
8301 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8302
8303 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8304 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8305 if (hi + lo == vcall_offset)
8306 {
8307 if (hi)
8308 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8309 }
8310 else
8311 {
8312 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8313 vcall_offset, -(vcall_offset < 0));
8314 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8315 lo = 0;
8316 }
8317 if (lo)
8318 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8319 else
8320 tmp2 = tmp;
8321 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8322
8323 emit_insn (gen_adddi3 (this, this, tmp));
8324 }
8325
8326 /* Generate a tail call to the target function. */
8327 if (! TREE_USED (function))
8328 {
8329 assemble_external (function);
8330 TREE_USED (function) = 1;
8331 }
8332 funexp = XEXP (DECL_RTL (function), 0);
8333 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8334 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8335 SIBLING_CALL_P (insn) = 1;
8336
8337 /* Run just enough of rest_of_compilation to get the insns emitted.
8338 There's not really enough bulk here to make other passes such as
8339 instruction scheduling worth while. Note that use_thunk calls
8340 assemble_start_function and assemble_end_function. */
8341 insn = get_insns ();
8342 insn_locators_alloc ();
8343 shorten_branches (insn);
8344 final_start_function (insn, file, 1);
8345 final (insn, file, 1);
8346 final_end_function ();
8347 free_after_compilation (cfun);
8348 }
8349 #endif /* TARGET_ABI_OSF */
8350 \f
8351 /* Debugging support. */
8352
8353 #include "gstab.h"
8354
8355 /* Count the number of sdb-related labels that are generated (to find block
8356 start and end boundaries). */
8357
8358 int sdb_label_count = 0;
8359
8360 /* Name of the file containing the current function. */
8361
8362 static const char *current_function_file = "";
8363
8364 /* Offsets to alpha virtual arg/local debugging pointers. */
8365
8366 long alpha_arg_offset;
8367 long alpha_auto_offset;
8368 \f
8369 /* Emit a new filename to a stream. */
8370
8371 void
8372 alpha_output_filename (FILE *stream, const char *name)
8373 {
8374 static int first_time = TRUE;
8375
8376 if (first_time)
8377 {
8378 first_time = FALSE;
8379 ++num_source_filenames;
8380 current_function_file = name;
8381 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8382 output_quoted_string (stream, name);
8383 fprintf (stream, "\n");
8384 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8385 fprintf (stream, "\t#@stabs\n");
8386 }
8387
8388 else if (write_symbols == DBX_DEBUG)
8389 /* dbxout.c will emit an appropriate .stabs directive. */
8390 return;
8391
8392 else if (name != current_function_file
8393 && strcmp (name, current_function_file) != 0)
8394 {
8395 if (inside_function && ! TARGET_GAS)
8396 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8397 else
8398 {
8399 ++num_source_filenames;
8400 current_function_file = name;
8401 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8402 }
8403
8404 output_quoted_string (stream, name);
8405 fprintf (stream, "\n");
8406 }
8407 }
8408 \f
8409 /* Structure to show the current status of registers and memory. */
8410
8411 struct shadow_summary
8412 {
8413 struct {
8414 unsigned int i : 31; /* Mask of int regs */
8415 unsigned int fp : 31; /* Mask of fp regs */
8416 unsigned int mem : 1; /* mem == imem | fpmem */
8417 } used, defd;
8418 };
8419
8420 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8421 to the summary structure. SET is nonzero if the insn is setting the
8422 object, otherwise zero. */
8423
8424 static void
8425 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8426 {
8427 const char *format_ptr;
8428 int i, j;
8429
8430 if (x == 0)
8431 return;
8432
8433 switch (GET_CODE (x))
8434 {
8435 /* ??? Note that this case would be incorrect if the Alpha had a
8436 ZERO_EXTRACT in SET_DEST. */
8437 case SET:
8438 summarize_insn (SET_SRC (x), sum, 0);
8439 summarize_insn (SET_DEST (x), sum, 1);
8440 break;
8441
8442 case CLOBBER:
8443 summarize_insn (XEXP (x, 0), sum, 1);
8444 break;
8445
8446 case USE:
8447 summarize_insn (XEXP (x, 0), sum, 0);
8448 break;
8449
8450 case ASM_OPERANDS:
8451 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8452 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8453 break;
8454
8455 case PARALLEL:
8456 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8457 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8458 break;
8459
8460 case SUBREG:
8461 summarize_insn (SUBREG_REG (x), sum, 0);
8462 break;
8463
8464 case REG:
8465 {
8466 int regno = REGNO (x);
8467 unsigned long mask = ((unsigned long) 1) << (regno % 32);
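/* Added note: hard registers 0-31 are the integer registers and 32-63
   the FP registers, so regno % 32 is the bit index within the matching
   31-bit mask; $31 and $f31 are the always-zero registers and are
   skipped just below.  */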
8468
8469 if (regno == 31 || regno == 63)
8470 break;
8471
8472 if (set)
8473 {
8474 if (regno < 32)
8475 sum->defd.i |= mask;
8476 else
8477 sum->defd.fp |= mask;
8478 }
8479 else
8480 {
8481 if (regno < 32)
8482 sum->used.i |= mask;
8483 else
8484 sum->used.fp |= mask;
8485 }
8486 }
8487 break;
8488
8489 case MEM:
8490 if (set)
8491 sum->defd.mem = 1;
8492 else
8493 sum->used.mem = 1;
8494
8495 /* Find the regs used in memory address computation: */
8496 summarize_insn (XEXP (x, 0), sum, 0);
8497 break;
8498
8499 case CONST_INT: case CONST_DOUBLE:
8500 case SYMBOL_REF: case LABEL_REF: case CONST:
8501 case SCRATCH: case ASM_INPUT:
8502 break;
8503
8504 /* Handle common unary and binary ops for efficiency. */
8505 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8506 case MOD: case UDIV: case UMOD: case AND: case IOR:
8507 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8508 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8509 case NE: case EQ: case GE: case GT: case LE:
8510 case LT: case GEU: case GTU: case LEU: case LTU:
8511 summarize_insn (XEXP (x, 0), sum, 0);
8512 summarize_insn (XEXP (x, 1), sum, 0);
8513 break;
8514
8515 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8516 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8517 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8518 case SQRT: case FFS:
8519 summarize_insn (XEXP (x, 0), sum, 0);
8520 break;
8521
8522 default:
8523 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8524 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8525 switch (format_ptr[i])
8526 {
8527 case 'e':
8528 summarize_insn (XEXP (x, i), sum, 0);
8529 break;
8530
8531 case 'E':
8532 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8533 summarize_insn (XVECEXP (x, i, j), sum, 0);
8534 break;
8535
8536 case 'i':
8537 break;
8538
8539 default:
8540 gcc_unreachable ();
8541 }
8542 }
8543 }
8544
8545 /* Ensure a sufficient number of `trapb' insns are in the code when
8546 the user requests code with a trap precision of functions or
8547 instructions.
8548
8549 In naive mode, when the user requests a trap-precision of
8550 "instruction", a trapb is needed after every instruction that may
8551 generate a trap. This ensures that the code is resumption safe but
8552 it is also slow.
8553
8554 When optimizations are turned on, we delay issuing a trapb as long
8555 as possible. In this context, a trap shadow is the sequence of
8556 instructions that starts with a (potentially) trap generating
8557 instruction and extends to the next trapb or call_pal instruction
8558 (but GCC never generates call_pal by itself). We can delay (and
8559 therefore sometimes omit) a trapb subject to the following
8560 conditions:
8561
8562 (a) On entry to the trap shadow, if any Alpha register or memory
8563 location contains a value that is used as an operand value by some
8564 instruction in the trap shadow (live on entry), then no instruction
8565 in the trap shadow may modify the register or memory location.
8566
8567 (b) Within the trap shadow, the computation of the base register
8568 for a memory load or store instruction may not involve using the
8569 result of an instruction that might generate an UNPREDICTABLE
8570 result.
8571
8572 (c) Within the trap shadow, no register may be used more than once
8573 as a destination register. (This is to make life easier for the
8574 trap-handler.)
8575
8576 (d) The trap shadow may not include any branch instructions. */
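/* A sketch of condition (c) with made-up instructions: after a
   potentially trapping "addt $f1,$f2,$f10", a later "mult $f3,$f4,$f10"
   in the same shadow would reuse $f10 as a destination, so the pass
   below would close the shadow by emitting a trapb between the two.  */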
8577
8578 static void
8579 alpha_handle_trap_shadows (void)
8580 {
8581 struct shadow_summary shadow;
8582 int trap_pending, exception_nesting;
8583 rtx i, n;
8584
8585 trap_pending = 0;
8586 exception_nesting = 0;
8587 shadow.used.i = 0;
8588 shadow.used.fp = 0;
8589 shadow.used.mem = 0;
8590 shadow.defd = shadow.used;
8591
8592 for (i = get_insns (); i ; i = NEXT_INSN (i))
8593 {
8594 if (GET_CODE (i) == NOTE)
8595 {
8596 switch (NOTE_KIND (i))
8597 {
8598 case NOTE_INSN_EH_REGION_BEG:
8599 exception_nesting++;
8600 if (trap_pending)
8601 goto close_shadow;
8602 break;
8603
8604 case NOTE_INSN_EH_REGION_END:
8605 exception_nesting--;
8606 if (trap_pending)
8607 goto close_shadow;
8608 break;
8609
8610 case NOTE_INSN_EPILOGUE_BEG:
8611 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8612 goto close_shadow;
8613 break;
8614 }
8615 }
8616 else if (trap_pending)
8617 {
8618 if (alpha_tp == ALPHA_TP_FUNC)
8619 {
8620 if (GET_CODE (i) == JUMP_INSN
8621 && GET_CODE (PATTERN (i)) == RETURN)
8622 goto close_shadow;
8623 }
8624 else if (alpha_tp == ALPHA_TP_INSN)
8625 {
8626 if (optimize > 0)
8627 {
8628 struct shadow_summary sum;
8629
8630 sum.used.i = 0;
8631 sum.used.fp = 0;
8632 sum.used.mem = 0;
8633 sum.defd = sum.used;
8634
8635 switch (GET_CODE (i))
8636 {
8637 case INSN:
8638 /* Annoyingly, get_attr_trap will die on these. */
8639 if (GET_CODE (PATTERN (i)) == USE
8640 || GET_CODE (PATTERN (i)) == CLOBBER)
8641 break;
8642
8643 summarize_insn (PATTERN (i), &sum, 0);
8644
8645 if ((sum.defd.i & shadow.defd.i)
8646 || (sum.defd.fp & shadow.defd.fp))
8647 {
8648 /* (c) would be violated */
8649 goto close_shadow;
8650 }
8651
8652 /* Combine shadow with summary of current insn: */
8653 shadow.used.i |= sum.used.i;
8654 shadow.used.fp |= sum.used.fp;
8655 shadow.used.mem |= sum.used.mem;
8656 shadow.defd.i |= sum.defd.i;
8657 shadow.defd.fp |= sum.defd.fp;
8658 shadow.defd.mem |= sum.defd.mem;
8659
8660 if ((sum.defd.i & shadow.used.i)
8661 || (sum.defd.fp & shadow.used.fp)
8662 || (sum.defd.mem & shadow.used.mem))
8663 {
8664 /* (a) would be violated (also takes care of (b)) */
8665 gcc_assert (get_attr_trap (i) != TRAP_YES
8666 || (!(sum.defd.i & sum.used.i)
8667 && !(sum.defd.fp & sum.used.fp)));
8668
8669 goto close_shadow;
8670 }
8671 break;
8672
8673 case JUMP_INSN:
8674 case CALL_INSN:
8675 case CODE_LABEL:
8676 goto close_shadow;
8677
8678 default:
8679 gcc_unreachable ();
8680 }
8681 }
8682 else
8683 {
8684 close_shadow:
8685 n = emit_insn_before (gen_trapb (), i);
8686 PUT_MODE (n, TImode);
8687 PUT_MODE (i, TImode);
8688 trap_pending = 0;
8689 shadow.used.i = 0;
8690 shadow.used.fp = 0;
8691 shadow.used.mem = 0;
8692 shadow.defd = shadow.used;
8693 }
8694 }
8695 }
8696
8697 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8698 && GET_CODE (i) == INSN
8699 && GET_CODE (PATTERN (i)) != USE
8700 && GET_CODE (PATTERN (i)) != CLOBBER
8701 && get_attr_trap (i) == TRAP_YES)
8702 {
8703 if (optimize && !trap_pending)
8704 summarize_insn (PATTERN (i), &shadow, 0);
8705 trap_pending = 1;
8706 }
8707 }
8708 }
8709 \f
8710 /* The Alpha can only issue a group of instructions simultaneously if they are
8711 suitably aligned. This is very processor-specific. */
8712 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8713 that are marked "fake". These instructions do not exist on that target,
8714 but it is possible to see these insns with deranged combinations of
8715 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8716 choose a result at random. */
8717
8718 enum alphaev4_pipe {
8719 EV4_STOP = 0,
8720 EV4_IB0 = 1,
8721 EV4_IB1 = 2,
8722 EV4_IBX = 4
8723 };
8724
8725 enum alphaev5_pipe {
8726 EV5_STOP = 0,
8727 EV5_NONE = 1,
8728 EV5_E01 = 2,
8729 EV5_E0 = 4,
8730 EV5_E1 = 8,
8731 EV5_FAM = 16,
8732 EV5_FA = 32,
8733 EV5_FM = 64
8734 };
8735
8736 static enum alphaev4_pipe
8737 alphaev4_insn_pipe (rtx insn)
8738 {
8739 if (recog_memoized (insn) < 0)
8740 return EV4_STOP;
8741 if (get_attr_length (insn) != 4)
8742 return EV4_STOP;
8743
8744 switch (get_attr_type (insn))
8745 {
8746 case TYPE_ILD:
8747 case TYPE_LDSYM:
8748 case TYPE_FLD:
8749 case TYPE_LD_L:
8750 return EV4_IBX;
8751
8752 case TYPE_IADD:
8753 case TYPE_ILOG:
8754 case TYPE_ICMOV:
8755 case TYPE_ICMP:
8756 case TYPE_FST:
8757 case TYPE_SHIFT:
8758 case TYPE_IMUL:
8759 case TYPE_FBR:
8760 case TYPE_MVI: /* fake */
8761 return EV4_IB0;
8762
8763 case TYPE_IST:
8764 case TYPE_MISC:
8765 case TYPE_IBR:
8766 case TYPE_JSR:
8767 case TYPE_CALLPAL:
8768 case TYPE_FCPYS:
8769 case TYPE_FCMOV:
8770 case TYPE_FADD:
8771 case TYPE_FDIV:
8772 case TYPE_FMUL:
8773 case TYPE_ST_C:
8774 case TYPE_MB:
8775 case TYPE_FSQRT: /* fake */
8776 case TYPE_FTOI: /* fake */
8777 case TYPE_ITOF: /* fake */
8778 return EV4_IB1;
8779
8780 default:
8781 gcc_unreachable ();
8782 }
8783 }
8784
8785 static enum alphaev5_pipe
8786 alphaev5_insn_pipe (rtx insn)
8787 {
8788 if (recog_memoized (insn) < 0)
8789 return EV5_STOP;
8790 if (get_attr_length (insn) != 4)
8791 return EV5_STOP;
8792
8793 switch (get_attr_type (insn))
8794 {
8795 case TYPE_ILD:
8796 case TYPE_FLD:
8797 case TYPE_LDSYM:
8798 case TYPE_IADD:
8799 case TYPE_ILOG:
8800 case TYPE_ICMOV:
8801 case TYPE_ICMP:
8802 return EV5_E01;
8803
8804 case TYPE_IST:
8805 case TYPE_FST:
8806 case TYPE_SHIFT:
8807 case TYPE_IMUL:
8808 case TYPE_MISC:
8809 case TYPE_MVI:
8810 case TYPE_LD_L:
8811 case TYPE_ST_C:
8812 case TYPE_MB:
8813 case TYPE_FTOI: /* fake */
8814 case TYPE_ITOF: /* fake */
8815 return EV5_E0;
8816
8817 case TYPE_IBR:
8818 case TYPE_JSR:
8819 case TYPE_CALLPAL:
8820 return EV5_E1;
8821
8822 case TYPE_FCPYS:
8823 return EV5_FAM;
8824
8825 case TYPE_FBR:
8826 case TYPE_FCMOV:
8827 case TYPE_FADD:
8828 case TYPE_FDIV:
8829 case TYPE_FSQRT: /* fake */
8830 return EV5_FA;
8831
8832 case TYPE_FMUL:
8833 return EV5_FM;
8834
8835 default:
8836 gcc_unreachable ();
8837 }
8838 }
8839
8840 /* IN_USE is a mask of the slots currently filled within the insn group.
8841 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8842 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8843
8844 LEN is, of course, the length of the group in bytes. */
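/* An illustrative case: if the first insn of a group is EV4_IBX (say a
   load), IN_USE becomes IB0|IBX; a following EV4_IB0 insn can still be
   slotted because the hardware may swap the load into IB1, which is why
   the EV4_IB0 case below only stops when IB1 is already taken too.  */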
8845
8846 static rtx
8847 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8848 {
8849 int len, in_use;
8850
8851 len = in_use = 0;
8852
8853 if (! INSN_P (insn)
8854 || GET_CODE (PATTERN (insn)) == CLOBBER
8855 || GET_CODE (PATTERN (insn)) == USE)
8856 goto next_and_done;
8857
8858 while (1)
8859 {
8860 enum alphaev4_pipe pipe;
8861
8862 pipe = alphaev4_insn_pipe (insn);
8863 switch (pipe)
8864 {
8865 case EV4_STOP:
8866 /* Force complex instructions to start new groups. */
8867 if (in_use)
8868 goto done;
8869
8870 /* If this is a completely unrecognized insn, it's an asm.
8871 We don't know how long it is, so record length as -1 to
8872 signal a needed realignment. */
8873 if (recog_memoized (insn) < 0)
8874 len = -1;
8875 else
8876 len = get_attr_length (insn);
8877 goto next_and_done;
8878
8879 case EV4_IBX:
8880 if (in_use & EV4_IB0)
8881 {
8882 if (in_use & EV4_IB1)
8883 goto done;
8884 in_use |= EV4_IB1;
8885 }
8886 else
8887 in_use |= EV4_IB0 | EV4_IBX;
8888 break;
8889
8890 case EV4_IB0:
8891 if (in_use & EV4_IB0)
8892 {
8893 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8894 goto done;
8895 in_use |= EV4_IB1;
8896 }
8897 in_use |= EV4_IB0;
8898 break;
8899
8900 case EV4_IB1:
8901 if (in_use & EV4_IB1)
8902 goto done;
8903 in_use |= EV4_IB1;
8904 break;
8905
8906 default:
8907 gcc_unreachable ();
8908 }
8909 len += 4;
8910
8911 /* Haifa doesn't do well scheduling branches. */
8912 if (GET_CODE (insn) == JUMP_INSN)
8913 goto next_and_done;
8914
8915 next:
8916 insn = next_nonnote_insn (insn);
8917
8918 if (!insn || ! INSN_P (insn))
8919 goto done;
8920
8921 /* Let Haifa tell us where it thinks insn group boundaries are. */
8922 if (GET_MODE (insn) == TImode)
8923 goto done;
8924
8925 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8926 goto next;
8927 }
8928
8929 next_and_done:
8930 insn = next_nonnote_insn (insn);
8931
8932 done:
8933 *plen = len;
8934 *pin_use = in_use;
8935 return insn;
8936 }
8937
8938 /* IN_USE is a mask of the slots currently filled within the insn group.
8939 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8940 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8941
8942 LEN is, of course, the length of the group in bytes. */
8943
8944 static rtx
8945 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8946 {
8947 int len, in_use;
8948
8949 len = in_use = 0;
8950
8951 if (! INSN_P (insn)
8952 || GET_CODE (PATTERN (insn)) == CLOBBER
8953 || GET_CODE (PATTERN (insn)) == USE)
8954 goto next_and_done;
8955
8956 while (1)
8957 {
8958 enum alphaev5_pipe pipe;
8959
8960 pipe = alphaev5_insn_pipe (insn);
8961 switch (pipe)
8962 {
8963 case EV5_STOP:
8964 /* Force complex instructions to start new groups. */
8965 if (in_use)
8966 goto done;
8967
8968 /* If this is a completely unrecognized insn, it's an asm.
8969 We don't know how long it is, so record length as -1 to
8970 signal a needed realignment. */
8971 if (recog_memoized (insn) < 0)
8972 len = -1;
8973 else
8974 len = get_attr_length (insn);
8975 goto next_and_done;
8976
8977 /* ??? In most of the places below, we would like to assert that this never
8978 happens, as it would indicate an error either in Haifa, or
8979 in the scheduling description. Unfortunately, Haifa never
8980 schedules the last instruction of the BB, so we don't have
8981 an accurate TI bit to go off of. */
8982 case EV5_E01:
8983 if (in_use & EV5_E0)
8984 {
8985 if (in_use & EV5_E1)
8986 goto done;
8987 in_use |= EV5_E1;
8988 }
8989 else
8990 in_use |= EV5_E0 | EV5_E01;
8991 break;
8992
8993 case EV5_E0:
8994 if (in_use & EV5_E0)
8995 {
8996 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8997 goto done;
8998 in_use |= EV5_E1;
8999 }
9000 in_use |= EV5_E0;
9001 break;
9002
9003 case EV5_E1:
9004 if (in_use & EV5_E1)
9005 goto done;
9006 in_use |= EV5_E1;
9007 break;
9008
9009 case EV5_FAM:
9010 if (in_use & EV5_FA)
9011 {
9012 if (in_use & EV5_FM)
9013 goto done;
9014 in_use |= EV5_FM;
9015 }
9016 else
9017 in_use |= EV5_FA | EV5_FAM;
9018 break;
9019
9020 case EV5_FA:
9021 if (in_use & EV5_FA)
9022 goto done;
9023 in_use |= EV5_FA;
9024 break;
9025
9026 case EV5_FM:
9027 if (in_use & EV5_FM)
9028 goto done;
9029 in_use |= EV5_FM;
9030 break;
9031
9032 case EV5_NONE:
9033 break;
9034
9035 default:
9036 gcc_unreachable ();
9037 }
9038 len += 4;
9039
9040 /* Haifa doesn't do well scheduling branches. */
9041 /* ??? If this is predicted not-taken, slotting continues, except
9042 that no more IBR, FBR, or JSR insns may be slotted. */
9043 if (GET_CODE (insn) == JUMP_INSN)
9044 goto next_and_done;
9045
9046 next:
9047 insn = next_nonnote_insn (insn);
9048
9049 if (!insn || ! INSN_P (insn))
9050 goto done;
9051
9052 /* Let Haifa tell us where it thinks insn group boundaries are. */
9053 if (GET_MODE (insn) == TImode)
9054 goto done;
9055
9056 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9057 goto next;
9058 }
9059
9060 next_and_done:
9061 insn = next_nonnote_insn (insn);
9062
9063 done:
9064 *plen = len;
9065 *pin_use = in_use;
9066 return insn;
9067 }
9068
9069 static rtx
9070 alphaev4_next_nop (int *pin_use)
9071 {
9072 int in_use = *pin_use;
9073 rtx nop;
9074
9075 if (!(in_use & EV4_IB0))
9076 {
9077 in_use |= EV4_IB0;
9078 nop = gen_nop ();
9079 }
9080 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9081 {
9082 in_use |= EV4_IB1;
9083 nop = gen_nop ();
9084 }
9085 else if (TARGET_FP && !(in_use & EV4_IB1))
9086 {
9087 in_use |= EV4_IB1;
9088 nop = gen_fnop ();
9089 }
9090 else
9091 nop = gen_unop ();
9092
9093 *pin_use = in_use;
9094 return nop;
9095 }
9096
9097 static rtx
9098 alphaev5_next_nop (int *pin_use)
9099 {
9100 int in_use = *pin_use;
9101 rtx nop;
9102
9103 if (!(in_use & EV5_E1))
9104 {
9105 in_use |= EV5_E1;
9106 nop = gen_nop ();
9107 }
9108 else if (TARGET_FP && !(in_use & EV5_FA))
9109 {
9110 in_use |= EV5_FA;
9111 nop = gen_fnop ();
9112 }
9113 else if (TARGET_FP && !(in_use & EV5_FM))
9114 {
9115 in_use |= EV5_FM;
9116 nop = gen_fnop ();
9117 }
9118 else
9119 nop = gen_unop ();
9120
9121 *pin_use = in_use;
9122 return nop;
9123 }
9124
9125 /* The instruction group alignment main loop. */
9126
9127 static void
9128 alpha_align_insns (unsigned int max_align,
9129 rtx (*next_group) (rtx, int *, int *),
9130 rtx (*next_nop) (int *))
9131 {
9132 /* ALIGN is the known alignment for the insn group. */
9133 unsigned int align;
9134 /* OFS is the offset of the current insn in the insn group. */
9135 int ofs;
9136 int prev_in_use, in_use, len, ldgp;
9137 rtx i, next;
9138
9139 /* Let shorten_branches take care of assigning alignments to code labels. */
9140 shorten_branches (get_insns ());
9141
9142 if (align_functions < 4)
9143 align = 4;
9144 else if ((unsigned int) align_functions < max_align)
9145 align = align_functions;
9146 else
9147 align = max_align;
9148
9149 ofs = prev_in_use = 0;
9150 i = get_insns ();
9151 if (GET_CODE (i) == NOTE)
9152 i = next_nonnote_insn (i);
9153
9154 ldgp = alpha_function_needs_gp ? 8 : 0;
9155
9156 while (i)
9157 {
9158 next = (*next_group) (i, &in_use, &len);
9159
9160 /* When we see a label, resync alignment etc. */
9161 if (GET_CODE (i) == CODE_LABEL)
9162 {
9163 unsigned int new_align = 1 << label_to_alignment (i);
9164
9165 if (new_align >= align)
9166 {
9167 align = new_align < max_align ? new_align : max_align;
9168 ofs = 0;
9169 }
9170
9171 else if (ofs & (new_align-1))
9172 ofs = (ofs | (new_align-1)) + 1;
9173 gcc_assert (!len);
9174 }
9175
9176 /* Handle complex instructions specially. */
9177 else if (in_use == 0)
9178 {
9179 /* Asms will have length < 0. This is a signal that we have
9180 lost alignment knowledge. Assume, however, that the asm
9181 will not mis-align instructions. */
9182 if (len < 0)
9183 {
9184 ofs = 0;
9185 align = 4;
9186 len = 0;
9187 }
9188 }
9189
9190 /* If the known alignment is smaller than the recognized insn group,
9191 realign the output. */
9192 else if ((int) align < len)
9193 {
9194 unsigned int new_log_align = len > 8 ? 4 : 3;
9195 rtx prev, where;
9196
9197 where = prev = prev_nonnote_insn (i);
9198 if (!where || GET_CODE (where) != CODE_LABEL)
9199 where = i;
9200
9201 /* Can't realign between a call and its gp reload. */
9202 if (! (TARGET_EXPLICIT_RELOCS
9203 && prev && GET_CODE (prev) == CALL_INSN))
9204 {
9205 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9206 align = 1 << new_log_align;
9207 ofs = 0;
9208 }
9209 }
9210
9211 /* We may not insert padding inside the initial ldgp sequence. */
9212 else if (ldgp > 0)
9213 ldgp -= len;
9214
9215 /* If the group won't fit in the same INT16 as the previous,
9216 we need to add padding to keep the group together. Rather
9217 than simply leaving the insn filling to the assembler, we
9218 can make use of the knowledge of what sorts of instructions
9219 were issued in the previous group to make sure that all of
9220 the added nops are really free. */
9221 else if (ofs + len > (int) align)
9222 {
9223 int nop_count = (align - ofs) / 4;
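/* A worked example with made-up numbers: with align == 16, ofs == 8 and
   len == 12, the group would straddle the 16-byte boundary, so
   nop_count == (16 - 8) / 4 == 2 free nops are emitted before WHERE to
   push the group onto the next boundary.  */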
9224 rtx where;
9225
9226 /* Insert nops before labels, branches, and calls to truly merge
9227 the execution of the nops with the previous instruction group. */
9228 where = prev_nonnote_insn (i);
9229 if (where)
9230 {
9231 if (GET_CODE (where) == CODE_LABEL)
9232 {
9233 rtx where2 = prev_nonnote_insn (where);
9234 if (where2 && GET_CODE (where2) == JUMP_INSN)
9235 where = where2;
9236 }
9237 else if (GET_CODE (where) == INSN)
9238 where = i;
9239 }
9240 else
9241 where = i;
9242
9243 do
9244 emit_insn_before ((*next_nop)(&prev_in_use), where);
9245 while (--nop_count);
9246 ofs = 0;
9247 }
9248
9249 ofs = (ofs + len) & (align - 1);
9250 prev_in_use = in_use;
9251 i = next;
9252 }
9253 }
9254 \f
9255 /* Machine dependent reorg pass. */
9256
9257 static void
9258 alpha_reorg (void)
9259 {
9260 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9261 alpha_handle_trap_shadows ();
9262
9263 /* Due to the number of extra trapb insns, don't bother fixing up
9264 alignment when trap precision is instruction. Moreover, we can
9265 only do our job when sched2 is run. */
9266 if (optimize && !optimize_size
9267 && alpha_tp != ALPHA_TP_INSN
9268 && flag_schedule_insns_after_reload)
9269 {
9270 if (alpha_tune == PROCESSOR_EV4)
9271 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9272 else if (alpha_tune == PROCESSOR_EV5)
9273 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9274 }
9275 }
9276 \f
9277 #if !TARGET_ABI_UNICOSMK
9278
9279 #ifdef HAVE_STAMP_H
9280 #include <stamp.h>
9281 #endif
9282
9283 static void
9284 alpha_file_start (void)
9285 {
9286 #ifdef OBJECT_FORMAT_ELF
9287 /* If emitting dwarf2 debug information, we cannot generate a .file
9288 directive to start the file, as it will conflict with dwarf2out
9289 file numbers. So it's only useful when emitting mdebug output. */
9290 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9291 #endif
9292
9293 default_file_start ();
9294 #ifdef MS_STAMP
9295 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9296 #endif
9297
9298 fputs ("\t.set noreorder\n", asm_out_file);
9299 fputs ("\t.set volatile\n", asm_out_file);
9300 if (!TARGET_ABI_OPEN_VMS)
9301 fputs ("\t.set noat\n", asm_out_file);
9302 if (TARGET_EXPLICIT_RELOCS)
9303 fputs ("\t.set nomacro\n", asm_out_file);
9304 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9305 {
9306 const char *arch;
9307
9308 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9309 arch = "ev6";
9310 else if (TARGET_MAX)
9311 arch = "pca56";
9312 else if (TARGET_BWX)
9313 arch = "ev56";
9314 else if (alpha_cpu == PROCESSOR_EV5)
9315 arch = "ev5";
9316 else
9317 arch = "ev4";
9318
9319 fprintf (asm_out_file, "\t.arch %s\n", arch);
9320 }
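/* For instance (a hedged example, not from the sources): compiling with
   -mbwx for an otherwise ev5 target selects "ev56" above, so the
   assembler accepts the BWX byte/word memory insns the compiler may
   emit.  */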
9321 }
9322 #endif
9323
9324 #ifdef OBJECT_FORMAT_ELF
9325 /* Since we don't have a .dynbss section, we should not allow global
9326 relocations in the .rodata section. */
9327
9328 static int
9329 alpha_elf_reloc_rw_mask (void)
9330 {
9331 return flag_pic ? 3 : 2;
9332 }
9333
9334 /* Return a section for X. The only special thing we do here is to
9335 honor small data. */
9336
9337 static section *
9338 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9339 unsigned HOST_WIDE_INT align)
9340 {
9341 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9342 /* ??? Consider using mergeable sdata sections. */
9343 return sdata_section;
9344 else
9345 return default_elf_select_rtx_section (mode, x, align);
9346 }
9347
9348 static unsigned int
9349 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9350 {
9351 unsigned int flags = 0;
9352
9353 if (strcmp (name, ".sdata") == 0
9354 || strncmp (name, ".sdata.", 7) == 0
9355 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9356 || strcmp (name, ".sbss") == 0
9357 || strncmp (name, ".sbss.", 6) == 0
9358 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9359 flags = SECTION_SMALL;
9360
9361 flags |= default_section_type_flags (decl, name, reloc);
9362 return flags;
9363 }
9364 #endif /* OBJECT_FORMAT_ELF */
9365 \f
9366 /* Structure to collect function names for final output in link section. */
9367 /* Note that items marked with GTY can't be ifdef'ed out. */
9368
9369 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9370 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9371
9372 struct alpha_links GTY(())
9373 {
9374 int num;
9375 rtx linkage;
9376 enum links_kind lkind;
9377 enum reloc_kind rkind;
9378 };
9379
9380 struct alpha_funcs GTY(())
9381 {
9382 int num;
9383 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9384 links;
9385 };
9386
9387 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9388 splay_tree alpha_links_tree;
9389 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9390 splay_tree alpha_funcs_tree;
9391
9392 static GTY(()) int alpha_funcs_num;
9393
9394 #if TARGET_ABI_OPEN_VMS
9395
9396 /* Return the VMS argument type corresponding to MODE. */
9397
9398 enum avms_arg_type
9399 alpha_arg_type (enum machine_mode mode)
9400 {
9401 switch (mode)
9402 {
9403 case SFmode:
9404 return TARGET_FLOAT_VAX ? FF : FS;
9405 case DFmode:
9406 return TARGET_FLOAT_VAX ? FD : FT;
9407 default:
9408 return I64;
9409 }
9410 }
9411
9412 /* Return an rtx for an integer representing the VMS Argument Information
9413 register value. */
9414
9415 rtx
9416 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9417 {
9418 unsigned HOST_WIDE_INT regval = cum.num_args;
9419 int i;
9420
9421 for (i = 0; i < 6; i++)
9422 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9423
9424 return GEN_INT (regval);
9425 }
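/* Layout implied by the code above, for illustration: the low bits of
   the AI value hold the argument count, and each of the first six
   argument type codes occupies three bits starting at bit 8, i.e. bits
   8-10 describe argument 0, bits 11-13 argument 1, and so on.  */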
9426 \f
9427 /* Make (or fake) .linkage entry for function call.
9428
9429 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9430
9431 Return a SYMBOL_REF rtx for the linkage. */
9432
9433 rtx
9434 alpha_need_linkage (const char *name, int is_local)
9435 {
9436 splay_tree_node node;
9437 struct alpha_links *al;
9438
9439 if (name[0] == '*')
9440 name++;
9441
9442 if (is_local)
9443 {
9444 struct alpha_funcs *cfaf;
9445
9446 if (!alpha_funcs_tree)
9447 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9448 splay_tree_compare_pointers);
9449
9450 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9451
9452 cfaf->links = 0;
9453 cfaf->num = ++alpha_funcs_num;
9454
9455 splay_tree_insert (alpha_funcs_tree,
9456 (splay_tree_key) current_function_decl,
9457 (splay_tree_value) cfaf);
9458 }
9459
9460 if (alpha_links_tree)
9461 {
9462 /* Is this name already defined? */
9463
9464 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9465 if (node)
9466 {
9467 al = (struct alpha_links *) node->value;
9468 if (is_local)
9469 {
9470 /* Defined here but external assumed. */
9471 if (al->lkind == KIND_EXTERN)
9472 al->lkind = KIND_LOCAL;
9473 }
9474 else
9475 {
9476 /* Used here but unused assumed. */
9477 if (al->lkind == KIND_UNUSED)
9478 al->lkind = KIND_LOCAL;
9479 }
9480 return al->linkage;
9481 }
9482 }
9483 else
9484 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9485
9486 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9487 name = ggc_strdup (name);
9488
9489 /* Assume external if no definition. */
9490 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9491
9492 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9493 get_identifier (name);
9494
9495 /* Construct a SYMBOL_REF for us to call. */
9496 {
9497 size_t name_len = strlen (name);
9498 char *linksym = XALLOCAVEC (char, name_len + 6);
9499 linksym[0] = '$';
9500 memcpy (linksym + 1, name, name_len);
9501 memcpy (linksym + 1 + name_len, "..lk", 5);
9502 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9503 ggc_alloc_string (linksym, name_len + 5));
9504 }
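/* For example, with a hypothetical name: a reference to "foo" yields
   the linkage symbol "$foo..lk" from the code above.  */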
9505
9506 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9507 (splay_tree_value) al);
9508
9509 return al->linkage;
9510 }
9511
9512 rtx
9513 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9514 {
9515 splay_tree_node cfunnode;
9516 struct alpha_funcs *cfaf;
9517 struct alpha_links *al;
9518 const char *name = XSTR (linkage, 0);
9519
9520 cfaf = (struct alpha_funcs *) 0;
9521 al = (struct alpha_links *) 0;
9522
9523 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9524 cfaf = (struct alpha_funcs *) cfunnode->value;
9525
9526 if (cfaf->links)
9527 {
9528 splay_tree_node lnode;
9529
9530 /* Is this name already defined? */
9531
9532 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9533 if (lnode)
9534 al = (struct alpha_links *) lnode->value;
9535 }
9536 else
9537 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9538
9539 if (!al)
9540 {
9541 size_t name_len;
9542 size_t buflen;
9543 char buf [512];
9544 char *linksym;
9545 splay_tree_node node = 0;
9546 struct alpha_links *anl;
9547
9548 if (name[0] == '*')
9549 name++;
9550
9551 name_len = strlen (name);
9552
9553 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9554 al->num = cfaf->num;
9555
9556 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9557 if (node)
9558 {
9559 anl = (struct alpha_links *) node->value;
9560 al->lkind = anl->lkind;
9561 }
9562
9563 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9564 buflen = strlen (buf);
9565 linksym = XALLOCAVEC (char, buflen + 1);
9566 memcpy (linksym, buf, buflen + 1);
9567
9568 al->linkage = gen_rtx_SYMBOL_REF
9569 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9570
9571 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9572 (splay_tree_value) al);
9573 }
9574
9575 if (rflag)
9576 al->rkind = KIND_CODEADDR;
9577 else
9578 al->rkind = KIND_LINKAGE;
9579
9580 if (lflag)
9581 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9582 else
9583 return al->linkage;
9584 }
9585
9586 static int
9587 alpha_write_one_linkage (splay_tree_node node, void *data)
9588 {
9589 const char *const name = (const char *) node->key;
9590 struct alpha_links *link = (struct alpha_links *) node->value;
9591 FILE *stream = (FILE *) data;
9592
9593 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9594 if (link->rkind == KIND_CODEADDR)
9595 {
9596 if (link->lkind == KIND_LOCAL)
9597 {
9598 /* Local and used */
9599 fprintf (stream, "\t.quad %s..en\n", name);
9600 }
9601 else
9602 {
9603 /* External and used, request code address. */
9604 fprintf (stream, "\t.code_address %s\n", name);
9605 }
9606 }
9607 else
9608 {
9609 if (link->lkind == KIND_LOCAL)
9610 {
9611 /* Local and used, build linkage pair. */
9612 fprintf (stream, "\t.quad %s..en\n", name);
9613 fprintf (stream, "\t.quad %s\n", name);
9614 }
9615 else
9616 {
9617 /* External and used, request linkage pair. */
9618 fprintf (stream, "\t.linkage %s\n", name);
9619 }
9620 }
9621
9622 return 0;
9623 }
9624
9625 static void
9626 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9627 {
9628 splay_tree_node node;
9629 struct alpha_funcs *func;
9630
9631 fprintf (stream, "\t.link\n");
9632 fprintf (stream, "\t.align 3\n");
9633 in_section = NULL;
9634
9635 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9636 func = (struct alpha_funcs *) node->value;
9637
9638 fputs ("\t.name ", stream);
9639 assemble_name (stream, funname);
9640 fputs ("..na\n", stream);
9641 ASM_OUTPUT_LABEL (stream, funname);
9642 fprintf (stream, "\t.pdesc ");
9643 assemble_name (stream, funname);
9644 fprintf (stream, "..en,%s\n",
9645 alpha_procedure_type == PT_STACK ? "stack"
9646 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9647
9648 if (func->links)
9649 {
9650 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9651 /* splay_tree_delete (func->links); */
9652 }
9653 }
9654
9655 /* Given a decl, a section name, and whether the decl initializer
9656 has relocs, choose attributes for the section. */
9657
9658 #define SECTION_VMS_OVERLAY SECTION_FORGET
9659 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9660 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9661
9662 static unsigned int
9663 vms_section_type_flags (tree decl, const char *name, int reloc)
9664 {
9665 unsigned int flags = default_section_type_flags (decl, name, reloc);
9666
9667 if (decl && DECL_ATTRIBUTES (decl)
9668 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9669 flags |= SECTION_VMS_OVERLAY;
9670 if (decl && DECL_ATTRIBUTES (decl)
9671 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9672 flags |= SECTION_VMS_GLOBAL;
9673 if (decl && DECL_ATTRIBUTES (decl)
9674 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9675 flags |= SECTION_VMS_INITIALIZE;
9676
9677 return flags;
9678 }
9679
9680 /* Switch to an arbitrary section NAME with attributes as specified
9681 by FLAGS. ALIGN specifies any known alignment requirements for
9682 the section; 0 if the default should be used. */
9683
9684 static void
9685 vms_asm_named_section (const char *name, unsigned int flags,
9686 tree decl ATTRIBUTE_UNUSED)
9687 {
9688 fputc ('\n', asm_out_file);
9689 fprintf (asm_out_file, ".section\t%s", name);
9690
9691 if (flags & SECTION_VMS_OVERLAY)
9692 fprintf (asm_out_file, ",OVR");
9693 if (flags & SECTION_VMS_GLOBAL)
9694 fprintf (asm_out_file, ",GBL");
9695 if (flags & SECTION_VMS_INITIALIZE)
9696 fprintf (asm_out_file, ",NOMOD");
9697 if (flags & SECTION_DEBUG)
9698 fprintf (asm_out_file, ",NOWRT");
9699
9700 fputc ('\n', asm_out_file);
9701 }
9702
9703 /* Record an element in the table of global constructors. SYMBOL is
9704 a SYMBOL_REF of the function to be called; PRIORITY is a number
9705 between 0 and MAX_INIT_PRIORITY.
9706
9707 Differs from default_ctors_section_asm_out_constructor in that the
9708 width of the .ctors entry is always 64 bits, rather than the 32 bits
9709 used by a normal pointer. */
9710
9711 static void
9712 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9713 {
9714 switch_to_section (ctors_section);
9715 assemble_align (BITS_PER_WORD);
9716 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9717 }
9718
9719 static void
9720 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9721 {
9722 switch_to_section (dtors_section);
9723 assemble_align (BITS_PER_WORD);
9724 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9725 }
9726 #else
9727
9728 rtx
9729 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9730 int is_local ATTRIBUTE_UNUSED)
9731 {
9732 return NULL_RTX;
9733 }
9734
9735 rtx
9736 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9737 tree cfundecl ATTRIBUTE_UNUSED,
9738 int lflag ATTRIBUTE_UNUSED,
9739 int rflag ATTRIBUTE_UNUSED)
9740 {
9741 return NULL_RTX;
9742 }
9743
9744 #endif /* TARGET_ABI_OPEN_VMS */
9745 \f
9746 #if TARGET_ABI_UNICOSMK
9747
9748 /* This evaluates to true if we do not know how to pass TYPE solely in
9749 registers. This is the case for all arguments that do not fit in two
9750 registers. */
9751
9752 static bool
9753 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9754 {
9755 if (type == NULL)
9756 return false;
9757
9758 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9759 return true;
9760 if (TREE_ADDRESSABLE (type))
9761 return true;
9762
9763 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9764 }
9765
9766 /* Define the offset between two registers, one to be eliminated, and the
9767 other its replacement, at the start of a routine. */
9768
9769 int
9770 unicosmk_initial_elimination_offset (int from, int to)
9771 {
9772 int fixed_size;
9773
9774 fixed_size = alpha_sa_size();
9775 if (fixed_size != 0)
9776 fixed_size += 48;
9777
9778 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9779 return -fixed_size;
9780 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9781 return 0;
9782 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9783 return (ALPHA_ROUND (crtl->outgoing_args_size)
9784 + ALPHA_ROUND (get_frame_size()));
9785 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9786 return (ALPHA_ROUND (fixed_size)
9787 + ALPHA_ROUND (get_frame_size()
9788 + crtl->outgoing_args_size));
9789 else
9790 gcc_unreachable ();
9791 }
9792
9793 /* Output the module name for .ident and .end directives. We have to strip
9794 directories and make sure that the module name starts with a letter
9795 or '$'. */
9796
9797 static void
9798 unicosmk_output_module_name (FILE *file)
9799 {
9800 const char *name = lbasename (main_input_filename);
9801 unsigned len = strlen (name);
9802 char *clean_name = alloca (len + 2);
9803 char *ptr = clean_name;
9804
9805 /* CAM only accepts module names that start with a letter or '$'. We
9806 prefix the module name with a '$' if necessary. */
9807
9808 if (!ISALPHA (*name))
9809 *ptr++ = '$';
9810 memcpy (ptr, name, len + 1);
9811 clean_symbol_name (clean_name);
9812 fputs (clean_name, file);
9813 }
9814
9815 /* Output the definition of a common variable. */
9816
9817 void
9818 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9819 {
9820 tree name_tree;
9821 printf ("T3E__: common %s\n", name);
9822
9823 in_section = NULL;
9824 fputs("\t.endp\n\n\t.psect ", file);
9825 assemble_name(file, name);
9826 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9827 fprintf(file, "\t.byte\t0:%d\n", size);
9828
9829 /* Mark the symbol as defined in this module. */
9830 name_tree = get_identifier (name);
9831 TREE_ASM_WRITTEN (name_tree) = 1;
9832 }
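/* For example, a common variable "counter" (a made-up name) of 8 bytes with
   64-bit alignment would be announced roughly as

       .endp

       .psect counter,3,common
       .byte	0:8

   since floor_log2 (64 / BITS_PER_UNIT) == 3.  */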
9833
9834 #define SECTION_PUBLIC SECTION_MACH_DEP
9835 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9836 static int current_section_align;
9837
9838 /* A get_unnamed_section callback for switching to the text section. */
9839
9840 static void
9841 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9842 {
9843 static int count = 0;
9844 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9845 }
9846
9847 /* A get_unnamed_section callback for switching to the data section. */
9848
9849 static void
9850 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9851 {
9852 static int count = 1;
9853 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9854 }
9855
9856 /* Implement TARGET_ASM_INIT_SECTIONS.
9857
9858 The Cray assembler is really weird with respect to sections. It has only
9859 named sections and you can't reopen a section once it has been closed.
9860 This means that we have to generate unique names whenever we want to
9861 reenter the text or the data section. */
9862
9863 static void
9864 unicosmk_init_sections (void)
9865 {
9866 text_section = get_unnamed_section (SECTION_CODE,
9867 unicosmk_output_text_section_asm_op,
9868 NULL);
9869 data_section = get_unnamed_section (SECTION_WRITE,
9870 unicosmk_output_data_section_asm_op,
9871 NULL);
9872 readonly_data_section = data_section;
9873 }
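/* With the callbacks above, every re-entry into the text section emits

       .endp

       .psect	gcc@text___N,code

   with N incremented each time (and likewise gcc@data___N for data), so
   CAM never sees the same psect name reopened.  */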
9874
9875 static unsigned int
9876 unicosmk_section_type_flags (tree decl, const char *name,
9877 int reloc ATTRIBUTE_UNUSED)
9878 {
9879 unsigned int flags = default_section_type_flags (decl, name, reloc);
9880
9881 if (!decl)
9882 return flags;
9883
9884 if (TREE_CODE (decl) == FUNCTION_DECL)
9885 {
9886 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9887 if (align_functions_log > current_section_align)
9888 current_section_align = align_functions_log;
9889
9890 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9891 flags |= SECTION_MAIN;
9892 }
9893 else
9894 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9895
9896 if (TREE_PUBLIC (decl))
9897 flags |= SECTION_PUBLIC;
9898
9899 return flags;
9900 }
9901
9902 /* Generate a section name for decl and associate it with the
9903 declaration. */
9904
9905 static void
9906 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9907 {
9908 const char *name;
9909 int len;
9910
9911 gcc_assert (decl);
9912
9913 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9914 name = default_strip_name_encoding (name);
9915 len = strlen (name);
9916
9917 if (TREE_CODE (decl) == FUNCTION_DECL)
9918 {
9919 char *string;
9920
9921 /* It is essential that we prefix the section name here because
9922 otherwise the section names generated for constructors and
9923 destructors confuse collect2. */
9924
9925 string = alloca (len + 6);
9926 sprintf (string, "code@%s", name);
9927 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9928 }
9929 else if (TREE_PUBLIC (decl))
9930 DECL_SECTION_NAME (decl) = build_string (len, name);
9931 else
9932 {
9933 char *string;
9934
9935 string = alloca (len + 6);
9936 sprintf (string, "data@%s", name);
9937 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9938 }
9939 }
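/* So, for instance, a function "frob" lands in section "code@frob", a
   public variable "counter" in section "counter", and a static variable
   "counter" in section "data@counter" (names here are illustrative).  */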
9940
9941 /* Switch to an arbitrary section NAME with attributes as specified
9942 by FLAGS. ALIGN specifies any known alignment requirements for
9943 the section; 0 if the default should be used. */
9944
9945 static void
9946 unicosmk_asm_named_section (const char *name, unsigned int flags,
9947 tree decl ATTRIBUTE_UNUSED)
9948 {
9949 const char *kind;
9950
9951 /* Close the previous section. */
9952
9953 fputs ("\t.endp\n\n", asm_out_file);
9954
9955 /* Find out what kind of section we are opening. */
9956
9957 if (flags & SECTION_MAIN)
9958 fputs ("\t.start\tmain\n", asm_out_file);
9959
9960 if (flags & SECTION_CODE)
9961 kind = "code";
9962 else if (flags & SECTION_PUBLIC)
9963 kind = "common";
9964 else
9965 kind = "data";
9966
9967 if (current_section_align != 0)
9968 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9969 current_section_align, kind);
9970 else
9971 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9972 }
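/* For instance, switching to the section of "main" with current_section_align
   set to, say, 4 would emit roughly

       .endp

       .start	main
       .psect	code@main,4,code

   while a section with no recorded alignment gets just ".psect name,kind".  */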
9973
9974 static void
9975 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9976 {
9977 if (DECL_P (decl)
9978 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9979 unicosmk_unique_section (decl, 0);
9980 }
9981
9982 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9983 in code sections because .align fills unused space with zeroes. */
9984
9985 void
9986 unicosmk_output_align (FILE *file, int align)
9987 {
9988 if (inside_function)
9989 fprintf (file, "\tgcc@code@align\t%d\n", align);
9990 else
9991 fprintf (file, "\t.align\t%d\n", align);
9992 }
9993
9994 /* Add a case vector to the current function's list of deferred case
9995 vectors. Case vectors have to be put into a separate section because CAM
9996 does not allow data definitions in code sections. */
9997
9998 void
9999 unicosmk_defer_case_vector (rtx lab, rtx vec)
10000 {
10001 struct machine_function *machine = cfun->machine;
10002
10003 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10004 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10005 machine->addr_list);
10006 }
10007
10008 /* Output a case vector. */
10009
10010 static void
10011 unicosmk_output_addr_vec (FILE *file, rtx vec)
10012 {
10013 rtx lab = XEXP (vec, 0);
10014 rtx body = XEXP (vec, 1);
10015 int vlen = XVECLEN (body, 0);
10016 int idx;
10017
10018 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10019
10020 for (idx = 0; idx < vlen; idx++)
10021 {
10022 ASM_OUTPUT_ADDR_VEC_ELT
10023 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10024 }
10025 }
10026
10027 /* Output current function's deferred case vectors. */
10028
10029 static void
10030 unicosmk_output_deferred_case_vectors (FILE *file)
10031 {
10032 struct machine_function *machine = cfun->machine;
10033 rtx t;
10034
10035 if (machine->addr_list == NULL_RTX)
10036 return;
10037
10038 switch_to_section (data_section);
10039 for (t = machine->addr_list; t; t = XEXP (t, 1))
10040 unicosmk_output_addr_vec (file, XEXP (t, 0));
10041 }
10042
10043 /* Generate the name of the SSIB section for the current function. */
10044
10045 #define SSIB_PREFIX "__SSIB_"
10046 #define SSIB_PREFIX_LEN 7
10047
10048 static const char *
10049 unicosmk_ssib_name (void)
10050 {
10051 /* This is ok since CAM won't be able to deal with names longer than that
10052 anyway. */
10053
10054 static char name[256];
10055
10056 rtx x;
10057 const char *fnname;
10058 int len;
10059
10060 x = DECL_RTL (cfun->decl);
10061 gcc_assert (GET_CODE (x) == MEM);
10062 x = XEXP (x, 0);
10063 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10064 fnname = XSTR (x, 0);
10065
10066 len = strlen (fnname);
10067 if (len + SSIB_PREFIX_LEN > 255)
10068 len = 255 - SSIB_PREFIX_LEN;
10069
10070 strcpy (name, SSIB_PREFIX);
10071 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10072 name[len + SSIB_PREFIX_LEN] = 0;
10073
10074 return name;
10075 }
10076
10077 /* Set up the dynamic subprogram information block (DSIB) and update the
10078 frame pointer register ($15) for subroutines which have a frame. If the
10079 subroutine doesn't have a frame, simply increment $15. */
10080
10081 static void
10082 unicosmk_gen_dsib (unsigned long *imaskP)
10083 {
10084 if (alpha_procedure_type == PT_STACK)
10085 {
10086 const char *ssib_name;
10087 rtx mem;
10088
10089 /* Allocate 64 bytes for the DSIB. */
10090
10091 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10092 GEN_INT (-64))));
10093 emit_insn (gen_blockage ());
10094
10095 /* Save the return address. */
10096
10097 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10098 set_mem_alias_set (mem, alpha_sr_alias_set);
10099 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10100 (*imaskP) &= ~(1UL << REG_RA);
10101
10102 /* Save the old frame pointer. */
10103
10104 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10105 set_mem_alias_set (mem, alpha_sr_alias_set);
10106 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10107 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10108
10109 emit_insn (gen_blockage ());
10110
10111 /* Store the SSIB pointer. */
10112
10113 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10114 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10115 set_mem_alias_set (mem, alpha_sr_alias_set);
10116
10117 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10118 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10119 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10120
10121 /* Save the CIW index. */
10122
10123 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10124 set_mem_alias_set (mem, alpha_sr_alias_set);
10125 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10126
10127 emit_insn (gen_blockage ());
10128
10129 /* Set the new frame pointer. */
10130
10131 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10132 stack_pointer_rtx, GEN_INT (64))));
10133
10134 }
10135 else
10136 {
10137 /* Increment the frame pointer register to indicate that we do not
10138 have a frame. */
10139
10140 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10141 hard_frame_pointer_rtx, const1_rtx)));
10142 }
10143 }
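/* The layout laid down above, relative to the new stack pointer, is

       sp+56   return address ($26)
       sp+48   old frame pointer ($15)
       sp+32   pointer to the SSIB
       sp+24   CIW index ($25)

   and the frame pointer is left at sp+64, i.e. just above the 64-byte
   DSIB.  */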
10144
10145 /* Output the static subroutine information block for the current
10146 function. */
10147
10148 static void
10149 unicosmk_output_ssib (FILE *file, const char *fnname)
10150 {
10151 int len;
10152 int i;
10153 rtx x;
10154 rtx ciw;
10155 struct machine_function *machine = cfun->machine;
10156
10157 in_section = NULL;
10158 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10159 unicosmk_ssib_name ());
10160
10161 /* Some required stuff and the function name length. */
10162
10163 len = strlen (fnname);
10164 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10165
10166 /* Saved registers
10167 ??? We don't do that yet. */
10168
10169 fputs ("\t.quad\t0\n", file);
10170
10171 /* Function address. */
10172
10173 fputs ("\t.quad\t", file);
10174 assemble_name (file, fnname);
10175 putc ('\n', file);
10176
10177 fputs ("\t.quad\t0\n", file);
10178 fputs ("\t.quad\t0\n", file);
10179
10180 /* Function name.
10181 ??? We do it the same way Cray CC does it but this could be
10182 simplified. */
10183
10184 for( i = 0; i < len; i++ )
10185 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10186 if( (len % 8) == 0 )
10187 fputs ("\t.quad\t0\n", file);
10188 else
10189 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10190
10191 /* All call information words used in the function. */
10192
10193 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10194 {
10195 ciw = XEXP (x, 0);
10196 #if HOST_BITS_PER_WIDE_INT == 32
10197 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10198 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10199 #else
10200 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10201 #endif
10202 }
10203 }
10204
10205 /* Add a call information word (CIW) to the list of the current function's
10206 CIWs and return its index.
10207
10208 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10209
10210 rtx
10211 unicosmk_add_call_info_word (rtx x)
10212 {
10213 rtx node;
10214 struct machine_function *machine = cfun->machine;
10215
10216 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10217 if (machine->first_ciw == NULL_RTX)
10218 machine->first_ciw = node;
10219 else
10220 XEXP (machine->last_ciw, 1) = node;
10221
10222 machine->last_ciw = node;
10223 ++machine->ciw_count;
10224
10225 return GEN_INT (machine->ciw_count
10226 + strlen (current_function_name ())/8 + 5);
10227 }
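/* The returned index appears to be the quad-word offset of the CIW within
   the SSIB emitted above: five header quads, the padded function name
   (strlen / 8 + 1 quads), then the CIWs in order.  For the first CIW of a
   function named "main" this works out to 1 + 4/8 + 5 == 6.  */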
10228
10229 /* The Cray assembler doesn't accept extern declarations for symbols which
10230 are defined in the same file. We have to keep track of all global
10231 symbols which are referenced and/or defined in a source file and output
10232 extern declarations for those which are referenced but not defined at
10233 the end of file. */
10234
10235 /* List of identifiers for which an extern declaration might have to be
10236 emitted. */
10237 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10238
10239 struct unicosmk_extern_list
10240 {
10241 struct unicosmk_extern_list *next;
10242 const char *name;
10243 };
10244
10245 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10246
10247 /* Output extern declarations which are required for every asm file. */
10248
10249 static void
10250 unicosmk_output_default_externs (FILE *file)
10251 {
10252 static const char *const externs[] =
10253 { "__T3E_MISMATCH" };
10254
10255 int i;
10256 int n;
10257
10258 n = ARRAY_SIZE (externs);
10259
10260 for (i = 0; i < n; i++)
10261 fprintf (file, "\t.extern\t%s\n", externs[i]);
10262 }
10263
10264 /* Output extern declarations for global symbols which have been
10265 referenced but not defined. */
10266
10267 static void
10268 unicosmk_output_externs (FILE *file)
10269 {
10270 struct unicosmk_extern_list *p;
10271 const char *real_name;
10272 int len;
10273 tree name_tree;
10274
10275 len = strlen (user_label_prefix);
10276 for (p = unicosmk_extern_head; p != 0; p = p->next)
10277 {
10278 /* We have to strip the encoding and possibly remove user_label_prefix
10279 from the identifier in order to handle -fleading-underscore and
10280 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10281 real_name = default_strip_name_encoding (p->name);
10282 if (len && p->name[0] == '*'
10283 && !memcmp (real_name, user_label_prefix, len))
10284 real_name += len;
10285
10286 name_tree = get_identifier (real_name);
10287 if (! TREE_ASM_WRITTEN (name_tree))
10288 {
10289 TREE_ASM_WRITTEN (name_tree) = 1;
10290 fputs ("\t.extern\t", file);
10291 assemble_name (file, p->name);
10292 putc ('\n', file);
10293 }
10294 }
10295 }
10296
10297 /* Record an extern. */
10298
10299 void
10300 unicosmk_add_extern (const char *name)
10301 {
10302 struct unicosmk_extern_list *p;
10303
10304 p = (struct unicosmk_extern_list *)
10305 xmalloc (sizeof (struct unicosmk_extern_list));
10306 p->next = unicosmk_extern_head;
10307 p->name = name;
10308 unicosmk_extern_head = p;
10309 }
10310
10311 /* The Cray assembler generates incorrect code if identifiers which
10312 conflict with register names are used as instruction operands. We have
10313 to replace such identifiers with DEX expressions. */
10314
10315 /* Structure to collect identifiers which have been replaced by DEX
10316 expressions. */
10317 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10318
10319 struct unicosmk_dex {
10320 struct unicosmk_dex *next;
10321 const char *name;
10322 };
10323
10324 /* List of identifiers which have been replaced by DEX expressions. The DEX
10325 number is determined by the position in the list. */
10326
10327 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10328
10329 /* The number of elements in the DEX list. */
10330
10331 static int unicosmk_dex_count = 0;
10332
10333 /* Check if NAME must be replaced by a DEX expression. */
10334
10335 static int
10336 unicosmk_special_name (const char *name)
10337 {
10338 if (name[0] == '*')
10339 ++name;
10340
10341 if (name[0] == '$')
10342 ++name;
10343
10344 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10345 return 0;
10346
10347 switch (name[1])
10348 {
10349 case '1': case '2':
10350 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10351
10352 case '3':
10353 return (name[2] == '\0'
10354 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10355
10356 default:
10357 return (ISDIGIT (name[1]) && name[2] == '\0');
10358 }
10359 }
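/* Names treated as special are register-like identifiers: an optional '*'
   and/or '$' prefix followed by r, R, f or F and a register number in the
   range 0..31.  So "r0", "$f31" and "*R15" need DEX expressions, while
   "r32", "f99" and "raw" do not.  */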
10360
10361 /* Return the DEX number if X must be replaced by a DEX expression and 0
10362 otherwise. */
10363
10364 static int
10365 unicosmk_need_dex (rtx x)
10366 {
10367 struct unicosmk_dex *dex;
10368 const char *name;
10369 int i;
10370
10371 if (GET_CODE (x) != SYMBOL_REF)
10372 return 0;
10373
10374 name = XSTR (x,0);
10375 if (! unicosmk_special_name (name))
10376 return 0;
10377
10378 i = unicosmk_dex_count;
10379 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10380 {
10381 if (! strcmp (name, dex->name))
10382 return i;
10383 --i;
10384 }
10385
10386 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10387 dex->name = name;
10388 dex->next = unicosmk_dex_list;
10389 unicosmk_dex_list = dex;
10390
10391 ++unicosmk_dex_count;
10392 return unicosmk_dex_count;
10393 }
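/* DEX numbers are assigned in order of first use: the first special name
   seen in a file gets DEX 1, the next DEX 2, and so on.  unicosmk_output_dex
   below walks the list from newest to oldest, counting down from
   unicosmk_dex_count.  For example, if "f10" is referenced before "r25",
   the file ends with

       DEX (2) = r25
       DEX (1) = f10
*/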
10394
10395 /* Output the DEX definitions for this file. */
10396
10397 static void
10398 unicosmk_output_dex (FILE *file)
10399 {
10400 struct unicosmk_dex *dex;
10401 int i;
10402
10403 if (unicosmk_dex_list == NULL)
10404 return;
10405
10406 fprintf (file, "\t.dexstart\n");
10407
10408 i = unicosmk_dex_count;
10409 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10410 {
10411 fprintf (file, "\tDEX (%d) = ", i);
10412 assemble_name (file, dex->name);
10413 putc ('\n', file);
10414 --i;
10415 }
10416
10417 fprintf (file, "\t.dexend\n");
10418 }
10419
10420 /* Output text to appear at the beginning of an assembler file. */
10421
10422 static void
10423 unicosmk_file_start (void)
10424 {
10425 int i;
10426
10427 fputs ("\t.ident\t", asm_out_file);
10428 unicosmk_output_module_name (asm_out_file);
10429 fputs ("\n\n", asm_out_file);
10430
10431 /* The Unicos/Mk assembler uses different register names. Instead of trying
10432 to support them, we simply use micro definitions. */
10433
10434 /* CAM has different register names: rN for the integer register N and fN
10435 for the floating-point register N. Instead of trying to use these in
10436 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10437 register. */
10438
10439 for (i = 0; i < 32; ++i)
10440 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10441
10442 for (i = 0; i < 32; ++i)
10443 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10444
10445 putc ('\n', asm_out_file);
10446
10447 /* The .align directive fills unused space with zeroes, which does not work
10448 in code sections. We define the macro 'gcc@code@align' which uses nops
10449 instead. Note that it assumes that code sections always have the
10450 biggest possible alignment since . refers to the current offset from
10451 the beginning of the section. */
10452
10453 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10454 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10455 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10456 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10457 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10458 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10459 fputs ("\t.endr\n", asm_out_file);
10460 fputs ("\t.endif\n", asm_out_file);
10461 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10462
10463 /* Output extern declarations which should always be visible. */
10464 unicosmk_output_default_externs (asm_out_file);
10465
10466 /* Open a dummy section. We always need to be inside a section for the
10467 section-switching code to work correctly.
10468 ??? This should be a module id or something like that. I still have to
10469 figure out what the rules for those are. */
10470 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10471 }
10472
10473 /* Output text to appear at the end of an assembler file. This includes all
10474 pending extern declarations and DEX expressions. */
10475
10476 static void
10477 unicosmk_file_end (void)
10478 {
10479 fputs ("\t.endp\n\n", asm_out_file);
10480
10481 /* Output all pending externs. */
10482
10483 unicosmk_output_externs (asm_out_file);
10484
10485 /* Output dex definitions used for functions whose names conflict with
10486 register names. */
10487
10488 unicosmk_output_dex (asm_out_file);
10489
10490 fputs ("\t.end\t", asm_out_file);
10491 unicosmk_output_module_name (asm_out_file);
10492 putc ('\n', asm_out_file);
10493 }
10494
10495 #else
10496
10497 static void
10498 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10499 {}
10500
10501 static void
10502 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10503 {}
10504
10505 static void
10506 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10507 const char * fnname ATTRIBUTE_UNUSED)
10508 {}
10509
10510 rtx
10511 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10512 {
10513 return NULL_RTX;
10514 }
10515
10516 static int
10517 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10518 {
10519 return 0;
10520 }
10521
10522 #endif /* TARGET_ABI_UNICOSMK */
10523
10524 static void
10525 alpha_init_libfuncs (void)
10526 {
10527 if (TARGET_ABI_UNICOSMK)
10528 {
10529 /* Prevent gcc from generating calls to __divsi3. */
10530 set_optab_libfunc (sdiv_optab, SImode, 0);
10531 set_optab_libfunc (udiv_optab, SImode, 0);
10532
10533 /* Use the functions provided by the system library
10534 for DImode integer division. */
10535 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10536 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10537 }
10538 else if (TARGET_ABI_OPEN_VMS)
10539 {
10540 /* Use the VMS runtime library functions for division and
10541 remainder. */
10542 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10543 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10544 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10545 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10546 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10547 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10548 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10549 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10550 }
10551 }
10552
10553 \f
10554 /* Initialize the GCC target structure. */
10555 #if TARGET_ABI_OPEN_VMS
10556 # undef TARGET_ATTRIBUTE_TABLE
10557 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10558 # undef TARGET_SECTION_TYPE_FLAGS
10559 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10560 #endif
10561
10562 #undef TARGET_IN_SMALL_DATA_P
10563 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10564
10565 #if TARGET_ABI_UNICOSMK
10566 # undef TARGET_INSERT_ATTRIBUTES
10567 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10568 # undef TARGET_SECTION_TYPE_FLAGS
10569 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10570 # undef TARGET_ASM_UNIQUE_SECTION
10571 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10572 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10573 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10574 # undef TARGET_ASM_GLOBALIZE_LABEL
10575 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10576 # undef TARGET_MUST_PASS_IN_STACK
10577 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10578 #endif
10579
10580 #undef TARGET_ASM_ALIGNED_HI_OP
10581 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10582 #undef TARGET_ASM_ALIGNED_DI_OP
10583 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10584
10585 /* Default unaligned ops are provided for ELF systems. To get unaligned
10586 data for non-ELF systems, we have to turn off auto alignment. */
10587 #ifndef OBJECT_FORMAT_ELF
10588 #undef TARGET_ASM_UNALIGNED_HI_OP
10589 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10590 #undef TARGET_ASM_UNALIGNED_SI_OP
10591 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10592 #undef TARGET_ASM_UNALIGNED_DI_OP
10593 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10594 #endif
10595
10596 #ifdef OBJECT_FORMAT_ELF
10597 #undef TARGET_ASM_RELOC_RW_MASK
10598 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10599 #undef TARGET_ASM_SELECT_RTX_SECTION
10600 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10601 #undef TARGET_SECTION_TYPE_FLAGS
10602 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10603 #endif
10604
10605 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10606 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10607
10608 #undef TARGET_INIT_LIBFUNCS
10609 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10610
10611 #if TARGET_ABI_UNICOSMK
10612 #undef TARGET_ASM_FILE_START
10613 #define TARGET_ASM_FILE_START unicosmk_file_start
10614 #undef TARGET_ASM_FILE_END
10615 #define TARGET_ASM_FILE_END unicosmk_file_end
10616 #else
10617 #undef TARGET_ASM_FILE_START
10618 #define TARGET_ASM_FILE_START alpha_file_start
10619 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10620 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10621 #endif
10622
10623 #undef TARGET_SCHED_ADJUST_COST
10624 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10625 #undef TARGET_SCHED_ISSUE_RATE
10626 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10627 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10628 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10629 alpha_multipass_dfa_lookahead
10630
10631 #undef TARGET_HAVE_TLS
10632 #define TARGET_HAVE_TLS HAVE_AS_TLS
10633
10634 #undef TARGET_INIT_BUILTINS
10635 #define TARGET_INIT_BUILTINS alpha_init_builtins
10636 #undef TARGET_EXPAND_BUILTIN
10637 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10638 #undef TARGET_FOLD_BUILTIN
10639 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10640
10641 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10642 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10643 #undef TARGET_CANNOT_COPY_INSN_P
10644 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10645 #undef TARGET_CANNOT_FORCE_CONST_MEM
10646 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10647
10648 #if TARGET_ABI_OSF
10649 #undef TARGET_ASM_OUTPUT_MI_THUNK
10650 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10651 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10652 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10653 #undef TARGET_STDARG_OPTIMIZE_HOOK
10654 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10655 #endif
10656
10657 #undef TARGET_RTX_COSTS
10658 #define TARGET_RTX_COSTS alpha_rtx_costs
10659 #undef TARGET_ADDRESS_COST
10660 #define TARGET_ADDRESS_COST hook_int_rtx_0
10661
10662 #undef TARGET_MACHINE_DEPENDENT_REORG
10663 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10664
10665 #undef TARGET_PROMOTE_FUNCTION_ARGS
10666 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10667 #undef TARGET_PROMOTE_FUNCTION_RETURN
10668 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10669 #undef TARGET_PROMOTE_PROTOTYPES
10670 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10671 #undef TARGET_RETURN_IN_MEMORY
10672 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10673 #undef TARGET_PASS_BY_REFERENCE
10674 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10675 #undef TARGET_SETUP_INCOMING_VARARGS
10676 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10677 #undef TARGET_STRICT_ARGUMENT_NAMING
10678 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10679 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10680 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10681 #undef TARGET_SPLIT_COMPLEX_ARG
10682 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10683 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10684 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10685 #undef TARGET_ARG_PARTIAL_BYTES
10686 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10687
10688 #undef TARGET_SECONDARY_RELOAD
10689 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10690
10691 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10692 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10693 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10694 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10695
10696 #undef TARGET_BUILD_BUILTIN_VA_LIST
10697 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10698
10699 #undef TARGET_EXPAND_BUILTIN_VA_START
10700 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10701
10702 /* The Alpha architecture does not require sequential consistency. See
10703 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10704 for an example of how it can be violated in practice. */
10705 #undef TARGET_RELAXED_ORDERING
10706 #define TARGET_RELAXED_ORDERING true
10707
10708 #undef TARGET_DEFAULT_TARGET_FLAGS
10709 #define TARGET_DEFAULT_TARGET_FLAGS \
10710 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10711 #undef TARGET_HANDLE_OPTION
10712 #define TARGET_HANDLE_OPTION alpha_handle_option
10713
10714 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10715 #undef TARGET_MANGLE_TYPE
10716 #define TARGET_MANGLE_TYPE alpha_mangle_type
10717 #endif
10718
10719 struct gcc_target targetm = TARGET_INITIALIZER;
10720
10721 \f
10722 #include "gt-alpha.h"