1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54
55 /* Forward definitions of types. */
56 typedef struct minipool_node Mnode;
57 typedef struct minipool_fixup Mfix;
58
59 const struct attribute_spec arm_attribute_table[];
60
61 /* Forward function declarations. */
62 static void arm_add_gc_roots (void);
63 static int arm_gen_constant (enum rtx_code, enum machine_mode, HOST_WIDE_INT,
64 rtx, rtx, int, int);
65 static unsigned bit_count (unsigned long);
66 static int arm_address_register_rtx_p (rtx, int);
67 static int arm_legitimate_index_p (enum machine_mode, rtx, int);
68 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
69 inline static int thumb_index_register_rtx_p (rtx, int);
70 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
71 static rtx emit_multi_reg_push (int);
72 static rtx emit_sfm (int, int);
73 #ifndef AOF_ASSEMBLER
74 static bool arm_assemble_integer (rtx, unsigned int, int);
75 #endif
76 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
77 static arm_cc get_arm_condition_code (rtx);
78 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
79 static rtx is_jump_table (rtx);
80 static const char *output_multi_immediate (rtx *, const char *, const char *,
81 int, HOST_WIDE_INT);
82 static void print_multi_reg (FILE *, const char *, int, int);
83 static const char *shift_op (rtx, HOST_WIDE_INT *);
84 static struct machine_function *arm_init_machine_status (void);
85 static int number_of_first_bit_set (int);
86 static void replace_symbols_in_block (tree, rtx, rtx);
87 static void thumb_exit (FILE *, int, rtx);
88 static void thumb_pushpop (FILE *, int, int, int *, int);
90 static HOST_WIDE_INT get_jump_table_size (rtx);
91 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
92 static Mnode *add_minipool_forward_ref (Mfix *);
93 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
94 static Mnode *add_minipool_backward_ref (Mfix *);
95 static void assign_minipool_offsets (Mfix *);
96 static void arm_print_value (FILE *, rtx);
97 static void dump_minipool (rtx);
98 static int arm_barrier_cost (rtx);
99 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
100 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
101 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
102 rtx);
103 static void arm_reorg (void);
104 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
105 static int current_file_function_operand (rtx);
106 static unsigned long arm_compute_save_reg0_reg12_mask (void);
107 static unsigned long arm_compute_save_reg_mask (void);
108 static unsigned long arm_isr_value (tree);
109 static unsigned long arm_compute_func_type (void);
110 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
111 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
112 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
113 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
114 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
115 static int arm_comp_type_attributes (tree, tree);
116 static void arm_set_default_type_attributes (tree);
117 static int arm_adjust_cost (rtx, rtx, rtx, int);
118 static int arm_use_dfa_pipeline_interface (void);
119 static int count_insns_for_constant (HOST_WIDE_INT, int);
120 static int arm_get_strip_length (int);
121 static bool arm_function_ok_for_sibcall (tree, tree);
122 static void arm_internal_label (FILE *, const char *, unsigned long);
123 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
124 tree);
125 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
126 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
127 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
128 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
129 static bool arm_9e_rtx_costs (rtx, int, int, int *);
130 static int arm_address_cost (rtx);
131 static bool arm_memory_load_p (rtx);
132 static bool arm_cirrus_insn_p (rtx);
133 static void cirrus_reorg (rtx);
134 static void arm_init_builtins (void);
135 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
136 static void arm_init_iwmmxt_builtins (void);
137 static rtx safe_vector_operand (rtx, enum machine_mode);
138 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
139 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
141
142 #ifdef OBJECT_FORMAT_ELF
143 static void arm_elf_asm_named_section (const char *, unsigned int);
144 #endif
145 #ifndef ARM_PE
146 static void arm_encode_section_info (tree, rtx, int);
147 #endif
148 #ifdef AOF_ASSEMBLER
149 static void aof_globalize_label (FILE *, const char *);
150 static void aof_dump_imports (FILE *);
151 static void aof_dump_pic_table (FILE *);
152 static void aof_file_start (void);
153 static void aof_file_end (void);
154 #endif
155 static rtx arm_struct_value_rtx (tree, int);
156 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
157 tree, int *, int);
158
159 \f
160 /* Initialize the GCC target structure. */
161 #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
162 #undef TARGET_MERGE_DECL_ATTRIBUTES
163 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
164 #endif
165
166 #undef TARGET_ATTRIBUTE_TABLE
167 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
168
169 #ifdef AOF_ASSEMBLER
170 #undef TARGET_ASM_BYTE_OP
171 #define TARGET_ASM_BYTE_OP "\tDCB\t"
172 #undef TARGET_ASM_ALIGNED_HI_OP
173 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
174 #undef TARGET_ASM_ALIGNED_SI_OP
175 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
176 #undef TARGET_ASM_GLOBALIZE_LABEL
177 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
178 #undef TARGET_ASM_FILE_START
179 #define TARGET_ASM_FILE_START aof_file_start
180 #undef TARGET_ASM_FILE_END
181 #define TARGET_ASM_FILE_END aof_file_end
182 #else
183 #undef TARGET_ASM_ALIGNED_SI_OP
184 #define TARGET_ASM_ALIGNED_SI_OP NULL
185 #undef TARGET_ASM_INTEGER
186 #define TARGET_ASM_INTEGER arm_assemble_integer
187 #endif
188
189 #undef TARGET_ASM_FUNCTION_PROLOGUE
190 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
191
192 #undef TARGET_ASM_FUNCTION_EPILOGUE
193 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
194
195 #undef TARGET_COMP_TYPE_ATTRIBUTES
196 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
197
198 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
199 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
200
201 #undef TARGET_SCHED_ADJUST_COST
202 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
203
204 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
205 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface
206
207 #undef TARGET_ENCODE_SECTION_INFO
208 #ifdef ARM_PE
209 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
210 #else
211 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
212 #endif
213
214 #undef TARGET_STRIP_NAME_ENCODING
215 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
216
217 #undef TARGET_ASM_INTERNAL_LABEL
218 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
219
220 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
221 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
222
223 #undef TARGET_ASM_OUTPUT_MI_THUNK
224 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
225 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
226 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
227
228 /* This will be overridden in arm_override_options. */
229 #undef TARGET_RTX_COSTS
230 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
231 #undef TARGET_ADDRESS_COST
232 #define TARGET_ADDRESS_COST arm_address_cost
233
234 #undef TARGET_MACHINE_DEPENDENT_REORG
235 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
236
237 #undef TARGET_INIT_BUILTINS
238 #define TARGET_INIT_BUILTINS arm_init_builtins
239 #undef TARGET_EXPAND_BUILTIN
240 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
241
242 #undef TARGET_PROMOTE_FUNCTION_ARGS
243 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
244 #undef TARGET_PROMOTE_PROTOTYPES
245 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
246
247 #undef TARGET_STRUCT_VALUE_RTX
248 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
249
250 #undef TARGET_SETUP_INCOMING_VARARGS
251 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
252
253 struct gcc_target targetm = TARGET_INITIALIZER;
254 \f
255 /* Obstack for minipool constant handling. */
256 static struct obstack minipool_obstack;
257 static char * minipool_startobj;
258
259 /* The maximum number of insns skipped which
260 will be conditionalised if possible. */
261 static int max_insns_skipped = 5;
262
263 extern FILE * asm_out_file;
264
265 /* True if we are currently building a constant table. */
266 int making_const_table;
267
268 /* Define the information needed to generate branch insns. This is
269 stored from the compare operation. */
270 rtx arm_compare_op0, arm_compare_op1;
271
272 /* The processor for which instructions should be scheduled. */
273 enum processor_type arm_tune = arm_none;
274
275 /* Which floating point model to use. */
276 enum arm_fp_model arm_fp_model;
277
278 /* Which floating point hardware is available. */
279 enum fputype arm_fpu_arch;
280
281 /* Which floating point hardware to schedule for. */
282 enum fputype arm_fpu_tune;
283
284 /* Whether to use floating point hardware. */
285 enum float_abi_type arm_float_abi;
286
287 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode. */
288 enum prog_mode_type arm_prgmode;
289
290 /* Set by the -mfpu=... option. */
291 const char * target_fpu_name = NULL;
292
293 /* Set by the -mfpe=... option. */
294 const char * target_fpe_name = NULL;
295
296 /* Set by the -mfloat-abi=... option. */
297 const char * target_float_abi_name = NULL;
298
299 /* Used to parse -mstructure_size_boundary command line option. */
300 const char * structure_size_string = NULL;
301 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
302
303 /* Bit values used to identify processor capabilities. */
304 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
305 #define FL_ARCH3M (1 << 1) /* Extended multiply */
306 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
307 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
308 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
309 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
310 #define FL_THUMB (1 << 6) /* Thumb aware */
311 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
312 #define FL_STRONG (1 << 8) /* StrongARM */
313 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
314 #define FL_XSCALE (1 << 10) /* XScale */
315 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
316 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
317 media instructions. */
318 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
319
320 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
321
322 /* The bits in this mask specify which
323 instructions we are allowed to generate. */
324 static unsigned long insn_flags = 0;
325
326 /* The bits in this mask specify which instruction scheduling options should
327 be used. */
328 static unsigned long tune_flags = 0;
329
330 /* The following are used in the arm.md file as equivalents to bits
331 in the above two flag variables. */
332
333 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
334 int arm_arch3m = 0;
335
336 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
337 int arm_arch4 = 0;
338
339 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
340 int arm_arch5 = 0;
341
342 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
343 int arm_arch5e = 0;
344
345 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
346 int arm_arch6 = 0;
347
348 /* Nonzero if this chip can benefit from load scheduling. */
349 int arm_ld_sched = 0;
350
351 /* Nonzero if this chip is a StrongARM. */
352 int arm_is_strong = 0;
353
354 /* Nonzero if this chip supports Intel Wireless MMX technology. */
355 int arm_arch_iwmmxt = 0;
356
357 /* Nonzero if this chip is an XScale. */
358 int arm_arch_xscale = 0;
359
360 /* Nonzero if tuning for XScale */
361 int arm_tune_xscale = 0;
362
363 /* Nonzero if this chip is an ARM6 or an ARM7. */
364 int arm_is_6_or_7 = 0;
365
366 /* Nonzero if generating Thumb instructions. */
367 int thumb_code = 0;
368
369 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
370 must report the mode of the memory reference from PRINT_OPERAND to
371 PRINT_OPERAND_ADDRESS. */
372 enum machine_mode output_memory_reference_mode;
373
374 /* The register number to be used for the PIC offset register. */
375 const char * arm_pic_register_string = NULL;
376 int arm_pic_register = INVALID_REGNUM;
377
378 /* Set to 1 when a return insn is output, this means that the epilogue
379 is not needed. */
380 int return_used_this_function;
381
382 /* Set to 1 after arm_reorg has started; reset at the start of
383 the next function. */
384 static int after_arm_reorg = 0;
385
386 /* The maximum number of insns to be used when loading a constant. */
387 static int arm_constant_limit = 3;
388
389 /* For an explanation of these variables, see final_prescan_insn below. */
390 int arm_ccfsm_state;
391 enum arm_cond_code arm_current_cc;
392 rtx arm_target_insn;
393 int arm_target_label;
394
395 /* The condition codes of the ARM, and the inverse function. */
396 static const char * const arm_condition_codes[] =
397 {
398 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
399 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
400 };
401
402 #define streq(string1, string2) (strcmp (string1, string2) == 0)
403 \f
404 /* Initialization code. */
405
406 struct processors
407 {
408 const char *const name;
409 enum processor_type core;
410 const unsigned long flags;
411 bool (* rtx_costs) (rtx, int, int, int *);
412 };
413
414 /* Not all of these give usefully different compilation alternatives,
415 but there is no simple way of generalizing them. */
416 static const struct processors all_cores[] =
417 {
418 /* ARM Cores */
419 #define ARM_CORE(NAME, FLAGS, COSTS) \
420 {#NAME, arm_none, FLAGS, arm_##COSTS##_rtx_costs},
421 #include "arm-cores.def"
422 #undef ARM_CORE
423 {NULL, arm_none, 0, NULL}
424 };
425
426 static const struct processors all_architectures[] =
427 {
428 /* ARM Architectures */
429 /* We don't specify rtx_costs here as it will be figured out
430 from the core. */
431
432 { "armv2", arm2, FL_CO_PROC | FL_MODE26 , NULL},
433 { "armv2a", arm2, FL_CO_PROC | FL_MODE26 , NULL},
434 { "armv3", arm6, FL_CO_PROC | FL_MODE26 | FL_MODE32 , NULL},
435 { "armv3m", arm7m, FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M , NULL},
436 { "armv4", arm7tdmi, FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M | FL_ARCH4 , NULL},
437 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
438 implementations that support it, so we will leave it out for now. */
439 { "armv4t", arm7tdmi, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB , NULL},
440 { "armv5", arm10tdmi, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 , NULL},
441 { "armv5t", arm10tdmi, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 , NULL},
442 { "armv5te", arm1026ejs, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E , NULL},
443 { "armv6", arm1136js, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6 , NULL},
444 { "armv6j", arm1136js, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6 , NULL},
445 { "ep9312", ep9312, FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS , NULL},
446 {"iwmmxt", iwmmxt, FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT , NULL},
447 { NULL, arm_none, 0 , NULL}
448 };
449
450 /* This is a magic structure. The 'string' field is magically filled in
451 with a pointer to the value specified by the user on the command line
452 assuming that the user has specified such a value. */
453
454 struct arm_cpu_select arm_select[] =
455 {
456 /* string name processors */
457 { NULL, "-mcpu=", all_cores },
458 { NULL, "-march=", all_architectures },
459 { NULL, "-mtune=", all_cores }
460 };
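/* A sketch of how the table above gets used (illustrative only; it
   assumes the usual option parsing fills in the 'string' fields):

     gcc -mcpu=arm9 -mtune=xscale foo.c

   would leave arm_select[0].string == "arm9" (from -mcpu=),
   arm_select[1].string == NULL (no -march= given) and
   arm_select[2].string == "xscale" (from -mtune=), which
   arm_override_options below then matches against all_cores and
   all_architectures.  */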
461
462 struct fpu_desc
463 {
464 const char * name;
465 enum fputype fpu;
466 };
467
468
469 /* Available values for -mfpu=. */
470
471 static const struct fpu_desc all_fpus[] =
472 {
473 {"fpa", FPUTYPE_FPA},
474 {"fpe2", FPUTYPE_FPA_EMU2},
475 {"fpe3", FPUTYPE_FPA_EMU2},
476 {"maverick", FPUTYPE_MAVERICK},
477 {"vfp", FPUTYPE_VFP}
478 };
479
480
481 /* Floating point models used by the different hardware.
482 See fputype in arm.h. */
483
484 static const enum fputype fp_model_for_fpu[] =
485 {
486 /* No FP hardware. */
487 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
488 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
489 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
490 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
491 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
492 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
493 };
494
495
496 struct float_abi
497 {
498 const char * name;
499 enum float_abi_type abi_type;
500 };
501
502
503 /* Available values for -mfloat-abi=. */
504
505 static const struct float_abi all_float_abis[] =
506 {
507 {"soft", ARM_FLOAT_ABI_SOFT},
508 {"softfp", ARM_FLOAT_ABI_SOFTFP},
509 {"hard", ARM_FLOAT_ABI_HARD}
510 };
511
512
513 /* Return the number of bits set in VALUE. */
514 static unsigned
515 bit_count (unsigned long value)
516 {
517 unsigned long count = 0;
518
519 while (value)
520 {
521 count++;
522 value &= value - 1; /* Clear the least-significant set bit. */
523 }
524
525 return count;
526 }
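/* A worked example of the loop above: with value = 0x2C (binary 101100),
     101100 & 101011 = 101000
     101000 & 100111 = 100000
     100000 & 011111 = 000000
   so the loop iterates once per set bit and bit_count returns 3.  */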
527
528 /* Fix up any incompatible options that the user has specified.
529 This has now turned into a maze. */
530 void
531 arm_override_options (void)
532 {
533 unsigned i;
534
535 /* Set up the flags based on the cpu/architecture selected by the user. */
536 for (i = ARRAY_SIZE (arm_select); i--;)
537 {
538 struct arm_cpu_select * ptr = arm_select + i;
539
540 if (ptr->string != NULL && ptr->string[0] != '\0')
541 {
542 const struct processors * sel;
543
544 for (sel = ptr->processors; sel->name != NULL; sel++)
545 if (streq (ptr->string, sel->name))
546 {
547 /* Determine the processor core for which we should
548 tune code-generation. */
549 if (/* -mcpu= is a sensible default. */
550 i == 0
551 /* If -march= is used, and -mcpu= has not been used,
552 assume that we should tune for a representative
553 CPU from that architecture. */
554 || i == 1
555 /* -mtune= overrides -mcpu= and -march=. */
556 || i == 2)
557 arm_tune = (enum processor_type) (sel - ptr->processors);
558
559 if (i != 2)
560 {
561 /* If we have been given an architecture and a processor
562 make sure that they are compatible. We only generate
563 a warning though, and we prefer the CPU over the
564 architecture. */
565 if (insn_flags != 0 && (insn_flags ^ sel->flags))
566 warning ("switch -mcpu=%s conflicts with -march= switch",
567 ptr->string);
568
569 insn_flags = sel->flags;
570 }
571
572 break;
573 }
574
575 if (sel->name == NULL)
576 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
577 }
578 }
579
580 /* If the user did not specify a processor, choose one for them. */
581 if (insn_flags == 0)
582 {
583 const struct processors * sel;
584 unsigned int sought;
585 static const struct cpu_default
586 {
587 const int cpu;
588 const char *const name;
589 }
590 cpu_defaults[] =
591 {
592 { TARGET_CPU_arm2, "arm2" },
593 { TARGET_CPU_arm6, "arm6" },
594 { TARGET_CPU_arm610, "arm610" },
595 { TARGET_CPU_arm710, "arm710" },
596 { TARGET_CPU_arm7m, "arm7m" },
597 { TARGET_CPU_arm7500fe, "arm7500fe" },
598 { TARGET_CPU_arm7tdmi, "arm7tdmi" },
599 { TARGET_CPU_arm8, "arm8" },
600 { TARGET_CPU_arm810, "arm810" },
601 { TARGET_CPU_arm9, "arm9" },
602 { TARGET_CPU_strongarm, "strongarm" },
603 { TARGET_CPU_xscale, "xscale" },
604 { TARGET_CPU_ep9312, "ep9312" },
605 { TARGET_CPU_iwmmxt, "iwmmxt" },
606 { TARGET_CPU_arm926ejs, "arm926ejs" },
607 { TARGET_CPU_arm1026ejs, "arm1026ejs" },
608 { TARGET_CPU_arm1136js, "arm1136js" },
609 { TARGET_CPU_arm1136jfs, "arm1136jfs" },
610 { TARGET_CPU_generic, "arm" },
611 { 0, 0 }
612 };
613 const struct cpu_default * def;
614
615 /* Find the default. */
616 for (def = cpu_defaults; def->name; def++)
617 if (def->cpu == TARGET_CPU_DEFAULT)
618 break;
619
620 /* Make sure we found the default CPU. */
621 if (def->name == NULL)
622 abort ();
623
624 /* Find the default CPU's flags. */
625 for (sel = all_cores; sel->name != NULL; sel++)
626 if (streq (def->name, sel->name))
627 break;
628
629 if (sel->name == NULL)
630 abort ();
631
632 insn_flags = sel->flags;
633
634 /* Now check to see if the user has specified some command line
635 switches that require certain abilities from the cpu. */
636 sought = 0;
637
638 if (TARGET_INTERWORK || TARGET_THUMB)
639 {
640 sought |= (FL_THUMB | FL_MODE32);
641
642 /* Force apcs-32 to be used for interworking. */
643 target_flags |= ARM_FLAG_APCS_32;
644
645 /* There are no ARM processors that support both APCS-26 and
646 interworking. Therefore we force FL_MODE26 to be removed
647 from insn_flags here (if it was set), so that the search
648 below will always be able to find a compatible processor. */
649 insn_flags &= ~FL_MODE26;
650 }
651 else if (!TARGET_APCS_32)
652 sought |= FL_MODE26;
653
654 if (sought != 0 && ((sought & insn_flags) != sought))
655 {
656 /* Try to locate a CPU type that supports all of the abilities
657 of the default CPU, plus the extra abilities requested by
658 the user. */
659 for (sel = all_cores; sel->name != NULL; sel++)
660 if ((sel->flags & sought) == (sought | insn_flags))
661 break;
662
663 if (sel->name == NULL)
664 {
665 unsigned current_bit_count = 0;
666 const struct processors * best_fit = NULL;
667
668 /* Ideally we would like to issue an error message here
669 saying that it was not possible to find a CPU compatible
670 with the default CPU, but which also supports the command
671 line options specified by the programmer, and so they
672 ought to use the -mcpu=<name> command line option to
673 override the default CPU type.
674
675 Unfortunately this does not work with multilibbing. We
676 need to be able to support multilibs for -mapcs-26 and for
677 -mthumb-interwork, and there is no CPU that can support both
678 options. Instead, if we cannot find a CPU that has both the
679 characteristics of the default CPU and the given command line
680 options, we scan the array again looking for a best match. */
681 for (sel = all_cores; sel->name != NULL; sel++)
682 if ((sel->flags & sought) == sought)
683 {
684 unsigned count;
685
686 count = bit_count (sel->flags & insn_flags);
687
688 if (count >= current_bit_count)
689 {
690 best_fit = sel;
691 current_bit_count = count;
692 }
693 }
694
695 if (best_fit == NULL)
696 abort ();
697 else
698 sel = best_fit;
699 }
700
701 insn_flags = sel->flags;
702 }
703 if (arm_tune == arm_none)
704 arm_tune = (enum processor_type) (sel - all_cores);
705 }
706
707 /* The processor for which we should tune should now have been
708 chosen. */
709 if (arm_tune == arm_none)
710 abort ();
711
712 tune_flags = all_cores[(int)arm_tune].flags;
713 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
714
715 /* Make sure that the processor choice does not conflict with any of the
716 other command line choices. */
717 if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
718 {
719 /* If APCS-32 was not the default then it must have been set by the
720 user, so issue a warning message. If the user has specified
721 "-mapcs-32 -mcpu=arm2" then we loose here. */
722 if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
723 warning ("target CPU does not support APCS-32" );
724 target_flags &= ~ARM_FLAG_APCS_32;
725 }
726 else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
727 {
728 warning ("target CPU does not support APCS-26" );
729 target_flags |= ARM_FLAG_APCS_32;
730 }
731
732 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
733 {
734 warning ("target CPU does not support interworking" );
735 target_flags &= ~ARM_FLAG_INTERWORK;
736 }
737
738 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
739 {
740 warning ("target CPU does not support THUMB instructions");
741 target_flags &= ~ARM_FLAG_THUMB;
742 }
743
744 if (TARGET_APCS_FRAME && TARGET_THUMB)
745 {
746 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
747 target_flags &= ~ARM_FLAG_APCS_FRAME;
748 }
749
750 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
751 from here where no function is being compiled currently. */
752 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
753 && TARGET_ARM)
754 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
755
756 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
757 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
758
759 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
760 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
761
762 /* If interworking is enabled then APCS-32 must be selected as well. */
763 if (TARGET_INTERWORK)
764 {
765 if (!TARGET_APCS_32)
766 warning ("interworking forces APCS-32 to be used" );
767 target_flags |= ARM_FLAG_APCS_32;
768 }
769
770 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
771 {
772 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
773 target_flags |= ARM_FLAG_APCS_FRAME;
774 }
775
776 if (TARGET_POKE_FUNCTION_NAME)
777 target_flags |= ARM_FLAG_APCS_FRAME;
778
779 if (TARGET_APCS_REENT && flag_pic)
780 error ("-fpic and -mapcs-reent are incompatible");
781
782 if (TARGET_APCS_REENT)
783 warning ("APCS reentrant code not supported. Ignored");
784
785 /* If this target is normally configured to use APCS frames, warn if they
786 are turned off and debugging is turned on. */
787 if (TARGET_ARM
788 && write_symbols != NO_DEBUG
789 && !TARGET_APCS_FRAME
790 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
791 warning ("-g with -mno-apcs-frame may not give sensible debugging");
792
793 /* If stack checking is disabled, we can use r10 as the PIC register,
794 which keeps r9 available. */
795 if (flag_pic)
796 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
797
798 if (TARGET_APCS_FLOAT)
799 warning ("passing floating point arguments in fp regs not yet supported");
800
801 /* Initialize boolean versions of the flags, for use in the arm.md file. */
802 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
803 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
804 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
805 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
806 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
807 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
808
809 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
810 arm_is_strong = (tune_flags & FL_STRONG) != 0;
811 thumb_code = (TARGET_ARM == 0);
812 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
813 && !(tune_flags & FL_ARCH4))) != 0;
814 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
815 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
816
817 if (TARGET_IWMMXT && (! TARGET_ATPCS))
818 target_flags |= ARM_FLAG_ATPCS;
819
820 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
821 if (target_fpu_name == NULL && target_fpe_name != NULL)
822 {
823 if (streq (target_fpe_name, "2"))
824 target_fpu_name = "fpe2";
825 else if (streq (target_fpe_name, "3"))
826 target_fpu_name = "fpe3";
827 else
828 error ("invalid floating point emulation option: -mfpe=%s",
829 target_fpe_name);
830 }
831 if (target_fpu_name != NULL)
832 {
833 /* The user specified an FPU. */
834 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
835 {
836 if (streq (all_fpus[i].name, target_fpu_name))
837 {
838 arm_fpu_arch = all_fpus[i].fpu;
839 arm_fpu_tune = arm_fpu_arch;
840 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
841 break;
842 }
843 }
844 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
845 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
846 }
847 else
848 {
849 #ifdef FPUTYPE_DEFAULT
850 /* Use the default if it is specified for this platform. */
851 arm_fpu_arch = FPUTYPE_DEFAULT;
852 arm_fpu_tune = FPUTYPE_DEFAULT;
853 #else
854 /* Pick one based on CPU type. */
855 if ((insn_flags & FL_VFP) != 0)
856 arm_fpu_arch = FPUTYPE_VFP;
857 else if (insn_flags & FL_CIRRUS)
858 arm_fpu_arch = FPUTYPE_MAVERICK;
859 else
860 arm_fpu_arch = FPUTYPE_FPA_EMU2;
861 #endif
862 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
863 arm_fpu_tune = FPUTYPE_FPA;
864 else
865 arm_fpu_tune = arm_fpu_arch;
866 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
867 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
868 abort ();
869 }
870
871 if (target_float_abi_name != NULL)
872 {
873 /* The user specified an FP ABI. */
874 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
875 {
876 if (streq (all_float_abis[i].name, target_float_abi_name))
877 {
878 arm_float_abi = all_float_abis[i].abi_type;
879 break;
880 }
881 }
882 if (i == ARRAY_SIZE (all_float_abis))
883 error ("invalid floating point abi: -mfloat-abi=%s",
884 target_float_abi_name);
885 }
886 else
887 {
888 /* Use soft-float target flag. */
889 if (target_flags & ARM_FLAG_SOFT_FLOAT)
890 arm_float_abi = ARM_FLOAT_ABI_SOFT;
891 else
892 arm_float_abi = ARM_FLOAT_ABI_HARD;
893 }
894
895 if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
896 sorry ("-mfloat-abi=softfp");
897 /* If soft-float is specified then don't use FPU. */
898 if (TARGET_SOFT_FLOAT)
899 arm_fpu_arch = FPUTYPE_NONE;
900
901 /* For arm2/3 there is no need to do any scheduling if there is only
902 a floating point emulator, or we are doing software floating-point. */
903 if ((TARGET_SOFT_FLOAT
904 || arm_fpu_tune == FPUTYPE_FPA_EMU2
905 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
906 && (tune_flags & FL_MODE32) == 0)
907 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
908
909 arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
910
911 if (structure_size_string != NULL)
912 {
913 int size = strtol (structure_size_string, NULL, 0);
914
915 if (size == 8 || size == 32)
916 arm_structure_size_boundary = size;
917 else
918 warning ("structure size boundary can only be set to 8 or 32");
919 }
920
921 if (arm_pic_register_string != NULL)
922 {
923 int pic_register = decode_reg_name (arm_pic_register_string);
924
925 if (!flag_pic)
926 warning ("-mpic-register= is useless without -fpic");
927
928 /* Prevent the user from choosing an obviously stupid PIC register. */
929 else if (pic_register < 0 || call_used_regs[pic_register]
930 || pic_register == HARD_FRAME_POINTER_REGNUM
931 || pic_register == STACK_POINTER_REGNUM
932 || pic_register >= PC_REGNUM)
933 error ("unable to use '%s' for PIC register", arm_pic_register_string);
934 else
935 arm_pic_register = pic_register;
936 }
937
938 if (TARGET_THUMB && flag_schedule_insns)
939 {
940 /* Don't warn since it's on by default in -O2. */
941 flag_schedule_insns = 0;
942 }
943
944 if (optimize_size)
945 {
946 /* There's some dispute as to whether this should be 1 or 2. However,
947 experiments seem to show that in pathological cases a setting of
948 1 degrades less severely than a setting of 2. This could change if
949 other parts of the compiler change their behavior. */
950 arm_constant_limit = 1;
951
952 /* If optimizing for size, bump the number of instructions that we
953 are prepared to conditionally execute (even on a StrongARM). */
954 max_insns_skipped = 6;
955 }
956 else
957 {
958 /* For processors with load scheduling, it never costs more than
959 2 cycles to load a constant, and the load scheduler may well
960 reduce that to 1. */
961 if (tune_flags & FL_LDSCHED)
962 arm_constant_limit = 1;
963
964 /* On XScale the longer latency of a load makes it more difficult
965 to achieve a good schedule, so it's faster to synthesize
966 constants that can be done in two insns. */
967 if (arm_tune_xscale)
968 arm_constant_limit = 2;
969
970 /* StrongARM has early execution of branches, so a sequence
971 that is worth skipping is shorter. */
972 if (arm_is_strong)
973 max_insns_skipped = 3;
974 }
975
976 /* Register global variables with the garbage collector. */
977 arm_add_gc_roots ();
978 }
979
980 static void
981 arm_add_gc_roots (void)
982 {
983 gcc_obstack_init(&minipool_obstack);
984 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
985 }
986 \f
987 /* A table of known ARM exception types.
988 For use with the interrupt function attribute. */
989
990 typedef struct
991 {
992 const char *const arg;
993 const unsigned long return_value;
994 }
995 isr_attribute_arg;
996
997 static const isr_attribute_arg isr_attribute_args [] =
998 {
999 { "IRQ", ARM_FT_ISR },
1000 { "irq", ARM_FT_ISR },
1001 { "FIQ", ARM_FT_FIQ },
1002 { "fiq", ARM_FT_FIQ },
1003 { "ABORT", ARM_FT_ISR },
1004 { "abort", ARM_FT_ISR },
1005 { "ABORT", ARM_FT_ISR },
1006 { "abort", ARM_FT_ISR },
1007 { "UNDEF", ARM_FT_EXCEPTION },
1008 { "undef", ARM_FT_EXCEPTION },
1009 { "SWI", ARM_FT_EXCEPTION },
1010 { "swi", ARM_FT_EXCEPTION },
1011 { NULL, ARM_FT_NORMAL }
1012 };
1013
1014 /* Returns the (interrupt) function type corresponding to the given
1015 attribute ARGUMENT, or ARM_FT_UNKNOWN if the type cannot be determined. */
1016
1017 static unsigned long
1018 arm_isr_value (tree argument)
1019 {
1020 const isr_attribute_arg * ptr;
1021 const char * arg;
1022
1023 /* No argument - default to IRQ. */
1024 if (argument == NULL_TREE)
1025 return ARM_FT_ISR;
1026
1027 /* Get the value of the argument. */
1028 if (TREE_VALUE (argument) == NULL_TREE
1029 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1030 return ARM_FT_UNKNOWN;
1031
1032 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1033
1034 /* Check it against the list of known arguments. */
1035 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1036 if (streq (arg, ptr->arg))
1037 return ptr->return_value;
1038
1039 /* An unrecognized interrupt type. */
1040 return ARM_FT_UNKNOWN;
1041 }
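/* A sketch of the source-level usage this maps from (illustrative only;
   the handler name is made up):

     void ext_irq_handler (void) __attribute__ ((interrupt ("IRQ")));

   arm_isr_value is passed the attribute's argument list and returns
   ARM_FT_ISR for "IRQ" (or, with no argument at all, it defaults to
   ARM_FT_ISR as above).  */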
1042
1043 /* Computes the type of the current function. */
1044
1045 static unsigned long
1046 arm_compute_func_type (void)
1047 {
1048 unsigned long type = ARM_FT_UNKNOWN;
1049 tree a;
1050 tree attr;
1051
1052 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1053 abort ();
1054
1055 /* Decide if the current function is volatile. Such functions
1056 never return, and many memory cycles can be saved by not storing
1057 register values that will never be needed again. This optimization
1058 was added to speed up context switching in a kernel application. */
1059 if (optimize > 0
1060 && current_function_nothrow
1061 && TREE_THIS_VOLATILE (current_function_decl))
1062 type |= ARM_FT_VOLATILE;
1063
1064 if (current_function_needs_context)
1065 type |= ARM_FT_NESTED;
1066
1067 attr = DECL_ATTRIBUTES (current_function_decl);
1068
1069 a = lookup_attribute ("naked", attr);
1070 if (a != NULL_TREE)
1071 type |= ARM_FT_NAKED;
1072
1073 if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
1074 type |= ARM_FT_EXCEPTION_HANDLER;
1075 else
1076 {
1077 a = lookup_attribute ("isr", attr);
1078 if (a == NULL_TREE)
1079 a = lookup_attribute ("interrupt", attr);
1080
1081 if (a == NULL_TREE)
1082 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1083 else
1084 type |= arm_isr_value (TREE_VALUE (a));
1085 }
1086
1087 return type;
1088 }
1089
1090 /* Returns the type of the current function. */
1091
1092 unsigned long
1093 arm_current_func_type (void)
1094 {
1095 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1096 cfun->machine->func_type = arm_compute_func_type ();
1097
1098 return cfun->machine->func_type;
1099 }
1100 \f
1101 /* Return 1 if it is possible to return using a single instruction.
1102 If SIBLING is non-null, this is a test for a return before a sibling
1103 call. SIBLING is the call insn, so we can examine its register usage. */
1104
1105 int
1106 use_return_insn (int iscond, rtx sibling)
1107 {
1108 int regno;
1109 unsigned int func_type;
1110 unsigned long saved_int_regs;
1111 unsigned HOST_WIDE_INT stack_adjust;
1112
1113 /* Never use a return instruction before reload has run. */
1114 if (!reload_completed)
1115 return 0;
1116
1117 func_type = arm_current_func_type ();
1118
1119 /* Naked functions and volatile functions need special
1120 consideration. */
1121 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1122 return 0;
1123
1124 /* So do interrupt functions that use the frame pointer. */
1125 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1126 return 0;
1127
1128 stack_adjust = arm_get_frame_size () + current_function_outgoing_args_size;
1129
1130 /* As do variadic functions. */
1131 if (current_function_pretend_args_size
1132 || cfun->machine->uses_anonymous_args
1133 /* Or if the function calls __builtin_eh_return () */
1134 || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
1135 /* Or if the function calls alloca */
1136 || current_function_calls_alloca
1137 /* Or if there is a stack adjustment. However, if the stack pointer
1138 is saved on the stack, we can use a pre-incrementing stack load. */
1139 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1140 return 0;
1141
1142 saved_int_regs = arm_compute_save_reg_mask ();
1143
1144 /* Unfortunately, the insn
1145
1146 ldmib sp, {..., sp, ...}
1147
1148 triggers a bug on most SA-110 based devices, such that the stack
1149 pointer won't be correctly restored if the instruction takes a
1150 page fault. We work around this problem by popping r3 along with
1151 the other registers, since that is never slower than executing
1152 another instruction.
1153
1154 We test for !arm_arch5 here, because code for any architecture
1155 less than this could potentially be run on one of the buggy
1156 chips. */
1157 if (stack_adjust == 4 && !arm_arch5)
1158 {
1159 /* Validate that r3 is a call-clobbered register (always true in
1160 the default abi) ... */
1161 if (!call_used_regs[3])
1162 return 0;
1163
1164 /* ... that it isn't being used for a return value (always true
1165 until we implement return-in-regs), or for a tail-call
1166 argument ... */
1167 if (sibling)
1168 {
1169 if (GET_CODE (sibling) != CALL_INSN)
1170 abort ();
1171
1172 if (find_regno_fusage (sibling, USE, 3))
1173 return 0;
1174 }
1175
1176 /* ... and that there are no call-saved registers in r0-r2
1177 (always true in the default ABI). */
1178 if (saved_int_regs & 0x7)
1179 return 0;
1180 }
1181
1182 /* Can't be done if interworking with Thumb, and any registers have been
1183 stacked. */
1184 if (TARGET_INTERWORK && saved_int_regs != 0)
1185 return 0;
1186
1187 /* On StrongARM, conditional returns are expensive if they aren't
1188 taken and multiple registers have been stacked. */
1189 if (iscond && arm_is_strong)
1190 {
1191 /* Conditional return when just the LR is stored is a simple
1192 conditional-load instruction, that's not expensive. */
1193 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1194 return 0;
1195
1196 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1197 return 0;
1198 }
1199
1200 /* If there are saved registers but the LR isn't saved, then we need
1201 two instructions for the return. */
1202 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1203 return 0;
1204
1205 /* Can't be done if any of the FPA regs are pushed,
1206 since this also requires an insn. */
1207 if (TARGET_HARD_FLOAT && TARGET_FPA)
1208 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1209 if (regs_ever_live[regno] && !call_used_regs[regno])
1210 return 0;
1211
1212 /* Likewise VFP regs. */
1213 if (TARGET_HARD_FLOAT && TARGET_VFP)
1214 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1215 if (regs_ever_live[regno] && !call_used_regs[regno])
1216 return 0;
1217
1218 if (TARGET_REALLY_IWMMXT)
1219 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1220 if (regs_ever_live[regno] && ! call_used_regs [regno])
1221 return 0;
1222
1223 return 1;
1224 }
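/* An informal example of what the test above permits (a sketch, not an
   exhaustive statement of the rules): a simple leaf function that pushes
   {r4, lr} and makes no other stack adjustment can return with the single
   instruction

     ldmfd sp!, {r4, pc}

   whereas a variadic function, or one that calls alloca, needs a real
   epilogue first, so use_return_insn returns 0 for it.  */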
1225
1226 /* Return TRUE if int I is a valid immediate ARM constant. */
1227
1228 int
1229 const_ok_for_arm (HOST_WIDE_INT i)
1230 {
1231 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1232
1233 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1234 be all zero, or all one. */
1235 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1236 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1237 != ((~(unsigned HOST_WIDE_INT) 0)
1238 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1239 return FALSE;
1240
1241 /* Fast return for 0 and powers of 2 */
1242 if ((i & (i - 1)) == 0)
1243 return TRUE;
1244
1245 do
1246 {
1247 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1248 return TRUE;
1249 mask =
1250 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1251 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1252 }
1253 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1254
1255 return FALSE;
1256 }
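/* The loop above implements the usual ARM immediate rule: a constant is
   valid if it is an 8-bit value rotated right by an even number of bit
   positions. For example (illustrative only):
     0x000000FF  valid (8 bits, no rotation)
     0x000003FC  valid (0xFF occupying bits 2..9)
     0xFF000000  valid (0xFF occupying bits 24..31)
     0x00000101  not valid (the set bits span 9 positions, so no single
                 rotated 8-bit field can cover them).  */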
1257
1258 /* Return true if I is a valid constant for the operation CODE. */
1259 static int
1260 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1261 {
1262 if (const_ok_for_arm (i))
1263 return 1;
1264
1265 switch (code)
1266 {
1267 case PLUS:
1268 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1269
1270 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1271 case XOR:
1272 case IOR:
1273 return 0;
1274
1275 case AND:
1276 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1277
1278 default:
1279 abort ();
1280 }
1281 }
1282
1283 /* Emit a sequence of insns to handle a large constant.
1284 CODE is the code of the operation required, it can be any of SET, PLUS,
1285 IOR, AND, XOR, MINUS;
1286 MODE is the mode in which the operation is being performed;
1287 VAL is the integer to operate on;
1288 SOURCE is the other operand (a register, or a null-pointer for SET);
1289 SUBTARGETS means it is safe to create scratch registers if that will
1290 either produce a simpler sequence, or we will want to cse the values.
1291 Return value is the number of insns emitted. */
1292
1293 int
1294 arm_split_constant (enum rtx_code code, enum machine_mode mode,
1295 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1296 {
1297 if (subtargets || code == SET
1298 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1299 && REGNO (target) != REGNO (source)))
1300 {
1301 /* After arm_reorg has been called, we can't fix up expensive
1302 constants by pushing them into memory so we must synthesize
1303 them in-line, regardless of the cost. This is only likely to
1304 be more costly on chips that have load delay slots and we are
1305 compiling without running the scheduler (so no splitting
1306 occurred before the final instruction emission).
1307
1308 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1309 */
1310 if (!after_arm_reorg
1311 && (arm_gen_constant (code, mode, val, target, source, 1, 0)
1312 > arm_constant_limit + (code != SET)))
1313 {
1314 if (code == SET)
1315 {
1316 /* Currently SET is the only monadic value for CODE; all
1317 the rest are dyadic. */
1318 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1319 return 1;
1320 }
1321 else
1322 {
1323 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1324
1325 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1326 /* For MINUS, the constant is the minuend (VAL - source), since
1327 we never have subtraction of a constant. */
1328 if (code == MINUS)
1329 emit_insn (gen_rtx_SET (VOIDmode, target,
1330 gen_rtx_MINUS (mode, temp, source)));
1331 else
1332 emit_insn (gen_rtx_SET (VOIDmode, target,
1333 gen_rtx_fmt_ee (code, mode, source, temp)));
1334 return 2;
1335 }
1336 }
1337 }
1338
1339 return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
1340 }
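/* A rough example of the kind of sequence arm_gen_constant produces (a
   sketch only; the exact register choices depend on the surrounding code):
   for code == SET and val == 0x12345678, no single rotated 8-bit immediate
   matches, so the constant is built up in 8-bit chunks on even bit
   boundaries, e.g.

     mov rD, #0x12000000
     orr rD, rD, #0x00340000
     orr rD, rD, #0x00005600
     orr rD, rD, #0x00000078

   which is why arm_split_constant compares the insn count against
   arm_constant_limit before deciding to emit the constant as a plain SET
   (to be placed in the literal pool by arm_reorg) instead.  */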
1341
1342 static int
1343 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1344 {
1345 HOST_WIDE_INT temp1;
1346 int num_insns = 0;
1347 do
1348 {
1349 int end;
1350
1351 if (i <= 0)
1352 i += 32;
1353 if (remainder & (3 << (i - 2)))
1354 {
1355 end = i - 8;
1356 if (end < 0)
1357 end += 32;
1358 temp1 = remainder & ((0x0ff << end)
1359 | ((i < end) ? (0xff >> (32 - end)) : 0));
1360 remainder &= ~temp1;
1361 num_insns++;
1362 i -= 6;
1363 }
1364 i -= 2;
1365 } while (remainder);
1366 return num_insns;
1367 }
1368
1369 /* As above, but extra parameter GENERATE which, if clear, suppresses
1370 RTL generation. */
1371
1372 static int
1373 arm_gen_constant (enum rtx_code code, enum machine_mode mode,
1374 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1375 int generate)
1376 {
1377 int can_invert = 0;
1378 int can_negate = 0;
1379 int can_negate_initial = 0;
1380 int can_shift = 0;
1381 int i;
1382 int num_bits_set = 0;
1383 int set_sign_bit_copies = 0;
1384 int clear_sign_bit_copies = 0;
1385 int clear_zero_bit_copies = 0;
1386 int set_zero_bit_copies = 0;
1387 int insns = 0;
1388 unsigned HOST_WIDE_INT temp1, temp2;
1389 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1390
1391 /* Find out which operations are safe for a given CODE. Also do a quick
1392 check for degenerate cases; these can occur when DImode operations
1393 are split. */
1394 switch (code)
1395 {
1396 case SET:
1397 can_invert = 1;
1398 can_shift = 1;
1399 can_negate = 1;
1400 break;
1401
1402 case PLUS:
1403 can_negate = 1;
1404 can_negate_initial = 1;
1405 break;
1406
1407 case IOR:
1408 if (remainder == 0xffffffff)
1409 {
1410 if (generate)
1411 emit_insn (gen_rtx_SET (VOIDmode, target,
1412 GEN_INT (ARM_SIGN_EXTEND (val))));
1413 return 1;
1414 }
1415 if (remainder == 0)
1416 {
1417 if (reload_completed && rtx_equal_p (target, source))
1418 return 0;
1419 if (generate)
1420 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1421 return 1;
1422 }
1423 break;
1424
1425 case AND:
1426 if (remainder == 0)
1427 {
1428 if (generate)
1429 emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
1430 return 1;
1431 }
1432 if (remainder == 0xffffffff)
1433 {
1434 if (reload_completed && rtx_equal_p (target, source))
1435 return 0;
1436 if (generate)
1437 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1438 return 1;
1439 }
1440 can_invert = 1;
1441 break;
1442
1443 case XOR:
1444 if (remainder == 0)
1445 {
1446 if (reload_completed && rtx_equal_p (target, source))
1447 return 0;
1448 if (generate)
1449 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1450 return 1;
1451 }
1452 if (remainder == 0xffffffff)
1453 {
1454 if (generate)
1455 emit_insn (gen_rtx_SET (VOIDmode, target,
1456 gen_rtx_NOT (mode, source)));
1457 return 1;
1458 }
1459
1460 /* The code below doesn't know how to handle this case yet. */
1461 abort ();
1462
1463 case MINUS:
1464 /* We treat MINUS as (val - source), since (source - val) is always
1465 passed as (source + (-val)). */
1466 if (remainder == 0)
1467 {
1468 if (generate)
1469 emit_insn (gen_rtx_SET (VOIDmode, target,
1470 gen_rtx_NEG (mode, source)));
1471 return 1;
1472 }
1473 if (const_ok_for_arm (val))
1474 {
1475 if (generate)
1476 emit_insn (gen_rtx_SET (VOIDmode, target,
1477 gen_rtx_MINUS (mode, GEN_INT (val),
1478 source)));
1479 return 1;
1480 }
1481 can_negate = 1;
1482
1483 break;
1484
1485 default:
1486 abort ();
1487 }
1488
1489 /* If we can do it in one insn get out quickly. */
1490 if (const_ok_for_arm (val)
1491 || (can_negate_initial && const_ok_for_arm (-val))
1492 || (can_invert && const_ok_for_arm (~val)))
1493 {
1494 if (generate)
1495 emit_insn (gen_rtx_SET (VOIDmode, target,
1496 (source ? gen_rtx_fmt_ee (code, mode, source,
1497 GEN_INT (val))
1498 : GEN_INT (val))));
1499 return 1;
1500 }
1501
1502 /* Calculate a few attributes that may be useful for specific
1503 optimizations. */
1504 for (i = 31; i >= 0; i--)
1505 {
1506 if ((remainder & (1 << i)) == 0)
1507 clear_sign_bit_copies++;
1508 else
1509 break;
1510 }
1511
1512 for (i = 31; i >= 0; i--)
1513 {
1514 if ((remainder & (1 << i)) != 0)
1515 set_sign_bit_copies++;
1516 else
1517 break;
1518 }
1519
1520 for (i = 0; i <= 31; i++)
1521 {
1522 if ((remainder & (1 << i)) == 0)
1523 clear_zero_bit_copies++;
1524 else
1525 break;
1526 }
1527
1528 for (i = 0; i <= 31; i++)
1529 {
1530 if ((remainder & (1 << i)) != 0)
1531 set_zero_bit_copies++;
1532 else
1533 break;
1534 }
1535
1536 switch (code)
1537 {
1538 case SET:
1539 /* See if we can do this by sign-extending a constant that is known
1540 to be negative. This is a good way of doing it, since the shift
1541 may well merge into a subsequent insn. */
1542 if (set_sign_bit_copies > 1)
1543 {
1544 if (const_ok_for_arm
1545 (temp1 = ARM_SIGN_EXTEND (remainder
1546 << (set_sign_bit_copies - 1))))
1547 {
1548 if (generate)
1549 {
1550 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1551 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1552 GEN_INT (temp1)));
1553 emit_insn (gen_ashrsi3 (target, new_src,
1554 GEN_INT (set_sign_bit_copies - 1)));
1555 }
1556 return 2;
1557 }
1558 /* For an inverted constant, we will need to set the low bits;
1559 these will be shifted out of harm's way. */
1560 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1561 if (const_ok_for_arm (~temp1))
1562 {
1563 if (generate)
1564 {
1565 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1566 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1567 GEN_INT (temp1)));
1568 emit_insn (gen_ashrsi3 (target, new_src,
1569 GEN_INT (set_sign_bit_copies - 1)));
1570 }
1571 return 2;
1572 }
1573 }
1574
1575 /* See if we can generate this by setting the bottom (or the top)
1576 16 bits, and then shifting these into the other half of the
1577 word. We only look for the simplest cases; to do more would cost
1578 too much. Be careful, however, not to generate this when the
1579 alternative would take fewer insns. */
1580 if (val & 0xffff0000)
1581 {
1582 temp1 = remainder & 0xffff0000;
1583 temp2 = remainder & 0x0000ffff;
1584
1585 /* Overlaps outside this range are best done using other methods. */
1586 for (i = 9; i < 24; i++)
1587 {
1588 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1589 && !const_ok_for_arm (temp2))
1590 {
1591 rtx new_src = (subtargets
1592 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1593 : target);
1594 insns = arm_gen_constant (code, mode, temp2, new_src,
1595 source, subtargets, generate);
1596 source = new_src;
1597 if (generate)
1598 emit_insn (gen_rtx_SET
1599 (VOIDmode, target,
1600 gen_rtx_IOR (mode,
1601 gen_rtx_ASHIFT (mode, source,
1602 GEN_INT (i)),
1603 source)));
1604 return insns + 1;
1605 }
1606 }
1607
1608 /* Don't duplicate cases already considered. */
1609 for (i = 17; i < 24; i++)
1610 {
1611 if (((temp1 | (temp1 >> i)) == remainder)
1612 && !const_ok_for_arm (temp1))
1613 {
1614 rtx new_src = (subtargets
1615 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1616 : target);
1617 insns = arm_gen_constant (code, mode, temp1, new_src,
1618 source, subtargets, generate);
1619 source = new_src;
1620 if (generate)
1621 emit_insn
1622 (gen_rtx_SET (VOIDmode, target,
1623 gen_rtx_IOR
1624 (mode,
1625 gen_rtx_LSHIFTRT (mode, source,
1626 GEN_INT (i)),
1627 source)));
1628 return insns + 1;
1629 }
1630 }
1631 }
1632 break;
1633
1634 case IOR:
1635 case XOR:
1636 /* If we have IOR or XOR, and the constant can be loaded in a
1637 single instruction, and we can find a temporary to put it in,
1638 then this can be done in two instructions instead of 3-4. */
1639 if (subtargets
1640 /* TARGET can't be NULL if SUBTARGETS is 0 */
1641 || (reload_completed && !reg_mentioned_p (target, source)))
1642 {
1643 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1644 {
1645 if (generate)
1646 {
1647 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1648
1649 emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
1650 emit_insn (gen_rtx_SET (VOIDmode, target,
1651 gen_rtx_fmt_ee (code, mode, source, sub)));
1652 }
1653 return 2;
1654 }
1655 }
1656
1657 if (code == XOR)
1658 break;
1659
1660 if (set_sign_bit_copies > 8
1661 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1662 {
1663 if (generate)
1664 {
1665 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1666 rtx shift = GEN_INT (set_sign_bit_copies);
1667
1668 emit_insn (gen_rtx_SET (VOIDmode, sub,
1669 gen_rtx_NOT (mode,
1670 gen_rtx_ASHIFT (mode,
1671 source,
1672 shift))));
1673 emit_insn (gen_rtx_SET (VOIDmode, target,
1674 gen_rtx_NOT (mode,
1675 gen_rtx_LSHIFTRT (mode, sub,
1676 shift))));
1677 }
1678 return 2;
1679 }
1680
1681 if (set_zero_bit_copies > 8
1682 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1683 {
1684 if (generate)
1685 {
1686 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1687 rtx shift = GEN_INT (set_zero_bit_copies);
1688
1689 emit_insn (gen_rtx_SET (VOIDmode, sub,
1690 gen_rtx_NOT (mode,
1691 gen_rtx_LSHIFTRT (mode,
1692 source,
1693 shift))));
1694 emit_insn (gen_rtx_SET (VOIDmode, target,
1695 gen_rtx_NOT (mode,
1696 gen_rtx_ASHIFT (mode, sub,
1697 shift))));
1698 }
1699 return 2;
1700 }
1701
1702 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1703 {
1704 if (generate)
1705 {
1706 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1707 emit_insn (gen_rtx_SET (VOIDmode, sub,
1708 gen_rtx_NOT (mode, source)));
1709 source = sub;
1710 if (subtargets)
1711 sub = gen_reg_rtx (mode);
1712 emit_insn (gen_rtx_SET (VOIDmode, sub,
1713 gen_rtx_AND (mode, source,
1714 GEN_INT (temp1))));
1715 emit_insn (gen_rtx_SET (VOIDmode, target,
1716 gen_rtx_NOT (mode, sub)));
1717 }
1718 return 3;
1719 }
1720 break;
1721
1722 case AND:
1723 /* See if two shifts will do 2 or more insns' worth of work. */
1724 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1725 {
1726 HOST_WIDE_INT shift_mask = ((0xffffffff
1727 << (32 - clear_sign_bit_copies))
1728 & 0xffffffff);
1729
1730 if ((remainder | shift_mask) != 0xffffffff)
1731 {
1732 if (generate)
1733 {
1734 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1735 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1736 new_src, source, subtargets, 1);
1737 source = new_src;
1738 }
1739 else
1740 {
1741 rtx targ = subtargets ? NULL_RTX : target;
1742 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1743 targ, source, subtargets, 0);
1744 }
1745 }
1746
1747 if (generate)
1748 {
1749 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1750 rtx shift = GEN_INT (clear_sign_bit_copies);
1751
1752 emit_insn (gen_ashlsi3 (new_src, source, shift));
1753 emit_insn (gen_lshrsi3 (target, new_src, shift));
1754 }
1755
1756 return insns + 2;
1757 }
1758
1759 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1760 {
1761 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1762
1763 if ((remainder | shift_mask) != 0xffffffff)
1764 {
1765 if (generate)
1766 {
1767 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1768
1769 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1770 new_src, source, subtargets, 1);
1771 source = new_src;
1772 }
1773 else
1774 {
1775 rtx targ = subtargets ? NULL_RTX : target;
1776
1777 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1778 targ, source, subtargets, 0);
1779 }
1780 }
1781
1782 if (generate)
1783 {
1784 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1785 rtx shift = GEN_INT (clear_zero_bit_copies);
1786
1787 emit_insn (gen_lshrsi3 (new_src, source, shift));
1788 emit_insn (gen_ashlsi3 (target, new_src, shift));
1789 }
1790
1791 return insns + 2;
1792 }
1793
1794 break;
1795
1796 default:
1797 break;
1798 }
1799
1800 for (i = 0; i < 32; i++)
1801 if (remainder & (1 << i))
1802 num_bits_set++;
1803
1804 if (code == AND || (can_invert && num_bits_set > 16))
1805 remainder = (~remainder) & 0xffffffff;
1806 else if (code == PLUS && num_bits_set > 16)
1807 remainder = (-remainder) & 0xffffffff;
1808 else
1809 {
1810 can_invert = 0;
1811 can_negate = 0;
1812 }
1813
1814 /* Now try and find a way of doing the job in either two or three
1815 instructions.
1816 We start by looking for the largest block of zeros that is aligned on
1817 a 2-bit boundary; we then fill up the temps, wrapping around to the
1818 top of the word when we drop off the bottom.
1819 In the worst case this code should produce no more than four insns. */
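/* As a minimal illustration (assuming a plain SET of a fresh constant):
   0x00f000f0 splits into two 8-bit chunks that are each valid rotated
   immediates, so it can be built with something like

   mov rD, #0x00f00000
   add rD, rD, #0x000000f0

   where rD stands for whatever destination register is chosen.  */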
1820 {
1821 int best_start = 0;
1822 int best_consecutive_zeros = 0;
1823
1824 for (i = 0; i < 32; i += 2)
1825 {
1826 int consecutive_zeros = 0;
1827
1828 if (!(remainder & (3 << i)))
1829 {
1830 while ((i < 32) && !(remainder & (3 << i)))
1831 {
1832 consecutive_zeros += 2;
1833 i += 2;
1834 }
1835 if (consecutive_zeros > best_consecutive_zeros)
1836 {
1837 best_consecutive_zeros = consecutive_zeros;
1838 best_start = i - consecutive_zeros;
1839 }
1840 i -= 2;
1841 }
1842 }
1843
1844 /* So long as it won't require any more insns to do so, it's
1845 desirable to emit a small constant (in bits 0...9) in the last
1846 insn. This way there is more chance that it can be combined with
1847 a later addressing insn to form a pre-indexed load or store
1848 operation. Consider:
1849
1850 *((volatile int *)0xe0000100) = 1;
1851 *((volatile int *)0xe0000110) = 2;
1852
1853 We want this to wind up as:
1854
1855 mov rA, #0xe0000000
1856 mov rB, #1
1857 str rB, [rA, #0x100]
1858 mov rB, #2
1859 str rB, [rA, #0x110]
1860
1861 rather than having to synthesize both large constants from scratch.
1862
1863 Therefore, we calculate how many insns would be required to emit
1864 the constant starting from `best_start', and also starting from
1865 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
1866 yield a shorter sequence, we may as well use zero. */
1867 if (best_start != 0
1868 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
1869 && (count_insns_for_constant (remainder, 0) <=
1870 count_insns_for_constant (remainder, best_start)))
1871 best_start = 0;
1872
1873 /* Now start emitting the insns. */
1874 i = best_start;
1875 do
1876 {
1877 int end;
1878
1879 if (i <= 0)
1880 i += 32;
1881 if (remainder & (3 << (i - 2)))
1882 {
1883 end = i - 8;
1884 if (end < 0)
1885 end += 32;
1886 temp1 = remainder & ((0x0ff << end)
1887 | ((i < end) ? (0xff >> (32 - end)) : 0));
1888 remainder &= ~temp1;
1889
1890 if (generate)
1891 {
1892 rtx new_src, temp1_rtx;
1893
1894 if (code == SET || code == MINUS)
1895 {
1896 new_src = (subtargets ? gen_reg_rtx (mode) : target);
1897 if (can_invert && code != MINUS)
1898 temp1 = ~temp1;
1899 }
1900 else
1901 {
1902 if (remainder && subtargets)
1903 new_src = gen_reg_rtx (mode);
1904 else
1905 new_src = target;
1906 if (can_invert)
1907 temp1 = ~temp1;
1908 else if (can_negate)
1909 temp1 = -temp1;
1910 }
1911
1912 temp1 = trunc_int_for_mode (temp1, mode);
1913 temp1_rtx = GEN_INT (temp1);
1914
1915 if (code == SET)
1916 ;
1917 else if (code == MINUS)
1918 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
1919 else
1920 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
1921
1922 emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
1923 source = new_src;
1924 }
1925
1926 if (code == SET)
1927 {
1928 can_invert = 0;
1929 code = PLUS;
1930 }
1931 else if (code == MINUS)
1932 code = PLUS;
1933
1934 insns++;
1935 i -= 6;
1936 }
1937 i -= 2;
1938 }
1939 while (remainder);
1940 }
1941
1942 return insns;
1943 }
1944
1945 /* Canonicalize a comparison so that we are more likely to recognize it.
1946 This can be done for a few constant compares, where we can make the
1947 immediate value easier to load. */
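/* For example, (x > 0xfff) cannot use 0xfff directly, since 0xfff is not a
   valid 8-bit rotated immediate, but rewriting it as (x >= 0x1000) lets the
   comparison use a single CMP because 0x1000 is const_ok_for_arm.  */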
1948
1949 enum rtx_code
1950 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
1951 {
1952 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1953
1954 switch (code)
1955 {
1956 case EQ:
1957 case NE:
1958 return code;
1959
1960 case GT:
1961 case LE:
1962 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1963 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1964 {
1965 *op1 = GEN_INT (i + 1);
1966 return code == GT ? GE : LT;
1967 }
1968 break;
1969
1970 case GE:
1971 case LT:
1972 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1973 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1974 {
1975 *op1 = GEN_INT (i - 1);
1976 return code == GE ? GT : LE;
1977 }
1978 break;
1979
1980 case GTU:
1981 case LEU:
1982 if (i != ~((unsigned HOST_WIDE_INT) 0)
1983 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1984 {
1985 *op1 = GEN_INT (i + 1);
1986 return code == GTU ? GEU : LTU;
1987 }
1988 break;
1989
1990 case GEU:
1991 case LTU:
1992 if (i != 0
1993 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1994 {
1995 *op1 = GEN_INT (i - 1);
1996 return code == GEU ? GTU : LEU;
1997 }
1998 break;
1999
2000 default:
2001 abort ();
2002 }
2003
2004 return code;
2005 }
2006
2007 /* Decide whether a type should be returned in memory (true)
2008 or in a register (false). This is called by the macro
2009 RETURN_IN_MEMORY. */
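/* As an illustration (for the non-ATPCS, non-WinCE rules below): a struct
   containing a single int is 'integer like' and is returned in a register,
   whereas a two-word struct, or one whose first field is a float, is
   returned in memory.  */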
2010 int
2011 arm_return_in_memory (tree type)
2012 {
2013 HOST_WIDE_INT size;
2014
2015 if (!AGGREGATE_TYPE_P (type))
2016 /* All simple types are returned in registers. */
2017 return 0;
2018
2019 size = int_size_in_bytes (type);
2020
2021 if (TARGET_ATPCS)
2022 {
2023 /* ATPCS returns aggregate types in memory only if they are
2024 larger than a word (or are variable size). */
2025 return (size < 0 || size > UNITS_PER_WORD);
2026 }
2027
2028 /* For the arm-wince targets we choose to be compatible with Microsoft's
2029 ARM and Thumb compilers, which always return aggregates in memory. */
2030 #ifndef ARM_WINCE
2031 /* All structures/unions bigger than one word are returned in memory.
2032 Also catch the case where int_size_in_bytes returns -1. In this case
2033 the aggregate is either huge or of variable size, and in either case
2034 we will want to return it via memory and not in a register. */
2035 if (size < 0 || size > UNITS_PER_WORD)
2036 return 1;
2037
2038 if (TREE_CODE (type) == RECORD_TYPE)
2039 {
2040 tree field;
2041
2042 /* For a struct the APCS says that we only return in a register
2043 if the type is 'integer like' and every addressable element
2044 has an offset of zero. For practical purposes this means
2045 that the structure can have at most one non bit-field element
2046 and that this element must be the first one in the structure. */
2047
2048 /* Find the first field, ignoring non FIELD_DECL things which will
2049 have been created by C++. */
2050 for (field = TYPE_FIELDS (type);
2051 field && TREE_CODE (field) != FIELD_DECL;
2052 field = TREE_CHAIN (field))
2053 continue;
2054
2055 if (field == NULL)
2056 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2057
2058 /* Check that the first field is valid for returning in a register. */
2059
2060 /* ... Floats are not allowed. */
2061 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2062 return 1;
2063
2064 /* ... Aggregates that are not themselves valid for returning in
2065 a register are not allowed. */
2066 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2067 return 1;
2068
2069 /* Now check the remaining fields, if any. Only bitfields are allowed,
2070 since they are not addressable. */
2071 for (field = TREE_CHAIN (field);
2072 field;
2073 field = TREE_CHAIN (field))
2074 {
2075 if (TREE_CODE (field) != FIELD_DECL)
2076 continue;
2077
2078 if (!DECL_BIT_FIELD_TYPE (field))
2079 return 1;
2080 }
2081
2082 return 0;
2083 }
2084
2085 if (TREE_CODE (type) == UNION_TYPE)
2086 {
2087 tree field;
2088
2089 /* Unions can be returned in registers if every element is
2090 integral, or can be returned in an integer register. */
2091 for (field = TYPE_FIELDS (type);
2092 field;
2093 field = TREE_CHAIN (field))
2094 {
2095 if (TREE_CODE (field) != FIELD_DECL)
2096 continue;
2097
2098 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2099 return 1;
2100
2101 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2102 return 1;
2103 }
2104
2105 return 0;
2106 }
2107 #endif /* not ARM_WINCE */
2108
2109 /* Return all other types in memory. */
2110 return 1;
2111 }
2112
2113 /* Indicate whether or not words of a double are in big-endian order. */
2114
2115 int
2116 arm_float_words_big_endian (void)
2117 {
2118 if (TARGET_MAVERICK)
2119 return 0;
2120
2121 /* For FPA, float words are always big-endian. For VFP, float words
2122 follow the memory system mode. */
2123
2124 if (TARGET_FPA)
2125 {
2126 return 1;
2127 }
2128
2129 if (TARGET_VFP)
2130 return (TARGET_BIG_END ? 1 : 0);
2131
2132 return 1;
2133 }
2134
2135 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2136 for a call to a function whose data type is FNTYPE.
2137 For a library call, FNTYPE is NULL. */
2138 void
2139 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2140 rtx libname ATTRIBUTE_UNUSED,
2141 tree fndecl ATTRIBUTE_UNUSED)
2142 {
2143 /* On the ARM, the offset starts at 0. */
2144 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2145 pcum->iwmmxt_nregs = 0;
2146
2147 pcum->call_cookie = CALL_NORMAL;
2148
2149 if (TARGET_LONG_CALLS)
2150 pcum->call_cookie = CALL_LONG;
2151
2152 /* Check for long call/short call attributes. The attributes
2153 override any command line option. */
2154 if (fntype)
2155 {
2156 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2157 pcum->call_cookie = CALL_SHORT;
2158 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2159 pcum->call_cookie = CALL_LONG;
2160 }
2161
2162 /* Varargs vectors are treated the same as long long.
2163 named_count avoids having to change the way arm handles 'named'. */
2164 pcum->named_count = 0;
2165 pcum->nargs = 0;
2166
2167 if (TARGET_REALLY_IWMMXT && fntype)
2168 {
2169 tree fn_arg;
2170
2171 for (fn_arg = TYPE_ARG_TYPES (fntype);
2172 fn_arg;
2173 fn_arg = TREE_CHAIN (fn_arg))
2174 pcum->named_count += 1;
2175
2176 if (! pcum->named_count)
2177 pcum->named_count = INT_MAX;
2178 }
2179 }
2180
2181 /* Determine where to put an argument to a function.
2182 Value is zero to push the argument on the stack,
2183 or a hard register in which to store the argument.
2184
2185 MODE is the argument's machine mode.
2186 TYPE is the data type of the argument (as a tree).
2187 This is null for libcalls where that information may
2188 not be available.
2189 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2190 the preceding args and about the function being called.
2191 NAMED is nonzero if this argument is a named parameter
2192 (otherwise it is an extra parameter matching an ellipsis). */
2193
2194 rtx
2195 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2196 tree type ATTRIBUTE_UNUSED, int named)
2197 {
2198 if (TARGET_REALLY_IWMMXT)
2199 {
2200 if (VECTOR_MODE_SUPPORTED_P (mode))
2201 {
2202 /* Varargs vectors are treated the same as long long.
2203 named_count avoids having to change the way arm handles 'named'. */
2204 if (pcum->named_count <= pcum->nargs + 1)
2205 {
2206 if (pcum->nregs == 1)
2207 pcum->nregs += 1;
2208 if (pcum->nregs <= 2)
2209 return gen_rtx_REG (mode, pcum->nregs);
2210 else
2211 return NULL_RTX;
2212 }
2213 else if (pcum->iwmmxt_nregs <= 9)
2214 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2215 else
2216 return NULL_RTX;
2217 }
2218 else if ((mode == DImode || mode == DFmode) && pcum->nregs & 1)
2219 pcum->nregs += 1;
2220 }
2221
2222 if (mode == VOIDmode)
2223 /* Compute operand 2 of the call insn. */
2224 return GEN_INT (pcum->call_cookie);
2225
2226 if (!named || pcum->nregs >= NUM_ARG_REGS)
2227 return NULL_RTX;
2228
2229 return gen_rtx_REG (mode, pcum->nregs);
2230 }
2231
2232 /* Variable sized types are passed by reference. This is a GCC
2233 extension to the ARM ABI. */
2234
2235 int
2236 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2237 enum machine_mode mode ATTRIBUTE_UNUSED,
2238 tree type, int named ATTRIBUTE_UNUSED)
2239 {
2240 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2241 }
2242
2243 /* Implement va_arg. */
2244
2245 rtx
2246 arm_va_arg (tree valist, tree type)
2247 {
2248 /* Variable sized types are passed by reference. */
2249 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2250 {
2251 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2252 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2253 }
2254
2255 if (FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), NULL) == IWMMXT_ALIGNMENT)
2256 {
2257 tree minus_eight;
2258 tree t;
2259
2260 /* Maintain 64-bit alignment of the valist pointer by
2261 constructing: valist = ((valist + (8 - 1)) & -8). */
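/* For instance, a valist of 0x1004 becomes (0x1004 + 7) & ~7 == 0x1008,
   while an already aligned 0x1008 is left unchanged.  */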
2262 minus_eight = build_int_2 (- (IWMMXT_ALIGNMENT / BITS_PER_UNIT), -1);
2263 t = build_int_2 ((IWMMXT_ALIGNMENT / BITS_PER_UNIT) - 1, 0);
2264 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2265 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, minus_eight);
2266 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2267 TREE_SIDE_EFFECTS (t) = 1;
2268 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2269
2270 /* This is to stop the combine pass optimizing
2271 away the alignment adjustment. */
2272 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2273 }
2274
2275 return std_expand_builtin_va_arg (valist, type);
2276 }
2277 \f
2278 /* Encode the current state of the #pragma [no_]long_calls. */
2279 typedef enum
2280 {
2281 OFF, /* No #pragma [no_]long_calls is in effect. */
2282 LONG, /* #pragma long_calls is in effect. */
2283 SHORT /* #pragma no_long_calls is in effect. */
2284 } arm_pragma_enum;
2285
2286 static arm_pragma_enum arm_pragma_long_calls = OFF;
2287
2288 void
2289 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2290 {
2291 arm_pragma_long_calls = LONG;
2292 }
2293
2294 void
2295 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2296 {
2297 arm_pragma_long_calls = SHORT;
2298 }
2299
2300 void
2301 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2302 {
2303 arm_pragma_long_calls = OFF;
2304 }
2305 \f
2306 /* Table of machine attributes. */
2307 const struct attribute_spec arm_attribute_table[] =
2308 {
2309 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2310 /* Function calls made to this symbol must be done indirectly, because
2311 it may lie outside of the 26 bit addressing range of a normal function
2312 call. */
2313 { "long_call", 0, 0, false, true, true, NULL },
2314 /* These functions, in contrast, are always known to reside within the
2315 26 bit addressing range. */
2316 { "short_call", 0, 0, false, true, true, NULL },
2317 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2318 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2319 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2320 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2321 #ifdef ARM_PE
2322 /* ARM/PE has three new attributes:
2323 interfacearm - ?
2324 dllexport - for exporting a function/variable that will live in a dll
2325 dllimport - for importing a function/variable from a dll
2326
2327 Microsoft allows multiple declspecs in one __declspec, separating
2328 them with spaces. We do NOT support this. Instead, use __declspec
2329 multiple times.
2330 */
2331 { "dllimport", 0, 0, true, false, false, NULL },
2332 { "dllexport", 0, 0, true, false, false, NULL },
2333 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2334 #endif
2335 { NULL, 0, 0, false, false, false, NULL }
2336 };
2337
2338 /* Handle an attribute requiring a FUNCTION_DECL;
2339 arguments as in struct attribute_spec.handler. */
2340 static tree
2341 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2342 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2343 {
2344 if (TREE_CODE (*node) != FUNCTION_DECL)
2345 {
2346 warning ("`%s' attribute only applies to functions",
2347 IDENTIFIER_POINTER (name));
2348 *no_add_attrs = true;
2349 }
2350
2351 return NULL_TREE;
2352 }
2353
2354 /* Handle an "interrupt" or "isr" attribute;
2355 arguments as in struct attribute_spec.handler. */
2356 static tree
2357 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2358 bool *no_add_attrs)
2359 {
2360 if (DECL_P (*node))
2361 {
2362 if (TREE_CODE (*node) != FUNCTION_DECL)
2363 {
2364 warning ("`%s' attribute only applies to functions",
2365 IDENTIFIER_POINTER (name));
2366 *no_add_attrs = true;
2367 }
2368 /* FIXME: the argument if any is checked for type attributes;
2369 should it be checked for decl ones? */
2370 }
2371 else
2372 {
2373 if (TREE_CODE (*node) == FUNCTION_TYPE
2374 || TREE_CODE (*node) == METHOD_TYPE)
2375 {
2376 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2377 {
2378 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2379 *no_add_attrs = true;
2380 }
2381 }
2382 else if (TREE_CODE (*node) == POINTER_TYPE
2383 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2384 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2385 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2386 {
2387 *node = build_type_copy (*node);
2388 TREE_TYPE (*node) = build_type_attribute_variant
2389 (TREE_TYPE (*node),
2390 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2391 *no_add_attrs = true;
2392 }
2393 else
2394 {
2395 /* Possibly pass this attribute on from the type to a decl. */
2396 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2397 | (int) ATTR_FLAG_FUNCTION_NEXT
2398 | (int) ATTR_FLAG_ARRAY_NEXT))
2399 {
2400 *no_add_attrs = true;
2401 return tree_cons (name, args, NULL_TREE);
2402 }
2403 else
2404 {
2405 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2406 }
2407 }
2408 }
2409
2410 return NULL_TREE;
2411 }
2412
2413 /* Return 0 if the attributes for two types are incompatible, 1 if they
2414 are compatible, and 2 if they are nearly compatible (which causes a
2415 warning to be generated). */
2416 static int
2417 arm_comp_type_attributes (tree type1, tree type2)
2418 {
2419 int l1, l2, s1, s2;
2420
2421 /* Check for mismatch of non-default calling convention. */
2422 if (TREE_CODE (type1) != FUNCTION_TYPE)
2423 return 1;
2424
2425 /* Check for mismatched call attributes. */
2426 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2427 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2428 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2429 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2430
2431 /* Only bother to check if an attribute is defined. */
2432 if (l1 | l2 | s1 | s2)
2433 {
2434 /* If one type has an attribute, the other must have the same attribute. */
2435 if ((l1 != l2) || (s1 != s2))
2436 return 0;
2437
2438 /* Disallow mixed attributes. */
2439 if ((l1 & s2) || (l2 & s1))
2440 return 0;
2441 }
2442
2443 /* Check for mismatched ISR attribute. */
2444 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2445 if (! l1)
2446 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2447 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2448 if (! l2)
2449 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2450 if (l1 != l2)
2451 return 0;
2452
2453 return 1;
2454 }
2455
2456 /* Encode long_call or short_call attribute by prefixing
2457 symbol name in DECL with a special character FLAG. */
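/* For example, if FLAG were '*' (the real character comes from
   SHORT_CALL_FLAG_CHAR or LONG_CALL_FLAG_CHAR), the symbol "foo" would be
   rewritten to "*foo", which the ENCODED_*_CALL_ATTR_P tests recognize
   later.  */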
2458 void
2459 arm_encode_call_attribute (tree decl, int flag)
2460 {
2461 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2462 int len = strlen (str);
2463 char * newstr;
2464
2465 /* Do not allow weak functions to be treated as short call. */
2466 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2467 return;
2468
2469 newstr = alloca (len + 2);
2470 newstr[0] = flag;
2471 strcpy (newstr + 1, str);
2472
2473 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2474 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2475 }
2476
2477 /* Assigns default attributes to a newly defined type. This is used to
2478 set short_call/long_call attributes for function types of
2479 functions defined inside corresponding #pragma scopes. */
2480 static void
2481 arm_set_default_type_attributes (tree type)
2482 {
2483 /* Add __attribute__ ((long_call)) to all functions when inside
2484 #pragma long_calls, or __attribute__ ((short_call)) when inside
2485 #pragma no_long_calls. */
2486 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2487 {
2488 tree type_attr_list, attr_name;
2489 type_attr_list = TYPE_ATTRIBUTES (type);
2490
2491 if (arm_pragma_long_calls == LONG)
2492 attr_name = get_identifier ("long_call");
2493 else if (arm_pragma_long_calls == SHORT)
2494 attr_name = get_identifier ("short_call");
2495 else
2496 return;
2497
2498 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2499 TYPE_ATTRIBUTES (type) = type_attr_list;
2500 }
2501 }
2502 \f
2503 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2504 defined within the current compilation unit. If this cannot be
2505 determined, then 0 is returned. */
2506 static int
2507 current_file_function_operand (rtx sym_ref)
2508 {
2509 /* This is a bit of a fib. A function will have a short call flag
2510 applied to its name if it has the short call attribute, or it has
2511 already been defined within the current compilation unit. */
2512 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2513 return 1;
2514
2515 /* The current function is always defined within the current compilation
2516 unit. If it is a weak definition, however, then this may not be the real
2517 definition of the function, and so we have to say no. */
2518 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2519 && !DECL_WEAK (current_function_decl))
2520 return 1;
2521
2522 /* We cannot make the determination - default to returning 0. */
2523 return 0;
2524 }
2525
2526 /* Return nonzero if a 32 bit "long_call" should be generated for
2527 this call. We generate a long_call if the function:
2528
2529 a. has an __attribute__ ((long_call))
2530 or b. is within the scope of a #pragma long_calls
2531 or c. the -mlong-calls command line switch has been specified
2532
2533 However we do not generate a long call if the function:
2534
2535 d. has an __attribute__ ((short_call))
2536 or e. is inside the scope of a #pragma no_long_calls
2537 or f. has an __attribute__ ((section))
2538 or g. is defined within the current compilation unit.
2539
2540 This function will be called by C fragments contained in the machine
2541 description file. CALL_REF and CALL_COOKIE correspond to the matched
2542 rtl operands. CALL_SYMBOL is used to distinguish between
2543 two different callers of the function. It is set to 1 in the
2544 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2545 and "call_value" patterns. This is because of the difference in the
2546 SYM_REFs passed by these patterns. */
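/* As an illustration of (a), a declaration such as

   extern void flash_write (int) __attribute__ ((long_call));

   (flash_write being an arbitrary example name) forces calls to that
   function to use the long-call sequence even without -mlong-calls.  */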
2547 int
2548 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2549 {
2550 if (!call_symbol)
2551 {
2552 if (GET_CODE (sym_ref) != MEM)
2553 return 0;
2554
2555 sym_ref = XEXP (sym_ref, 0);
2556 }
2557
2558 if (GET_CODE (sym_ref) != SYMBOL_REF)
2559 return 0;
2560
2561 if (call_cookie & CALL_SHORT)
2562 return 0;
2563
2564 if (TARGET_LONG_CALLS && flag_function_sections)
2565 return 1;
2566
2567 if (current_file_function_operand (sym_ref))
2568 return 0;
2569
2570 return (call_cookie & CALL_LONG)
2571 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2572 || TARGET_LONG_CALLS;
2573 }
2574
2575 /* Return nonzero if it is ok to make a tail-call to DECL. */
2576 static bool
2577 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2578 {
2579 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2580
2581 if (cfun->machine->sibcall_blocked)
2582 return false;
2583
2584 /* Never tailcall something for which we have no decl, or if we
2585 are in Thumb mode. */
2586 if (decl == NULL || TARGET_THUMB)
2587 return false;
2588
2589 /* Get the calling method. */
2590 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2591 call_type = CALL_SHORT;
2592 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2593 call_type = CALL_LONG;
2594
2595 /* Cannot tail-call to long calls, since these are out of range of
2596 a branch instruction. However, if not compiling PIC, we know
2597 we can reach the symbol if it is in this compilation unit. */
2598 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2599 return false;
2600
2601 /* If we are interworking and the function is not declared static
2602 then we can't tail-call it unless we know that it exists in this
2603 compilation unit (since it might be a Thumb routine). */
2604 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2605 return false;
2606
2607 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2608 if (IS_INTERRUPT (arm_current_func_type ()))
2609 return false;
2610
2611 /* Everything else is ok. */
2612 return true;
2613 }
2614
2615 \f
2616 /* Addressing mode support functions. */
2617
2618 /* Return nonzero if X is a legitimate immediate operand when compiling
2619 for PIC. */
2620 int
2621 legitimate_pic_operand_p (rtx x)
2622 {
2623 if (CONSTANT_P (x)
2624 && flag_pic
2625 && (GET_CODE (x) == SYMBOL_REF
2626 || (GET_CODE (x) == CONST
2627 && GET_CODE (XEXP (x, 0)) == PLUS
2628 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2629 return 0;
2630
2631 return 1;
2632 }
2633
2634 rtx
2635 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2636 {
2637 if (GET_CODE (orig) == SYMBOL_REF
2638 || GET_CODE (orig) == LABEL_REF)
2639 {
2640 #ifndef AOF_ASSEMBLER
2641 rtx pic_ref, address;
2642 #endif
2643 rtx insn;
2644 int subregs = 0;
2645
2646 if (reg == 0)
2647 {
2648 if (no_new_pseudos)
2649 abort ();
2650 else
2651 reg = gen_reg_rtx (Pmode);
2652
2653 subregs = 1;
2654 }
2655
2656 #ifdef AOF_ASSEMBLER
2657 /* The AOF assembler can generate relocations for these directly, and
2658 understands that the PIC register has to be added into the offset. */
2659 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2660 #else
2661 if (subregs)
2662 address = gen_reg_rtx (Pmode);
2663 else
2664 address = reg;
2665
2666 if (TARGET_ARM)
2667 emit_insn (gen_pic_load_addr_arm (address, orig));
2668 else
2669 emit_insn (gen_pic_load_addr_thumb (address, orig));
2670
2671 if ((GET_CODE (orig) == LABEL_REF
2672 || (GET_CODE (orig) == SYMBOL_REF &&
2673 SYMBOL_REF_LOCAL_P (orig)))
2674 && NEED_GOT_RELOC)
2675 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2676 else
2677 {
2678 pic_ref = gen_rtx_MEM (Pmode,
2679 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2680 address));
2681 RTX_UNCHANGING_P (pic_ref) = 1;
2682 }
2683
2684 insn = emit_move_insn (reg, pic_ref);
2685 #endif
2686 current_function_uses_pic_offset_table = 1;
2687 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2688 by loop. */
2689 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2690 REG_NOTES (insn));
2691 return reg;
2692 }
2693 else if (GET_CODE (orig) == CONST)
2694 {
2695 rtx base, offset;
2696
2697 if (GET_CODE (XEXP (orig, 0)) == PLUS
2698 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2699 return orig;
2700
2701 if (reg == 0)
2702 {
2703 if (no_new_pseudos)
2704 abort ();
2705 else
2706 reg = gen_reg_rtx (Pmode);
2707 }
2708
2709 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2710 {
2711 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2712 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2713 base == reg ? 0 : reg);
2714 }
2715 else
2716 abort ();
2717
2718 if (GET_CODE (offset) == CONST_INT)
2719 {
2720 /* The base register doesn't really matter; we only want to
2721 test the index for the appropriate mode. */
2722 if (!arm_legitimate_index_p (mode, offset, 0))
2723 {
2724 if (!no_new_pseudos)
2725 offset = force_reg (Pmode, offset);
2726 else
2727 abort ();
2728 }
2729
2730 if (GET_CODE (offset) == CONST_INT)
2731 return plus_constant (base, INTVAL (offset));
2732 }
2733
2734 if (GET_MODE_SIZE (mode) > 4
2735 && (GET_MODE_CLASS (mode) == MODE_INT
2736 || TARGET_SOFT_FLOAT))
2737 {
2738 emit_insn (gen_addsi3 (reg, base, offset));
2739 return reg;
2740 }
2741
2742 return gen_rtx_PLUS (Pmode, base, offset);
2743 }
2744
2745 return orig;
2746 }
2747
2748 /* Generate code to load the PIC register. PROLOGUE is true if
2749 called from arm_expand_prologue (in which case we want the
2750 generated insns at the start of the function); false if called
2751 by an exception receiver that needs the PIC register reloaded
2752 (in which case the insns are just dumped at the current location). */
2753 void
2754 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2755 {
2756 #ifndef AOF_ASSEMBLER
2757 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2758 rtx global_offset_table;
2759
2760 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2761 return;
2762
2763 if (!flag_pic)
2764 abort ();
2765
2766 start_sequence ();
2767 l1 = gen_label_rtx ();
2768
2769 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2770 /* On the ARM the PC register contains 'dot + 8' at the time of the
2771 addition; on the Thumb it is 'dot + 4'. */
2772 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2773 if (GOT_PCREL)
2774 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2775 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2776 else
2777 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2778
2779 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2780
2781 if (TARGET_ARM)
2782 {
2783 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2784 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2785 }
2786 else
2787 {
2788 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2789 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2790 }
2791
2792 seq = get_insns ();
2793 end_sequence ();
2794 if (prologue)
2795 emit_insn_after (seq, get_insns ());
2796 else
2797 emit_insn (seq);
2798
2799 /* Need to emit this whether or not we obey regdecls,
2800 since setjmp/longjmp can cause life info to screw up. */
2801 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2802 #endif /* AOF_ASSEMBLER */
2803 }
2804
2805 /* Return nonzero if X is valid as an ARM state addressing register. */
2806 static int
2807 arm_address_register_rtx_p (rtx x, int strict_p)
2808 {
2809 int regno;
2810
2811 if (GET_CODE (x) != REG)
2812 return 0;
2813
2814 regno = REGNO (x);
2815
2816 if (strict_p)
2817 return ARM_REGNO_OK_FOR_BASE_P (regno);
2818
2819 return (regno <= LAST_ARM_REGNUM
2820 || regno >= FIRST_PSEUDO_REGISTER
2821 || regno == FRAME_POINTER_REGNUM
2822 || regno == ARG_POINTER_REGNUM);
2823 }
2824
2825 /* Return nonzero if X is a valid ARM state address operand. */
2826 int
2827 arm_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2828 {
2829 if (arm_address_register_rtx_p (x, strict_p))
2830 return 1;
2831
2832 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2833 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2834
2835 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2836 && GET_MODE_SIZE (mode) <= 4
2837 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2838 && GET_CODE (XEXP (x, 1)) == PLUS
2839 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2840 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), strict_p);
2841
2842 /* After reload constants split into minipools will have addresses
2843 from a LABEL_REF. */
2844 else if (reload_completed
2845 && (GET_CODE (x) == LABEL_REF
2846 || (GET_CODE (x) == CONST
2847 && GET_CODE (XEXP (x, 0)) == PLUS
2848 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2849 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2850 return 1;
2851
2852 else if (mode == TImode)
2853 return 0;
2854
2855 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2856 {
2857 if (GET_CODE (x) == PLUS
2858 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2859 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2860 {
2861 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2862
2863 if (val == 4 || val == -4 || val == -8)
2864 return 1;
2865 }
2866 }
2867
2868 else if (TARGET_HARD_FLOAT && TARGET_VFP && mode == DFmode)
2869 {
2870 if (GET_CODE (x) == PLUS
2871 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2872 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2873 {
2874 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2875
2876 /* ??? valid arm offsets are a subset of VFP offsets.
2877 For now only allow this subset. Proper fix is to add an
2878 additional memory constraint for arm address modes.
2879 Alternatively allow full vfp addressing and let
2880 output_move_double fix it up with a sub-optimal sequence. */
2881 if (val == 4 || val == -4 || val == -8)
2882 return 1;
2883 }
2884 }
2885
2886 else if (GET_CODE (x) == PLUS)
2887 {
2888 rtx xop0 = XEXP (x, 0);
2889 rtx xop1 = XEXP (x, 1);
2890
2891 return ((arm_address_register_rtx_p (xop0, strict_p)
2892 && arm_legitimate_index_p (mode, xop1, strict_p))
2893 || (arm_address_register_rtx_p (xop1, strict_p)
2894 && arm_legitimate_index_p (mode, xop0, strict_p)));
2895 }
2896
2897 #if 0
2898 /* Reload currently can't handle MINUS, so disable this for now */
2899 else if (GET_CODE (x) == MINUS)
2900 {
2901 rtx xop0 = XEXP (x, 0);
2902 rtx xop1 = XEXP (x, 1);
2903
2904 return (arm_address_register_rtx_p (xop0, strict_p)
2905 && arm_legitimate_index_p (mode, xop1, strict_p));
2906 }
2907 #endif
2908
2909 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
2910 && GET_CODE (x) == SYMBOL_REF
2911 && CONSTANT_POOL_ADDRESS_P (x)
2912 && ! (flag_pic
2913 && symbol_mentioned_p (get_pool_constant (x))))
2914 return 1;
2915
2916 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
2917 && (GET_MODE_SIZE (mode) <= 4)
2918 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
2919 return 1;
2920
2921 return 0;
2922 }
2923
2924 /* Return nonzero if INDEX is valid for an address index operand in
2925 ARM state. */
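/* For example, for an SImode access the index may be a constant in the
   range (-4096, 4096), another register, or a register scaled by a power
   of two as in "ldr r0, [r1, r2, lsl #2]"; an out-of-range constant such
   as 4096 is rejected here and has to be folded into the base instead.  */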
2926 static int
2927 arm_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
2928 {
2929 HOST_WIDE_INT range;
2930 enum rtx_code code = GET_CODE (index);
2931
2932 if (TARGET_HARD_FLOAT && TARGET_FPA && GET_MODE_CLASS (mode) == MODE_FLOAT)
2933 return (code == CONST_INT && INTVAL (index) < 1024
2934 && INTVAL (index) > -1024
2935 && (INTVAL (index) & 3) == 0);
2936
2937 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
2938 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
2939 return (code == CONST_INT
2940 && INTVAL (index) < 255
2941 && INTVAL (index) > -255);
2942
2943 if (arm_address_register_rtx_p (index, strict_p)
2944 && GET_MODE_SIZE (mode) <= 4)
2945 return 1;
2946
2947 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
2948 return (code == CONST_INT
2949 && INTVAL (index) < 256
2950 && INTVAL (index) > -256);
2951
2952 /* XXX What about ldrsb? */
2953 if (GET_MODE_SIZE (mode) <= 4 && code == MULT
2954 && (!arm_arch4 || (mode) != HImode))
2955 {
2956 rtx xiop0 = XEXP (index, 0);
2957 rtx xiop1 = XEXP (index, 1);
2958
2959 return ((arm_address_register_rtx_p (xiop0, strict_p)
2960 && power_of_two_operand (xiop1, SImode))
2961 || (arm_address_register_rtx_p (xiop1, strict_p)
2962 && power_of_two_operand (xiop0, SImode)));
2963 }
2964
2965 if (GET_MODE_SIZE (mode) <= 4
2966 && (code == LSHIFTRT || code == ASHIFTRT
2967 || code == ASHIFT || code == ROTATERT)
2968 && (!arm_arch4 || (mode) != HImode))
2969 {
2970 rtx op = XEXP (index, 1);
2971
2972 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
2973 && GET_CODE (op) == CONST_INT
2974 && INTVAL (op) > 0
2975 && INTVAL (op) <= 31);
2976 }
2977
2978 /* XXX For ARM v4 we may be doing a sign-extend operation during the
2979 load, but that has a restricted addressing range and we are unable
2980 to tell here whether that is the case. To be safe we restrict all
2981 loads to that range. */
2982 if (arm_arch4)
2983 range = (mode == HImode || mode == QImode) ? 256 : 4096;
2984 else
2985 range = (mode == HImode) ? 4095 : 4096;
2986
2987 return (code == CONST_INT
2988 && INTVAL (index) < range
2989 && INTVAL (index) > -range);
2990 }
2991
2992 /* Return nonzero if X is valid as a Thumb state base register. */
2993 static int
2994 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
2995 {
2996 int regno;
2997
2998 if (GET_CODE (x) != REG)
2999 return 0;
3000
3001 regno = REGNO (x);
3002
3003 if (strict_p)
3004 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3005
3006 return (regno <= LAST_LO_REGNUM
3007 || regno > LAST_VIRTUAL_REGISTER
3008 || regno == FRAME_POINTER_REGNUM
3009 || (GET_MODE_SIZE (mode) >= 4
3010 && (regno == STACK_POINTER_REGNUM
3011 || regno >= FIRST_PSEUDO_REGISTER
3012 || x == hard_frame_pointer_rtx
3013 || x == arg_pointer_rtx)));
3014 }
3015
3016 /* Return nonzero if x is a legitimate index register. This is the case
3017 for any base register that can access a QImode object. */
3018 inline static int
3019 thumb_index_register_rtx_p (rtx x, int strict_p)
3020 {
3021 return thumb_base_register_rtx_p (x, QImode, strict_p);
3022 }
3023
3024 /* Return nonzero if x is a legitimate Thumb-state address.
3025
3026 The AP may be eliminated to either the SP or the FP, so we use the
3027 least common denominator, e.g. SImode, and offsets from 0 to 64.
3028
3029 ??? Verify whether the above is the right approach.
3030
3031 ??? Also, the FP may be eliminated to the SP, so perhaps that
3032 needs special handling also.
3033
3034 ??? Look at how the mips16 port solves this problem. It probably uses
3035 better ways to solve some of these problems.
3036
3037 Although it is not incorrect, we don't accept QImode and HImode
3038 addresses based on the frame pointer or arg pointer until the
3039 reload pass starts. This is so that eliminating such addresses
3040 into stack based ones won't produce impossible code. */
3041 int
3042 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3043 {
3044 /* ??? Not clear if this is right. Experiment. */
3045 if (GET_MODE_SIZE (mode) < 4
3046 && !(reload_in_progress || reload_completed)
3047 && (reg_mentioned_p (frame_pointer_rtx, x)
3048 || reg_mentioned_p (arg_pointer_rtx, x)
3049 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3050 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3051 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3052 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3053 return 0;
3054
3055 /* Accept any base register. SP only in SImode or larger. */
3056 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3057 return 1;
3058
3059 /* This is PC relative data before arm_reorg runs. */
3060 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3061 && GET_CODE (x) == SYMBOL_REF
3062 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3063 return 1;
3064
3065 /* This is PC relative data after arm_reorg runs. */
3066 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3067 && (GET_CODE (x) == LABEL_REF
3068 || (GET_CODE (x) == CONST
3069 && GET_CODE (XEXP (x, 0)) == PLUS
3070 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3071 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3072 return 1;
3073
3074 /* Post-inc indexing only supported for SImode and larger. */
3075 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3076 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3077 return 1;
3078
3079 else if (GET_CODE (x) == PLUS)
3080 {
3081 /* REG+REG address can be any two index registers. */
3082 /* We disallow FRAME+REG addressing since we know that FRAME
3083 will be replaced with STACK, and SP relative addressing only
3084 permits SP+OFFSET. */
3085 if (GET_MODE_SIZE (mode) <= 4
3086 && XEXP (x, 0) != frame_pointer_rtx
3087 && XEXP (x, 1) != frame_pointer_rtx
3088 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3089 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3090 return 1;
3091
3092 /* REG+const has 5-7 bit offset for non-SP registers. */
3093 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3094 || XEXP (x, 0) == arg_pointer_rtx)
3095 && GET_CODE (XEXP (x, 1)) == CONST_INT
3096 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3097 return 1;
3098
3099 /* REG+const has 10 bit offset for SP, but only SImode and
3100 larger is supported. */
3101 /* ??? Should probably check for DI/DFmode overflow here
3102 just like GO_IF_LEGITIMATE_OFFSET does. */
3103 else if (GET_CODE (XEXP (x, 0)) == REG
3104 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3105 && GET_MODE_SIZE (mode) >= 4
3106 && GET_CODE (XEXP (x, 1)) == CONST_INT
3107 && INTVAL (XEXP (x, 1)) >= 0
3108 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3109 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3110 return 1;
3111
3112 else if (GET_CODE (XEXP (x, 0)) == REG
3113 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3114 && GET_MODE_SIZE (mode) >= 4
3115 && GET_CODE (XEXP (x, 1)) == CONST_INT
3116 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3117 return 1;
3118 }
3119
3120 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3121 && GET_MODE_SIZE (mode) == 4
3122 && GET_CODE (x) == SYMBOL_REF
3123 && CONSTANT_POOL_ADDRESS_P (x)
3124 && !(flag_pic
3125 && symbol_mentioned_p (get_pool_constant (x))))
3126 return 1;
3127
3128 return 0;
3129 }
3130
3131 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3132 instruction of mode MODE. */
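/* For instance, a QImode access allows offsets 0..31, HImode 0..62 in
   steps of two, and SImode 0..124 in steps of four; wider modes must also
   leave room for their full size below 128.  */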
3133 int
3134 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3135 {
3136 switch (GET_MODE_SIZE (mode))
3137 {
3138 case 1:
3139 return val >= 0 && val < 32;
3140
3141 case 2:
3142 return val >= 0 && val < 64 && (val & 1) == 0;
3143
3144 default:
3145 return (val >= 0
3146 && (val + GET_MODE_SIZE (mode)) <= 128
3147 && (val & 3) == 0);
3148 }
3149 }
3150
3151 /* Try machine-dependent ways of modifying an illegitimate address
3152 to be legitimate. If we find one, return the new, valid address. */
3153 rtx
3154 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3155 {
3156 if (GET_CODE (x) == PLUS)
3157 {
3158 rtx xop0 = XEXP (x, 0);
3159 rtx xop1 = XEXP (x, 1);
3160
3161 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3162 xop0 = force_reg (SImode, xop0);
3163
3164 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3165 xop1 = force_reg (SImode, xop1);
3166
3167 if (ARM_BASE_REGISTER_RTX_P (xop0)
3168 && GET_CODE (xop1) == CONST_INT)
3169 {
3170 HOST_WIDE_INT n, low_n;
3171 rtx base_reg, val;
3172 n = INTVAL (xop1);
3173
3174 /* VFP addressing modes actually allow greater offsets, but for
3175 now we just stick with the lowest common denominator. */
3176 if (mode == DImode
3177 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3178 {
3179 low_n = n & 0x0f;
3180 n &= ~0x0f;
3181 if (low_n > 4)
3182 {
3183 n += 16;
3184 low_n -= 16;
3185 }
3186 }
3187 else
3188 {
3189 low_n = ((mode) == TImode ? 0
3190 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3191 n -= low_n;
3192 }
3193
3194 base_reg = gen_reg_rtx (SImode);
3195 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3196 GEN_INT (n)), NULL_RTX);
3197 emit_move_insn (base_reg, val);
3198 x = (low_n == 0 ? base_reg
3199 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3200 }
3201 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3202 x = gen_rtx_PLUS (SImode, xop0, xop1);
3203 }
3204
3205 /* XXX We don't allow MINUS any more -- see comment in
3206 arm_legitimate_address_p (). */
3207 else if (GET_CODE (x) == MINUS)
3208 {
3209 rtx xop0 = XEXP (x, 0);
3210 rtx xop1 = XEXP (x, 1);
3211
3212 if (CONSTANT_P (xop0))
3213 xop0 = force_reg (SImode, xop0);
3214
3215 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3216 xop1 = force_reg (SImode, xop1);
3217
3218 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3219 x = gen_rtx_MINUS (SImode, xop0, xop1);
3220 }
3221
3222 if (flag_pic)
3223 {
3224 /* We need to find and carefully transform any SYMBOL and LABEL
3225 references; so go back to the original address expression. */
3226 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3227
3228 if (new_x != orig_x)
3229 x = new_x;
3230 }
3231
3232 return x;
3233 }
3234
3235
3236 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3237 to be legitimate. If we find one, return the new, valid address. */
3238 rtx
3239 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3240 {
3241 if (GET_CODE (x) == PLUS
3242 && GET_CODE (XEXP (x, 1)) == CONST_INT
3243 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3244 || INTVAL (XEXP (x, 1)) < 0))
3245 {
3246 rtx xop0 = XEXP (x, 0);
3247 rtx xop1 = XEXP (x, 1);
3248 HOST_WIDE_INT offset = INTVAL (xop1);
3249
3250 /* Try and fold the offset into a biasing of the base register and
3251 then offsetting that. Don't do this when optimizing for space
3252 since it can cause too many CSEs. */
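/* For instance, with mode == SImode an offset of 260 is out of range, but
   biasing the base by 252 leaves an offset of 8, which fits the scaled
   5-bit immediate form.  */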
3253 if (optimize_size && offset >= 0
3254 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3255 {
3256 HOST_WIDE_INT delta;
3257
3258 if (offset >= 256)
3259 delta = offset - (256 - GET_MODE_SIZE (mode));
3260 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3261 delta = 31 * GET_MODE_SIZE (mode);
3262 else
3263 delta = offset & (~31 * GET_MODE_SIZE (mode));
3264
3265 xop0 = force_operand (plus_constant (xop0, offset - delta),
3266 NULL_RTX);
3267 x = plus_constant (xop0, delta);
3268 }
3269 else if (offset < 0 && offset > -256)
3270 /* Small negative offsets are best done with a subtract before the
3271 dereference; forcing these into a register normally takes two
3272 instructions. */
3273 x = force_operand (x, NULL_RTX);
3274 else
3275 {
3276 /* For the remaining cases, force the constant into a register. */
3277 xop1 = force_reg (SImode, xop1);
3278 x = gen_rtx_PLUS (SImode, xop0, xop1);
3279 }
3280 }
3281 else if (GET_CODE (x) == PLUS
3282 && s_register_operand (XEXP (x, 1), SImode)
3283 && !s_register_operand (XEXP (x, 0), SImode))
3284 {
3285 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3286
3287 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3288 }
3289
3290 if (flag_pic)
3291 {
3292 /* We need to find and carefully transform any SYMBOL and LABEL
3293 references; so go back to the original address expression. */
3294 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3295
3296 if (new_x != orig_x)
3297 x = new_x;
3298 }
3299
3300 return x;
3301 }
3302
3303 \f
3304
3305 #define REG_OR_SUBREG_REG(X) \
3306 (GET_CODE (X) == REG \
3307 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3308
3309 #define REG_OR_SUBREG_RTX(X) \
3310 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3311
3312 #ifndef COSTS_N_INSNS
3313 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3314 #endif
3315 static inline int
3316 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3317 {
3318 enum machine_mode mode = GET_MODE (x);
3319
3320 switch (code)
3321 {
3322 case ASHIFT:
3323 case ASHIFTRT:
3324 case LSHIFTRT:
3325 case ROTATERT:
3326 case PLUS:
3327 case MINUS:
3328 case COMPARE:
3329 case NEG:
3330 case NOT:
3331 return COSTS_N_INSNS (1);
3332
3333 case MULT:
3334 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3335 {
3336 int cycles = 0;
3337 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3338
3339 while (i)
3340 {
3341 i >>= 2;
3342 cycles++;
3343 }
3344 return COSTS_N_INSNS (2) + cycles;
3345 }
3346 return COSTS_N_INSNS (1) + 16;
3347
3348 case SET:
3349 return (COSTS_N_INSNS (1)
3350 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3351 + (GET_CODE (SET_DEST (x)) == MEM)));
3352
3353 case CONST_INT:
3354 if (outer == SET)
3355 {
3356 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3357 return 0;
3358 if (thumb_shiftable_const (INTVAL (x)))
3359 return COSTS_N_INSNS (2);
3360 return COSTS_N_INSNS (3);
3361 }
3362 else if ((outer == PLUS || outer == COMPARE)
3363 && INTVAL (x) < 256 && INTVAL (x) > -256)
3364 return 0;
3365 else if (outer == AND
3366 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3367 return COSTS_N_INSNS (1);
3368 else if (outer == ASHIFT || outer == ASHIFTRT
3369 || outer == LSHIFTRT)
3370 return 0;
3371 return COSTS_N_INSNS (2);
3372
3373 case CONST:
3374 case CONST_DOUBLE:
3375 case LABEL_REF:
3376 case SYMBOL_REF:
3377 return COSTS_N_INSNS (3);
3378
3379 case UDIV:
3380 case UMOD:
3381 case DIV:
3382 case MOD:
3383 return 100;
3384
3385 case TRUNCATE:
3386 return 99;
3387
3388 case AND:
3389 case XOR:
3390 case IOR:
3391 /* XXX guess. */
3392 return 8;
3393
3394 case ADDRESSOF:
3395 case MEM:
3396 /* XXX another guess. */
3397 /* Memory costs quite a lot for the first word, but subsequent words
3398 load at the equivalent of a single insn each. */
3399 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3400 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3401 ? 4 : 0));
3402
3403 case IF_THEN_ELSE:
3404 /* XXX a guess. */
3405 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3406 return 14;
3407 return 2;
3408
3409 case ZERO_EXTEND:
3410 /* XXX still guessing. */
3411 switch (GET_MODE (XEXP (x, 0)))
3412 {
3413 case QImode:
3414 return (1 + (mode == DImode ? 4 : 0)
3415 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3416
3417 case HImode:
3418 return (4 + (mode == DImode ? 4 : 0)
3419 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3420
3421 case SImode:
3422 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3423
3424 default:
3425 return 99;
3426 }
3427
3428 default:
3429 return 99;
3430 }
3431 }
3432
3433
3434 /* Worker routine for arm_rtx_costs. */
3435 static inline int
3436 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3437 {
3438 enum machine_mode mode = GET_MODE (x);
3439 enum rtx_code subcode;
3440 int extra_cost;
3441
3442 switch (code)
3443 {
3444 case MEM:
3445 /* Memory costs quite a lot for the first word, but subsequent words
3446 load at the equivalent of a single insn each. */
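/* E.g. an SImode load from a plain address costs 10 and a DImode load
   costs 14, with a further 4 added for constant pool references.  */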
3447 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3448 + (GET_CODE (x) == SYMBOL_REF
3449 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3450
3451 case DIV:
3452 case MOD:
3453 case UDIV:
3454 case UMOD:
3455 return optimize_size ? COSTS_N_INSNS (2) : 100;
3456
3457 case ROTATE:
3458 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3459 return 4;
3460 /* Fall through */
3461 case ROTATERT:
3462 if (mode != SImode)
3463 return 8;
3464 /* Fall through */
3465 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3466 if (mode == DImode)
3467 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3468 + ((GET_CODE (XEXP (x, 0)) == REG
3469 || (GET_CODE (XEXP (x, 0)) == SUBREG
3470 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3471 ? 0 : 8));
3472 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3473 || (GET_CODE (XEXP (x, 0)) == SUBREG
3474 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3475 ? 0 : 4)
3476 + ((GET_CODE (XEXP (x, 1)) == REG
3477 || (GET_CODE (XEXP (x, 1)) == SUBREG
3478 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3479 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3480 ? 0 : 4));
3481
3482 case MINUS:
3483 if (mode == DImode)
3484 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3485 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3486 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3487 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3488 ? 0 : 8));
3489
3490 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3491 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3492 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3493 && arm_const_double_rtx (XEXP (x, 1))))
3494 ? 0 : 8)
3495 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3496 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3497 && arm_const_double_rtx (XEXP (x, 0))))
3498 ? 0 : 8));
3499
3500 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3501 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3502 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3503 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3504 || subcode == ASHIFTRT || subcode == LSHIFTRT
3505 || subcode == ROTATE || subcode == ROTATERT
3506 || (subcode == MULT
3507 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3508 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3509 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3510 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3511 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3512 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3513 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3514 return 1;
3515 /* Fall through */
3516
3517 case PLUS:
3518 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3519 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3520 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3521 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3522 && arm_const_double_rtx (XEXP (x, 1))))
3523 ? 0 : 8));
3524
3525 /* Fall through */
3526 case AND: case XOR: case IOR:
3527 extra_cost = 0;
3528
3529 /* Normally the frame registers will be split into reg+const during
3530 reload, so it is a bad idea to combine them with other instructions,
3531 since then they might not be moved outside of loops. As a compromise
3532 we allow integration with ops that have a constant as their second
3533 operand. */
3534 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3535 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3536 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3537 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3538 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3539 extra_cost = 4;
3540
3541 if (mode == DImode)
3542 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3543 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3544 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3545 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3546 ? 0 : 8));
3547
3548 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3549 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3550 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3551 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3552 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3553 ? 0 : 4));
3554
3555 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3556 return (1 + extra_cost
3557 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3558 || subcode == LSHIFTRT || subcode == ASHIFTRT
3559 || subcode == ROTATE || subcode == ROTATERT
3560 || (subcode == MULT
3561 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3562 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3563 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3564 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3565 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3566 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3567 ? 0 : 4));
3568
3569 return 8;
3570
3571 case MULT:
3572 /* This should have been handled by the CPU specific routines. */
3573 abort ();
3574
3575 case TRUNCATE:
3576 if (arm_arch3m && mode == SImode
3577 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3578 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3579 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3580 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3581 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3582 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3583 return 8;
3584 return 99;
3585
3586 case NEG:
3587 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3588 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3589 /* Fall through */
3590 case NOT:
3591 if (mode == DImode)
3592 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3593
3594 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3595
3596 case IF_THEN_ELSE:
3597 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3598 return 14;
3599 return 2;
3600
3601 case COMPARE:
3602 return 1;
3603
3604 case ABS:
3605 return 4 + (mode == DImode ? 4 : 0);
3606
3607 case SIGN_EXTEND:
3608 if (GET_MODE (XEXP (x, 0)) == QImode)
3609 return (4 + (mode == DImode ? 4 : 0)
3610 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3611 /* Fall through */
3612 case ZERO_EXTEND:
3613 switch (GET_MODE (XEXP (x, 0)))
3614 {
3615 case QImode:
3616 return (1 + (mode == DImode ? 4 : 0)
3617 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3618
3619 case HImode:
3620 return (4 + (mode == DImode ? 4 : 0)
3621 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3622
3623 case SImode:
3624 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3625
3626 case V8QImode:
3627 case V4HImode:
3628 case V2SImode:
3629 case V4QImode:
3630 case V2HImode:
3631 return 1;
3632
3633 default:
3634 break;
3635 }
3636 abort ();
3637
3638 case CONST_INT:
3639 if (const_ok_for_arm (INTVAL (x)))
3640 return outer == SET ? 2 : -1;
3641 else if (outer == AND
3642 && const_ok_for_arm (~INTVAL (x)))
3643 return -1;
3644 else if ((outer == COMPARE
3645 || outer == PLUS || outer == MINUS)
3646 && const_ok_for_arm (-INTVAL (x)))
3647 return -1;
3648 else
3649 return 5;
3650
3651 case CONST:
3652 case LABEL_REF:
3653 case SYMBOL_REF:
3654 return 6;
3655
3656 case CONST_DOUBLE:
3657 if (arm_const_double_rtx (x))
3658 return outer == SET ? 2 : -1;
3659 else if ((outer == COMPARE || outer == PLUS)
3660 && neg_const_double_rtx_ok_for_fpa (x))
3661 return -1;
3662 return 7;
3663
3664 default:
3665 return 99;
3666 }
3667 }
3668
3669 /* RTX costs for cores with a slow MUL implementation. */
3670
3671 static bool
3672 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3673 {
3674 enum machine_mode mode = GET_MODE (x);
3675
3676 if (TARGET_THUMB)
3677 {
3678 *total = thumb_rtx_costs (x, code, outer_code);
3679 return true;
3680 }
3681
3682 switch (code)
3683 {
3684 case MULT:
3685 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3686 || mode == DImode)
3687 {
3688 *total = 30;
3689 return true;
3690 }
3691
3692 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3693 {
3694 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3695 & (unsigned HOST_WIDE_INT) 0xffffffff);
3696 int cost, const_ok = const_ok_for_arm (i);
3697 int j, booth_unit_size;
3698
3699 /* Tune as appropriate. */
3700 cost = const_ok ? 4 : 8;
3701 booth_unit_size = 2;
3702 for (j = 0; i && j < 32; j += booth_unit_size)
3703 {
3704 i >>= booth_unit_size;
3705 cost += 2;
3706 }
3707
3708 *total = cost;
3709 return true;
3710 }
3711
3712 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3713 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3714 return true;
3715
3716 default:
3717 *total = arm_rtx_costs_1 (x, code, outer_code);
3718 return true;
3719 }
3720 }
3721
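/* Editorial aside, not part of the original arm.c: the Booth-step loop in
   arm_slowmul_rtx_costs above charges a base cost of 4 (ARM-immediate
   constant) or 8 (constant must come from the literal pool) plus 2 units
   per BOOTH_UNIT_SIZE bits of the multiplier that remain non-zero.  The
   standalone sketch below mirrors that arithmetic so the heuristic can be
   checked by hand; the function name slowmul_const_cost and the explicit
   const_ok flag are illustrative assumptions, not GCC interfaces.  */

#include <stdio.h>

static int
slowmul_const_cost (unsigned long multiplier, int const_ok, int booth_unit_size)
{
  int cost = const_ok ? 4 : 8;  /* Cheap MOV immediate vs. pool load.  */
  int j;

  multiplier &= 0xffffffffUL;   /* Work on the low 32 bits only.  */

  /* One 2-unit step per BOOTH_UNIT_SIZE bits still outstanding, capped
     at 32 bits, exactly as in the loop above.  */
  for (j = 0; multiplier && j < 32; j += booth_unit_size)
    {
      multiplier >>= booth_unit_size;
      cost += 2;
    }

  return cost;
}

int
main (void)
{
  /* Multiplying by 0x55 with a 2-bit Booth unit needs four steps, so the
     estimate is 4 + 4 * 2 = 12.  */
  printf ("cost of mul by 0x55: %d\n", slowmul_const_cost (0x55, 1, 2));
  return 0;
}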
3722
3723 /* RTX cost for cores with a fast multiply unit (M variants). */
3724
3725 static bool
3726 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3727 {
3728 enum machine_mode mode = GET_MODE (x);
3729
3730 if (TARGET_THUMB)
3731 {
3732 *total = thumb_rtx_costs (x, code, outer_code);
3733 return true;
3734 }
3735
3736 switch (code)
3737 {
3738 case MULT:
3739 /* There is no point basing this on the tuning, since it is always the
3740 fast variant if it exists at all. */
3741 if (mode == DImode
3742 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3743 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3744 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3745 {
3746 *total = 8;
3747 return true;
3748 }
3749
3750
3751 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3752 || mode == DImode)
3753 {
3754 *total = 30;
3755 return true;
3756 }
3757
3758 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3759 {
3760 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3761 & (unsigned HOST_WIDE_INT) 0xffffffff);
3762 int cost, const_ok = const_ok_for_arm (i);
3763 int j, booth_unit_size;
3764
3765 /* Tune as appropriate. */
3766 cost = const_ok ? 4 : 8;
3767 booth_unit_size = 8;
3768 for (j = 0; i && j < 32; j += booth_unit_size)
3769 {
3770 i >>= booth_unit_size;
3771 cost += 2;
3772 }
3773
3774 *total = cost;
3775 return true;
3776 }
3777
3778 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3779 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3780 return true;
3781
3782 default:
3783 *total = arm_rtx_costs_1 (x, code, outer_code);
3784 return true;
3785 }
3786 }
3787
3788
3789 /* RTX cost for XScale CPUs. */
3790
3791 static bool
3792 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
3793 {
3794 enum machine_mode mode = GET_MODE (x);
3795
3796 if (TARGET_THUMB)
3797 {
3798 *total = thumb_rtx_costs (x, code, outer_code);
3799 return true;
3800 }
3801
3802 switch (code)
3803 {
3804 case MULT:
3805 /* There is no point basing this on the tuning, since it is always the
3806 fast variant if it exists at all. */
3807 if (mode == DImode
3808 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3809 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3810 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3811 {
3812 *total = 8;
3813 return true;
3814 }
3815
3816
3817 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3818 || mode == DImode)
3819 {
3820 *total = 30;
3821 return true;
3822 }
3823
3824 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3825 {
3826 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3827 & (unsigned HOST_WIDE_INT) 0xffffffff);
3828 int cost, const_ok = const_ok_for_arm (i);
3829 unsigned HOST_WIDE_INT masked_const;
3830
3831 /* The cost will be related to two insns.
3832 First a load of the constant (MOV or LDR), then a multiply. */
3833 cost = 2;
3834 if (! const_ok)
3835 cost += 1; /* LDR is probably more expensive because
3836 of longer result latency. */
3837 masked_const = i & 0xffff8000;
3838 if (masked_const != 0 && masked_const != 0xffff8000)
3839 {
3840 masked_const = i & 0xf8000000;
3841 if (masked_const == 0 || masked_const == 0xf8000000)
3842 cost += 1;
3843 else
3844 cost += 2;
3845 }
3846 *total = cost;
3847 return true;
3848 }
3849
3850 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3851 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3852 return true;
3853
3854 default:
3855 *total = arm_rtx_costs_1 (x, code, outer_code);
3856 return true;
3857 }
3858 }
3859
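/* Editorial aside, not part of the original arm.c: arm_xscale_rtx_costs
   above prices a multiply by a constant as two instructions (a MOV or LDR
   of the constant, then the multiply), adding extra units when the
   constant's significant bits reach high enough that the multiplier
   cannot terminate early.  This standalone sketch repeats that masking
   logic so it can be exercised in isolation; xscale_mult_const_cost and
   the const_ok flag are illustrative names, not GCC interfaces.  */

#include <stdio.h>

static int
xscale_mult_const_cost (unsigned long multiplier, int const_ok)
{
  unsigned long masked;
  int cost = 2;                 /* Load of the constant, then the MUL.  */

  multiplier &= 0xffffffffUL;   /* Work on the low 32 bits only.  */

  if (!const_ok)
    cost += 1;                  /* LDR from the pool: longer result latency.  */

  /* Significant bits above bit 14 prevent early termination ...  */
  masked = multiplier & 0xffff8000UL;
  if (masked != 0 && masked != 0xffff8000UL)
    {
      /* ... unless the top five bits are all-zero or all-one.  */
      masked = multiplier & 0xf8000000UL;
      cost += (masked == 0 || masked == 0xf8000000UL) ? 1 : 2;
    }

  return cost;
}

int
main (void)
{
  printf ("mul by 100:        %d\n", xscale_mult_const_cost (100, 1));        /* 2 */
  printf ("mul by 0x12345678: %d\n", xscale_mult_const_cost (0x12345678, 0)); /* 5 */
  return 0;
}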
3860
3861 /* RTX costs for 9e (and later) cores. */
3862
3863 static bool
3864 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
3865 {
3866 enum machine_mode mode = GET_MODE (x);
3867 int nonreg_cost;
3868 int cost;
3869
3870 if (TARGET_THUMB)
3871 {
3872 switch (code)
3873 {
3874 case MULT:
3875 *total = COSTS_N_INSNS (3);
3876 return true;
3877
3878 default:
3879 *total = thumb_rtx_costs (x, code, outer_code);
3880 return true;
3881 }
3882 }
3883
3884 switch (code)
3885 {
3886 case MULT:
3887 /* There is no point basing this on the tuning, since it is always the
3888 fast variant if it exists at all. */
3889 if (mode == DImode
3890 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3891 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3892 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3893 {
3894 *total = 3;
3895 return true;
3896 }
3897
3898
3899 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3900 {
3901 *total = 30;
3902 return true;
3903 }
3904 if (mode == DImode)
3905 {
3906 cost = 7;
3907 nonreg_cost = 8;
3908 }
3909 else
3910 {
3911 cost = 2;
3912 nonreg_cost = 4;
3913 }
3914
3915
3916 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
3917 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
3918 return true;
3919
3920 default:
3921 *total = arm_rtx_costs_1 (x, code, outer_code);
3922 return true;
3923 }
3924 }
3925 /* All address computations that can be done are free, but rtx cost returns
3926 the same for practically all of them. So we weight the different types
3927 of address here in the order (most preferred first):
3928 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
3929 static inline int
3930 arm_arm_address_cost (rtx x)
3931 {
3932 enum rtx_code c = GET_CODE (x);
3933
3934 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
3935 return 0;
3936 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
3937 return 10;
3938
3939 if (c == PLUS || c == MINUS)
3940 {
3941 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
3942 return 2;
3943
3944 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
3945 return 3;
3946
3947 return 4;
3948 }
3949
3950 return 6;
3951 }
3952
3953 static inline int
3954 arm_thumb_address_cost (rtx x)
3955 {
3956 enum rtx_code c = GET_CODE (x);
3957
3958 if (c == REG)
3959 return 1;
3960 if (c == PLUS
3961 && GET_CODE (XEXP (x, 0)) == REG
3962 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3963 return 1;
3964
3965 return 2;
3966 }
3967
3968 static int
3969 arm_address_cost (rtx x)
3970 {
3971 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
3972 }
3973
3974 static int
3975 arm_use_dfa_pipeline_interface (void)
3976 {
3977 return true;
3978 }
3979
3980 static int
3981 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
3982 {
3983 rtx i_pat, d_pat;
3984
3985 /* Some true dependencies can have a higher cost depending
3986 on precisely how certain input operands are used. */
3987 if (arm_tune_xscale
3988 && REG_NOTE_KIND (link) == 0
3989 && recog_memoized (insn) >= 0
3990 && recog_memoized (dep) >= 0)
3991 {
3992 int shift_opnum = get_attr_shift (insn);
3993 enum attr_type attr_type = get_attr_type (dep);
3994
3995 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
3996 operand for INSN. If we have a shifted input operand and the
3997 instruction we depend on is another ALU instruction, then we may
3998 have to account for an additional stall. */
3999 if (shift_opnum != 0
4000 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4001 {
4002 rtx shifted_operand;
4003 int opno;
4004
4005 /* Get the shifted operand. */
4006 extract_insn (insn);
4007 shifted_operand = recog_data.operand[shift_opnum];
4008
4009 /* Iterate over all the operands in DEP. If we write an operand
4010 that overlaps with SHIFTED_OPERAND, then we have to increase the
4011 cost of this dependency. */
4012 extract_insn (dep);
4013 preprocess_constraints ();
4014 for (opno = 0; opno < recog_data.n_operands; opno++)
4015 {
4016 /* We can ignore strict inputs. */
4017 if (recog_data.operand_type[opno] == OP_IN)
4018 continue;
4019
4020 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4021 shifted_operand))
4022 return 2;
4023 }
4024 }
4025 }
4026
4027 /* XXX This is not strictly true for the FPA. */
4028 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4029 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4030 return 0;
4031
4032 /* Call insns don't incur a stall, even if they follow a load. */
4033 if (REG_NOTE_KIND (link) == 0
4034 && GET_CODE (insn) == CALL_INSN)
4035 return 1;
4036
4037 if ((i_pat = single_set (insn)) != NULL
4038 && GET_CODE (SET_SRC (i_pat)) == MEM
4039 && (d_pat = single_set (dep)) != NULL
4040 && GET_CODE (SET_DEST (d_pat)) == MEM)
4041 {
4042 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4043 /* This is a load after a store; there is no conflict if the load reads
4044 from a cached area. Assume that loads from the stack and from the
4045 constant pool are cached, and that others will miss. This is a
4046 hack. */
4047
4048 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4049 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4050 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4051 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4052 return 1;
4053 }
4054
4055 return cost;
4056 }
4057
4058 static int fp_consts_inited = 0;
4059
4060 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4061 static const char * const strings_fp[8] =
4062 {
4063 "0", "1", "2", "3",
4064 "4", "5", "0.5", "10"
4065 };
4066
4067 static REAL_VALUE_TYPE values_fp[8];
4068
4069 static void
4070 init_fp_table (void)
4071 {
4072 int i;
4073 REAL_VALUE_TYPE r;
4074
4075 if (TARGET_VFP)
4076 fp_consts_inited = 1;
4077 else
4078 fp_consts_inited = 8;
4079
4080 for (i = 0; i < fp_consts_inited; i++)
4081 {
4082 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4083 values_fp[i] = r;
4084 }
4085 }
4086
4087 /* Return TRUE if rtx X is a valid immediate FP constant. */
4088 int
4089 arm_const_double_rtx (rtx x)
4090 {
4091 REAL_VALUE_TYPE r;
4092 int i;
4093
4094 if (!fp_consts_inited)
4095 init_fp_table ();
4096
4097 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4098 if (REAL_VALUE_MINUS_ZERO (r))
4099 return 0;
4100
4101 for (i = 0; i < fp_consts_inited; i++)
4102 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4103 return 1;
4104
4105 return 0;
4106 }
4107
4108 /* Return TRUE if rtx X, when negated, is a valid immediate FPA constant. */
4109 int
4110 neg_const_double_rtx_ok_for_fpa (rtx x)
4111 {
4112 REAL_VALUE_TYPE r;
4113 int i;
4114
4115 if (!fp_consts_inited)
4116 init_fp_table ();
4117
4118 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4119 r = REAL_VALUE_NEGATE (r);
4120 if (REAL_VALUE_MINUS_ZERO (r))
4121 return 0;
4122
4123 for (i = 0; i < 8; i++)
4124 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4125 return 1;
4126
4127 return 0;
4128 }
4129 \f
4130 /* Predicates for `match_operand' and `match_operator'. */
4131
4132 /* s_register_operand is the same as register_operand, but it doesn't accept
4133 (SUBREG (MEM)...).
4134
4135 This function exists because at the time it was put in it led to better
4136 code. SUBREG(MEM) always needs a reload in the places where
4137 s_register_operand is used, and this seemed to lead to excessive
4138 reloading. */
4139 int
4140 s_register_operand (rtx op, enum machine_mode mode)
4141 {
4142 if (GET_MODE (op) != mode && mode != VOIDmode)
4143 return 0;
4144
4145 if (GET_CODE (op) == SUBREG)
4146 op = SUBREG_REG (op);
4147
4148 /* We don't consider registers whose class is NO_REGS
4149 to be a register operand. */
4150 /* XXX might have to check for lo regs only for thumb ??? */
4151 return (GET_CODE (op) == REG
4152 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4153 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4154 }
4155
4156 /* A hard register operand (even before reload). */
4157 int
4158 arm_hard_register_operand (rtx op, enum machine_mode mode)
4159 {
4160 if (GET_MODE (op) != mode && mode != VOIDmode)
4161 return 0;
4162
4163 return (GET_CODE (op) == REG
4164 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4165 }
4166
4167 /* An arm register operand. */
4168 int
4169 arm_general_register_operand (rtx op, enum machine_mode mode)
4170 {
4171 if (GET_MODE (op) != mode && mode != VOIDmode)
4172 return 0;
4173
4174 if (GET_CODE (op) == SUBREG)
4175 op = SUBREG_REG (op);
4176
4177 return (GET_CODE (op) == REG
4178 && (REGNO (op) <= LAST_ARM_REGNUM
4179 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4180 }
4181
4182 /* Only accept reg, subreg(reg), const_int. */
4183 int
4184 reg_or_int_operand (rtx op, enum machine_mode mode)
4185 {
4186 if (GET_CODE (op) == CONST_INT)
4187 return 1;
4188
4189 if (GET_MODE (op) != mode && mode != VOIDmode)
4190 return 0;
4191
4192 if (GET_CODE (op) == SUBREG)
4193 op = SUBREG_REG (op);
4194
4195 /* We don't consider registers whose class is NO_REGS
4196 to be a register operand. */
4197 return (GET_CODE (op) == REG
4198 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4199 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4200 }
4201
4202 /* Return 1 if OP is an item in memory, given that we are in reload. */
4203 int
4204 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4205 {
4206 int regno = true_regnum (op);
4207
4208 return (!CONSTANT_P (op)
4209 && (regno == -1
4210 || (GET_CODE (op) == REG
4211 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4212 }
4213
4214 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
4215 memory access (architecture V4).
4216 MODE is QImode if called when computing constraints, or VOIDmode when
4217 emitting patterns. In this latter case we cannot use memory_operand()
4218 because it will fail on badly formed MEMs, which is precisely what we are
4219 trying to catch. */
4220 int
4221 bad_signed_byte_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4222 {
4223 if (GET_CODE (op) != MEM)
4224 return 0;
4225
4226 op = XEXP (op, 0);
4227
4228 /* A sum of anything more complex than reg + reg or reg + const is bad. */
4229 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
4230 && (!s_register_operand (XEXP (op, 0), VOIDmode)
4231 || (!s_register_operand (XEXP (op, 1), VOIDmode)
4232 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
4233 return 1;
4234
4235 /* Big constants are also bad. */
4236 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
4237 && (INTVAL (XEXP (op, 1)) > 0xff
4238 || -INTVAL (XEXP (op, 1)) > 0xff))
4239 return 1;
4240
4241 /* Everything else is good, or will automatically be made so. */
4242 return 0;
4243 }
4244
4245 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4246 int
4247 arm_rhs_operand (rtx op, enum machine_mode mode)
4248 {
4249 return (s_register_operand (op, mode)
4250 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4251 }
4252
4253 /* Return TRUE for valid operands for the
4254 rhs of an ARM instruction, or a load. */
4255 int
4256 arm_rhsm_operand (rtx op, enum machine_mode mode)
4257 {
4258 return (s_register_operand (op, mode)
4259 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4260 || memory_operand (op, mode));
4261 }
4262
4263 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
4264 constant that is valid when negated. */
4265 int
4266 arm_add_operand (rtx op, enum machine_mode mode)
4267 {
4268 if (TARGET_THUMB)
4269 return thumb_cmp_operand (op, mode);
4270
4271 return (s_register_operand (op, mode)
4272 || (GET_CODE (op) == CONST_INT
4273 && (const_ok_for_arm (INTVAL (op))
4274 || const_ok_for_arm (-INTVAL (op)))));
4275 }
4276
4277 /* Return TRUE for valid ARM constants (or when valid if negated). */
4278 int
4279 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4280 {
4281 return (GET_CODE (op) == CONST_INT
4282 && (const_ok_for_arm (INTVAL (op))
4283 || const_ok_for_arm (-INTVAL (op))));
4284 }
4285
4286 int
4287 arm_not_operand (rtx op, enum machine_mode mode)
4288 {
4289 return (s_register_operand (op, mode)
4290 || (GET_CODE (op) == CONST_INT
4291 && (const_ok_for_arm (INTVAL (op))
4292 || const_ok_for_arm (~INTVAL (op)))));
4293 }
4294
4295 /* Return TRUE if the operand is a memory reference which contains an
4296 offsettable address. */
4297 int
4298 offsettable_memory_operand (rtx op, enum machine_mode mode)
4299 {
4300 if (mode == VOIDmode)
4301 mode = GET_MODE (op);
4302
4303 return (mode == GET_MODE (op)
4304 && GET_CODE (op) == MEM
4305 && offsettable_address_p (reload_completed | reload_in_progress,
4306 mode, XEXP (op, 0)));
4307 }
4308
4309 /* Return TRUE if the operand is a memory reference which is, or can be
4310 made word aligned by adjusting the offset. */
4311 int
4312 alignable_memory_operand (rtx op, enum machine_mode mode)
4313 {
4314 rtx reg;
4315
4316 if (mode == VOIDmode)
4317 mode = GET_MODE (op);
4318
4319 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4320 return 0;
4321
4322 op = XEXP (op, 0);
4323
4324 return ((GET_CODE (reg = op) == REG
4325 || (GET_CODE (op) == SUBREG
4326 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4327 || (GET_CODE (op) == PLUS
4328 && GET_CODE (XEXP (op, 1)) == CONST_INT
4329 && (GET_CODE (reg = XEXP (op, 0)) == REG
4330 || (GET_CODE (XEXP (op, 0)) == SUBREG
4331 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4332 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4333 }
4334
4335 /* Similar to s_register_operand, but does not allow hard integer
4336 registers. */
4337 int
4338 f_register_operand (rtx op, enum machine_mode mode)
4339 {
4340 if (GET_MODE (op) != mode && mode != VOIDmode)
4341 return 0;
4342
4343 if (GET_CODE (op) == SUBREG)
4344 op = SUBREG_REG (op);
4345
4346 /* We don't consider registers whose class is NO_REGS
4347 to be a register operand. */
4348 return (GET_CODE (op) == REG
4349 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4350 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4351 }
4352
4353 /* Return TRUE for valid operands for the rhs of a floating point insn.
4354 Allows regs or certain consts on FPA, just regs for everything else. */
4355 int
4356 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4357 {
4358 if (s_register_operand (op, mode))
4359 return TRUE;
4360
4361 if (GET_MODE (op) != mode && mode != VOIDmode)
4362 return FALSE;
4363
4364 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4365 return arm_const_double_rtx (op);
4366
4367 return FALSE;
4368 }
4369
4370 int
4371 arm_float_add_operand (rtx op, enum machine_mode mode)
4372 {
4373 if (s_register_operand (op, mode))
4374 return TRUE;
4375
4376 if (GET_MODE (op) != mode && mode != VOIDmode)
4377 return FALSE;
4378
4379 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4380 return (arm_const_double_rtx (op)
4381 || neg_const_double_rtx_ok_for_fpa (op));
4382
4383 return FALSE;
4384 }
4385
4386
4387 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4388 Depends on which FPU we are targeting. */
4389
4390 int
4391 arm_float_compare_operand (rtx op, enum machine_mode mode)
4392 {
4393 if (TARGET_VFP)
4394 return vfp_compare_operand (op, mode);
4395 else
4396 return arm_float_rhs_operand (op, mode);
4397 }
4398
4399
4400 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4401 int
4402 cirrus_memory_offset (rtx op)
4403 {
4404 /* Reject eliminable registers. */
4405 if (! (reload_in_progress || reload_completed)
4406 && ( reg_mentioned_p (frame_pointer_rtx, op)
4407 || reg_mentioned_p (arg_pointer_rtx, op)
4408 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4409 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4410 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4411 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4412 return 0;
4413
4414 if (GET_CODE (op) == MEM)
4415 {
4416 rtx ind;
4417
4418 ind = XEXP (op, 0);
4419
4420 /* Match: (mem (reg)). */
4421 if (GET_CODE (ind) == REG)
4422 return 1;
4423
4424 /* Match:
4425 (mem (plus (reg)
4426 (const))). */
4427 if (GET_CODE (ind) == PLUS
4428 && GET_CODE (XEXP (ind, 0)) == REG
4429 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4430 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4431 return 1;
4432 }
4433
4434 return 0;
4435 }
4436
4437 /* Return nonzero if OP is a Cirrus or general register. */
4438 int
4439 cirrus_register_operand (rtx op, enum machine_mode mode)
4440 {
4441 if (GET_MODE (op) != mode && mode != VOIDmode)
4442 return FALSE;
4443
4444 if (GET_CODE (op) == SUBREG)
4445 op = SUBREG_REG (op);
4446
4447 return (GET_CODE (op) == REG
4448 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4449 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4450 }
4451
4452 /* Return nonzero if OP is a Cirrus FP register. */
4453 int
4454 cirrus_fp_register (rtx op, enum machine_mode mode)
4455 {
4456 if (GET_MODE (op) != mode && mode != VOIDmode)
4457 return FALSE;
4458
4459 if (GET_CODE (op) == SUBREG)
4460 op = SUBREG_REG (op);
4461
4462 return (GET_CODE (op) == REG
4463 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4464 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4465 }
4466
4467 /* Return nonzero if OP is a 6-bit constant (0..63). */
4468 int
4469 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4470 {
4471 return (GET_CODE (op) == CONST_INT
4472 && INTVAL (op) >= 0
4473 && INTVAL (op) < 64);
4474 }
4475
4476
4477 /* Return TRUE if OP is a valid VFP memory address pattern. */
4478 /* Copied from cirrus_memory_offset but with restricted offset range. */
4479
4480 int
4481 vfp_mem_operand (rtx op)
4482 {
4483 /* Reject eliminable registers. */
4484
4485 if (! (reload_in_progress || reload_completed)
4486 && ( reg_mentioned_p (frame_pointer_rtx, op)
4487 || reg_mentioned_p (arg_pointer_rtx, op)
4488 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4489 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4490 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4491 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4492 return FALSE;
4493
4494 /* Constants are converted into offsets from labels. */
4495 if (GET_CODE (op) == MEM)
4496 {
4497 rtx ind;
4498
4499 ind = XEXP (op, 0);
4500
4501 if (reload_completed
4502 && (GET_CODE (ind) == LABEL_REF
4503 || (GET_CODE (ind) == CONST
4504 && GET_CODE (XEXP (ind, 0)) == PLUS
4505 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4506 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4507 return TRUE;
4508
4509 /* Match: (mem (reg)). */
4510 if (GET_CODE (ind) == REG)
4511 return arm_address_register_rtx_p (ind, 0);
4512
4513 /* Match:
4514 (mem (plus (reg)
4515 (const))). */
4516 if (GET_CODE (ind) == PLUS
4517 && GET_CODE (XEXP (ind, 0)) == REG
4518 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4519 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4520 && INTVAL (XEXP (ind, 1)) > -1024
4521 && INTVAL (XEXP (ind, 1)) < 1024)
4522 return TRUE;
4523 }
4524
4525 return FALSE;
4526 }
4527
4528
4529 /* Return TRUE if OP is a REG or constant zero. */
4530 int
4531 vfp_compare_operand (rtx op, enum machine_mode mode)
4532 {
4533 if (s_register_operand (op, mode))
4534 return TRUE;
4535
4536 return (GET_CODE (op) == CONST_DOUBLE
4537 && arm_const_double_rtx (op));
4538 }
4539
4540
4541 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4542 VFP registers. Otherwise return NO_REGS. */
4543
4544 enum reg_class
4545 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4546 {
4547 if (vfp_mem_operand (x) || s_register_operand (x, mode))
4548 return NO_REGS;
4549
4550 return GENERAL_REGS;
4551 }
4552
4553
4554 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4555 Used by the Cirrus Maverick code, which has to work around
4556 a hardware bug triggered by such instructions. */
4557 static bool
4558 arm_memory_load_p (rtx insn)
4559 {
4560 rtx body, lhs, rhs;
4561
4562 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4563 return false;
4564
4565 body = PATTERN (insn);
4566
4567 if (GET_CODE (body) != SET)
4568 return false;
4569
4570 lhs = XEXP (body, 0);
4571 rhs = XEXP (body, 1);
4572
4573 lhs = REG_OR_SUBREG_RTX (lhs);
4574
4575 /* If the destination is not a general purpose
4576 register we do not have to worry. */
4577 if (GET_CODE (lhs) != REG
4578 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4579 return false;
4580
4581 /* As well as loads from memory we also have to react
4582 to loads of invalid constants which will be turned
4583 into loads from the minipool. */
4584 return (GET_CODE (rhs) == MEM
4585 || GET_CODE (rhs) == SYMBOL_REF
4586 || note_invalid_constants (insn, -1, false));
4587 }
4588
4589 /* Return TRUE if INSN is a Cirrus instruction. */
4590 static bool
4591 arm_cirrus_insn_p (rtx insn)
4592 {
4593 enum attr_cirrus attr;
4594
4595 /* get_attr aborts on USE and CLOBBER. */
4596 if (!insn
4597 || GET_CODE (insn) != INSN
4598 || GET_CODE (PATTERN (insn)) == USE
4599 || GET_CODE (PATTERN (insn)) == CLOBBER)
4600 return 0;
4601
4602 attr = get_attr_cirrus (insn);
4603
4604 return attr != CIRRUS_NOT;
4605 }
4606
4607 /* Cirrus reorg for invalid instruction combinations. */
4608 static void
4609 cirrus_reorg (rtx first)
4610 {
4611 enum attr_cirrus attr;
4612 rtx body = PATTERN (first);
4613 rtx t;
4614 int nops;
4615
4616 /* Any branch must be followed by 2 non-Cirrus instructions. */
4617 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4618 {
4619 nops = 0;
4620 t = next_nonnote_insn (first);
4621
4622 if (arm_cirrus_insn_p (t))
4623 ++ nops;
4624
4625 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4626 ++ nops;
4627
4628 while (nops --)
4629 emit_insn_after (gen_nop (), first);
4630
4631 return;
4632 }
4633
4634 /* (float (blah)) is in parallel with a clobber. */
4635 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4636 body = XVECEXP (body, 0, 0);
4637
4638 if (GET_CODE (body) == SET)
4639 {
4640 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4641
4642 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4643 be followed by a non-Cirrus insn. */
4644 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4645 {
4646 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4647 emit_insn_after (gen_nop (), first);
4648
4649 return;
4650 }
4651 else if (arm_memory_load_p (first))
4652 {
4653 unsigned int arm_regno;
4654
4655 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4656 ldr/cfmv64hr combination where the Rd field is the same
4657 in both instructions must be split with a non-Cirrus
4658 insn. Example:
4659
4660 ldr r0, blah
4661 nop
4662 cfmvsr mvf0, r0. */
4663
4664 /* Get Arm register number for ldr insn. */
4665 if (GET_CODE (lhs) == REG)
4666 arm_regno = REGNO (lhs);
4667 else if (GET_CODE (rhs) == REG)
4668 arm_regno = REGNO (rhs);
4669 else
4670 abort ();
4671
4672 /* Next insn. */
4673 first = next_nonnote_insn (first);
4674
4675 if (! arm_cirrus_insn_p (first))
4676 return;
4677
4678 body = PATTERN (first);
4679
4680 /* (float (blah)) is in parallel with a clobber. */
4681 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4682 body = XVECEXP (body, 0, 0);
4683
4684 if (GET_CODE (body) == FLOAT)
4685 body = XEXP (body, 0);
4686
4687 if (get_attr_cirrus (first) == CIRRUS_MOVE
4688 && GET_CODE (XEXP (body, 1)) == REG
4689 && arm_regno == REGNO (XEXP (body, 1)))
4690 emit_insn_after (gen_nop (), first);
4691
4692 return;
4693 }
4694 }
4695
4696 /* get_attr aborts on USE and CLOBBER. */
4697 if (!first
4698 || GET_CODE (first) != INSN
4699 || GET_CODE (PATTERN (first)) == USE
4700 || GET_CODE (PATTERN (first)) == CLOBBER)
4701 return;
4702
4703 attr = get_attr_cirrus (first);
4704
4705 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4706 must be followed by a non-coprocessor instruction. */
4707 if (attr == CIRRUS_COMPARE)
4708 {
4709 nops = 0;
4710
4711 t = next_nonnote_insn (first);
4712
4713 if (arm_cirrus_insn_p (t))
4714 ++ nops;
4715
4716 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4717 ++ nops;
4718
4719 while (nops --)
4720 emit_insn_after (gen_nop (), first);
4721
4722 return;
4723 }
4724 }
4725
4726 /* Return nonzero if OP is a constant power of two. */
4727 int
4728 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4729 {
4730 if (GET_CODE (op) == CONST_INT)
4731 {
4732 HOST_WIDE_INT value = INTVAL (op);
4733
4734 return value != 0 && (value & (value - 1)) == 0;
4735 }
4736
4737 return FALSE;
4738 }
4739
4740 /* Return TRUE for a valid operand of a DImode operation.
4741 Either: REG, SUBREG, CONST_DOUBLE, CONST_INT or MEM(DImode_address).
4742 Note that this disallows MEM(REG+REG), but allows
4743 MEM(PRE/POST_INC/DEC(REG)). */
4744 int
4745 di_operand (rtx op, enum machine_mode mode)
4746 {
4747 if (s_register_operand (op, mode))
4748 return TRUE;
4749
4750 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4751 return FALSE;
4752
4753 if (GET_CODE (op) == SUBREG)
4754 op = SUBREG_REG (op);
4755
4756 switch (GET_CODE (op))
4757 {
4758 case CONST_DOUBLE:
4759 case CONST_INT:
4760 return TRUE;
4761
4762 case MEM:
4763 return memory_address_p (DImode, XEXP (op, 0));
4764
4765 default:
4766 return FALSE;
4767 }
4768 }
4769
4770 /* Like di_operand, but don't accept constants. */
4771 int
4772 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4773 {
4774 if (s_register_operand (op, mode))
4775 return TRUE;
4776
4777 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4778 return FALSE;
4779
4780 if (GET_CODE (op) == SUBREG)
4781 op = SUBREG_REG (op);
4782
4783 if (GET_CODE (op) == MEM)
4784 return memory_address_p (DImode, XEXP (op, 0));
4785
4786 return FALSE;
4787 }
4788
4789 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
4790 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
4791 Note that this disallows MEM(REG+REG), but allows
4792 MEM(PRE/POST_INC/DEC(REG)). */
4793 int
4794 soft_df_operand (rtx op, enum machine_mode mode)
4795 {
4796 if (s_register_operand (op, mode))
4797 return TRUE;
4798
4799 if (mode != VOIDmode && GET_MODE (op) != mode)
4800 return FALSE;
4801
4802 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4803 return FALSE;
4804
4805 if (GET_CODE (op) == SUBREG)
4806 op = SUBREG_REG (op);
4807
4808 switch (GET_CODE (op))
4809 {
4810 case CONST_DOUBLE:
4811 return TRUE;
4812
4813 case MEM:
4814 return memory_address_p (DFmode, XEXP (op, 0));
4815
4816 default:
4817 return FALSE;
4818 }
4819 }
4820
4821 /* Like soft_df_operand, but don't accept constants. */
4822 int
4823 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4824 {
4825 if (s_register_operand (op, mode))
4826 return TRUE;
4827
4828 if (mode != VOIDmode && GET_MODE (op) != mode)
4829 return FALSE;
4830
4831 if (GET_CODE (op) == SUBREG)
4832 op = SUBREG_REG (op);
4833
4834 if (GET_CODE (op) == MEM)
4835 return memory_address_p (DFmode, XEXP (op, 0));
4836 return FALSE;
4837 }
4838
4839 /* Return TRUE for valid index operands. */
4840 int
4841 index_operand (rtx op, enum machine_mode mode)
4842 {
4843 return (s_register_operand (op, mode)
4844 || (immediate_operand (op, mode)
4845 && (GET_CODE (op) != CONST_INT
4846 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4847 }
4848
4849 /* Return TRUE for valid shifts by a constant. This also accepts any
4850 power of two on the (somewhat overly relaxed) assumption that the
4851 shift operator in this case was a mult. */
4852 int
4853 const_shift_operand (rtx op, enum machine_mode mode)
4854 {
4855 return (power_of_two_operand (op, mode)
4856 || (immediate_operand (op, mode)
4857 && (GET_CODE (op) != CONST_INT
4858 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4859 }
4860
4861 /* Return TRUE for arithmetic operators which can be combined with a multiply
4862 (shift). */
4863 int
4864 shiftable_operator (rtx x, enum machine_mode mode)
4865 {
4866 enum rtx_code code;
4867
4868 if (GET_MODE (x) != mode)
4869 return FALSE;
4870
4871 code = GET_CODE (x);
4872
4873 return (code == PLUS || code == MINUS
4874 || code == IOR || code == XOR || code == AND);
4875 }
4876
4877 /* Return TRUE for binary logical operators. */
4878 int
4879 logical_binary_operator (rtx x, enum machine_mode mode)
4880 {
4881 enum rtx_code code;
4882
4883 if (GET_MODE (x) != mode)
4884 return FALSE;
4885
4886 code = GET_CODE (x);
4887
4888 return (code == IOR || code == XOR || code == AND);
4889 }
4890
4891 /* Return TRUE for shift operators. */
4892 int
4893 shift_operator (rtx x, enum machine_mode mode)
4894 {
4895 enum rtx_code code;
4896
4897 if (GET_MODE (x) != mode)
4898 return FALSE;
4899
4900 code = GET_CODE (x);
4901
4902 if (code == MULT)
4903 return power_of_two_operand (XEXP (x, 1), mode);
4904
4905 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
4906 || code == ROTATERT);
4907 }
4908
4909 /* Return TRUE if x is EQ or NE. */
4910 int
4911 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
4912 {
4913 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
4914 }
4915
4916 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
4917 int
4918 arm_comparison_operator (rtx x, enum machine_mode mode)
4919 {
4920 return (comparison_operator (x, mode)
4921 && GET_CODE (x) != LTGT
4922 && GET_CODE (x) != UNEQ);
4923 }
4924
4925 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
4926 int
4927 minmax_operator (rtx x, enum machine_mode mode)
4928 {
4929 enum rtx_code code = GET_CODE (x);
4930
4931 if (GET_MODE (x) != mode)
4932 return FALSE;
4933
4934 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
4935 }
4936
4937 /* Return TRUE if this is the condition code register; if we aren't given
4938 a mode, accept any class CCmode register. */
4939 int
4940 cc_register (rtx x, enum machine_mode mode)
4941 {
4942 if (mode == VOIDmode)
4943 {
4944 mode = GET_MODE (x);
4945
4946 if (GET_MODE_CLASS (mode) != MODE_CC)
4947 return FALSE;
4948 }
4949
4950 if ( GET_MODE (x) == mode
4951 && GET_CODE (x) == REG
4952 && REGNO (x) == CC_REGNUM)
4953 return TRUE;
4954
4955 return FALSE;
4956 }
4957
4958 /* Return TRUE if this is the condition code register; if we aren't given
4959 a mode, accept any class CCmode register which indicates a dominance
4960 expression. */
4961 int
4962 dominant_cc_register (rtx x, enum machine_mode mode)
4963 {
4964 if (mode == VOIDmode)
4965 {
4966 mode = GET_MODE (x);
4967
4968 if (GET_MODE_CLASS (mode) != MODE_CC)
4969 return FALSE;
4970 }
4971
4972 if (mode != CC_DNEmode && mode != CC_DEQmode
4973 && mode != CC_DLEmode && mode != CC_DLTmode
4974 && mode != CC_DGEmode && mode != CC_DGTmode
4975 && mode != CC_DLEUmode && mode != CC_DLTUmode
4976 && mode != CC_DGEUmode && mode != CC_DGTUmode)
4977 return FALSE;
4978
4979 return cc_register (x, mode);
4980 }
4981
4982 /* Return TRUE if X references a SYMBOL_REF. */
4983 int
4984 symbol_mentioned_p (rtx x)
4985 {
4986 const char * fmt;
4987 int i;
4988
4989 if (GET_CODE (x) == SYMBOL_REF)
4990 return 1;
4991
4992 fmt = GET_RTX_FORMAT (GET_CODE (x));
4993
4994 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4995 {
4996 if (fmt[i] == 'E')
4997 {
4998 int j;
4999
5000 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5001 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5002 return 1;
5003 }
5004 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5005 return 1;
5006 }
5007
5008 return 0;
5009 }
5010
5011 /* Return TRUE if X references a LABEL_REF. */
5012 int
5013 label_mentioned_p (rtx x)
5014 {
5015 const char * fmt;
5016 int i;
5017
5018 if (GET_CODE (x) == LABEL_REF)
5019 return 1;
5020
5021 fmt = GET_RTX_FORMAT (GET_CODE (x));
5022 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5023 {
5024 if (fmt[i] == 'E')
5025 {
5026 int j;
5027
5028 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5029 if (label_mentioned_p (XVECEXP (x, i, j)))
5030 return 1;
5031 }
5032 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5033 return 1;
5034 }
5035
5036 return 0;
5037 }
5038
5039 enum rtx_code
5040 minmax_code (rtx x)
5041 {
5042 enum rtx_code code = GET_CODE (x);
5043
5044 if (code == SMAX)
5045 return GE;
5046 else if (code == SMIN)
5047 return LE;
5048 else if (code == UMIN)
5049 return LEU;
5050 else if (code == UMAX)
5051 return GEU;
5052
5053 abort ();
5054 }
5055
5056 /* Return 1 if memory locations are adjacent. */
5057 int
5058 adjacent_mem_locations (rtx a, rtx b)
5059 {
5060 if ((GET_CODE (XEXP (a, 0)) == REG
5061 || (GET_CODE (XEXP (a, 0)) == PLUS
5062 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5063 && (GET_CODE (XEXP (b, 0)) == REG
5064 || (GET_CODE (XEXP (b, 0)) == PLUS
5065 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5066 {
5067 int val0 = 0, val1 = 0;
5068 int reg0, reg1;
5069
5070 if (GET_CODE (XEXP (a, 0)) == PLUS)
5071 {
5072 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5073 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5074 }
5075 else
5076 reg0 = REGNO (XEXP (a, 0));
5077
5078 if (GET_CODE (XEXP (b, 0)) == PLUS)
5079 {
5080 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5081 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5082 }
5083 else
5084 reg1 = REGNO (XEXP (b, 0));
5085
5086 /* Don't accept any offset that will require multiple
5087 instructions to handle, since this would cause the
5088 arith_adjacentmem pattern to output an overlong sequence. */
5089 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5090 return 0;
5091
5092 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5093 }
5094 return 0;
5095 }
5096
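/* Editorial aside, not part of the original arm.c: stripped of the RTL
   pattern matching, adjacent_mem_locations above accepts two base+offset
   addresses when they share the same base register, each offset is a
   legal single add/sub immediate (a check omitted from this sketch), and
   the offsets differ by exactly one word.  The helper name below is an
   illustrative assumption, not a GCC interface.  */

static int
offsets_adjacent_p (int reg0, long val0, int reg1, long val1)
{
  /* Same base register and offsets exactly 4 bytes apart: the pair
     [r3, #8] / [r3, #12] qualifies, while [r4, #12] or [r3, #20] would
     not.  */
  return reg0 == reg1 && (val1 - val0 == 4 || val0 - val1 == 4);
}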
5097 /* Return 1 if OP is a load multiple operation. It is known to be
5098 parallel and the first section will be tested. */
5099 int
5100 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5101 {
5102 HOST_WIDE_INT count = XVECLEN (op, 0);
5103 int dest_regno;
5104 rtx src_addr;
5105 HOST_WIDE_INT i = 1, base = 0;
5106 rtx elt;
5107
5108 if (count <= 1
5109 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5110 return 0;
5111
5112 /* Check to see if this might be a write-back. */
5113 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5114 {
5115 i++;
5116 base = 1;
5117
5118 /* Now check it more carefully. */
5119 if (GET_CODE (SET_DEST (elt)) != REG
5120 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5121 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5122 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5123 return 0;
5124 }
5125
5126 /* Perform a quick check so we don't blow up below. */
5127 if (count <= i
5128 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5129 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5130 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5131 return 0;
5132
5133 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5134 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5135
5136 for (; i < count; i++)
5137 {
5138 elt = XVECEXP (op, 0, i);
5139
5140 if (GET_CODE (elt) != SET
5141 || GET_CODE (SET_DEST (elt)) != REG
5142 || GET_MODE (SET_DEST (elt)) != SImode
5143 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5144 || GET_CODE (SET_SRC (elt)) != MEM
5145 || GET_MODE (SET_SRC (elt)) != SImode
5146 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5147 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5148 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5149 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5150 return 0;
5151 }
5152
5153 return 1;
5154 }
5155
5156 /* Return 1 if OP is a store multiple operation. It is known to be
5157 parallel and the first section will be tested. */
5158 int
5159 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5160 {
5161 HOST_WIDE_INT count = XVECLEN (op, 0);
5162 int src_regno;
5163 rtx dest_addr;
5164 HOST_WIDE_INT i = 1, base = 0;
5165 rtx elt;
5166
5167 if (count <= 1
5168 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5169 return 0;
5170
5171 /* Check to see if this might be a write-back. */
5172 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5173 {
5174 i++;
5175 base = 1;
5176
5177 /* Now check it more carefully. */
5178 if (GET_CODE (SET_DEST (elt)) != REG
5179 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5180 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5181 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5182 return 0;
5183 }
5184
5185 /* Perform a quick check so we don't blow up below. */
5186 if (count <= i
5187 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5188 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5189 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5190 return 0;
5191
5192 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5193 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5194
5195 for (; i < count; i++)
5196 {
5197 elt = XVECEXP (op, 0, i);
5198
5199 if (GET_CODE (elt) != SET
5200 || GET_CODE (SET_SRC (elt)) != REG
5201 || GET_MODE (SET_SRC (elt)) != SImode
5202 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5203 || GET_CODE (SET_DEST (elt)) != MEM
5204 || GET_MODE (SET_DEST (elt)) != SImode
5205 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5206 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5207 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5208 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5209 return 0;
5210 }
5211
5212 return 1;
5213 }
5214
5215 int
5216 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5217 HOST_WIDE_INT *load_offset)
5218 {
5219 int unsorted_regs[4];
5220 HOST_WIDE_INT unsorted_offsets[4];
5221 int order[4];
5222 int base_reg = -1;
5223 int i;
5224
5225 /* Can only handle 2, 3, or 4 insns at present,
5226 though could be easily extended if required. */
5227 if (nops < 2 || nops > 4)
5228 abort ();
5229
5230 /* Loop over the operands and check that the memory references are
5231 suitable (i.e. immediate offsets from the same base register). At
5232 the same time, extract the target register, and the memory
5233 offsets. */
5234 for (i = 0; i < nops; i++)
5235 {
5236 rtx reg;
5237 rtx offset;
5238
5239 /* Convert a subreg of a mem into the mem itself. */
5240 if (GET_CODE (operands[nops + i]) == SUBREG)
5241 operands[nops + i] = alter_subreg (operands + (nops + i));
5242
5243 if (GET_CODE (operands[nops + i]) != MEM)
5244 abort ();
5245
5246 /* Don't reorder volatile memory references; it doesn't seem worth
5247 looking for the case where the order is ok anyway. */
5248 if (MEM_VOLATILE_P (operands[nops + i]))
5249 return 0;
5250
5251 offset = const0_rtx;
5252
5253 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5254 || (GET_CODE (reg) == SUBREG
5255 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5256 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5257 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5258 == REG)
5259 || (GET_CODE (reg) == SUBREG
5260 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5261 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5262 == CONST_INT)))
5263 {
5264 if (i == 0)
5265 {
5266 base_reg = REGNO (reg);
5267 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5268 ? REGNO (operands[i])
5269 : REGNO (SUBREG_REG (operands[i])));
5270 order[0] = 0;
5271 }
5272 else
5273 {
5274 if (base_reg != (int) REGNO (reg))
5275 /* Not addressed from the same base register. */
5276 return 0;
5277
5278 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5279 ? REGNO (operands[i])
5280 : REGNO (SUBREG_REG (operands[i])));
5281 if (unsorted_regs[i] < unsorted_regs[order[0]])
5282 order[0] = i;
5283 }
5284
5285 /* If it isn't an integer register, or if it overwrites the
5286 base register but isn't the last insn in the list, then
5287 we can't do this. */
5288 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5289 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5290 return 0;
5291
5292 unsorted_offsets[i] = INTVAL (offset);
5293 }
5294 else
5295 /* Not a suitable memory address. */
5296 return 0;
5297 }
5298
5299 /* All the useful information has now been extracted from the
5300 operands into unsorted_regs and unsorted_offsets; additionally,
5301 order[0] has been set to the lowest numbered register in the
5302 list. Sort the registers into order, and check that the memory
5303 offsets are ascending and adjacent. */
5304
5305 for (i = 1; i < nops; i++)
5306 {
5307 int j;
5308
5309 order[i] = order[i - 1];
5310 for (j = 0; j < nops; j++)
5311 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5312 && (order[i] == order[i - 1]
5313 || unsorted_regs[j] < unsorted_regs[order[i]]))
5314 order[i] = j;
5315
5316 /* Have we found a suitable register? If not, one must be used more
5317 than once. */
5318 if (order[i] == order[i - 1])
5319 return 0;
5320
5321 /* Is the memory address adjacent and ascending? */
5322 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5323 return 0;
5324 }
5325
5326 if (base)
5327 {
5328 *base = base_reg;
5329
5330 for (i = 0; i < nops; i++)
5331 regs[i] = unsorted_regs[order[i]];
5332
5333 *load_offset = unsorted_offsets[order[0]];
5334 }
5335
5336 if (unsorted_offsets[order[0]] == 0)
5337 return 1; /* ldmia */
5338
5339 if (unsorted_offsets[order[0]] == 4)
5340 return 2; /* ldmib */
5341
5342 if (unsorted_offsets[order[nops - 1]] == 0)
5343 return 3; /* ldmda */
5344
5345 if (unsorted_offsets[order[nops - 1]] == -4)
5346 return 4; /* ldmdb */
5347
5348 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5349 if the offset isn't small enough. The reason 2 ldrs are faster
5350 is because these ARMs are able to do more than one cache access
5351 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5352 whilst the ARM8 has a double bandwidth cache. This means that
5353 these cores can do both an instruction fetch and a data fetch in
5354 a single cycle, so the trick of calculating the address into a
5355 scratch register (one of the result regs) and then doing a load
5356 multiple actually becomes slower (and no smaller in code size).
5357 That is the transformation
5358
5359 ldr rd1, [rbase + offset]
5360 ldr rd2, [rbase + offset + 4]
5361
5362 to
5363
5364 add rd1, rbase, offset
5365 ldmia rd1, {rd1, rd2}
5366
5367 produces worse code -- '3 cycles + any stalls on rd2' instead of
5368 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5369 access per cycle, the first sequence could never complete in less
5370 than 6 cycles, whereas the ldm sequence would only take 5 and
5371 would make better use of sequential accesses if not hitting the
5372 cache.
5373
5374 We cheat here and test 'arm_ld_sched' which we currently know to
5375 only be true for the ARM8, ARM9 and StrongARM. If this ever
5376 changes, then the test below needs to be reworked. */
5377 if (nops == 2 && arm_ld_sched)
5378 return 0;
5379
5380 /* Can't do it without setting up the offset, only do this if it takes
5381 no more than one insn. */
5382 return (const_ok_for_arm (unsorted_offsets[order[0]])
5383 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5384 }
5385
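/* Editorial aside, not part of the original arm.c: the heart of
   load_multiple_sequence (and of store_multiple_sequence below) is the
   ordering loop that pairs ascending register numbers with ascending,
   word-adjacent offsets.  The standalone sketch below reproduces just
   that loop for up to four operands; ascending_ldm_ok is an illustrative
   name, not a GCC interface, and the base-register and volatility checks
   of the real code are omitted.  For example, registers {3, 1, 2} at
   offsets {8, 0, 4} pass, while reusing a register or skipping an offset
   fails.  */

static int
ascending_ldm_ok (const int regs[], const long offsets[], int nops)
{
  int order[4];   /* nops is assumed to be between 2 and 4, as in arm.c.  */
  int i, j;

  /* order[0] indexes the lowest-numbered register.  */
  order[0] = 0;
  for (i = 1; i < nops; i++)
    if (regs[i] < regs[order[0]])
      order[0] = i;

  for (i = 1; i < nops; i++)
    {
      /* Pick the smallest register strictly greater than the previous one.  */
      order[i] = order[i - 1];
      for (j = 0; j < nops; j++)
        if (regs[j] > regs[order[i - 1]]
            && (order[i] == order[i - 1] || regs[j] < regs[order[i]]))
          order[i] = j;

      /* No such register: one was used more than once.  */
      if (order[i] == order[i - 1])
        return 0;

      /* Offsets must ascend in steps of 4 along the register order.  */
      if (offsets[order[i]] != offsets[order[i - 1]] + 4)
        return 0;
    }

  return 1;
}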
5386 const char *
5387 emit_ldm_seq (rtx *operands, int nops)
5388 {
5389 int regs[4];
5390 int base_reg;
5391 HOST_WIDE_INT offset;
5392 char buf[100];
5393 int i;
5394
5395 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5396 {
5397 case 1:
5398 strcpy (buf, "ldm%?ia\t");
5399 break;
5400
5401 case 2:
5402 strcpy (buf, "ldm%?ib\t");
5403 break;
5404
5405 case 3:
5406 strcpy (buf, "ldm%?da\t");
5407 break;
5408
5409 case 4:
5410 strcpy (buf, "ldm%?db\t");
5411 break;
5412
5413 case 5:
5414 if (offset >= 0)
5415 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5416 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5417 (long) offset);
5418 else
5419 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5420 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5421 (long) -offset);
5422 output_asm_insn (buf, operands);
5423 base_reg = regs[0];
5424 strcpy (buf, "ldm%?ia\t");
5425 break;
5426
5427 default:
5428 abort ();
5429 }
5430
5431 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5432 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5433
5434 for (i = 1; i < nops; i++)
5435 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5436 reg_names[regs[i]]);
5437
5438 strcat (buf, "}\t%@ phole ldm");
5439
5440 output_asm_insn (buf, operands);
5441 return "";
5442 }
5443
5444 int
5445 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5446 HOST_WIDE_INT * load_offset)
5447 {
5448 int unsorted_regs[4];
5449 HOST_WIDE_INT unsorted_offsets[4];
5450 int order[4];
5451 int base_reg = -1;
5452 int i;
5453
5454 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5455 extended if required. */
5456 if (nops < 2 || nops > 4)
5457 abort ();
5458
5459 /* Loop over the operands and check that the memory references are
5460 suitable (i.e. immediate offsets from the same base register). At
5461 the same time, extract the target register, and the memory
5462 offsets. */
5463 for (i = 0; i < nops; i++)
5464 {
5465 rtx reg;
5466 rtx offset;
5467
5468 /* Convert a subreg of a mem into the mem itself. */
5469 if (GET_CODE (operands[nops + i]) == SUBREG)
5470 operands[nops + i] = alter_subreg (operands + (nops + i));
5471
5472 if (GET_CODE (operands[nops + i]) != MEM)
5473 abort ();
5474
5475 /* Don't reorder volatile memory references; it doesn't seem worth
5476 looking for the case where the order is ok anyway. */
5477 if (MEM_VOLATILE_P (operands[nops + i]))
5478 return 0;
5479
5480 offset = const0_rtx;
5481
5482 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5483 || (GET_CODE (reg) == SUBREG
5484 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5485 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5486 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5487 == REG)
5488 || (GET_CODE (reg) == SUBREG
5489 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5490 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5491 == CONST_INT)))
5492 {
5493 if (i == 0)
5494 {
5495 base_reg = REGNO (reg);
5496 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5497 ? REGNO (operands[i])
5498 : REGNO (SUBREG_REG (operands[i])));
5499 order[0] = 0;
5500 }
5501 else
5502 {
5503 if (base_reg != (int) REGNO (reg))
5504 /* Not addressed from the same base register. */
5505 return 0;
5506
5507 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5508 ? REGNO (operands[i])
5509 : REGNO (SUBREG_REG (operands[i])));
5510 if (unsorted_regs[i] < unsorted_regs[order[0]])
5511 order[0] = i;
5512 }
5513
5514 /* If it isn't an integer register, then we can't do this. */
5515 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5516 return 0;
5517
5518 unsorted_offsets[i] = INTVAL (offset);
5519 }
5520 else
5521 /* Not a suitable memory address. */
5522 return 0;
5523 }
5524
5525 /* All the useful information has now been extracted from the
5526 operands into unsorted_regs and unsorted_offsets; additionally,
5527 order[0] has been set to the lowest numbered register in the
5528 list. Sort the registers into order, and check that the memory
5529 offsets are ascending and adjacent. */
5530
5531 for (i = 1; i < nops; i++)
5532 {
5533 int j;
5534
5535 order[i] = order[i - 1];
5536 for (j = 0; j < nops; j++)
5537 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5538 && (order[i] == order[i - 1]
5539 || unsorted_regs[j] < unsorted_regs[order[i]]))
5540 order[i] = j;
5541
5542 /* Have we found a suitable register? If not, one must be used more
5543 than once. */
5544 if (order[i] == order[i - 1])
5545 return 0;
5546
5547 /* Is the memory address adjacent and ascending? */
5548 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5549 return 0;
5550 }
5551
5552 if (base)
5553 {
5554 *base = base_reg;
5555
5556 for (i = 0; i < nops; i++)
5557 regs[i] = unsorted_regs[order[i]];
5558
5559 *load_offset = unsorted_offsets[order[0]];
5560 }
5561
5562 if (unsorted_offsets[order[0]] == 0)
5563 return 1; /* stmia */
5564
5565 if (unsorted_offsets[order[0]] == 4)
5566 return 2; /* stmib */
5567
5568 if (unsorted_offsets[order[nops - 1]] == 0)
5569 return 3; /* stmda */
5570
5571 if (unsorted_offsets[order[nops - 1]] == -4)
5572 return 4; /* stmdb */
5573
5574 return 0;
5575 }
5576
5577 const char *
5578 emit_stm_seq (rtx *operands, int nops)
5579 {
5580 int regs[4];
5581 int base_reg;
5582 HOST_WIDE_INT offset;
5583 char buf[100];
5584 int i;
5585
5586 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5587 {
5588 case 1:
5589 strcpy (buf, "stm%?ia\t");
5590 break;
5591
5592 case 2:
5593 strcpy (buf, "stm%?ib\t");
5594 break;
5595
5596 case 3:
5597 strcpy (buf, "stm%?da\t");
5598 break;
5599
5600 case 4:
5601 strcpy (buf, "stm%?db\t");
5602 break;
5603
5604 default:
5605 abort ();
5606 }
5607
5608 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5609 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5610
5611 for (i = 1; i < nops; i++)
5612 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5613 reg_names[regs[i]]);
5614
5615 strcat (buf, "}\t%@ phole stm");
5616
5617 output_asm_insn (buf, operands);
5618 return "";
5619 }
5620
5621 int
5622 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5623 {
5624 if (GET_CODE (op) != PARALLEL
5625 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5626 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5627 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5628 return 0;
5629
5630 return 1;
5631 }
5632 \f
5633 /* Routines for use in generating RTL. */
5634
5635 rtx
5636 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5637 int write_back, int unchanging_p, int in_struct_p,
5638 int scalar_p)
5639 {
5640 int i = 0, j;
5641 rtx result;
5642 int sign = up ? 1 : -1;
5643 rtx mem;
5644
5645 /* XScale has load-store double instructions, but they have stricter
5646 alignment requirements than load-store multiple, so we can not
5647 use them.
5648
5649 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5650 the pipeline until completion.
5651
5652 NREGS CYCLES
5653 1 3
5654 2 4
5655 3 5
5656 4 6
5657
5658 An ldr instruction takes 1-3 cycles, but does not block the
5659 pipeline.
5660
5661 NREGS CYCLES
5662 1 1-3
5663 2 2-6
5664 3 3-9
5665 4 4-12
5666
5667 Best case ldr will always win. However, the more ldr instructions
5668 we issue, the less likely we are to be able to schedule them well.
5669 Using ldr instructions also increases code size.
5670
5671 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5672 for counts of 3 or 4 regs. */
5673 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5674 {
5675 rtx seq;
5676
5677 start_sequence ();
5678
5679 for (i = 0; i < count; i++)
5680 {
5681 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5682 RTX_UNCHANGING_P (mem) = unchanging_p;
5683 MEM_IN_STRUCT_P (mem) = in_struct_p;
5684 MEM_SCALAR_P (mem) = scalar_p;
5685 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5686 }
5687
5688 if (write_back)
5689 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5690
5691 seq = get_insns ();
5692 end_sequence ();
5693
5694 return seq;
5695 }
5696
5697 result = gen_rtx_PARALLEL (VOIDmode,
5698 rtvec_alloc (count + (write_back ? 1 : 0)));
5699 if (write_back)
5700 {
5701 XVECEXP (result, 0, 0)
5702 = gen_rtx_SET (GET_MODE (from), from,
5703 plus_constant (from, count * 4 * sign));
5704 i = 1;
5705 count++;
5706 }
5707
5708 for (j = 0; i < count; i++, j++)
5709 {
5710 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5711 RTX_UNCHANGING_P (mem) = unchanging_p;
5712 MEM_IN_STRUCT_P (mem) = in_struct_p;
5713 MEM_SCALAR_P (mem) = scalar_p;
5714 XVECEXP (result, 0, i)
5715 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5716 }
5717
5718 return result;
5719 }
5720
5721 rtx
5722 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5723 int write_back, int unchanging_p, int in_struct_p,
5724 int scalar_p)
5725 {
5726 int i = 0, j;
5727 rtx result;
5728 int sign = up ? 1 : -1;
5729 rtx mem;
5730
5731 /* See arm_gen_load_multiple for discussion of
5732 the pros/cons of ldm/stm usage for XScale. */
5733 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5734 {
5735 rtx seq;
5736
5737 start_sequence ();
5738
5739 for (i = 0; i < count; i++)
5740 {
5741 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5742 RTX_UNCHANGING_P (mem) = unchanging_p;
5743 MEM_IN_STRUCT_P (mem) = in_struct_p;
5744 MEM_SCALAR_P (mem) = scalar_p;
5745 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5746 }
5747
5748 if (write_back)
5749 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5750
5751 seq = get_insns ();
5752 end_sequence ();
5753
5754 return seq;
5755 }
5756
5757 result = gen_rtx_PARALLEL (VOIDmode,
5758 rtvec_alloc (count + (write_back ? 1 : 0)));
5759 if (write_back)
5760 {
5761 XVECEXP (result, 0, 0)
5762 = gen_rtx_SET (GET_MODE (to), to,
5763 plus_constant (to, count * 4 * sign));
5764 i = 1;
5765 count++;
5766 }
5767
5768 for (j = 0; i < count; i++, j++)
5769 {
5770 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5771 RTX_UNCHANGING_P (mem) = unchanging_p;
5772 MEM_IN_STRUCT_P (mem) = in_struct_p;
5773 MEM_SCALAR_P (mem) = scalar_p;
5774
5775 XVECEXP (result, 0, i)
5776 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5777 }
5778
5779 return result;
5780 }
5781
5782 int
5783 arm_gen_movstrqi (rtx *operands)
5784 {
5785 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5786 int i;
5787 rtx src, dst;
5788 rtx st_src, st_dst, fin_src, fin_dst;
5789 rtx part_bytes_reg = NULL;
5790 rtx mem;
5791 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5792 int dst_scalar_p, src_scalar_p;
5793
5794 if (GET_CODE (operands[2]) != CONST_INT
5795 || GET_CODE (operands[3]) != CONST_INT
5796 || INTVAL (operands[2]) > 64
5797 || INTVAL (operands[3]) & 3)
5798 return 0;
5799
5800 st_dst = XEXP (operands[0], 0);
5801 st_src = XEXP (operands[1], 0);
5802
5803 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5804 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5805 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5806 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5807 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5808 src_scalar_p = MEM_SCALAR_P (operands[1]);
5809
5810 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5811 fin_src = src = copy_to_mode_reg (SImode, st_src);
5812
5813 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5814 out_words_to_go = INTVAL (operands[2]) / 4;
5815 last_bytes = INTVAL (operands[2]) & 3;
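  /* For example, a 14-byte copy gives in_words_to_go == 4 (the trailing
     bytes are fetched with a full word load), out_words_to_go == 3 and
     last_bytes == 2.  */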
5816
5817 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5818 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5819
5820 for (i = 0; in_words_to_go >= 2; i+=4)
5821 {
5822 if (in_words_to_go > 4)
5823 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5824 src_unchanging_p,
5825 src_in_struct_p,
5826 src_scalar_p));
5827 else
5828 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5829 FALSE, src_unchanging_p,
5830 src_in_struct_p, src_scalar_p));
5831
5832 if (out_words_to_go)
5833 {
5834 if (out_words_to_go > 4)
5835 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5836 dst_unchanging_p,
5837 dst_in_struct_p,
5838 dst_scalar_p));
5839 else if (out_words_to_go != 1)
5840 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5841 dst, TRUE,
5842 (last_bytes == 0
5843 ? FALSE : TRUE),
5844 dst_unchanging_p,
5845 dst_in_struct_p,
5846 dst_scalar_p));
5847 else
5848 {
5849 mem = gen_rtx_MEM (SImode, dst);
5850 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5851 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5852 MEM_SCALAR_P (mem) = dst_scalar_p;
5853 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5854 if (last_bytes != 0)
5855 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5856 }
5857 }
5858
5859 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5860 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5861 }
5862
5863 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5864 if (out_words_to_go)
5865 {
5866 rtx sreg;
5867
5868 mem = gen_rtx_MEM (SImode, src);
5869 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5870 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5871 MEM_SCALAR_P (mem) = src_scalar_p;
5872 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5873 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5874
5875 mem = gen_rtx_MEM (SImode, dst);
5876 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5877 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5878 MEM_SCALAR_P (mem) = dst_scalar_p;
5879 emit_move_insn (mem, sreg);
5880 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
5881 in_words_to_go--;
5882
5883 if (in_words_to_go) /* Sanity check */
5884 abort ();
5885 }
5886
5887 if (in_words_to_go)
5888 {
5889 if (in_words_to_go < 0)
5890 abort ();
5891
5892 mem = gen_rtx_MEM (SImode, src);
5893 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5894 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5895 MEM_SCALAR_P (mem) = src_scalar_p;
5896 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5897 }
5898
5899 if (last_bytes && part_bytes_reg == NULL)
5900 abort ();
5901
5902 if (BYTES_BIG_ENDIAN && last_bytes)
5903 {
5904 rtx tmp = gen_reg_rtx (SImode);
5905
5906 /* The bytes we want are in the top end of the word. */
5907 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5908 GEN_INT (8 * (4 - last_bytes))));
5909 part_bytes_reg = tmp;
5910
5911 while (last_bytes)
5912 {
5913 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
5914 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5915 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5916 MEM_SCALAR_P (mem) = dst_scalar_p;
5917 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5918
5919 if (--last_bytes)
5920 {
5921 tmp = gen_reg_rtx (SImode);
5922 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5923 part_bytes_reg = tmp;
5924 }
5925 }
5926
5927 }
5928 else
5929 {
5930 if (last_bytes > 1)
5931 {
5932 mem = gen_rtx_MEM (HImode, dst);
5933 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5934 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5935 MEM_SCALAR_P (mem) = dst_scalar_p;
5936 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5937 last_bytes -= 2;
5938 if (last_bytes)
5939 {
5940 rtx tmp = gen_reg_rtx (SImode);
5941
5942 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5943 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5944 part_bytes_reg = tmp;
5945 }
5946 }
5947
5948 if (last_bytes)
5949 {
5950 mem = gen_rtx_MEM (QImode, dst);
5951 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5952 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5953 MEM_SCALAR_P (mem) = dst_scalar_p;
5954 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5955 }
5956 }
5957
5958 return 1;
5959 }
5960
5961 /* Generate a memory reference for a half word, such that it will be loaded
5962 into the top 16 bits of the word. We can assume that the address is
5963 known to be alignable and of the form reg, or plus (reg, const). */
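/* For example, on a little-endian target a halfword at offset 6 already
   sits in the top half of the word at offset 4, so the SImode load is
   returned unchanged; a halfword at offset 4 sits in the bottom half, so
   the load is wrapped in (rotate:SI ... (const_int 16)).  */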
5964
5965 rtx
5966 arm_gen_rotated_half_load (rtx memref)
5967 {
5968 HOST_WIDE_INT offset = 0;
5969 rtx base = XEXP (memref, 0);
5970
5971 if (GET_CODE (base) == PLUS)
5972 {
5973 offset = INTVAL (XEXP (base, 1));
5974 base = XEXP (base, 0);
5975 }
5976
5977 /* If we aren't allowed to generate unaligned addresses, then fail. */
5978 if (TARGET_MMU_TRAPS
5979 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
5980 return NULL;
5981
5982 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5983
5984 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5985 return base;
5986
5987 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5988 }
5989
5990 /* Select a dominance comparison mode if possible for a test of the general
5991 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5992 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5993 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5994 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5995 In all cases OP will be either EQ or NE, but we don't need to know which
5996 here. If we are unable to support a dominance comparison we return
5997 CC mode. This will then fail to match for the RTL expressions that
5998 generate this call. */
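/* For example, for (lt x y) || (le x z) this returns CC_DLEmode: any
   operands that satisfy the LT test also satisfy the LE test, so a
   single conditional test of the combined comparison covers both.  */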
5999 enum machine_mode
6000 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6001 {
6002 enum rtx_code cond1, cond2;
6003 int swapped = 0;
6004
6005 /* Currently we will probably get the wrong result if the individual
6006 comparisons are not simple. This also ensures that it is safe to
6007 reverse a comparison if necessary. */
6008 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6009 != CCmode)
6010 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6011 != CCmode))
6012 return CCmode;
6013
6014 /* The if_then_else variant of this tests the second condition if the
6015 first passes, but is true if the first fails. Reverse the first
6016 condition to get a true "inclusive-or" expression. */
6017 if (cond_or == DOM_CC_NX_OR_Y)
6018 cond1 = reverse_condition (cond1);
6019
6020 /* If the comparisons are not equal, and one doesn't dominate the other,
6021 then we can't do this. */
6022 if (cond1 != cond2
6023 && !comparison_dominates_p (cond1, cond2)
6024 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6025 return CCmode;
6026
6027 if (swapped)
6028 {
6029 enum rtx_code temp = cond1;
6030 cond1 = cond2;
6031 cond2 = temp;
6032 }
6033
6034 switch (cond1)
6035 {
6036 case EQ:
6037 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6038 return CC_DEQmode;
6039
6040 switch (cond2)
6041 {
6042 case LE: return CC_DLEmode;
6043 case LEU: return CC_DLEUmode;
6044 case GE: return CC_DGEmode;
6045 case GEU: return CC_DGEUmode;
6046 default: break;
6047 }
6048
6049 break;
6050
6051 case LT:
6052 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6053 return CC_DLTmode;
6054 if (cond2 == LE)
6055 return CC_DLEmode;
6056 if (cond2 == NE)
6057 return CC_DNEmode;
6058 break;
6059
6060 case GT:
6061 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6062 return CC_DGTmode;
6063 if (cond2 == GE)
6064 return CC_DGEmode;
6065 if (cond2 == NE)
6066 return CC_DNEmode;
6067 break;
6068
6069 case LTU:
6070 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6071 return CC_DLTUmode;
6072 if (cond2 == LEU)
6073 return CC_DLEUmode;
6074 if (cond2 == NE)
6075 return CC_DNEmode;
6076 break;
6077
6078 case GTU:
6079 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6080 return CC_DGTUmode;
6081 if (cond2 == GEU)
6082 return CC_DGEUmode;
6083 if (cond2 == NE)
6084 return CC_DNEmode;
6085 break;
6086
6087 /* The remaining cases only occur when both comparisons are the
6088 same. */
6089 case NE:
6090 return CC_DNEmode;
6091
6092 case LE:
6093 return CC_DLEmode;
6094
6095 case GE:
6096 return CC_DGEmode;
6097
6098 case LEU:
6099 return CC_DLEUmode;
6100
6101 case GEU:
6102 return CC_DGEUmode;
6103
6104 default:
6105 break;
6106 }
6107
6108 abort ();
6109 }
6110
6111 enum machine_mode
6112 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6113 {
6114 /* All floating point compares return CCFP if it is an equality
6115 comparison, and CCFPE otherwise. */
6116 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6117 {
6118 switch (op)
6119 {
6120 case EQ:
6121 case NE:
6122 case UNORDERED:
6123 case ORDERED:
6124 case UNLT:
6125 case UNLE:
6126 case UNGT:
6127 case UNGE:
6128 case UNEQ:
6129 case LTGT:
6130 return CCFPmode;
6131
6132 case LT:
6133 case LE:
6134 case GT:
6135 case GE:
6136 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6137 return CCFPmode;
6138 return CCFPEmode;
6139
6140 default:
6141 abort ();
6142 }
6143 }
6144
6145 /* A compare with a shifted operand. Because of canonicalization, the
6146 comparison will have to be swapped when we emit the assembler. */
6147 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6148 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6149 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6150 || GET_CODE (x) == ROTATERT))
6151 return CC_SWPmode;
6152
6153 /* This is a special case that is used by combine to allow a
6154 comparison of a shifted byte load to be split into a zero-extend
6155 followed by a comparison of the shifted integer (only valid for
6156 equalities and unsigned inequalities). */
6157 if (GET_MODE (x) == SImode
6158 && GET_CODE (x) == ASHIFT
6159 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6160 && GET_CODE (XEXP (x, 0)) == SUBREG
6161 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6162 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6163 && (op == EQ || op == NE
6164 || op == GEU || op == GTU || op == LTU || op == LEU)
6165 && GET_CODE (y) == CONST_INT)
6166 return CC_Zmode;
6167
6168 /* A construct for a conditional compare, if the false arm contains
6169 0, then both conditions must be true, otherwise either condition
6170 must be true. Not all conditions are possible, so CCmode is
6171 returned if it can't be done. */
6172 if (GET_CODE (x) == IF_THEN_ELSE
6173 && (XEXP (x, 2) == const0_rtx
6174 || XEXP (x, 2) == const1_rtx)
6175 && COMPARISON_P (XEXP (x, 0))
6176 && COMPARISON_P (XEXP (x, 1)))
6177 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6178 INTVAL (XEXP (x, 2)));
6179
6180 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6181 if (GET_CODE (x) == AND
6182 && COMPARISON_P (XEXP (x, 0))
6183 && COMPARISON_P (XEXP (x, 1)))
6184 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6185 DOM_CC_X_AND_Y);
6186
6187 if (GET_CODE (x) == IOR
6188 && COMPARISON_P (XEXP (x, 0))
6189 && COMPARISON_P (XEXP (x, 1)))
6190 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6191 DOM_CC_X_OR_Y);
6192
6193 /* An operation (on Thumb) where we want to test for a single bit.
6194 This is done by shifting that bit up into the top bit of a
6195 scratch register; we can then branch on the sign bit. */
6196 if (TARGET_THUMB
6197 && GET_MODE (x) == SImode
6198 && (op == EQ || op == NE)
6199 && (GET_CODE (x) == ZERO_EXTRACT))
6200 return CC_Nmode;
6201
6202 /* For an operation that sets the condition codes as a side-effect,
6203 the V flag is not set correctly, so we can only use comparisons
6204 where this doesn't matter. (For LT and GE we can use "mi" and
6205 "pl" instead.) */
6206 if (GET_MODE (x) == SImode
6207 && y == const0_rtx
6208 && (op == EQ || op == NE || op == LT || op == GE)
6209 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6210 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6211 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6212 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6213 || GET_CODE (x) == LSHIFTRT
6214 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6215 || GET_CODE (x) == ROTATERT
6216 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6217 return CC_NOOVmode;
6218
6219 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6220 return CC_Zmode;
6221
6222 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6223 && GET_CODE (x) == PLUS
6224 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6225 return CC_Cmode;
6226
6227 return CCmode;
6228 }
6229
6230 /* X and Y are two things to compare using CODE. Emit the compare insn and
6231 return the rtx for register 0 in the proper mode. FP means this is a
6232 floating point compare: I don't think that it is needed on the arm. */
6233 rtx
6234 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6235 {
6236 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6237 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6238
6239 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6240 gen_rtx_COMPARE (mode, x, y)));
6241
6242 return cc_reg;
6243 }
6244
6245 /* Generate a sequence of insns that will generate the correct return
6246 address mask depending on the physical architecture that the program
6247 is running on. */
6248 rtx
6249 arm_gen_return_addr_mask (void)
6250 {
6251 rtx reg = gen_reg_rtx (Pmode);
6252
6253 emit_insn (gen_return_addr_mask (reg));
6254 return reg;
6255 }
6256
6257 void
6258 arm_reload_in_hi (rtx *operands)
6259 {
6260 rtx ref = operands[1];
6261 rtx base, scratch;
6262 HOST_WIDE_INT offset = 0;
6263
6264 if (GET_CODE (ref) == SUBREG)
6265 {
6266 offset = SUBREG_BYTE (ref);
6267 ref = SUBREG_REG (ref);
6268 }
6269
6270 if (GET_CODE (ref) == REG)
6271 {
6272 /* We have a pseudo which has been spilt onto the stack; there
6273 are two cases here: the first where there is a simple
6274 stack-slot replacement and a second where the stack-slot is
6275 out of range, or is used as a subreg. */
6276 if (reg_equiv_mem[REGNO (ref)])
6277 {
6278 ref = reg_equiv_mem[REGNO (ref)];
6279 base = find_replacement (&XEXP (ref, 0));
6280 }
6281 else
6282 /* The slot is out of range, or was dressed up in a SUBREG. */
6283 base = reg_equiv_address[REGNO (ref)];
6284 }
6285 else
6286 base = find_replacement (&XEXP (ref, 0));
6287
6288 /* Handle the case where the address is too complex to be offset by 1. */
6289 if (GET_CODE (base) == MINUS
6290 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6291 {
6292 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6293
6294 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6295 base = base_plus;
6296 }
6297 else if (GET_CODE (base) == PLUS)
6298 {
6299 /* The addend must be CONST_INT, or we would have dealt with it above. */
6300 HOST_WIDE_INT hi, lo;
6301
6302 offset += INTVAL (XEXP (base, 1));
6303 base = XEXP (base, 0);
6304
6305 /* Rework the address into a legal sequence of insns. */
6306 /* Valid range for lo is -4095 -> 4095 */
6307 lo = (offset >= 0
6308 ? (offset & 0xfff)
6309 : -((-offset) & 0xfff));
6310
6311 /* Corner case: if lo is the max offset then we would be out of range
6312 once we have added the additional 1 below, so bump the msb into the
6313 pre-loading insn(s). */
6314 if (lo == 4095)
6315 lo &= 0x7ff;
6316
6317 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6318 ^ (HOST_WIDE_INT) 0x80000000)
6319 - (HOST_WIDE_INT) 0x80000000);
6320
6321 if (hi + lo != offset)
6322 abort ();
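      /* For example, an offset of 0x2345 splits into hi == 0x2000 and
	 lo == 0x345, while the corner case of 4095 splits into
	 hi == 2048 and lo == 2047, keeping both lo and lo + 1 within
	 the +/- 4095 addressing range.  */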
6323
6324 if (hi != 0)
6325 {
6326 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6327
6328 /* Get the base address; addsi3 knows how to handle constants
6329 that require more than one insn. */
6330 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6331 base = base_plus;
6332 offset = lo;
6333 }
6334 }
6335
6336 /* Operands[2] may overlap operands[0] (though it won't overlap
6337 operands[1]); that's why we asked for a DImode reg -- so we can
6338 use the bit that does not overlap. */
6339 if (REGNO (operands[2]) == REGNO (operands[0]))
6340 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6341 else
6342 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6343
6344 emit_insn (gen_zero_extendqisi2 (scratch,
6345 gen_rtx_MEM (QImode,
6346 plus_constant (base,
6347 offset))));
6348 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6349 gen_rtx_MEM (QImode,
6350 plus_constant (base,
6351 offset + 1))));
6352 if (!BYTES_BIG_ENDIAN)
6353 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6354 gen_rtx_IOR (SImode,
6355 gen_rtx_ASHIFT
6356 (SImode,
6357 gen_rtx_SUBREG (SImode, operands[0], 0),
6358 GEN_INT (8)),
6359 scratch)));
6360 else
6361 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6362 gen_rtx_IOR (SImode,
6363 gen_rtx_ASHIFT (SImode, scratch,
6364 GEN_INT (8)),
6365 gen_rtx_SUBREG (SImode, operands[0],
6366 0))));
6367 }
6368
6369 /* Handle storing a half-word to memory during reload by synthesizing as two
6370 byte stores. Take care not to clobber the input values until after we
6371 have moved them somewhere safe. This code assumes that if the DImode
6372 scratch in operands[2] overlaps either the input value or output address
6373 in some way, then that value must die in this insn (we absolutely need
6374 two scratch registers for some corner cases). */
6375 void
6376 arm_reload_out_hi (rtx *operands)
6377 {
6378 rtx ref = operands[0];
6379 rtx outval = operands[1];
6380 rtx base, scratch;
6381 HOST_WIDE_INT offset = 0;
6382
6383 if (GET_CODE (ref) == SUBREG)
6384 {
6385 offset = SUBREG_BYTE (ref);
6386 ref = SUBREG_REG (ref);
6387 }
6388
6389 if (GET_CODE (ref) == REG)
6390 {
6391 /* We have a pseudo which has been spilt onto the stack; there
6392 are two cases here: the first where there is a simple
6393 stack-slot replacement and a second where the stack-slot is
6394 out of range, or is used as a subreg. */
6395 if (reg_equiv_mem[REGNO (ref)])
6396 {
6397 ref = reg_equiv_mem[REGNO (ref)];
6398 base = find_replacement (&XEXP (ref, 0));
6399 }
6400 else
6401 /* The slot is out of range, or was dressed up in a SUBREG. */
6402 base = reg_equiv_address[REGNO (ref)];
6403 }
6404 else
6405 base = find_replacement (&XEXP (ref, 0));
6406
6407 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6408
6409 /* Handle the case where the address is too complex to be offset by 1. */
6410 if (GET_CODE (base) == MINUS
6411 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6412 {
6413 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6414
6415 /* Be careful not to destroy OUTVAL. */
6416 if (reg_overlap_mentioned_p (base_plus, outval))
6417 {
6418 /* Updating base_plus might destroy outval, see if we can
6419 swap the scratch and base_plus. */
6420 if (!reg_overlap_mentioned_p (scratch, outval))
6421 {
6422 rtx tmp = scratch;
6423 scratch = base_plus;
6424 base_plus = tmp;
6425 }
6426 else
6427 {
6428 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6429
6430 /* Be conservative and copy OUTVAL into the scratch now;
6431 this should only be necessary if outval is a subreg
6432 of something larger than a word. */
6433 /* XXX Might this clobber base? I can't see how it can,
6434 since scratch is known to overlap with OUTVAL, and
6435 must be wider than a word. */
6436 emit_insn (gen_movhi (scratch_hi, outval));
6437 outval = scratch_hi;
6438 }
6439 }
6440
6441 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6442 base = base_plus;
6443 }
6444 else if (GET_CODE (base) == PLUS)
6445 {
6446 /* The addend must be CONST_INT, or we would have dealt with it above. */
6447 HOST_WIDE_INT hi, lo;
6448
6449 offset += INTVAL (XEXP (base, 1));
6450 base = XEXP (base, 0);
6451
6452 /* Rework the address into a legal sequence of insns. */
6453 /* Valid range for lo is -4095 -> 4095 */
6454 lo = (offset >= 0
6455 ? (offset & 0xfff)
6456 : -((-offset) & 0xfff));
6457
6458 /* Corner case: if lo is the max offset then we would be out of range
6459 once we have added the additional 1 below, so bump the msb into the
6460 pre-loading insn(s). */
6461 if (lo == 4095)
6462 lo &= 0x7ff;
6463
6464 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6465 ^ (HOST_WIDE_INT) 0x80000000)
6466 - (HOST_WIDE_INT) 0x80000000);
6467
6468 if (hi + lo != offset)
6469 abort ();
6470
6471 if (hi != 0)
6472 {
6473 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6474
6475 /* Be careful not to destroy OUTVAL. */
6476 if (reg_overlap_mentioned_p (base_plus, outval))
6477 {
6478 /* Updating base_plus might destroy outval, see if we
6479 can swap the scratch and base_plus. */
6480 if (!reg_overlap_mentioned_p (scratch, outval))
6481 {
6482 rtx tmp = scratch;
6483 scratch = base_plus;
6484 base_plus = tmp;
6485 }
6486 else
6487 {
6488 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6489
6490 /* Be conservative and copy outval into scratch now;
6491 this should only be necessary if outval is a
6492 subreg of something larger than a word. */
6493 /* XXX Might this clobber base? I can't see how it
6494 can, since scratch is known to overlap with
6495 outval. */
6496 emit_insn (gen_movhi (scratch_hi, outval));
6497 outval = scratch_hi;
6498 }
6499 }
6500
6501 /* Get the base address; addsi3 knows how to handle constants
6502 that require more than one insn. */
6503 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6504 base = base_plus;
6505 offset = lo;
6506 }
6507 }
6508
6509 if (BYTES_BIG_ENDIAN)
6510 {
6511 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6512 plus_constant (base, offset + 1)),
6513 gen_lowpart (QImode, outval)));
6514 emit_insn (gen_lshrsi3 (scratch,
6515 gen_rtx_SUBREG (SImode, outval, 0),
6516 GEN_INT (8)));
6517 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6518 gen_lowpart (QImode, scratch)));
6519 }
6520 else
6521 {
6522 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6523 gen_lowpart (QImode, outval)));
6524 emit_insn (gen_lshrsi3 (scratch,
6525 gen_rtx_SUBREG (SImode, outval, 0),
6526 GEN_INT (8)));
6527 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6528 plus_constant (base, offset + 1)),
6529 gen_lowpart (QImode, scratch)));
6530 }
6531 }
6532 \f
6533 /* Print a symbolic form of X to the debug file, F. */
6534 static void
6535 arm_print_value (FILE *f, rtx x)
6536 {
6537 switch (GET_CODE (x))
6538 {
6539 case CONST_INT:
6540 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6541 return;
6542
6543 case CONST_DOUBLE:
6544 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6545 return;
6546
6547 case CONST_VECTOR:
6548 {
6549 int i;
6550
6551 fprintf (f, "<");
6552 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6553 {
6554 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6555 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6556 fputc (',', f);
6557 }
6558 fprintf (f, ">");
6559 }
6560 return;
6561
6562 case CONST_STRING:
6563 fprintf (f, "\"%s\"", XSTR (x, 0));
6564 return;
6565
6566 case SYMBOL_REF:
6567 fprintf (f, "`%s'", XSTR (x, 0));
6568 return;
6569
6570 case LABEL_REF:
6571 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6572 return;
6573
6574 case CONST:
6575 arm_print_value (f, XEXP (x, 0));
6576 return;
6577
6578 case PLUS:
6579 arm_print_value (f, XEXP (x, 0));
6580 fprintf (f, "+");
6581 arm_print_value (f, XEXP (x, 1));
6582 return;
6583
6584 case PC:
6585 fprintf (f, "pc");
6586 return;
6587
6588 default:
6589 fprintf (f, "????");
6590 return;
6591 }
6592 }
6593 \f
6594 /* Routines for manipulation of the constant pool. */
6595
6596 /* Arm instructions cannot load a large constant directly into a
6597 register; they have to come from a pc relative load. The constant
6598 must therefore be placed in the addressable range of the pc
6599 relative load. Depending on the precise pc relative load
6600 instruction the range is somewhere between 256 bytes and 4k. This
6601 means that we often have to dump a constant inside a function, and
6602 generate code to branch around it.
6603
6604 It is important to minimize this, since the branches will slow
6605 things down and make the code larger.
6606
6607 Normally we can hide the table after an existing unconditional
6608 branch so that there is no interruption of the flow, but in the
6609 worst case the code looks like this:
6610
6611 ldr rn, L1
6612 ...
6613 b L2
6614 align
6615 L1: .long value
6616 L2:
6617 ...
6618
6619 ldr rn, L3
6620 ...
6621 b L4
6622 align
6623 L3: .long value
6624 L4:
6625 ...
6626
6627 We fix this by performing a scan after scheduling, which notices
6628 which instructions need to have their operands fetched from the
6629 constant table and builds the table.
6630
6631 The algorithm starts by building a table of all the constants that
6632 need fixing up and all the natural barriers in the function (places
6633 where a constant table can be dropped without breaking the flow).
6634 For each fixup we note how far the pc-relative replacement will be
6635 able to reach and the offset of the instruction into the function.
6636
6637 Having built the table we then group the fixes together to form
6638 tables that are as large as possible (subject to addressing
6639 constraints) and emit each table of constants after the last
6640 barrier that is within range of all the instructions in the group.
6641 If a group does not contain a barrier, then we forcibly create one
6642 by inserting a jump instruction into the flow. Once the table has
6643 been inserted, the insns are then modified to reference the
6644 relevant entry in the pool.
6645
6646 Possible enhancements to the algorithm (not implemented) are:
6647
6648 1) For some processors and object formats, there may be benefit in
6649 aligning the pools to the start of cache lines; this alignment
6650 would need to be taken into account when calculating addressability
6651 of a pool. */
6652
6653 /* These typedefs are located at the start of this file, so that
6654 they can be used in the prototypes there. This comment is to
6655 remind readers of that fact so that the following structures
6656 can be understood more easily.
6657
6658 typedef struct minipool_node Mnode;
6659 typedef struct minipool_fixup Mfix; */
6660
6661 struct minipool_node
6662 {
6663 /* Doubly linked chain of entries. */
6664 Mnode * next;
6665 Mnode * prev;
6666 /* The maximum offset into the code at which this entry can be placed. While
6667 pushing fixes for forward references, all entries are sorted in order
6668 of increasing max_address. */
6669 HOST_WIDE_INT max_address;
6670 /* Similarly for an entry inserted for a backwards ref. */
6671 HOST_WIDE_INT min_address;
6672 /* The number of fixes referencing this entry. This can become zero
6673 if we "unpush" an entry. In this case we ignore the entry when we
6674 come to emit the code. */
6675 int refcount;
6676 /* The offset from the start of the minipool. */
6677 HOST_WIDE_INT offset;
6678 /* The value in table. */
6679 rtx value;
6680 /* The mode of value. */
6681 enum machine_mode mode;
6682 /* The size of the value. With iWMMXt enabled
6683 sizes > 4 also imply an alignment of 8-bytes. */
6684 int fix_size;
6685 };
6686
6687 struct minipool_fixup
6688 {
6689 Mfix * next;
6690 rtx insn;
6691 HOST_WIDE_INT address;
6692 rtx * loc;
6693 enum machine_mode mode;
6694 int fix_size;
6695 rtx value;
6696 Mnode * minipool;
6697 HOST_WIDE_INT forwards;
6698 HOST_WIDE_INT backwards;
6699 };
6700
6701 /* Fixes less than a word need padding out to a word boundary. */
6702 #define MINIPOOL_FIX_SIZE(mode) \
6703 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
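/* For example, MINIPOOL_FIX_SIZE (HImode) is 4, while
   MINIPOOL_FIX_SIZE (DImode) is 8.  */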
6704
6705 static Mnode * minipool_vector_head;
6706 static Mnode * minipool_vector_tail;
6707 static rtx minipool_vector_label;
6708
6709 /* The linked list of all minipool fixes required for this function. */
6710 Mfix * minipool_fix_head;
6711 Mfix * minipool_fix_tail;
6712 /* The fix entry for the current minipool, once it has been placed. */
6713 Mfix * minipool_barrier;
6714
6715 /* Determines if INSN is the start of a jump table. Returns the end
6716 of the TABLE or NULL_RTX. */
6717 static rtx
6718 is_jump_table (rtx insn)
6719 {
6720 rtx table;
6721
6722 if (GET_CODE (insn) == JUMP_INSN
6723 && JUMP_LABEL (insn) != NULL
6724 && ((table = next_real_insn (JUMP_LABEL (insn)))
6725 == next_real_insn (insn))
6726 && table != NULL
6727 && GET_CODE (table) == JUMP_INSN
6728 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6729 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6730 return table;
6731
6732 return NULL_RTX;
6733 }
6734
6735 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6736 #define JUMP_TABLES_IN_TEXT_SECTION 0
6737 #endif
6738
6739 static HOST_WIDE_INT
6740 get_jump_table_size (rtx insn)
6741 {
6742 /* ADDR_VECs only take room if read-only data goes into the text
6743 section. */
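  /* For example, an ADDR_DIFF_VEC in HImode with ten entries takes
     ten two-byte elements, i.e. 20 bytes.  */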
6744 if (JUMP_TABLES_IN_TEXT_SECTION
6745 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6746 || 1
6747 #endif
6748 )
6749 {
6750 rtx body = PATTERN (insn);
6751 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6752
6753 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6754 }
6755
6756 return 0;
6757 }
6758
6759 /* Move a minipool fix MP from its current location to before MAX_MP.
6760 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6761 constraints may need updating. */
6762 static Mnode *
6763 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6764 HOST_WIDE_INT max_address)
6765 {
6766 /* This should never be true and the code below assumes these are
6767 different. */
6768 if (mp == max_mp)
6769 abort ();
6770
6771 if (max_mp == NULL)
6772 {
6773 if (max_address < mp->max_address)
6774 mp->max_address = max_address;
6775 }
6776 else
6777 {
6778 if (max_address > max_mp->max_address - mp->fix_size)
6779 mp->max_address = max_mp->max_address - mp->fix_size;
6780 else
6781 mp->max_address = max_address;
6782
6783 /* Unlink MP from its current position. Since max_mp is non-null,
6784 mp->prev must be non-null. */
6785 mp->prev->next = mp->next;
6786 if (mp->next != NULL)
6787 mp->next->prev = mp->prev;
6788 else
6789 minipool_vector_tail = mp->prev;
6790
6791 /* Re-insert it before MAX_MP. */
6792 mp->next = max_mp;
6793 mp->prev = max_mp->prev;
6794 max_mp->prev = mp;
6795
6796 if (mp->prev != NULL)
6797 mp->prev->next = mp;
6798 else
6799 minipool_vector_head = mp;
6800 }
6801
6802 /* Save the new entry. */
6803 max_mp = mp;
6804
6805 /* Scan over the preceding entries and adjust their addresses as
6806 required. */
6807 while (mp->prev != NULL
6808 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6809 {
6810 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6811 mp = mp->prev;
6812 }
6813
6814 return max_mp;
6815 }
6816
6817 /* Add a constant to the minipool for a forward reference. Returns the
6818 node added or NULL if the constant will not fit in this pool. */
6819 static Mnode *
6820 add_minipool_forward_ref (Mfix *fix)
6821 {
6822 /* If set, max_mp is the first pool_entry that has a lower
6823 constraint than the one we are trying to add. */
6824 Mnode * max_mp = NULL;
6825 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6826 Mnode * mp;
6827
6828 /* If this fix's address is greater than the address of the first
6829 entry, then we can't put the fix in this pool. We subtract the
6830 size of the current fix to ensure that if the table is fully
6831 packed we still have enough room to insert this value by shuffling
6832 the other fixes forwards. */
6833 if (minipool_vector_head &&
6834 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6835 return NULL;
6836
6837 /* Scan the pool to see if a constant with the same value has
6838 already been added. While we are doing this, also note the
6839 location where we must insert the constant if it doesn't already
6840 exist. */
6841 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6842 {
6843 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6844 && fix->mode == mp->mode
6845 && (GET_CODE (fix->value) != CODE_LABEL
6846 || (CODE_LABEL_NUMBER (fix->value)
6847 == CODE_LABEL_NUMBER (mp->value)))
6848 && rtx_equal_p (fix->value, mp->value))
6849 {
6850 /* More than one fix references this entry. */
6851 mp->refcount++;
6852 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6853 }
6854
6855 /* Note the insertion point if necessary. */
6856 if (max_mp == NULL
6857 && mp->max_address > max_address)
6858 max_mp = mp;
6859
6860 /* If we are inserting an 8-byte aligned quantity and
6861 we have not already found an insertion point, then
6862 make sure that all such 8-byte aligned quantities are
6863 placed at the start of the pool. */
6864 if (TARGET_REALLY_IWMMXT
6865 && max_mp == NULL
6866 && fix->fix_size == 8
6867 && mp->fix_size != 8)
6868 {
6869 max_mp = mp;
6870 max_address = mp->max_address;
6871 }
6872 }
6873
6874 /* The value is not currently in the minipool, so we need to create
6875 a new entry for it. If MAX_MP is NULL, the entry will be put on
6876 the end of the list since the placement is less constrained than
6877 any existing entry. Otherwise, we insert the new fix before
6878 MAX_MP and, if necessary, adjust the constraints on the other
6879 entries. */
6880 mp = xmalloc (sizeof (* mp));
6881 mp->fix_size = fix->fix_size;
6882 mp->mode = fix->mode;
6883 mp->value = fix->value;
6884 mp->refcount = 1;
6885 /* Not yet required for a backwards ref. */
6886 mp->min_address = -65536;
6887
6888 if (max_mp == NULL)
6889 {
6890 mp->max_address = max_address;
6891 mp->next = NULL;
6892 mp->prev = minipool_vector_tail;
6893
6894 if (mp->prev == NULL)
6895 {
6896 minipool_vector_head = mp;
6897 minipool_vector_label = gen_label_rtx ();
6898 }
6899 else
6900 mp->prev->next = mp;
6901
6902 minipool_vector_tail = mp;
6903 }
6904 else
6905 {
6906 if (max_address > max_mp->max_address - mp->fix_size)
6907 mp->max_address = max_mp->max_address - mp->fix_size;
6908 else
6909 mp->max_address = max_address;
6910
6911 mp->next = max_mp;
6912 mp->prev = max_mp->prev;
6913 max_mp->prev = mp;
6914 if (mp->prev != NULL)
6915 mp->prev->next = mp;
6916 else
6917 minipool_vector_head = mp;
6918 }
6919
6920 /* Save the new entry. */
6921 max_mp = mp;
6922
6923 /* Scan over the preceding entries and adjust their addresses as
6924 required. */
6925 while (mp->prev != NULL
6926 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6927 {
6928 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6929 mp = mp->prev;
6930 }
6931
6932 return max_mp;
6933 }
6934
6935 static Mnode *
6936 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6937 HOST_WIDE_INT min_address)
6938 {
6939 HOST_WIDE_INT offset;
6940
6941 /* This should never be true, and the code below assumes these are
6942 different. */
6943 if (mp == min_mp)
6944 abort ();
6945
6946 if (min_mp == NULL)
6947 {
6948 if (min_address > mp->min_address)
6949 mp->min_address = min_address;
6950 }
6951 else
6952 {
6953 /* We will adjust this below if it is too loose. */
6954 mp->min_address = min_address;
6955
6956 /* Unlink MP from its current position. Since min_mp is non-null,
6957 mp->next must be non-null. */
6958 mp->next->prev = mp->prev;
6959 if (mp->prev != NULL)
6960 mp->prev->next = mp->next;
6961 else
6962 minipool_vector_head = mp->next;
6963
6964 /* Reinsert it after MIN_MP. */
6965 mp->prev = min_mp;
6966 mp->next = min_mp->next;
6967 min_mp->next = mp;
6968 if (mp->next != NULL)
6969 mp->next->prev = mp;
6970 else
6971 minipool_vector_tail = mp;
6972 }
6973
6974 min_mp = mp;
6975
6976 offset = 0;
6977 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6978 {
6979 mp->offset = offset;
6980 if (mp->refcount > 0)
6981 offset += mp->fix_size;
6982
6983 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6984 mp->next->min_address = mp->min_address + mp->fix_size;
6985 }
6986
6987 return min_mp;
6988 }
6989
6990 /* Add a constant to the minipool for a backward reference. Returns the
6991 node added or NULL if the constant will not fit in this pool.
6992
6993 Note that the code for insertion for a backwards reference can be
6994 somewhat confusing because the calculated offsets for each fix do
6995 not take into account the size of the pool (which is still under
6996 construction). */
6997 static Mnode *
6998 add_minipool_backward_ref (Mfix *fix)
6999 {
7000 /* If set, min_mp is the last pool_entry that has a lower constraint
7001 than the one we are trying to add. */
7002 Mnode *min_mp = NULL;
7003 /* This can be negative, since it is only a constraint. */
7004 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7005 Mnode *mp;
7006
7007 /* If we can't reach the current pool from this insn, or if we can't
7008 insert this entry at the end of the pool without pushing other
7009 fixes out of range, then we don't try. This ensures that we
7010 can't fail later on. */
7011 if (min_address >= minipool_barrier->address
7012 || (minipool_vector_tail->min_address + fix->fix_size
7013 >= minipool_barrier->address))
7014 return NULL;
7015
7016 /* Scan the pool to see if a constant with the same value has
7017 already been added. While we are doing this, also note the
7018 location where we must insert the constant if it doesn't already
7019 exist. */
7020 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7021 {
7022 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7023 && fix->mode == mp->mode
7024 && (GET_CODE (fix->value) != CODE_LABEL
7025 || (CODE_LABEL_NUMBER (fix->value)
7026 == CODE_LABEL_NUMBER (mp->value)))
7027 && rtx_equal_p (fix->value, mp->value)
7028 /* Check that there is enough slack to move this entry to the
7029 end of the table (this is conservative). */
7030 && (mp->max_address
7031 > (minipool_barrier->address
7032 + minipool_vector_tail->offset
7033 + minipool_vector_tail->fix_size)))
7034 {
7035 mp->refcount++;
7036 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7037 }
7038
7039 if (min_mp != NULL)
7040 mp->min_address += fix->fix_size;
7041 else
7042 {
7043 /* Note the insertion point if necessary. */
7044 if (mp->min_address < min_address)
7045 {
7046 /* For now, we do not allow the insertion of 8-byte alignment
7047 requiring nodes anywhere but at the start of the pool. */
7048 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8 && mp->fix_size != 8)
7049 return NULL;
7050 else
7051 min_mp = mp;
7052 }
7053 else if (mp->max_address
7054 < minipool_barrier->address + mp->offset + fix->fix_size)
7055 {
7056 /* Inserting before this entry would push the fix beyond
7057 its maximum address (which can happen if we have
7058 re-located a forwards fix); force the new fix to come
7059 after it. */
7060 min_mp = mp;
7061 min_address = mp->min_address + fix->fix_size;
7062 }
7063 /* If we are inserting an 8-byte aligned quantity and
7064 we have not already found an insertion point, then
7065 make sure that all such 8-byte aligned quantities are
7066 placed at the start of the pool. */
7067 else if (TARGET_REALLY_IWMMXT
7068 && min_mp == NULL
7069 && fix->fix_size == 8
7070 && mp->fix_size < 8)
7071 {
7072 min_mp = mp;
7073 min_address = mp->min_address + fix->fix_size;
7074 }
7075 }
7076 }
7077
7078 /* We need to create a new entry. */
7079 mp = xmalloc (sizeof (* mp));
7080 mp->fix_size = fix->fix_size;
7081 mp->mode = fix->mode;
7082 mp->value = fix->value;
7083 mp->refcount = 1;
7084 mp->max_address = minipool_barrier->address + 65536;
7085
7086 mp->min_address = min_address;
7087
7088 if (min_mp == NULL)
7089 {
7090 mp->prev = NULL;
7091 mp->next = minipool_vector_head;
7092
7093 if (mp->next == NULL)
7094 {
7095 minipool_vector_tail = mp;
7096 minipool_vector_label = gen_label_rtx ();
7097 }
7098 else
7099 mp->next->prev = mp;
7100
7101 minipool_vector_head = mp;
7102 }
7103 else
7104 {
7105 mp->next = min_mp->next;
7106 mp->prev = min_mp;
7107 min_mp->next = mp;
7108
7109 if (mp->next != NULL)
7110 mp->next->prev = mp;
7111 else
7112 minipool_vector_tail = mp;
7113 }
7114
7115 /* Save the new entry. */
7116 min_mp = mp;
7117
7118 if (mp->prev)
7119 mp = mp->prev;
7120 else
7121 mp->offset = 0;
7122
7123 /* Scan over the following entries and adjust their offsets. */
7124 while (mp->next != NULL)
7125 {
7126 if (mp->next->min_address < mp->min_address + mp->fix_size)
7127 mp->next->min_address = mp->min_address + mp->fix_size;
7128
7129 if (mp->refcount)
7130 mp->next->offset = mp->offset + mp->fix_size;
7131 else
7132 mp->next->offset = mp->offset;
7133
7134 mp = mp->next;
7135 }
7136
7137 return min_mp;
7138 }
7139
7140 static void
7141 assign_minipool_offsets (Mfix *barrier)
7142 {
7143 HOST_WIDE_INT offset = 0;
7144 Mnode *mp;
7145
7146 minipool_barrier = barrier;
7147
7148 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7149 {
7150 mp->offset = offset;
7151
7152 if (mp->refcount > 0)
7153 offset += mp->fix_size;
7154 }
7155 }
7156
7157 /* Output the literal table. */
7158 static void
7159 dump_minipool (rtx scan)
7160 {
7161 Mnode * mp;
7162 Mnode * nmp;
7163 int align64 = 0;
7164
7165 if (TARGET_REALLY_IWMMXT)
7166 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7167 if (mp->refcount > 0 && mp->fix_size == 8)
7168 {
7169 align64 = 1;
7170 break;
7171 }
7172
7173 if (dump_file)
7174 fprintf (dump_file,
7175 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7176 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7177
7178 scan = emit_label_after (gen_label_rtx (), scan);
7179 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7180 scan = emit_label_after (minipool_vector_label, scan);
7181
7182 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7183 {
7184 if (mp->refcount > 0)
7185 {
7186 if (dump_file)
7187 {
7188 fprintf (dump_file,
7189 ";; Offset %u, min %ld, max %ld ",
7190 (unsigned) mp->offset, (unsigned long) mp->min_address,
7191 (unsigned long) mp->max_address);
7192 arm_print_value (dump_file, mp->value);
7193 fputc ('\n', dump_file);
7194 }
7195
7196 switch (mp->fix_size)
7197 {
7198 #ifdef HAVE_consttable_1
7199 case 1:
7200 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7201 break;
7202
7203 #endif
7204 #ifdef HAVE_consttable_2
7205 case 2:
7206 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7207 break;
7208
7209 #endif
7210 #ifdef HAVE_consttable_4
7211 case 4:
7212 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7213 break;
7214
7215 #endif
7216 #ifdef HAVE_consttable_8
7217 case 8:
7218 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7219 break;
7220
7221 #endif
7222 default:
7223 abort ();
7224 break;
7225 }
7226 }
7227
7228 nmp = mp->next;
7229 free (mp);
7230 }
7231
7232 minipool_vector_head = minipool_vector_tail = NULL;
7233 scan = emit_insn_after (gen_consttable_end (), scan);
7234 scan = emit_barrier_after (scan);
7235 }
7236
7237 /* Return the cost of forcibly inserting a barrier after INSN. */
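/* For example, a JUMP_INSN that is immediately followed by a CODE_LABEL
   costs 50 - 20 - 10 = 20, while a CODE_LABEL itself always costs 50;
   create_fix_barrier prefers lower-cost positions.  */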
7238 static int
7239 arm_barrier_cost (rtx insn)
7240 {
7241 /* Basing the location of the pool on the loop depth is preferable,
7242 but at the moment, the basic block information seems to be
7243 corrupt by this stage of the compilation. */
7244 int base_cost = 50;
7245 rtx next = next_nonnote_insn (insn);
7246
7247 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7248 base_cost -= 20;
7249
7250 switch (GET_CODE (insn))
7251 {
7252 case CODE_LABEL:
7253 /* It will always be better to place the table before the label, rather
7254 than after it. */
7255 return 50;
7256
7257 case INSN:
7258 case CALL_INSN:
7259 return base_cost;
7260
7261 case JUMP_INSN:
7262 return base_cost - 10;
7263
7264 default:
7265 return base_cost + 10;
7266 }
7267 }
7268
7269 /* Find the best place in the insn stream in the range
7270 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7271 Create the barrier by inserting a jump and add a new fix entry for
7272 it. */
7273 static Mfix *
7274 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7275 {
7276 HOST_WIDE_INT count = 0;
7277 rtx barrier;
7278 rtx from = fix->insn;
7279 rtx selected = from;
7280 int selected_cost;
7281 HOST_WIDE_INT selected_address;
7282 Mfix * new_fix;
7283 HOST_WIDE_INT max_count = max_address - fix->address;
7284 rtx label = gen_label_rtx ();
7285
7286 selected_cost = arm_barrier_cost (from);
7287 selected_address = fix->address;
7288
7289 while (from && count < max_count)
7290 {
7291 rtx tmp;
7292 int new_cost;
7293
7294 /* This code shouldn't have been called if there was a natural barrier
7295 within range. */
7296 if (GET_CODE (from) == BARRIER)
7297 abort ();
7298
7299 /* Count the length of this insn. */
7300 count += get_attr_length (from);
7301
7302 /* If there is a jump table, add its length. */
7303 tmp = is_jump_table (from);
7304 if (tmp != NULL)
7305 {
7306 count += get_jump_table_size (tmp);
7307
7308 /* Jump tables aren't in a basic block, so base the cost on
7309 the dispatch insn. If we select this location, we will
7310 still put the pool after the table. */
7311 new_cost = arm_barrier_cost (from);
7312
7313 if (count < max_count && new_cost <= selected_cost)
7314 {
7315 selected = tmp;
7316 selected_cost = new_cost;
7317 selected_address = fix->address + count;
7318 }
7319
7320 /* Continue after the dispatch table. */
7321 from = NEXT_INSN (tmp);
7322 continue;
7323 }
7324
7325 new_cost = arm_barrier_cost (from);
7326
7327 if (count < max_count && new_cost <= selected_cost)
7328 {
7329 selected = from;
7330 selected_cost = new_cost;
7331 selected_address = fix->address + count;
7332 }
7333
7334 from = NEXT_INSN (from);
7335 }
7336
7337 /* Create a new JUMP_INSN that branches around a barrier. */
7338 from = emit_jump_insn_after (gen_jump (label), selected);
7339 JUMP_LABEL (from) = label;
7340 barrier = emit_barrier_after (from);
7341 emit_label_after (label, barrier);
7342
7343 /* Create a minipool barrier entry for the new barrier. */
7344 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7345 new_fix->insn = barrier;
7346 new_fix->address = selected_address;
7347 new_fix->next = fix->next;
7348 fix->next = new_fix;
7349
7350 return new_fix;
7351 }
7352
7353 /* Record that there is a natural barrier in the insn stream at
7354 ADDRESS. */
7355 static void
7356 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7357 {
7358 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7359
7360 fix->insn = insn;
7361 fix->address = address;
7362
7363 fix->next = NULL;
7364 if (minipool_fix_head != NULL)
7365 minipool_fix_tail->next = fix;
7366 else
7367 minipool_fix_head = fix;
7368
7369 minipool_fix_tail = fix;
7370 }
7371
7372 /* Record INSN, which will need fixing up to load a value from the
7373 minipool. ADDRESS is the offset of the insn since the start of the
7374 function; LOC is a pointer to the part of the insn which requires
7375 fixing; VALUE is the constant that must be loaded, which is of type
7376 MODE. */
7377 static void
7378 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7379 enum machine_mode mode, rtx value)
7380 {
7381 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7382
7383 #ifdef AOF_ASSEMBLER
7384 /* PIC symbol references need to be converted into offsets into the
7385 based area. */
7386 /* XXX This shouldn't be done here. */
7387 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7388 value = aof_pic_entry (value);
7389 #endif /* AOF_ASSEMBLER */
7390
7391 fix->insn = insn;
7392 fix->address = address;
7393 fix->loc = loc;
7394 fix->mode = mode;
7395 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7396 fix->value = value;
7397 fix->forwards = get_attr_pool_range (insn);
7398 fix->backwards = get_attr_neg_pool_range (insn);
7399 fix->minipool = NULL;
7400
7401 /* If an insn doesn't have a range defined for it, then it isn't
7402 expecting to be reworked by this code. Better to abort now than
7403 to generate duff assembly code. */
7404 if (fix->forwards == 0 && fix->backwards == 0)
7405 abort ();
7406
7407 /* With iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7408 So there might be an empty word before the start of the pool.
7409 Hence we reduce the forward range by 4 to allow for this
7410 possibility. */
7411 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8)
7412 fix->forwards -= 4;
7413
7414 if (dump_file)
7415 {
7416 fprintf (dump_file,
7417 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7418 GET_MODE_NAME (mode),
7419 INSN_UID (insn), (unsigned long) address,
7420 -1 * (long)fix->backwards, (long)fix->forwards);
7421 arm_print_value (dump_file, fix->value);
7422 fprintf (dump_file, "\n");
7423 }
7424
7425 /* Add it to the chain of fixes. */
7426 fix->next = NULL;
7427
7428 if (minipool_fix_head != NULL)
7429 minipool_fix_tail->next = fix;
7430 else
7431 minipool_fix_head = fix;
7432
7433 minipool_fix_tail = fix;
7434 }
7435
7436 /* Scan INSN and note any of its operands that need fixing.
7437 If DO_PUSHES is false we do not actually push any of the fixups
7438 needed. The function returns TRUE if any fixups were needed/pushed.
7439 This is used by arm_memory_load_p() which needs to know about loads
7440 of constants that will be converted into minipool loads. */
7441 static bool
7442 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7443 {
7444 bool result = false;
7445 int opno;
7446
7447 extract_insn (insn);
7448
7449 if (!constrain_operands (1))
7450 fatal_insn_not_found (insn);
7451
7452 if (recog_data.n_alternatives == 0)
7453 return false;
7454
7455 /* Fill in recog_op_alt with information about the constraints of this insn. */
7456 preprocess_constraints ();
7457
7458 for (opno = 0; opno < recog_data.n_operands; opno++)
7459 {
7460 /* Things we need to fix can only occur in inputs. */
7461 if (recog_data.operand_type[opno] != OP_IN)
7462 continue;
7463
7464 /* If this alternative is a memory reference, then any mention
7465 of constants in this alternative is really to fool reload
7466 into allowing us to accept one there. We need to fix them up
7467 now so that we output the right code. */
7468 if (recog_op_alt[opno][which_alternative].memory_ok)
7469 {
7470 rtx op = recog_data.operand[opno];
7471
7472 if (CONSTANT_P (op))
7473 {
7474 if (do_pushes)
7475 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7476 recog_data.operand_mode[opno], op);
7477 result = true;
7478 }
7479 else if (GET_CODE (op) == MEM
7480 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7481 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7482 {
7483 if (do_pushes)
7484 {
7485 rtx cop = avoid_constant_pool_reference (op);
7486
7487 /* Casting the address of something to a mode narrower
7488 than a word can cause avoid_constant_pool_reference()
7489 to return the pool reference itself. That's no good to
7490 us here. Let's just hope that we can use the
7491 constant pool value directly. */
7492 if (op == cop)
7493 cop = get_pool_constant (XEXP (op, 0));
7494
7495 push_minipool_fix (insn, address,
7496 recog_data.operand_loc[opno],
7497 recog_data.operand_mode[opno], cop);
7498 }
7499
7500 result = true;
7501 }
7502 }
7503 }
7504
7505 return result;
7506 }
7507
7508 /* Gcc puts the pool in the wrong place for ARM, since we can only
7509 load addresses a limited distance around the pc. We do some
7510 special munging to move the constant pool values to the correct
7511 point in the code. */
7512 static void
7513 arm_reorg (void)
7514 {
7515 rtx insn;
7516 HOST_WIDE_INT address = 0;
7517 Mfix * fix;
7518
7519 minipool_fix_head = minipool_fix_tail = NULL;
7520
7521 /* The first insn must always be a note, or the code below won't
7522 scan it properly. */
7523 insn = get_insns ();
7524 if (GET_CODE (insn) != NOTE)
7525 abort ();
7526
7527 /* Scan all the insns and record the operands that will need fixing. */
7528 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7529 {
7530 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7531 && (arm_cirrus_insn_p (insn)
7532 || GET_CODE (insn) == JUMP_INSN
7533 || arm_memory_load_p (insn)))
7534 cirrus_reorg (insn);
7535
7536 if (GET_CODE (insn) == BARRIER)
7537 push_minipool_barrier (insn, address);
7538 else if (INSN_P (insn))
7539 {
7540 rtx table;
7541
7542 note_invalid_constants (insn, address, true);
7543 address += get_attr_length (insn);
7544
7545 /* If the insn is a vector jump, add the size of the table
7546 and skip the table. */
7547 if ((table = is_jump_table (insn)) != NULL)
7548 {
7549 address += get_jump_table_size (table);
7550 insn = table;
7551 }
7552 }
7553 }
7554
7555 fix = minipool_fix_head;
7556
7557 /* Now scan the fixups and perform the required changes. */
7558 while (fix)
7559 {
7560 Mfix * ftmp;
7561 Mfix * fdel;
7562 Mfix * last_added_fix;
7563 Mfix * last_barrier = NULL;
7564 Mfix * this_fix;
7565
7566 /* Skip any further barriers before the next fix. */
7567 while (fix && GET_CODE (fix->insn) == BARRIER)
7568 fix = fix->next;
7569
7570 /* No more fixes. */
7571 if (fix == NULL)
7572 break;
7573
7574 last_added_fix = NULL;
7575
7576 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7577 {
7578 if (GET_CODE (ftmp->insn) == BARRIER)
7579 {
7580 if (ftmp->address >= minipool_vector_head->max_address)
7581 break;
7582
7583 last_barrier = ftmp;
7584 }
7585 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7586 break;
7587
7588 last_added_fix = ftmp; /* Keep track of the last fix added. */
7589 }
7590
7591 /* If we found a barrier, drop back to that; any fixes that we
7592 could have reached but come after the barrier will now go in
7593 the next mini-pool. */
7594 if (last_barrier != NULL)
7595 {
7596 /* Reduce the refcount for those fixes that won't go into this
7597 pool after all. */
7598 for (fdel = last_barrier->next;
7599 fdel && fdel != ftmp;
7600 fdel = fdel->next)
7601 {
7602 fdel->minipool->refcount--;
7603 fdel->minipool = NULL;
7604 }
7605
7606 ftmp = last_barrier;
7607 }
7608 else
7609 {
7610 /* ftmp is the first fix that we can't fit into this pool and
7611 there are no natural barriers that we could use. Insert a
7612 new barrier in the code somewhere between the previous
7613 fix and this one, and arrange to jump around it. */
7614 HOST_WIDE_INT max_address;
7615
7616 /* The last item on the list of fixes must be a barrier, so
7617 we can never run off the end of the list of fixes without
7618 last_barrier being set. */
7619 if (ftmp == NULL)
7620 abort ();
7621
7622 max_address = minipool_vector_head->max_address;
7623 /* Check that there isn't another fix that is in range that
7624 we couldn't fit into this pool because the pool was
7625 already too large: we need to put the pool before such an
7626 instruction. */
7627 if (ftmp->address < max_address)
7628 max_address = ftmp->address;
7629
7630 last_barrier = create_fix_barrier (last_added_fix, max_address);
7631 }
7632
7633 assign_minipool_offsets (last_barrier);
7634
7635 while (ftmp)
7636 {
7637 if (GET_CODE (ftmp->insn) != BARRIER
7638 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7639 == NULL))
7640 break;
7641
7642 ftmp = ftmp->next;
7643 }
7644
7645 /* Scan over the fixes we have identified for this pool, fixing them
7646 up and adding the constants to the pool itself. */
7647 for (this_fix = fix; this_fix && ftmp != this_fix;
7648 this_fix = this_fix->next)
7649 if (GET_CODE (this_fix->insn) != BARRIER)
7650 {
7651 rtx addr
7652 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7653 minipool_vector_label),
7654 this_fix->minipool->offset);
7655 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7656 }
7657
7658 dump_minipool (last_barrier->insn);
7659 fix = ftmp;
7660 }
7661
7662 /* From now on we must synthesize any constants that we can't handle
7663 directly. This can happen if the RTL gets split during final
7664 instruction generation. */
7665 after_arm_reorg = 1;
7666
7667 /* Free the minipool memory. */
7668 obstack_free (&minipool_obstack, minipool_startobj);
7669 }
7670 \f
7671 /* Routines to output assembly language. */
7672
7673 /* If the rtx is the correct value then return the string of the number.
7674 In this way we can ensure that valid double constants are generated even
7675 when cross compiling. */
7676 const char *
7677 fp_immediate_constant (rtx x)
7678 {
7679 REAL_VALUE_TYPE r;
7680 int i;
7681
7682 if (!fp_consts_inited)
7683 init_fp_table ();
7684
7685 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7686 for (i = 0; i < 8; i++)
7687 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7688 return strings_fp[i];
7689
7690 abort ();
7691 }
7692
7693 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7694 static const char *
7695 fp_const_from_val (REAL_VALUE_TYPE *r)
7696 {
7697 int i;
7698
7699 if (!fp_consts_inited)
7700 init_fp_table ();
7701
7702 for (i = 0; i < 8; i++)
7703 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7704 return strings_fp[i];
7705
7706 abort ();
7707 }
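/* Note (an assumption, since init_fp_table lives elsewhere in this file):
   values_fp[] / strings_fp[] hold the eight constants that the FPA can
   encode as immediates -- presumably 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5
   and 10.0 -- so the two routines above simply map an rtx or
   REAL_VALUE_TYPE onto the matching assembler string and abort for
   anything else.  */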
7708
7709 /* Output the operands of a LDM/STM instruction to STREAM.
7710 MASK is the ARM register set mask of which only bits 0-15 are important.
7711 REG is the base register, either the frame pointer or the stack pointer,
7712 INSTR is the possibly suffixed load or store instruction. */
7713 static void
7714 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7715 {
7716 int i;
7717 int not_first = FALSE;
7718
7719 fputc ('\t', stream);
7720 asm_fprintf (stream, instr, reg);
7721 fputs (", {", stream);
7722
7723 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7724 if (mask & (1 << i))
7725 {
7726 if (not_first)
7727 fprintf (stream, ", ");
7728
7729 asm_fprintf (stream, "%r", i);
7730 not_first = TRUE;
7731 }
7732
7733 fprintf (stream, "}");
7734
7735 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7736 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7737 Strictly speaking the instruction would be unpredictable only if
7738 we were writing back the base register as well, but since we never
7739 want to generate an LDM type 2 instruction (register bank switching)
7740 which is what you get if the PC is not being loaded, we do not need
7741 to check for writeback. */
7742 if (! TARGET_APCS_32
7743 && ((mask & (1 << PC_REGNUM)) != 0))
7744 fprintf (stream, "^");
7745
7746 fprintf (stream, "\n");
7747 }
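/* Illustrative example (assumed mask and operands, not from the sources):
   a call such as print_multi_reg (stream, "ldmfd\t%r!", SP_REGNUM, 0x8830)
   would emit roughly
        ldmfd   sp!, {r4, r5, fp, pc}
   with a trailing "^" appended in 26-bit (non-APCS-32) mode because the
   PC is in the register list.  */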
7748
7749
7750 /* Output the operands of a FLDM/FSTM instruction to STREAM.
7751 REG is the base register,
7752 INSTR is the possibly suffixed load or store instruction.
7753 FMT specifies how to print the register name.
7754 START and COUNT specify the register range. */
7755
7756 static void
7757 vfp_print_multi (FILE *stream, const char *instr, int reg,
7758 const char * fmt, int start, int count)
7759 {
7760 int i;
7761
7762 fputc ('\t', stream);
7763 asm_fprintf (stream, instr, reg);
7764 fputs (", {", stream);
7765
7766 for (i = start; i < start + count; i++)
7767 {
7768 if (i > start)
7769 fputs (", ", stream);
7770 asm_fprintf (stream, fmt, i);
7771 }
7772 fputs ("}\n", stream);
7773 }
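/* Illustrative example (assumed arguments, not from the sources): the call
   vfp_print_multi (stream, "fldmfdx\t%r!", SP_REGNUM, "d%d", 4, 3)
   would emit roughly
        fldmfdx sp!, {d4, d5, d6}
   i.e. COUNT registers formatted with FMT, starting at START.  */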
7774
7775
7776 /* Output the assembly for a store multiple. */
7777
7778 const char *
7779 vfp_output_fstmx (rtx * operands)
7780 {
7781 char pattern[100];
7782 int p;
7783 int base;
7784 int i;
7785
7786 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7787 p = strlen (pattern);
7788
7789 if (GET_CODE (operands[1]) != REG)
7790 abort ();
7791
7792 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7793 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7794 {
7795 p += sprintf (&pattern[p], ", d%d", base + i);
7796 }
7797 strcpy (&pattern[p], "}");
7798
7799 output_asm_insn (pattern, operands);
7800 return "";
7801 }
7802
7803
7804 /* Emit RTL to save block of VFP register pairs to the stack. */
7805
7806 static rtx
7807 vfp_emit_fstmx (int base_reg, int count)
7808 {
7809 rtx par;
7810 rtx dwarf;
7811 rtx tmp, reg;
7812 int i;
7813
7814 /* ??? The frame layout is implementation defined. We describe
7815 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7816 We really need some way of representing the whole block so that the
7817 unwinder can figure it out at runtime. */
7818 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7819 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7820
7821 reg = gen_rtx_REG (DFmode, base_reg);
7822 base_reg += 2;
7823
7824 XVECEXP (par, 0, 0)
7825 = gen_rtx_SET (VOIDmode,
7826 gen_rtx_MEM (BLKmode,
7827 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7828 gen_rtx_UNSPEC (BLKmode,
7829 gen_rtvec (1, reg),
7830 UNSPEC_PUSH_MULT));
7831
7832 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7833 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7834 GEN_INT (-(count * 8 + 4))));
7835 RTX_FRAME_RELATED_P (tmp) = 1;
7836 XVECEXP (dwarf, 0, 0) = tmp;
7837
7838 tmp = gen_rtx_SET (VOIDmode,
7839 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7840 reg);
7841 RTX_FRAME_RELATED_P (tmp) = 1;
7842 XVECEXP (dwarf, 0, 1) = tmp;
7843
7844 for (i = 1; i < count; i++)
7845 {
7846 reg = gen_rtx_REG (DFmode, base_reg);
7847 base_reg += 2;
7848 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7849
7850 tmp = gen_rtx_SET (VOIDmode,
7851 gen_rtx_MEM (DFmode,
7852 gen_rtx_PLUS (SImode,
7853 stack_pointer_rtx,
7854 GEN_INT (i * 8))),
7855 reg);
7856 RTX_FRAME_RELATED_P (tmp) = 1;
7857 XVECEXP (dwarf, 0, i + 1) = tmp;
7858 }
7859
7860 par = emit_insn (par);
7861 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7862 REG_NOTES (par));
7863 return par;
7864 }
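/* A concrete reading of the above (a sketch only): for COUNT == 2 the insn
   pushes two double registers, and the attached REG_FRAME_RELATED_EXPR
   note records sp := sp - 20, i.e. COUNT * 8 bytes of data plus the
   4-byte pad word left by the FSTMX "standard format 1" layout.  */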
7865
7866
7867 /* Output a 'call' insn. */
7868 const char *
7869 output_call (rtx *operands)
7870 {
7871 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7872
7873 if (REGNO (operands[0]) == LR_REGNUM)
7874 {
7875 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7876 output_asm_insn ("mov%?\t%0, %|lr", operands);
7877 }
7878
7879 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7880
7881 if (TARGET_INTERWORK)
7882 output_asm_insn ("bx%?\t%0", operands);
7883 else
7884 output_asm_insn ("mov%?\t%|pc, %0", operands);
7885
7886 return "";
7887 }
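/* Illustration (assumed operand register, not from the sources): for a
   call through r2 this emits
        mov     lr, pc
        mov     pc, r2          @ or "bx r2" when interworking
   and if the target register is LR itself, it is first copied into IP so
   that the same sequence can still be used.  */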
7888
7889 /* Output a 'call' insn that is a reference in memory. */
7890 const char *
7891 output_call_mem (rtx *operands)
7892 {
7893 if (TARGET_INTERWORK)
7894 {
7895 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7896 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7897 output_asm_insn ("bx%?\t%|ip", operands);
7898 }
7899 else if (regno_use_in (LR_REGNUM, operands[0]))
7900 {
7901 /* LR is used in the memory address. We load the address in the
7902 first instruction. It's safe to use IP as the target of the
7903 load since the call will kill it anyway. */
7904 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7905 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7906 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7907 }
7908 else
7909 {
7910 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7911 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7912 }
7913
7914 return "";
7915 }
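/* Illustration (assumed operands, not from the sources): for a call
   through a memory word whose address does not involve LR, and without
   interworking, this emits simply
        mov     lr, pc
        ldr     pc, <mem>
   The other two branches exist only to keep LR correct when the address
   itself uses LR, or to finish with BX for interworking.  */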
7916
7917
7918 /* Output a move from arm registers to an fpa register.
7919 OPERANDS[0] is an fpa register.
7920 OPERANDS[1] is the first register of an arm register pair. */
7921 const char *
7922 output_mov_long_double_fpa_from_arm (rtx *operands)
7923 {
7924 int arm_reg0 = REGNO (operands[1]);
7925 rtx ops[3];
7926
7927 if (arm_reg0 == IP_REGNUM)
7928 abort ();
7929
7930 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7931 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7932 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7933
7934 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7935 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7936
7937 return "";
7938 }
7939
7940 /* Output a move from an fpa register to arm registers.
7941 OPERANDS[0] is the first register of an arm register pair.
7942 OPERANDS[1] is an fpa register. */
7943 const char *
7944 output_mov_long_double_arm_from_fpa (rtx *operands)
7945 {
7946 int arm_reg0 = REGNO (operands[0]);
7947 rtx ops[3];
7948
7949 if (arm_reg0 == IP_REGNUM)
7950 abort ();
7951
7952 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7953 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7954 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7955
7956 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7957 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7958 return "";
7959 }
7960
7961 /* Output a move from arm registers to arm registers of a long double.
7962 OPERANDS[0] is the destination.
7963 OPERANDS[1] is the source. */
7964 const char *
7965 output_mov_long_double_arm_from_arm (rtx *operands)
7966 {
7967 /* We have to be careful here because the two might overlap. */
7968 int dest_start = REGNO (operands[0]);
7969 int src_start = REGNO (operands[1]);
7970 rtx ops[2];
7971 int i;
7972
7973 if (dest_start < src_start)
7974 {
7975 for (i = 0; i < 3; i++)
7976 {
7977 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7978 ops[1] = gen_rtx_REG (SImode, src_start + i);
7979 output_asm_insn ("mov%?\t%0, %1", ops);
7980 }
7981 }
7982 else
7983 {
7984 for (i = 2; i >= 0; i--)
7985 {
7986 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7987 ops[1] = gen_rtx_REG (SImode, src_start + i);
7988 output_asm_insn ("mov%?\t%0, %1", ops);
7989 }
7990 }
7991
7992 return "";
7993 }
7994
7995
7996 /* Output a move from arm registers to an fpa register.
7997 OPERANDS[0] is an fpa register.
7998 OPERANDS[1] is the first register of an arm register pair. */
7999 const char *
8000 output_mov_double_fpa_from_arm (rtx *operands)
8001 {
8002 int arm_reg0 = REGNO (operands[1]);
8003 rtx ops[2];
8004
8005 if (arm_reg0 == IP_REGNUM)
8006 abort ();
8007
8008 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8009 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8010 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8011 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8012 return "";
8013 }
8014
8015 /* Output a move from an fpa register to arm registers.
8016 OPERANDS[0] is the first register of an arm register pair.
8017 OPERANDS[1] is an fpa register. */
8018 const char *
8019 output_mov_double_arm_from_fpa (rtx *operands)
8020 {
8021 int arm_reg0 = REGNO (operands[0]);
8022 rtx ops[2];
8023
8024 if (arm_reg0 == IP_REGNUM)
8025 abort ();
8026
8027 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8028 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8029 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8030 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8031 return "";
8032 }
8033
8034 /* Output a move between double words.
8035 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8036 or MEM<-REG and all MEMs must be offsettable addresses. */
8037 const char *
8038 output_move_double (rtx *operands)
8039 {
8040 enum rtx_code code0 = GET_CODE (operands[0]);
8041 enum rtx_code code1 = GET_CODE (operands[1]);
8042 rtx otherops[3];
8043
8044 if (code0 == REG)
8045 {
8046 int reg0 = REGNO (operands[0]);
8047
8048 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8049
8050 if (code1 == REG)
8051 {
8052 int reg1 = REGNO (operands[1]);
8053 if (reg1 == IP_REGNUM)
8054 abort ();
8055
8056 /* Ensure the second source is not overwritten. */
8057 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8058 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8059 else
8060 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8061 }
8062 else if (code1 == CONST_VECTOR)
8063 {
8064 HOST_WIDE_INT hint = 0;
8065
8066 switch (GET_MODE (operands[1]))
8067 {
8068 case V2SImode:
8069 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8070 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8071 break;
8072
8073 case V4HImode:
8074 if (BYTES_BIG_ENDIAN)
8075 {
8076 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8077 hint <<= 16;
8078 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8079 }
8080 else
8081 {
8082 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8083 hint <<= 16;
8084 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8085 }
8086
8087 otherops[1] = GEN_INT (hint);
8088 hint = 0;
8089
8090 if (BYTES_BIG_ENDIAN)
8091 {
8092 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8093 hint <<= 16;
8094 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8095 }
8096 else
8097 {
8098 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8099 hint <<= 16;
8100 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8101 }
8102
8103 operands[1] = GEN_INT (hint);
8104 break;
8105
8106 case V8QImode:
8107 if (BYTES_BIG_ENDIAN)
8108 {
8109 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8110 hint <<= 8;
8111 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8112 hint <<= 8;
8113 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8114 hint <<= 8;
8115 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8116 }
8117 else
8118 {
8119 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8120 hint <<= 8;
8121 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8122 hint <<= 8;
8123 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8124 hint <<= 8;
8125 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8126 }
8127
8128 otherops[1] = GEN_INT (hint);
8129 hint = 0;
8130
8131 if (BYTES_BIG_ENDIAN)
8132 {
8133 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8134 hint <<= 8;
8135 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8136 hint <<= 8;
8137 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8138 hint <<= 8;
8139 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8140 }
8141 else
8142 {
8143 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8144 hint <<= 8;
8145 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8146 hint <<= 8;
8147 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8148 hint <<= 8;
8149 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8150 }
8151
8152 operands[1] = GEN_INT (hint);
8153 break;
8154
8155 default:
8156 abort ();
8157 }
8158 output_mov_immediate (operands);
8159 output_mov_immediate (otherops);
8160 }
8161 else if (code1 == CONST_DOUBLE)
8162 {
8163 if (GET_MODE (operands[1]) == DFmode)
8164 {
8165 REAL_VALUE_TYPE r;
8166 long l[2];
8167
8168 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8169 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8170 otherops[1] = GEN_INT (l[1]);
8171 operands[1] = GEN_INT (l[0]);
8172 }
8173 else if (GET_MODE (operands[1]) != VOIDmode)
8174 abort ();
8175 else if (WORDS_BIG_ENDIAN)
8176 {
8177 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8178 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8179 }
8180 else
8181 {
8182 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8183 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8184 }
8185
8186 output_mov_immediate (operands);
8187 output_mov_immediate (otherops);
8188 }
8189 else if (code1 == CONST_INT)
8190 {
8191 #if HOST_BITS_PER_WIDE_INT > 32
8192 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8193 what the upper word is. */
8194 if (WORDS_BIG_ENDIAN)
8195 {
8196 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8197 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8198 }
8199 else
8200 {
8201 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8202 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8203 }
8204 #else
8205 /* Sign extend the intval into the high-order word. */
8206 if (WORDS_BIG_ENDIAN)
8207 {
8208 otherops[1] = operands[1];
8209 operands[1] = (INTVAL (operands[1]) < 0
8210 ? constm1_rtx : const0_rtx);
8211 }
8212 else
8213 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8214 #endif
8215 output_mov_immediate (otherops);
8216 output_mov_immediate (operands);
8217 }
8218 else if (code1 == MEM)
8219 {
8220 switch (GET_CODE (XEXP (operands[1], 0)))
8221 {
8222 case REG:
8223 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8224 break;
8225
8226 case PRE_INC:
8227 abort (); /* Should never happen now. */
8228 break;
8229
8230 case PRE_DEC:
8231 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8232 break;
8233
8234 case POST_INC:
8235 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8236 break;
8237
8238 case POST_DEC:
8239 abort (); /* Should never happen now. */
8240 break;
8241
8242 case LABEL_REF:
8243 case CONST:
8244 output_asm_insn ("adr%?\t%0, %1", operands);
8245 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8246 break;
8247
8248 default:
8249 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8250 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8251 {
8252 otherops[0] = operands[0];
8253 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8254 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8255
8256 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8257 {
8258 if (GET_CODE (otherops[2]) == CONST_INT)
8259 {
8260 switch ((int) INTVAL (otherops[2]))
8261 {
8262 case -8:
8263 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8264 return "";
8265 case -4:
8266 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8267 return "";
8268 case 4:
8269 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8270 return "";
8271 }
8272
8273 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8274 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8275 else
8276 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8277 }
8278 else
8279 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8280 }
8281 else
8282 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8283
8284 return "ldm%?ia\t%0, %M0";
8285 }
8286 else
8287 {
8288 otherops[1] = adjust_address (operands[1], SImode, 4);
8289 /* Take care of overlapping base/data reg. */
8290 if (reg_mentioned_p (operands[0], operands[1]))
8291 {
8292 output_asm_insn ("ldr%?\t%0, %1", otherops);
8293 output_asm_insn ("ldr%?\t%0, %1", operands);
8294 }
8295 else
8296 {
8297 output_asm_insn ("ldr%?\t%0, %1", operands);
8298 output_asm_insn ("ldr%?\t%0, %1", otherops);
8299 }
8300 }
8301 }
8302 }
8303 else
8304 abort (); /* Constraints should prevent this. */
8305 }
8306 else if (code0 == MEM && code1 == REG)
8307 {
8308 if (REGNO (operands[1]) == IP_REGNUM)
8309 abort ();
8310
8311 switch (GET_CODE (XEXP (operands[0], 0)))
8312 {
8313 case REG:
8314 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8315 break;
8316
8317 case PRE_INC:
8318 abort (); /* Should never happen now. */
8319 break;
8320
8321 case PRE_DEC:
8322 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8323 break;
8324
8325 case POST_INC:
8326 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8327 break;
8328
8329 case POST_DEC:
8330 abort (); /* Should never happen now. */
8331 break;
8332
8333 case PLUS:
8334 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
8335 {
8336 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8337 {
8338 case -8:
8339 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8340 return "";
8341
8342 case -4:
8343 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8344 return "";
8345
8346 case 4:
8347 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8348 return "";
8349 }
8350 }
8351 /* Fall through */
8352
8353 default:
8354 otherops[0] = adjust_address (operands[0], SImode, 4);
8355 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8356 output_asm_insn ("str%?\t%1, %0", operands);
8357 output_asm_insn ("str%?\t%1, %0", otherops);
8358 }
8359 }
8360 else
8361 /* Constraints should prevent this. */
8362 abort ();
8363
8364 return "";
8365 }
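/* Illustration (assumed registers, not from the sources): for a DImode
   load of r0/r1 from the address held in r2, the REG case above emits
        ldmia   r2, {r0, r1}
   while a store of r0/r1 to an address such as r2 + 8 falls through to
   the default case and is done with two STRs via adjust_address.  */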
8366
8367
8368 /* Output an arbitrary MOV reg, #n.
8369 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8370 const char *
8371 output_mov_immediate (rtx *operands)
8372 {
8373 HOST_WIDE_INT n = INTVAL (operands[1]);
8374
8375 /* Try to use one MOV. */
8376 if (const_ok_for_arm (n))
8377 output_asm_insn ("mov%?\t%0, %1", operands);
8378
8379 /* Try to use one MVN. */
8380 else if (const_ok_for_arm (~n))
8381 {
8382 operands[1] = GEN_INT (~n);
8383 output_asm_insn ("mvn%?\t%0, %1", operands);
8384 }
8385 else
8386 {
8387 int n_ones = 0;
8388 int i;
8389
8390 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8391 for (i = 0; i < 32; i++)
8392 if (n & 1 << i)
8393 n_ones++;
8394
8395 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8396 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8397 else
8398 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8399 }
8400
8401 return "";
8402 }
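/* Illustration (assumed destination register, not from the sources):
   moving 0xfffffffe into r0 uses the MVN form ("mvn r0, #1"), while
   0x0000ffff fits neither a MOV nor a MVN and so is built up by
   output_multi_immediate as roughly
        mov     r0, #255
        orr     r0, r0, #65280  */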
8403
8404 /* Output an ADD r, s, #n where n may be too big for one instruction.
8405 If adding zero to one register, output nothing. */
8406 const char *
8407 output_add_immediate (rtx *operands)
8408 {
8409 HOST_WIDE_INT n = INTVAL (operands[2]);
8410
8411 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8412 {
8413 if (n < 0)
8414 output_multi_immediate (operands,
8415 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8416 -n);
8417 else
8418 output_multi_immediate (operands,
8419 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8420 n);
8421 }
8422
8423 return "";
8424 }
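/* Illustration (assumed registers, not from the sources): adding 0x10001
   to r1 into r0 cannot be done in one instruction, so this produces
   roughly
        add     r0, r1, #1
        add     r0, r0, #65536
   while adding zero with matching source and destination registers emits
   nothing at all.  */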
8425
8426 /* Output a multiple immediate operation.
8427 OPERANDS is the vector of operands referred to in the output patterns.
8428 INSTR1 is the output pattern to use for the first constant.
8429 INSTR2 is the output pattern to use for subsequent constants.
8430 IMMED_OP is the index of the constant slot in OPERANDS.
8431 N is the constant value. */
8432 static const char *
8433 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8434 int immed_op, HOST_WIDE_INT n)
8435 {
8436 #if HOST_BITS_PER_WIDE_INT > 32
8437 n &= 0xffffffff;
8438 #endif
8439
8440 if (n == 0)
8441 {
8442 /* Quick and easy output. */
8443 operands[immed_op] = const0_rtx;
8444 output_asm_insn (instr1, operands);
8445 }
8446 else
8447 {
8448 int i;
8449 const char * instr = instr1;
8450
8451 /* Note that n is never zero here (which would give no output). */
8452 for (i = 0; i < 32; i += 2)
8453 {
8454 if (n & (3 << i))
8455 {
8456 operands[immed_op] = GEN_INT (n & (255 << i));
8457 output_asm_insn (instr, operands);
8458 instr = instr2;
8459 i += 6;
8460 }
8461 }
8462 }
8463
8464 return "";
8465 }
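/* How the loop above works: it scans N two bits at a time; whenever a set
   bit is found it peels off the 8-bit chunk starting at that (even) bit
   position -- exactly the shape of an ARM rotated 8-bit immediate -- and
   then skips past it (the explicit i += 6 plus the loop's own i += 2).
   Each chunk becomes one INSTR1/INSTR2 instruction.  */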
8466
8467 /* Return the appropriate ARM instruction for the operation code.
8468 The returned result should not be overwritten. OP is the rtx of the
8469 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8470 was shifted. */
8471 const char *
8472 arithmetic_instr (rtx op, int shift_first_arg)
8473 {
8474 switch (GET_CODE (op))
8475 {
8476 case PLUS:
8477 return "add";
8478
8479 case MINUS:
8480 return shift_first_arg ? "rsb" : "sub";
8481
8482 case IOR:
8483 return "orr";
8484
8485 case XOR:
8486 return "eor";
8487
8488 case AND:
8489 return "and";
8490
8491 default:
8492 abort ();
8493 }
8494 }
8495
8496 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8497 for the operation code. The returned result should not be overwritten.
8498 OP is the rtx of the shift.
8499 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8500 constant amount if the shift is by a constant. */
8501 static const char *
8502 shift_op (rtx op, HOST_WIDE_INT *amountp)
8503 {
8504 const char * mnem;
8505 enum rtx_code code = GET_CODE (op);
8506
8507 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8508 *amountp = -1;
8509 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8510 *amountp = INTVAL (XEXP (op, 1));
8511 else
8512 abort ();
8513
8514 switch (code)
8515 {
8516 case ASHIFT:
8517 mnem = "asl";
8518 break;
8519
8520 case ASHIFTRT:
8521 mnem = "asr";
8522 break;
8523
8524 case LSHIFTRT:
8525 mnem = "lsr";
8526 break;
8527
8528 case ROTATERT:
8529 mnem = "ror";
8530 break;
8531
8532 case MULT:
8533 /* We never have to worry about the amount being other than a
8534 power of 2, since this case can never be reloaded from a reg. */
8535 if (*amountp != -1)
8536 *amountp = int_log2 (*amountp);
8537 else
8538 abort ();
8539 return "asl";
8540
8541 default:
8542 abort ();
8543 }
8544
8545 if (*amountp != -1)
8546 {
8547 /* This is not 100% correct, but follows from the desire to merge
8548 multiplication by a power of 2 with the recognizer for a
8549 shift. >=32 is not a valid shift for "asl", so we must try and
8550 output a shift that produces the correct arithmetical result.
8551 Using lsr #32 is identical except for the fact that the carry bit
8552 is not set correctly if we set the flags; but we never use the
8553 carry bit from such an operation, so we can ignore that. */
8554 if (code == ROTATERT)
8555 /* Rotate is just modulo 32. */
8556 *amountp &= 31;
8557 else if (*amountp != (*amountp & 31))
8558 {
8559 if (code == ASHIFT)
8560 mnem = "lsr";
8561 *amountp = 32;
8562 }
8563
8564 /* Shifts of 0 are no-ops. */
8565 if (*amountp == 0)
8566 return NULL;
8567 }
8568
8569 return mnem;
8570 }
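/* Illustration (assumed rtl, not from the sources): for
   (ashiftrt:SI (reg) (const_int 5)) this returns "asr" with *AMOUNTP set
   to 5; for (mult:SI (reg) (const_int 8)) it returns "asl" with *AMOUNTP
   set to int_log2 (8) == 3; and a shift by a register yields
   *AMOUNTP == -1.  */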
8571
8572 /* Obtain the shift count corresponding to the power of two POWER. */
8573
8574 static HOST_WIDE_INT
8575 int_log2 (HOST_WIDE_INT power)
8576 {
8577 HOST_WIDE_INT shift = 0;
8578
8579 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8580 {
8581 if (shift > 31)
8582 abort ();
8583 shift++;
8584 }
8585
8586 return shift;
8587 }
8588
8589 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8590 /bin/as is horribly restrictive. */
8591 #define MAX_ASCII_LEN 51
8592
8593 void
8594 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8595 {
8596 int i;
8597 int len_so_far = 0;
8598
8599 fputs ("\t.ascii\t\"", stream);
8600
8601 for (i = 0; i < len; i++)
8602 {
8603 int c = p[i];
8604
8605 if (len_so_far >= MAX_ASCII_LEN)
8606 {
8607 fputs ("\"\n\t.ascii\t\"", stream);
8608 len_so_far = 0;
8609 }
8610
8611 switch (c)
8612 {
8613 case TARGET_TAB:
8614 fputs ("\\t", stream);
8615 len_so_far += 2;
8616 break;
8617
8618 case TARGET_FF:
8619 fputs ("\\f", stream);
8620 len_so_far += 2;
8621 break;
8622
8623 case TARGET_BS:
8624 fputs ("\\b", stream);
8625 len_so_far += 2;
8626 break;
8627
8628 case TARGET_CR:
8629 fputs ("\\r", stream);
8630 len_so_far += 2;
8631 break;
8632
8633 case TARGET_NEWLINE:
8634 fputs ("\\n", stream);
8635 c = p [i + 1];
8636 if ((c >= ' ' && c <= '~')
8637 || c == TARGET_TAB)
8638 /* This is a good place for a line break. */
8639 len_so_far = MAX_ASCII_LEN;
8640 else
8641 len_so_far += 2;
8642 break;
8643
8644 case '\"':
8645 case '\\':
8646 putc ('\\', stream);
8647 len_so_far++;
8648 /* Drop through. */
8649
8650 default:
8651 if (c >= ' ' && c <= '~')
8652 {
8653 putc (c, stream);
8654 len_so_far++;
8655 }
8656 else
8657 {
8658 fprintf (stream, "\\%03o", c);
8659 len_so_far += 4;
8660 }
8661 break;
8662 }
8663 }
8664
8665 fputs ("\"\n", stream);
8666 }
8667 \f
8668 /* Compute the register save mask for registers 0 through 12
8669 inclusive. This code is used by both arm_compute_save_reg_mask
8670 and arm_compute_initial_elimination_offset. */
8671 static unsigned long
8672 arm_compute_save_reg0_reg12_mask (void)
8673 {
8674 unsigned long func_type = arm_current_func_type ();
8675 unsigned int save_reg_mask = 0;
8676 unsigned int reg;
8677
8678 if (IS_INTERRUPT (func_type))
8679 {
8680 unsigned int max_reg;
8681 /* Interrupt functions must not corrupt any registers,
8682 even call clobbered ones. If this is a leaf function
8683 we can just examine the registers used by the RTL, but
8684 otherwise we have to assume that whatever function is
8685 called might clobber anything, and so we have to save
8686 all the call-clobbered registers as well. */
8687 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8688 /* FIQ handlers have registers r8 - r12 banked, so
8689 we only need to check r0 - r7. Normal ISRs only
8690 bank r14 and r15, so we must check up to r12.
8691 r13 is the stack pointer which is always preserved,
8692 so we do not need to consider it here. */
8693 max_reg = 7;
8694 else
8695 max_reg = 12;
8696
8697 for (reg = 0; reg <= max_reg; reg++)
8698 if (regs_ever_live[reg]
8699 || (! current_function_is_leaf && call_used_regs [reg]))
8700 save_reg_mask |= (1 << reg);
8701 }
8702 else
8703 {
8704 /* In the normal case we only need to save those registers
8705 which are call saved and which are used by this function. */
8706 for (reg = 0; reg <= 10; reg++)
8707 if (regs_ever_live[reg] && ! call_used_regs [reg])
8708 save_reg_mask |= (1 << reg);
8709
8710 /* Handle the frame pointer as a special case. */
8711 if (! TARGET_APCS_FRAME
8712 && ! frame_pointer_needed
8713 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8714 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8715 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8716
8717 /* If we aren't loading the PIC register,
8718 don't stack it even though it may be live. */
8719 if (flag_pic
8720 && ! TARGET_SINGLE_PIC_BASE
8721 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8722 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8723 }
8724
8725 return save_reg_mask;
8726 }
8727
8728 /* Compute a bit mask of which registers need to be
8729 saved on the stack for the current function. */
8730
8731 static unsigned long
8732 arm_compute_save_reg_mask (void)
8733 {
8734 unsigned int save_reg_mask = 0;
8735 unsigned long func_type = arm_current_func_type ();
8736
8737 if (IS_NAKED (func_type))
8738 /* This should never really happen. */
8739 return 0;
8740
8741 /* If we are creating a stack frame, then we must save the frame pointer,
8742 IP (which will hold the old stack pointer), LR and the PC. */
8743 if (frame_pointer_needed)
8744 save_reg_mask |=
8745 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8746 | (1 << IP_REGNUM)
8747 | (1 << LR_REGNUM)
8748 | (1 << PC_REGNUM);
8749
8750 /* Volatile functions do not return, so there
8751 is no need to save any other registers. */
8752 if (IS_VOLATILE (func_type))
8753 return save_reg_mask;
8754
8755 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8756
8757 /* Decide if we need to save the link register.
8758 Interrupt routines have their own banked link register,
8759 so they never need to save it.
8760 Otherwise if we do not use the link register we do not need to save
8761 it. If we are pushing other registers onto the stack however, we
8762 can save an instruction in the epilogue by pushing the link register
8763 now and then popping it back into the PC. This incurs extra memory
8764 accesses though, so we only do it when optimizing for size, and only
8765 if we know that we will not need a fancy return sequence. */
8766 if (regs_ever_live [LR_REGNUM]
8767 || (save_reg_mask
8768 && optimize_size
8769 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8770 save_reg_mask |= 1 << LR_REGNUM;
8771
8772 if (cfun->machine->lr_save_eliminated)
8773 save_reg_mask &= ~ (1 << LR_REGNUM);
8774
8775 if (TARGET_REALLY_IWMMXT
8776 && ((bit_count (save_reg_mask)
8777 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8778 {
8779 unsigned int reg;
8780
8781 /* The total number of registers that are going to be pushed
8782 onto the stack is odd. We need to ensure that the stack
8783 is 64-bit aligned before we start to save iWMMXt registers,
8784 and also before we start to create locals. (A local variable
8785 might be a double or long long which we will load/store using
8786 an iWMMXt instruction). Therefore we need to push another
8787 ARM register, so that the stack will be 64-bit aligned. We
8788 try to avoid using the arg registers (r0 - r3) as they might be
8789 used to pass values in a tail call. */
8790 for (reg = 4; reg <= 12; reg++)
8791 if ((save_reg_mask & (1 << reg)) == 0)
8792 break;
8793
8794 if (reg <= 12)
8795 save_reg_mask |= (1 << reg);
8796 else
8797 {
8798 cfun->machine->sibcall_blocked = 1;
8799 save_reg_mask |= (1 << 3);
8800 }
8801 }
8802
8803 return save_reg_mask;
8804 }
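/* Rough example (hypothetical function, not from the sources): a normal
   function that needs a stack frame and uses the call-saved registers r4
   and r7 ends up with a mask covering r4, r7, fp, ip, lr and pc -- the
   frame-creation block above contributes fp/ip/lr/pc, and
   arm_compute_save_reg0_reg12_mask contributes r4 and r7.  */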
8805
8806 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8807 everything bar the final return instruction. */
8808 const char *
8809 output_return_instruction (rtx operand, int really_return, int reverse)
8810 {
8811 char conditional[10];
8812 char instr[100];
8813 int reg;
8814 unsigned long live_regs_mask;
8815 unsigned long func_type;
8816
8817 func_type = arm_current_func_type ();
8818
8819 if (IS_NAKED (func_type))
8820 return "";
8821
8822 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8823 {
8824 /* If this function was declared non-returning, and we have
8825 found a tail call, then we have to trust that the called
8826 function won't return. */
8827 if (really_return)
8828 {
8829 rtx ops[2];
8830
8831 /* Otherwise, trap an attempted return by aborting. */
8832 ops[0] = operand;
8833 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8834 : "abort");
8835 assemble_external_libcall (ops[1]);
8836 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8837 }
8838
8839 return "";
8840 }
8841
8842 if (current_function_calls_alloca && !really_return)
8843 abort ();
8844
8845 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8846
8847 return_used_this_function = 1;
8848
8849 live_regs_mask = arm_compute_save_reg_mask ();
8850
8851 if (live_regs_mask)
8852 {
8853 const char * return_reg;
8854
8855 /* If we do not have any special requirements for function exit
8856 (e.g. interworking, or ISR) then we can load the return address
8857 directly into the PC. Otherwise we must load it into LR. */
8858 if (really_return
8859 && ! TARGET_INTERWORK)
8860 return_reg = reg_names[PC_REGNUM];
8861 else
8862 return_reg = reg_names[LR_REGNUM];
8863
8864 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8865 {
8866 /* There are three possible reasons for the IP register
8867 being saved. 1) a stack frame was created, in which case
8868 IP contains the old stack pointer, or 2) an ISR routine
8869 corrupted it, or 3) it was saved to align the stack on
8870 iWMMXt. In case 1, restore IP into SP, otherwise just
8871 restore IP. */
8872 if (frame_pointer_needed)
8873 {
8874 live_regs_mask &= ~ (1 << IP_REGNUM);
8875 live_regs_mask |= (1 << SP_REGNUM);
8876 }
8877 else
8878 {
8879 if (! IS_INTERRUPT (func_type)
8880 && ! TARGET_REALLY_IWMMXT)
8881 abort ();
8882 }
8883 }
8884
8885 /* On some ARM architectures it is faster to use LDR rather than
8886 LDM to load a single register. On other architectures, the
8887 cost is the same. In 26 bit mode, or for exception handlers,
8888 we have to use LDM to load the PC so that the CPSR is also
8889 restored. */
8890 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8891 {
8892 if (live_regs_mask == (unsigned int)(1 << reg))
8893 break;
8894 }
8895 if (reg <= LAST_ARM_REGNUM
8896 && (reg != LR_REGNUM
8897 || ! really_return
8898 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
8899 {
8900 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8901 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8902 }
8903 else
8904 {
8905 char *p;
8906 int first = 1;
8907
8908 /* Generate the load multiple instruction to restore the
8909 registers. Note we can get here, even if
8910 frame_pointer_needed is true, but only if sp already
8911 points to the base of the saved core registers. */
8912 if (live_regs_mask & (1 << SP_REGNUM))
8913 {
8914 unsigned HOST_WIDE_INT stack_adjust =
8915 arm_get_frame_size () + current_function_outgoing_args_size;
8916
8917 if (stack_adjust != 0 && stack_adjust != 4)
8918 abort ();
8919
8920 if (stack_adjust && arm_arch5)
8921 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8922 else
8923 {
8924 /* If we can't use ldmib (SA110 bug), then try to pop r3
8925 instead. */
8926 if (stack_adjust)
8927 live_regs_mask |= 1 << 3;
8928 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8929 }
8930 }
8931 else
8932 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8933
8934 p = instr + strlen (instr);
8935
8936 for (reg = 0; reg <= SP_REGNUM; reg++)
8937 if (live_regs_mask & (1 << reg))
8938 {
8939 int l = strlen (reg_names[reg]);
8940
8941 if (first)
8942 first = 0;
8943 else
8944 {
8945 memcpy (p, ", ", 2);
8946 p += 2;
8947 }
8948
8949 memcpy (p, "%|", 2);
8950 memcpy (p + 2, reg_names[reg], l);
8951 p += l + 2;
8952 }
8953
8954 if (live_regs_mask & (1 << LR_REGNUM))
8955 {
8956 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8957 /* Decide if we need to add the ^ symbol to the end of the
8958 register list. This causes the saved condition codes
8959 register to be copied into the current condition codes
8960 register. We do the copy if we are conforming to the 32-bit
8961 ABI and this is an interrupt function, or if we are
8962 conforming to the 26-bit ABI. There is a special case for
8963 the 26-bit ABI however, which is if we are writing back the
8964 stack pointer but not loading the PC. In this case adding
8965 the ^ symbol would create a type 2 LDM instruction, where
8966 writeback is UNPREDICTABLE. We are safe in leaving the ^
8967 character off in this case however, since the actual return
8968 instruction will be a MOVS which will restore the CPSR. */
8969 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
8970 || (! TARGET_APCS_32 && really_return))
8971 strcat (p, "^");
8972 }
8973 else
8974 strcpy (p, "}");
8975 }
8976
8977 output_asm_insn (instr, & operand);
8978
8979 /* See if we need to generate an extra instruction to
8980 perform the actual function return. */
8981 if (really_return
8982 && func_type != ARM_FT_INTERWORKED
8983 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8984 {
8985 /* The return has already been handled
8986 by loading the LR into the PC. */
8987 really_return = 0;
8988 }
8989 }
8990
8991 if (really_return)
8992 {
8993 switch ((int) ARM_FUNC_TYPE (func_type))
8994 {
8995 case ARM_FT_ISR:
8996 case ARM_FT_FIQ:
8997 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
8998 break;
8999
9000 case ARM_FT_INTERWORKED:
9001 sprintf (instr, "bx%s\t%%|lr", conditional);
9002 break;
9003
9004 case ARM_FT_EXCEPTION:
9005 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9006 break;
9007
9008 default:
9009 /* ARMv5 implementations always provide BX, so interworking
9010 is the default unless APCS-26 is in use. */
9011 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
9012 sprintf (instr, "bx%s\t%%|lr", conditional);
9013 else
9014 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
9015 conditional, TARGET_APCS_32 ? "" : "s");
9016 break;
9017 }
9018
9019 output_asm_insn (instr, & operand);
9020 }
9021
9022 return "";
9023 }
9024
9025 /* Write the function name into the code section, directly preceding
9026 the function prologue.
9027
9028 Code will be output similar to this:
9029 t0
9030 .ascii "arm_poke_function_name", 0
9031 .align
9032 t1
9033 .word 0xff000000 + (t1 - t0)
9034 arm_poke_function_name
9035 mov ip, sp
9036 stmfd sp!, {fp, ip, lr, pc}
9037 sub fp, ip, #4
9038
9039 When performing a stack backtrace, code can inspect the value
9040 of 'pc' stored at 'fp' + 0. If the trace function then looks
9041 at location pc - 12 and the top 8 bits are set, then we know
9042 that there is a function name embedded immediately preceding this
9043 location, and that it has length ((pc[-3]) & 0xff000000).
9044
9045 We assume that pc is declared as a pointer to an unsigned long.
9046
9047 It is of no benefit to output the function name if we are assembling
9048 a leaf function. These function types will not contain a stack
9049 backtrace structure, therefore it is not possible to determine the
9050 function name. */
9051 void
9052 arm_poke_function_name (FILE *stream, const char *name)
9053 {
9054 unsigned long alignlength;
9055 unsigned long length;
9056 rtx x;
9057
9058 length = strlen (name) + 1;
9059 alignlength = ROUND_UP_WORD (length);
9060
9061 ASM_OUTPUT_ASCII (stream, name, length);
9062 ASM_OUTPUT_ALIGN (stream, 2);
9063 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9064 assemble_aligned_integer (UNITS_PER_WORD, x);
9065 }
9066
9067 /* Place some comments into the assembler stream
9068 describing the current function. */
9069 static void
9070 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9071 {
9072 unsigned long func_type;
9073
9074 if (!TARGET_ARM)
9075 {
9076 thumb_output_function_prologue (f, frame_size);
9077 return;
9078 }
9079
9080 /* Sanity check. */
9081 if (arm_ccfsm_state || arm_target_insn)
9082 abort ();
9083
9084 func_type = arm_current_func_type ();
9085
9086 switch ((int) ARM_FUNC_TYPE (func_type))
9087 {
9088 default:
9089 case ARM_FT_NORMAL:
9090 break;
9091 case ARM_FT_INTERWORKED:
9092 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9093 break;
9094 case ARM_FT_EXCEPTION_HANDLER:
9095 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
9096 break;
9097 case ARM_FT_ISR:
9098 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9099 break;
9100 case ARM_FT_FIQ:
9101 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9102 break;
9103 case ARM_FT_EXCEPTION:
9104 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9105 break;
9106 }
9107
9108 if (IS_NAKED (func_type))
9109 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9110
9111 if (IS_VOLATILE (func_type))
9112 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9113
9114 if (IS_NESTED (func_type))
9115 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9116
9117 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9118 current_function_args_size,
9119 current_function_pretend_args_size, frame_size);
9120
9121 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9122 frame_pointer_needed,
9123 cfun->machine->uses_anonymous_args);
9124
9125 if (cfun->machine->lr_save_eliminated)
9126 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9127
9128 #ifdef AOF_ASSEMBLER
9129 if (flag_pic)
9130 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9131 #endif
9132
9133 return_used_this_function = 0;
9134 }
9135
9136 const char *
9137 arm_output_epilogue (rtx sibling)
9138 {
9139 int reg;
9140 unsigned long saved_regs_mask;
9141 unsigned long func_type;
9142 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9143 frame that is $fp + 4 for a non-variadic function. */
9144 int floats_offset = 0;
9145 rtx operands[3];
9146 int frame_size = arm_get_frame_size ();
9147 FILE * f = asm_out_file;
9148 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9149 unsigned int lrm_count = 0;
9150 int really_return = (sibling == NULL);
9151 int start_reg;
9152
9153 /* If we have already generated the return instruction
9154 then it is futile to generate anything else. */
9155 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9156 return "";
9157
9158 func_type = arm_current_func_type ();
9159
9160 if (IS_NAKED (func_type))
9161 /* Naked functions don't have epilogues. */
9162 return "";
9163
9164 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9165 {
9166 rtx op;
9167
9168 /* A volatile function should never return. Call abort. */
9169 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9170 assemble_external_libcall (op);
9171 output_asm_insn ("bl\t%a0", &op);
9172
9173 return "";
9174 }
9175
9176 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
9177 && ! really_return)
9178 /* If we are throwing an exception, then we really must
9179 be doing a return, so we can't tail-call. */
9180 abort ();
9181
9182 saved_regs_mask = arm_compute_save_reg_mask ();
9183
9184 if (TARGET_IWMMXT)
9185 lrm_count = bit_count (saved_regs_mask);
9186
9187 /* XXX We should adjust floats_offset for any anonymous args, and then
9188 re-adjust vfp_offset below to compensate. */
9189
9190 /* Compute how far away the floats will be. */
9191 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9192 if (saved_regs_mask & (1 << reg))
9193 floats_offset += 4;
9194
9195 if (frame_pointer_needed)
9196 {
9197 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9198 int vfp_offset = 4;
9199
9200 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9201 {
9202 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9203 if (regs_ever_live[reg] && !call_used_regs[reg])
9204 {
9205 floats_offset += 12;
9206 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9207 reg, FP_REGNUM, floats_offset - vfp_offset);
9208 }
9209 }
9210 else
9211 {
9212 start_reg = LAST_FPA_REGNUM;
9213
9214 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9215 {
9216 if (regs_ever_live[reg] && !call_used_regs[reg])
9217 {
9218 floats_offset += 12;
9219
9220 /* We can't unstack more than four registers at once. */
9221 if (start_reg - reg == 3)
9222 {
9223 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9224 reg, FP_REGNUM, floats_offset - vfp_offset);
9225 start_reg = reg - 1;
9226 }
9227 }
9228 else
9229 {
9230 if (reg != start_reg)
9231 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9232 reg + 1, start_reg - reg,
9233 FP_REGNUM, floats_offset - vfp_offset);
9234 start_reg = reg - 1;
9235 }
9236 }
9237
9238 /* Just in case the last register checked also needs unstacking. */
9239 if (reg != start_reg)
9240 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9241 reg + 1, start_reg - reg,
9242 FP_REGNUM, floats_offset - vfp_offset);
9243 }
9244
9245 if (TARGET_HARD_FLOAT && TARGET_VFP)
9246 {
9247 int nregs = 0;
9248
9249 /* We save regs in pairs. */
9250 /* A special insn for saving/restoring VFP registers. This does
9251 not have base+offset addressing modes, so we use IP to
9252 hold the address. Each block requires nregs*2+1 words. */
9253 start_reg = FIRST_VFP_REGNUM;
9254 /* Count how many blocks of registers need saving. */
9255 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9256 {
9257 if ((!regs_ever_live[reg] || call_used_regs[reg])
9258 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9259 {
9260 if (start_reg != reg)
9261 floats_offset += 4;
9262 start_reg = reg + 2;
9263 }
9264 else
9265 {
9266 floats_offset += 8;
9267 nregs++;
9268 }
9269 }
9270 if (start_reg != reg)
9271 floats_offset += 4;
9272
9273 if (nregs > 0)
9274 {
9275 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9276 FP_REGNUM, floats_offset - vfp_offset);
9277 }
9278 start_reg = FIRST_VFP_REGNUM;
9279 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9280 {
9281 if ((!regs_ever_live[reg] || call_used_regs[reg])
9282 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9283 {
9284 if (start_reg != reg)
9285 {
9286 vfp_print_multi (f, "fldmfdx\t%r!", IP_REGNUM, "d%d",
9287 (start_reg - FIRST_VFP_REGNUM) / 2,
9288 (reg - start_reg) / 2);
9289 }
9290 start_reg = reg + 2;
9291 }
9292 }
9293 if (start_reg != reg)
9294 {
9295 vfp_print_multi (f, "fldmfdx\t%r!", IP_REGNUM, "d%d",
9296 (start_reg - FIRST_VFP_REGNUM) / 2,
9297 (reg - start_reg) / 2);
9298 }
9299 }
9300
9301 if (TARGET_IWMMXT)
9302 {
9303 /* The frame pointer is guaranteed to be non-double-word aligned.
9304 This is because it is set to (old_stack_pointer - 4) and the
9305 old_stack_pointer was double word aligned. Thus the offset to
9306 the iWMMXt registers to be loaded must also be non-double-word
9307 sized, so that the resultant address *is* double-word aligned.
9308 We can ignore floats_offset since that was already included in
9309 the live_regs_mask. */
9310 lrm_count += (lrm_count % 2 ? 2 : 1);
9311
9312 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9313 if (regs_ever_live[reg] && !call_used_regs[reg])
9314 {
9315 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9316 reg, FP_REGNUM, lrm_count * 4);
9317 lrm_count += 2;
9318 }
9319 }
9320
9321 /* saved_regs_mask should contain the IP, which at the time of stack
9322 frame generation actually contains the old stack pointer. So a
9323 quick way to unwind the stack is just pop the IP register directly
9324 into the stack pointer. */
9325 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9326 abort ();
9327 saved_regs_mask &= ~ (1 << IP_REGNUM);
9328 saved_regs_mask |= (1 << SP_REGNUM);
9329
9330 /* There are two registers left in saved_regs_mask - LR and PC. We
9331 only need to restore the LR register (the return address), but to
9332 save time we can load it directly into the PC, unless we need a
9333 special function exit sequence, or we are not really returning. */
9334 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
9335 /* Delete the LR from the register mask, so that the LR on
9336 the stack is loaded into the PC in the register mask. */
9337 saved_regs_mask &= ~ (1 << LR_REGNUM);
9338 else
9339 saved_regs_mask &= ~ (1 << PC_REGNUM);
9340
9341 /* We must use SP as the base register, because SP is one of the
9342 registers being restored. If an interrupt or page fault
9343 happens in the ldm instruction, the SP might or might not
9344 have been restored. That would be bad, as then SP will no
9345 longer indicate the safe area of stack, and we can get stack
9346 corruption. Using SP as the base register means that it will
9347 be reset correctly to the original value, should an interrupt
9348 occur. If the stack pointer already points at the right
9349 place, then omit the subtraction. */
9350 if (((frame_size + current_function_outgoing_args_size + floats_offset)
9351 != 4 * (1 + (int) bit_count (saved_regs_mask)))
9352 || current_function_calls_alloca)
9353 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9354 4 * bit_count (saved_regs_mask));
9355 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9356
9357 if (IS_INTERRUPT (func_type))
9358 /* Interrupt handlers will have pushed the
9359 IP onto the stack, so restore it now. */
9360 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9361 }
9362 else
9363 {
9364 /* Restore stack pointer if necessary. */
9365 if (frame_size + current_function_outgoing_args_size != 0)
9366 {
9367 operands[0] = operands[1] = stack_pointer_rtx;
9368 operands[2] = GEN_INT (frame_size
9369 + current_function_outgoing_args_size);
9370 output_add_immediate (operands);
9371 }
9372
9373 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9374 {
9375 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9376 if (regs_ever_live[reg] && !call_used_regs[reg])
9377 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9378 reg, SP_REGNUM);
9379 }
9380 else
9381 {
9382 start_reg = FIRST_FPA_REGNUM;
9383
9384 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9385 {
9386 if (regs_ever_live[reg] && !call_used_regs[reg])
9387 {
9388 if (reg - start_reg == 3)
9389 {
9390 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9391 start_reg, SP_REGNUM);
9392 start_reg = reg + 1;
9393 }
9394 }
9395 else
9396 {
9397 if (reg != start_reg)
9398 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9399 start_reg, reg - start_reg,
9400 SP_REGNUM);
9401
9402 start_reg = reg + 1;
9403 }
9404 }
9405
9406 /* Just in case the last register checked also needs unstacking. */
9407 if (reg != start_reg)
9408 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9409 start_reg, reg - start_reg, SP_REGNUM);
9410 }
9411
9412 if (TARGET_HARD_FLOAT && TARGET_VFP)
9413 {
9414 start_reg = FIRST_VFP_REGNUM;
9415 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9416 {
9417 if ((!regs_ever_live[reg] || call_used_regs[reg])
9418 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9419 {
9420 if (start_reg != reg)
9421 {
9422 vfp_print_multi (f, "fldmfdx\t%r!", SP_REGNUM, "d%d",
9423 (start_reg - FIRST_VFP_REGNUM) / 2,
9424 (reg - start_reg) / 2);
9425 }
9426 start_reg = reg + 2;
9427 }
9428 }
9429 if (start_reg != reg)
9430 {
9431 vfp_print_multi (f, "fldmfdx\t%r!", SP_REGNUM, "d%d",
9432 (start_reg - FIRST_VFP_REGNUM) / 2,
9433 (reg - start_reg) / 2);
9434 }
9435 }
9436 if (TARGET_IWMMXT)
9437 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9438 if (regs_ever_live[reg] && !call_used_regs[reg])
9439 asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);
9440
9441 /* If we can, restore the LR into the PC. */
9442 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9443 && really_return
9444 && current_function_pretend_args_size == 0
9445 && saved_regs_mask & (1 << LR_REGNUM))
9446 {
9447 saved_regs_mask &= ~ (1 << LR_REGNUM);
9448 saved_regs_mask |= (1 << PC_REGNUM);
9449 }
9450
9451 /* Load the registers off the stack. If we only have one register
9452 to load use the LDR instruction - it is faster. */
9453 if (saved_regs_mask == (1 << LR_REGNUM))
9454 {
9455 /* The exception handler ignores the LR, so we do
9456 not really need to load it off the stack. */
9457 if (eh_ofs)
9458 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
9459 else
9460 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9461 }
9462 else if (saved_regs_mask)
9463 {
9464 if (saved_regs_mask & (1 << SP_REGNUM))
9465 /* Note - write back to the stack register is not enabled
9466 (ie "ldmfd sp!..."). We know that the stack pointer is
9467 in the list of registers and if we add writeback the
9468 instruction becomes UNPREDICTABLE. */
9469 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9470 else
9471 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9472 }
9473
9474 if (current_function_pretend_args_size)
9475 {
9476 /* Unwind the pre-pushed regs. */
9477 operands[0] = operands[1] = stack_pointer_rtx;
9478 operands[2] = GEN_INT (current_function_pretend_args_size);
9479 output_add_immediate (operands);
9480 }
9481 }
9482
9483 if (! really_return
9484 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9485 && current_function_pretend_args_size == 0
9486 && saved_regs_mask & (1 << PC_REGNUM)))
9487 return "";
9488
9489 /* Generate the return instruction. */
9490 switch ((int) ARM_FUNC_TYPE (func_type))
9491 {
9492 case ARM_FT_EXCEPTION_HANDLER:
9493 /* Even in 26-bit mode we do a mov (rather than a movs)
9494 because we don't have the PSR bits set in the address. */
9495 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
9496 break;
9497
9498 case ARM_FT_ISR:
9499 case ARM_FT_FIQ:
9500 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9501 break;
9502
9503 case ARM_FT_EXCEPTION:
9504 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9505 break;
9506
9507 case ARM_FT_INTERWORKED:
9508 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9509 break;
9510
9511 default:
9512 if (frame_pointer_needed)
9513 /* If we used the frame pointer then the return address
9514 will have been loaded off the stack directly into the
9515 PC, so there is no need to issue a MOV instruction
9516 here. */
9517 ;
9518 else if (current_function_pretend_args_size == 0
9519 && (saved_regs_mask & (1 << LR_REGNUM)))
9520 /* Similarly we may have been able to load LR into the PC
9521 even if we did not create a stack frame. */
9522 ;
9523 else if (TARGET_APCS_32)
9524 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9525 else
9526 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9527 break;
9528 }
9529
9530 return "";
9531 }
9532
9533 static void
9534 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9535 HOST_WIDE_INT frame_size)
9536 {
9537 if (TARGET_THUMB)
9538 {
9539 /* ??? Probably not safe to set this here, since it assumes that a
9540 function will be emitted as assembly immediately after we generate
9541 RTL for it. This does not happen for inline functions. */
9542 return_used_this_function = 0;
9543 }
9544 else
9545 {
9546 /* We need to take into account any stack-frame rounding. */
9547 frame_size = arm_get_frame_size ();
9548
9549 if (use_return_insn (FALSE, NULL)
9550 && return_used_this_function
9551 && (frame_size + current_function_outgoing_args_size) != 0
9552 && !frame_pointer_needed)
9553 abort ();
9554
9555 /* Reset the ARM-specific per-function variables. */
9556 after_arm_reorg = 0;
9557 }
9558 }
9559
9560 /* Generate and emit an insn that we will recognize as a push_multi.
9561 Unfortunately, since this insn does not reflect very well the actual
9562 semantics of the operation, we need to annotate the insn for the benefit
9563 of DWARF2 frame unwind information. */
9564 static rtx
9565 emit_multi_reg_push (int mask)
9566 {
9567 int num_regs = 0;
9568 int num_dwarf_regs;
9569 int i, j;
9570 rtx par;
9571 rtx dwarf;
9572 int dwarf_par_index;
9573 rtx tmp, reg;
9574
9575 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9576 if (mask & (1 << i))
9577 num_regs++;
9578
9579 if (num_regs == 0 || num_regs > 16)
9580 abort ();
9581
9582 /* We don't record the PC in the dwarf frame information. */
9583 num_dwarf_regs = num_regs;
9584 if (mask & (1 << PC_REGNUM))
9585 num_dwarf_regs--;
9586
9587 /* For the body of the insn we are going to generate an UNSPEC in
9588 parallel with several USEs. This allows the insn to be recognized
9589 by the push_multi pattern in the arm.md file. The insn looks
9590 something like this:
9591
9592 (parallel [
9593 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9594 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9595 (use (reg:SI 11 fp))
9596 (use (reg:SI 12 ip))
9597 (use (reg:SI 14 lr))
9598 (use (reg:SI 15 pc))
9599 ])
9600
9601 For the frame note however, we try to be more explicit and actually
9602 show each register being stored into the stack frame, plus a (single)
9603 decrement of the stack pointer. We do it this way in order to be
9604 friendly to the stack unwinding code, which only wants to see a single
9605 stack decrement per instruction. The RTL we generate for the note looks
9606 something like this:
9607
9608 (sequence [
9609 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9610 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9611 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9612 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9613 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9614 ])
9615
9616 This sequence is used both by the code to support stack unwinding for
9617 exception handlers and the code to generate dwarf2 frame debugging. */
9618
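  /* As a rough illustration (not text emitted by this function), the insn
     from the example above would typically be assembled as something like

	stmfd	sp!, {r4, fp, ip, lr, pc}

     while the attached note records a single 20-byte decrement of SP
     followed by the individual stores of r4, fp, ip and lr (the PC being
     omitted, as explained above).  */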
9619 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9620 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9621 dwarf_par_index = 1;
9622
9623 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9624 {
9625 if (mask & (1 << i))
9626 {
9627 reg = gen_rtx_REG (SImode, i);
9628
9629 XVECEXP (par, 0, 0)
9630 = gen_rtx_SET (VOIDmode,
9631 gen_rtx_MEM (BLKmode,
9632 gen_rtx_PRE_DEC (BLKmode,
9633 stack_pointer_rtx)),
9634 gen_rtx_UNSPEC (BLKmode,
9635 gen_rtvec (1, reg),
9636 UNSPEC_PUSH_MULT));
9637
9638 if (i != PC_REGNUM)
9639 {
9640 tmp = gen_rtx_SET (VOIDmode,
9641 gen_rtx_MEM (SImode, stack_pointer_rtx),
9642 reg);
9643 RTX_FRAME_RELATED_P (tmp) = 1;
9644 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9645 dwarf_par_index++;
9646 }
9647
9648 break;
9649 }
9650 }
9651
9652 for (j = 1, i++; j < num_regs; i++)
9653 {
9654 if (mask & (1 << i))
9655 {
9656 reg = gen_rtx_REG (SImode, i);
9657
9658 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9659
9660 if (i != PC_REGNUM)
9661 {
9662 tmp = gen_rtx_SET (VOIDmode,
9663 gen_rtx_MEM (SImode,
9664 plus_constant (stack_pointer_rtx,
9665 4 * j)),
9666 reg);
9667 RTX_FRAME_RELATED_P (tmp) = 1;
9668 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9669 }
9670
9671 j++;
9672 }
9673 }
9674
9675 par = emit_insn (par);
9676
9677 tmp = gen_rtx_SET (SImode,
9678 stack_pointer_rtx,
9679 gen_rtx_PLUS (SImode,
9680 stack_pointer_rtx,
9681 GEN_INT (-4 * num_regs)));
9682 RTX_FRAME_RELATED_P (tmp) = 1;
9683 XVECEXP (dwarf, 0, 0) = tmp;
9684
9685 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9686 REG_NOTES (par));
9687 return par;
9688 }
9689
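/* Emit the RTL for an FPA store-multiple ("sfm") that pushes COUNT XFmode
   registers, starting at BASE_REG, onto the stack with a pre-decrement of
   the stack pointer, and attach a REG_FRAME_RELATED_EXPR note describing
   the individual stores for the DWARF unwinder.  */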
9690 static rtx
9691 emit_sfm (int base_reg, int count)
9692 {
9693 rtx par;
9694 rtx dwarf;
9695 rtx tmp, reg;
9696 int i;
9697
9698 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9699 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9700
9701 reg = gen_rtx_REG (XFmode, base_reg++);
9702
9703 XVECEXP (par, 0, 0)
9704 = gen_rtx_SET (VOIDmode,
9705 gen_rtx_MEM (BLKmode,
9706 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9707 gen_rtx_UNSPEC (BLKmode,
9708 gen_rtvec (1, reg),
9709 UNSPEC_PUSH_MULT));
9710 tmp
9711 = gen_rtx_SET (VOIDmode,
9712 gen_rtx_MEM (XFmode,
9713 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9714 reg);
9715 RTX_FRAME_RELATED_P (tmp) = 1;
9716 XVECEXP (dwarf, 0, count - 1) = tmp;
9717
9718 for (i = 1; i < count; i++)
9719 {
9720 reg = gen_rtx_REG (XFmode, base_reg++);
9721 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9722
9723 tmp = gen_rtx_SET (VOIDmode,
9724 gen_rtx_MEM (XFmode,
9725 gen_rtx_PRE_DEC (BLKmode,
9726 stack_pointer_rtx)),
9727 reg);
9728 RTX_FRAME_RELATED_P (tmp) = 1;
9729 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9730 }
9731
9732 par = emit_insn (par);
9733 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9734 REG_NOTES (par));
9735 return par;
9736 }
9737
9738
9739 /* Compute the distance from register FROM to register TO.
9740 These can be the arg pointer (26), the soft frame pointer (25),
9741 the stack pointer (13) or the hard frame pointer (11).
9742 Typical stack layout looks like this:
9743
9744 old stack pointer -> | |
9745 ----
9746 | | \
9747 | | saved arguments for
9748 | | vararg functions
9749 | | /
9750 --
9751 hard FP & arg pointer -> | | \
9752 | | stack
9753 | | frame
9754 | | /
9755 --
9756 | | \
9757 | | call saved
9758 | | registers
9759 soft frame pointer -> | | /
9760 --
9761 | | \
9762 | | local
9763 | | variables
9764 | | /
9765 --
9766 | | \
9767 | | outgoing
9768 | | arguments
9769 current stack pointer -> | | /
9770 --
9771
9772 For a given function some or all of these stack components
9773 may not be needed, giving rise to the possibility of
9774 eliminating some of the registers.
9775
9776 The values returned by this function must reflect the behavior
9777 of arm_expand_prologue() and arm_compute_save_reg_mask().
9778
9779 The sign of the number returned reflects the direction of stack
9780 growth, so the values are positive for all eliminations except
9781 from the soft frame pointer to the hard frame pointer. */
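/* As a purely illustrative worked example: for an ARM function that saves
   four call-saved core registers (16 bytes), needs a frame pointer
   (stack_frame = 16), has 8 bytes of local variables and no outgoing
   arguments, the ARG_POINTER -> STACK_POINTER offset computed below would
   be 16 + 16 + 8 + 0 - 4 = 36.  */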
9782 unsigned int
9783 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9784 {
9785 unsigned int local_vars = arm_get_frame_size ();
9786 unsigned int outgoing_args = current_function_outgoing_args_size;
9787 unsigned int stack_frame;
9788 unsigned int call_saved_registers;
9789 unsigned long func_type;
9790
9791 func_type = arm_current_func_type ();
9792
9793 /* Volatile functions never return, so there is
9794 no need to save call saved registers. */
9795 call_saved_registers = 0;
9796 if (! IS_VOLATILE (func_type))
9797 {
9798 unsigned int reg_mask;
9799 unsigned int reg;
9800 bool new_block;
9801
9802 /* Make sure that we compute which registers will be saved
9803 on the stack using the same algorithm that is used by
9804 the prologue creation code. */
9805 reg_mask = arm_compute_save_reg_mask ();
9806
9807 /* Now count the number of bits set in save_reg_mask.
9808 If we have already counted the registers in the stack
9809 frame, do not count them again. Non call-saved registers
9810 might be saved in the call-save area of the stack, if
9811 doing so will preserve the stack's alignment. Hence we
9812 must count them here. For each set bit we need 4 bytes
9813 of stack space. */
9814 if (frame_pointer_needed)
9815 reg_mask &= 0x07ff;
9816 call_saved_registers += 4 * bit_count (reg_mask);
9817
9818 /* If the hard floating point registers are going to be
9819 used then they must be saved on the stack as well.
9820 Each register occupies 12 bytes of stack space. */
9821 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9822 if (regs_ever_live[reg] && ! call_used_regs[reg])
9823 call_saved_registers += 12;
9824
9825 /* Likewise VFP regs. */
9826 if (TARGET_HARD_FLOAT && TARGET_VFP)
9827 {
9828 new_block = TRUE;
9829 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9830 {
9831 if ((regs_ever_live[reg] && !call_used_regs[reg])
9832 || (regs_ever_live[reg + 1] && !call_used_regs[reg + 1]))
9833 {
9834 if (new_block)
9835 {
9836 call_saved_registers += 4;
9837 new_block = FALSE;
9838 }
9839 call_saved_registers += 8;
9840 }
9841 else
9842 new_block = TRUE;
9843 }
9844 }
9845
9846 if (TARGET_REALLY_IWMMXT)
9847 /* Check for the call-saved iWMMXt registers. */
9848 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9849 if (regs_ever_live[reg] && ! call_used_regs [reg])
9850 call_saved_registers += 8;
9851 }
9852
9853 /* The stack frame contains 4 registers - the old frame pointer,
9854 the old stack pointer, the return address and PC of the start
9855 of the function. */
9856 stack_frame = frame_pointer_needed ? 16 : 0;
9857
9858 /* OK, now we have enough information to compute the distances.
9859 There must be an entry in these switch tables for each pair
9860 of registers in ELIMINABLE_REGS, even if some of the entries
9861 seem to be redundant or useless. */
9862 switch (from)
9863 {
9864 case ARG_POINTER_REGNUM:
9865 switch (to)
9866 {
9867 case THUMB_HARD_FRAME_POINTER_REGNUM:
9868 return 0;
9869
9870 case FRAME_POINTER_REGNUM:
9871 /* This is the reverse of the soft frame pointer
9872 to hard frame pointer elimination below. */
9873 if (call_saved_registers == 0 && stack_frame == 0)
9874 return 0;
9875 return (call_saved_registers + stack_frame - 4);
9876
9877 case ARM_HARD_FRAME_POINTER_REGNUM:
9878 /* If there is no stack frame then the hard
9879 frame pointer and the arg pointer coincide. */
9880 if (stack_frame == 0 && call_saved_registers != 0)
9881 return 0;
9882 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9883 return (frame_pointer_needed
9884 && current_function_needs_context
9885 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9886
9887 case STACK_POINTER_REGNUM:
9888 /* If nothing has been pushed on the stack at all
9889 then this will return -4. This *is* correct! */
9890 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
9891
9892 default:
9893 abort ();
9894 }
9895 break;
9896
9897 case FRAME_POINTER_REGNUM:
9898 switch (to)
9899 {
9900 case THUMB_HARD_FRAME_POINTER_REGNUM:
9901 return 0;
9902
9903 case ARM_HARD_FRAME_POINTER_REGNUM:
9904 /* The hard frame pointer points to the top entry in the
9905 stack frame. The soft frame pointer to the bottom entry
9906 in the stack frame. If there is no stack frame at all,
9907 then they are identical. */
9908 if (call_saved_registers == 0 && stack_frame == 0)
9909 return 0;
9910 return - (call_saved_registers + stack_frame - 4);
9911
9912 case STACK_POINTER_REGNUM:
9913 return local_vars + outgoing_args;
9914
9915 default:
9916 abort ();
9917 }
9918 break;
9919
9920 default:
9921 /* You cannot eliminate from the stack pointer.
9922 In theory you could eliminate from the hard frame
9923 pointer to the stack pointer, but this will never
9924 happen, since if a stack frame is not needed the
9925 hard frame pointer will never be used. */
9926 abort ();
9927 }
9928 }
9929
9930 /* Calculate the size of the stack frame, taking into account any
9931 padding that is required to ensure stack-alignment. */
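/* Illustrative example: when TARGET_ATPCS is in effect, if the saved
   registers and pretend args (entry_size) come to 20 bytes and the local
   frame plus outgoing args come to 8 bytes, the total of 28 is not a
   multiple of 8, so the code below pads the local frame by 4 bytes.  */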
9932 HOST_WIDE_INT
9933 arm_get_frame_size (void)
9934 {
9935 int regno;
9936
9937 int base_size = ROUND_UP_WORD (get_frame_size ());
9938 int entry_size = 0;
9939 unsigned long func_type = arm_current_func_type ();
9940 int leaf;
9941 bool new_block;
9942
9943 if (! TARGET_ARM)
9944 abort();
9945
9946 if (! TARGET_ATPCS)
9947 return base_size;
9948
9949 /* We need to know if we are a leaf function. Unfortunately, it
9950 is possible to be called after start_sequence has been called,
9951 which causes get_insns to return the insns for the sequence,
9952 not the function, which will cause leaf_function_p to return
9953 the incorrect result.
9954
9955 To work around this, we cache the computed frame size. This
9956 works because we will only be calling RTL expanders that need
9957 to know about leaf functions once reload has completed, and the
9958 frame size cannot be changed after that time, so we can safely
9959 use the cached value. */
9960
9961 if (reload_completed)
9962 return cfun->machine->frame_size;
9963
9964 leaf = leaf_function_p ();
9965
9966 /* A leaf function does not need any stack alignment if it has nothing
9967 on the stack. */
9968 if (leaf && base_size == 0)
9969 {
9970 cfun->machine->frame_size = 0;
9971 return 0;
9972 }
9973
9974 /* We know that SP will be word aligned on entry, and we must
9975 preserve that condition at any subroutine call. But those are
9976 the only constraints. */
9977
9978 /* Space for variadic functions. */
9979 if (current_function_pretend_args_size)
9980 entry_size += current_function_pretend_args_size;
9981
9982 /* Space for saved registers. */
9983 entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;
9984
9985 if (! IS_VOLATILE (func_type))
9986 {
9987 /* Space for saved FPA registers. */
9988 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9989 if (regs_ever_live[regno] && ! call_used_regs[regno])
9990 entry_size += 12;
9991
9992 /* Space for saved VFP registers. */
9993 if (TARGET_HARD_FLOAT && TARGET_VFP)
9994 {
9995 new_block = TRUE;
9996 for (regno = FIRST_VFP_REGNUM; regno < LAST_VFP_REGNUM; regno += 2)
9997 {
9998 if ((regs_ever_live[regno] && !call_used_regs[regno])
9999 || (regs_ever_live[regno + 1] && !call_used_regs[regno + 1]))
10000 {
10001 if (new_block)
10002 {
10003 entry_size += 4;
10004 new_block = FALSE;
10005 }
10006 entry_size += 8;
10007 }
10008 else
10009 new_block = TRUE;
10010 }
10011 }
10012 }
10013
10014 if (TARGET_REALLY_IWMMXT)
10015 {
10016 /* Check for the call-saved iWMMXt registers. */
10017 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
10018 if (regs_ever_live [regno] && ! call_used_regs [regno])
10019 entry_size += 8;
10020 }
10021
10022 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10023 base_size += 4;
10024 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10025 abort ();
10026
10027 cfun->machine->frame_size = base_size;
10028
10029 return base_size;
10030 }
10031
10032 /* Generate the prologue instructions for entry into an ARM function. */
10033 void
10034 arm_expand_prologue (void)
10035 {
10036 int reg;
10037 rtx amount;
10038 rtx insn;
10039 rtx ip_rtx;
10040 unsigned long live_regs_mask;
10041 unsigned long func_type;
10042 int fp_offset = 0;
10043 int saved_pretend_args = 0;
10044 unsigned int args_to_push;
10045
10046 func_type = arm_current_func_type ();
10047
10048 /* Naked functions don't have prologues. */
10049 if (IS_NAKED (func_type))
10050 return;
10051
10052 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10053 args_to_push = current_function_pretend_args_size;
10054
10055 /* Compute which registers we will have to save onto the stack. */
10056 live_regs_mask = arm_compute_save_reg_mask ();
10057
10058 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10059
10060 if (frame_pointer_needed)
10061 {
10062 if (IS_INTERRUPT (func_type))
10063 {
10064 /* Interrupt functions must not corrupt any registers.
10065 Creating a frame pointer however, corrupts the IP
10066 register, so we must push it first. */
10067 insn = emit_multi_reg_push (1 << IP_REGNUM);
10068
10069 /* Do not set RTX_FRAME_RELATED_P on this insn.
10070 The dwarf stack unwinding code only wants to see one
10071 stack decrement per function, and this is not it. If
10072 this instruction is labeled as being part of the frame
10073 creation sequence then dwarf2out_frame_debug_expr will
10074 abort when it encounters the assignment of IP to FP
10075 later on, since the use of SP here establishes SP as
10076 the CFA register and not IP.
10077
10078 Anyway this instruction is not really part of the stack
10079 frame creation although it is part of the prologue. */
10080 }
10081 else if (IS_NESTED (func_type))
10082 {
10083 /* The static chain register is the same as the IP register
10084 used as a scratch register during stack frame creation.
10085 To get around this we need to find somewhere to store IP
10086 whilst the frame is being created. We try the following
10087 places in order:
10088
10089 1. The last argument register.
10090 2. A slot on the stack above the frame. (This only
10091 works if the function is not a varargs function).
10092 3. Register r3, after pushing the argument registers
10093 onto the stack.
10094
10095 Note - we only need to tell the dwarf2 backend about the SP
10096 adjustment in the second variant; the static chain register
10097 doesn't need to be unwound, as it doesn't contain a value
10098 inherited from the caller. */
10099
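          /* Roughly, the three cases above correspond to emitting

		mov	r3, ip			(case 1)
		str	ip, [sp, #-4]!		(case 2)

             or pushing the argument registers and then "mov r3, ip"
             (case 3); the exact RTL is generated below.  */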
10100 if (regs_ever_live[3] == 0)
10101 {
10102 insn = gen_rtx_REG (SImode, 3);
10103 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10104 insn = emit_insn (insn);
10105 }
10106 else if (args_to_push == 0)
10107 {
10108 rtx dwarf;
10109 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10110 insn = gen_rtx_MEM (SImode, insn);
10111 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10112 insn = emit_insn (insn);
10113
10114 fp_offset = 4;
10115
10116 /* Just tell the dwarf backend that we adjusted SP. */
10117 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10118 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10119 GEN_INT (-fp_offset)));
10120 RTX_FRAME_RELATED_P (insn) = 1;
10121 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10122 dwarf, REG_NOTES (insn));
10123 }
10124 else
10125 {
10126 /* Store the args on the stack. */
10127 if (cfun->machine->uses_anonymous_args)
10128 insn = emit_multi_reg_push
10129 ((0xf0 >> (args_to_push / 4)) & 0xf);
10130 else
10131 insn = emit_insn
10132 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10133 GEN_INT (- args_to_push)));
10134
10135 RTX_FRAME_RELATED_P (insn) = 1;
10136
10137 saved_pretend_args = 1;
10138 fp_offset = args_to_push;
10139 args_to_push = 0;
10140
10141 /* Now reuse r3 to preserve IP. */
10142 insn = gen_rtx_REG (SImode, 3);
10143 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10144 (void) emit_insn (insn);
10145 }
10146 }
10147
10148 if (fp_offset)
10149 {
10150 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10151 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10152 }
10153 else
10154 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10155
10156 insn = emit_insn (insn);
10157 RTX_FRAME_RELATED_P (insn) = 1;
10158 }
10159
10160 if (args_to_push)
10161 {
10162 /* Push the argument registers, or reserve space for them. */
10163 if (cfun->machine->uses_anonymous_args)
10164 insn = emit_multi_reg_push
10165 ((0xf0 >> (args_to_push / 4)) & 0xf);
10166 else
10167 insn = emit_insn
10168 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10169 GEN_INT (- args_to_push)));
10170 RTX_FRAME_RELATED_P (insn) = 1;
10171 }
10172
10173 /* If this is an interrupt service routine, and the link register
10174 is going to be pushed, and we are not creating a stack frame,
10175 (which would involve an extra push of IP and a pop in the epilogue)
10176 subtracting four from LR now will mean that the function return
10177 can be done with a single instruction. */
10178 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10179 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10180 && ! frame_pointer_needed)
10181 emit_insn (gen_rtx_SET (SImode,
10182 gen_rtx_REG (SImode, LR_REGNUM),
10183 gen_rtx_PLUS (SImode,
10184 gen_rtx_REG (SImode, LR_REGNUM),
10185 GEN_INT (-4))));
10186
10187 if (live_regs_mask)
10188 {
10189 insn = emit_multi_reg_push (live_regs_mask);
10190 RTX_FRAME_RELATED_P (insn) = 1;
10191 }
10192
10193 if (TARGET_IWMMXT)
10194 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10195 if (regs_ever_live[reg] && ! call_used_regs [reg])
10196 {
10197 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10198 insn = gen_rtx_MEM (V2SImode, insn);
10199 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10200 gen_rtx_REG (V2SImode, reg)));
10201 RTX_FRAME_RELATED_P (insn) = 1;
10202 }
10203
10204 if (! IS_VOLATILE (func_type))
10205 {
10206 int start_reg;
10207
10208 /* Save any floating point call-saved registers used by this
10209 function. */
10210 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10211 {
10212 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10213 if (regs_ever_live[reg] && !call_used_regs[reg])
10214 {
10215 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10216 insn = gen_rtx_MEM (XFmode, insn);
10217 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10218 gen_rtx_REG (XFmode, reg)));
10219 RTX_FRAME_RELATED_P (insn) = 1;
10220 }
10221 }
10222 else
10223 {
10224 start_reg = LAST_FPA_REGNUM;
10225
10226 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10227 {
10228 if (regs_ever_live[reg] && !call_used_regs[reg])
10229 {
10230 if (start_reg - reg == 3)
10231 {
10232 insn = emit_sfm (reg, 4);
10233 RTX_FRAME_RELATED_P (insn) = 1;
10234 start_reg = reg - 1;
10235 }
10236 }
10237 else
10238 {
10239 if (start_reg != reg)
10240 {
10241 insn = emit_sfm (reg + 1, start_reg - reg);
10242 RTX_FRAME_RELATED_P (insn) = 1;
10243 }
10244 start_reg = reg - 1;
10245 }
10246 }
10247
10248 if (start_reg != reg)
10249 {
10250 insn = emit_sfm (reg + 1, start_reg - reg);
10251 RTX_FRAME_RELATED_P (insn) = 1;
10252 }
10253 }
10254 if (TARGET_HARD_FLOAT && TARGET_VFP)
10255 {
10256 start_reg = FIRST_VFP_REGNUM;
10257
10258 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10259 {
10260 if ((!regs_ever_live[reg] || call_used_regs[reg])
10261 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10262 {
10263 if (start_reg != reg)
10264 {
10265 insn = vfp_emit_fstmx (start_reg,
10266 (reg - start_reg) / 2);
10267 RTX_FRAME_RELATED_P (insn) = 1;
10268 }
10269 start_reg = reg + 2;
10270 }
10271 }
10272 if (start_reg != reg)
10273 {
10274 insn = vfp_emit_fstmx (start_reg,
10275 (reg - start_reg) / 2);
10276 RTX_FRAME_RELATED_P (insn) = 1;
10277 }
10278 }
10279 }
10280
10281 if (frame_pointer_needed)
10282 {
10283 /* Create the new frame pointer. */
10284 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10285 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10286 RTX_FRAME_RELATED_P (insn) = 1;
10287
10288 if (IS_NESTED (func_type))
10289 {
10290 /* Recover the static chain register. */
10291 if (regs_ever_live [3] == 0
10292 || saved_pretend_args)
10293 insn = gen_rtx_REG (SImode, 3);
10294 else /* if (current_function_pretend_args_size == 0) */
10295 {
10296 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10297 GEN_INT (4));
10298 insn = gen_rtx_MEM (SImode, insn);
10299 }
10300
10301 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10302 /* Add a USE to stop propagate_one_insn() from barfing. */
10303 emit_insn (gen_prologue_use (ip_rtx));
10304 }
10305 }
10306
10307 amount = GEN_INT (-(arm_get_frame_size ()
10308 + current_function_outgoing_args_size));
10309
10310 if (amount != const0_rtx)
10311 {
10312 /* This add can produce multiple insns for a large constant, so we
10313 need to get tricky. */
10314 rtx last = get_last_insn ();
10315 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10316 amount));
10317 do
10318 {
10319 last = last ? NEXT_INSN (last) : get_insns ();
10320 RTX_FRAME_RELATED_P (last) = 1;
10321 }
10322 while (last != insn);
10323
10324 /* If the frame pointer is needed, emit a special barrier that
10325 will prevent the scheduler from moving stores to the frame
10326 before the stack adjustment. */
10327 if (frame_pointer_needed)
10328 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10329 hard_frame_pointer_rtx));
10330 }
10331
10332 /* If we are profiling, make sure no instructions are scheduled before
10333 the call to mcount. Similarly if the user has requested no
10334 scheduling in the prolog. */
10335 if (current_function_profile || TARGET_NO_SCHED_PRO)
10336 emit_insn (gen_blockage ());
10337
10338 /* If the link register is being kept alive, with the return address in it,
10339 then make sure that it does not get reused by the ce2 pass. */
10340 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10341 {
10342 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10343 cfun->machine->lr_save_eliminated = 1;
10344 }
10345 }
10346 \f
10347 /* If CODE is 'd', then the X is a condition operand and the instruction
10348 should only be executed if the condition is true.
10349 If CODE is 'D', then the X is a condition operand and the instruction
10350 should only be executed if the condition is false: however, if the mode
10351 of the comparison is CCFPEmode, then always execute the instruction -- we
10352 do this because in these circumstances !GE does not necessarily imply LT;
10353 in these cases the instruction pattern will take care to make sure that
10354 an instruction containing %d will follow, thereby undoing the effects of
10355 doing this instruction unconditionally.
10356 If CODE is 'N' then X is a floating point operand that must be negated
10357 before output.
10358 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10359 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
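/* For example (illustrative only): with a DImode value held in {r4, r5},
   "%M0" in an output template prints "{r4-r5}", and "%B0" applied to a
   CONST_INT of 5 prints -6 (the bitwise inverse, sign extended).  */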
10360 void
10361 arm_print_operand (FILE *stream, rtx x, int code)
10362 {
10363 switch (code)
10364 {
10365 case '@':
10366 fputs (ASM_COMMENT_START, stream);
10367 return;
10368
10369 case '_':
10370 fputs (user_label_prefix, stream);
10371 return;
10372
10373 case '|':
10374 fputs (REGISTER_PREFIX, stream);
10375 return;
10376
10377 case '?':
10378 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10379 {
10380 if (TARGET_THUMB || current_insn_predicate != NULL)
10381 abort ();
10382
10383 fputs (arm_condition_codes[arm_current_cc], stream);
10384 }
10385 else if (current_insn_predicate)
10386 {
10387 enum arm_cond_code code;
10388
10389 if (TARGET_THUMB)
10390 abort ();
10391
10392 code = get_arm_condition_code (current_insn_predicate);
10393 fputs (arm_condition_codes[code], stream);
10394 }
10395 return;
10396
10397 case 'N':
10398 {
10399 REAL_VALUE_TYPE r;
10400 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10401 r = REAL_VALUE_NEGATE (r);
10402 fprintf (stream, "%s", fp_const_from_val (&r));
10403 }
10404 return;
10405
10406 case 'B':
10407 if (GET_CODE (x) == CONST_INT)
10408 {
10409 HOST_WIDE_INT val;
10410 val = ARM_SIGN_EXTEND (~INTVAL (x));
10411 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10412 }
10413 else
10414 {
10415 putc ('~', stream);
10416 output_addr_const (stream, x);
10417 }
10418 return;
10419
10420 case 'i':
10421 fprintf (stream, "%s", arithmetic_instr (x, 1));
10422 return;
10423
10424 /* Truncate Cirrus shift counts. */
10425 case 's':
10426 if (GET_CODE (x) == CONST_INT)
10427 {
10428 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10429 return;
10430 }
10431 arm_print_operand (stream, x, 0);
10432 return;
10433
10434 case 'I':
10435 fprintf (stream, "%s", arithmetic_instr (x, 0));
10436 return;
10437
10438 case 'S':
10439 {
10440 HOST_WIDE_INT val;
10441 const char * shift = shift_op (x, &val);
10442
10443 if (shift)
10444 {
10445 fprintf (stream, ", %s ", shift_op (x, &val));
10446 if (val == -1)
10447 arm_print_operand (stream, XEXP (x, 1), 0);
10448 else
10449 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10450 }
10451 }
10452 return;
10453
10454 /* An explanation of the 'Q', 'R' and 'H' register operands:
10455
10456 In a pair of registers containing a DI or DF value the 'Q'
10457 operand returns the register number of the register containing
10458 the least significant part of the value. The 'R' operand returns
10459 the register number of the register containing the most
10460 significant part of the value.
10461
10462 The 'H' operand returns the higher of the two register numbers.
10463 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10464 same as the 'Q' operand, since the most significant part of the
10465 value is held in the lower number register. The reverse is true
10466 on systems where WORDS_BIG_ENDIAN is false.
10467
10468 The purpose of these operands is to distinguish between cases
10469 where the endian-ness of the values is important (for example
10470 when they are added together), and cases where the endian-ness
10471 is irrelevant, but the order of register operations is important.
10472 For example when loading a value from memory into a register
10473 pair, the endian-ness does not matter. Provided that the value
10474 from the lower memory address is put into the lower numbered
10475 register, and the value from the higher address is put into the
10476 higher numbered register, the load will work regardless of whether
10477 the value being loaded is big-wordian or little-wordian. The
10478 order of the two register loads can matter however, if the address
10479 of the memory location is actually held in one of the registers
10480 being overwritten by the load. */
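      /* For instance (illustrative), with WORDS_BIG_ENDIAN false and a
         DImode value in {r0, r1}: 'Q' prints r0 (the least significant
         word), 'R' prints r1 (the most significant word) and 'H' prints
         r1 (the higher-numbered register of the pair).  */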
10481 case 'Q':
10482 if (REGNO (x) > LAST_ARM_REGNUM)
10483 abort ();
10484 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10485 return;
10486
10487 case 'R':
10488 if (REGNO (x) > LAST_ARM_REGNUM)
10489 abort ();
10490 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10491 return;
10492
10493 case 'H':
10494 if (REGNO (x) > LAST_ARM_REGNUM)
10495 abort ();
10496 asm_fprintf (stream, "%r", REGNO (x) + 1);
10497 return;
10498
10499 case 'm':
10500 asm_fprintf (stream, "%r",
10501 GET_CODE (XEXP (x, 0)) == REG
10502 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10503 return;
10504
10505 case 'M':
10506 asm_fprintf (stream, "{%r-%r}",
10507 REGNO (x),
10508 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10509 return;
10510
10511 case 'd':
10512 /* CONST_TRUE_RTX means always -- that's the default. */
10513 if (x == const_true_rtx)
10514 return;
10515
10516 fputs (arm_condition_codes[get_arm_condition_code (x)],
10517 stream);
10518 return;
10519
10520 case 'D':
10521 /* CONST_TRUE_RTX means not always -- ie never. We shouldn't ever
10522 want to do that. */
10523 if (x == const_true_rtx)
10524 abort ();
10525
10526 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10527 (get_arm_condition_code (x))],
10528 stream);
10529 return;
10530
10531 /* Cirrus registers can be accessed in a variety of ways:
10532 single floating point (f)
10533 double floating point (d)
10534 32bit integer (fx)
10535 64bit integer (dx). */
10536 case 'W': /* Cirrus register in F mode. */
10537 case 'X': /* Cirrus register in D mode. */
10538 case 'Y': /* Cirrus register in FX mode. */
10539 case 'Z': /* Cirrus register in DX mode. */
10540 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10541 abort ();
10542
10543 fprintf (stream, "mv%s%s",
10544 code == 'W' ? "f"
10545 : code == 'X' ? "d"
10546 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10547
10548 return;
10549
10550 /* Print a Cirrus register in the mode specified by the register's mode. */
10551 case 'V':
10552 {
10553 int mode = GET_MODE (x);
10554
10555 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10556 abort ();
10557
10558 fprintf (stream, "mv%s%s",
10559 mode == DFmode ? "d"
10560 : mode == SImode ? "fx"
10561 : mode == DImode ? "dx"
10562 : "f", reg_names[REGNO (x)] + 2);
10563
10564 return;
10565 }
10566
10567 case 'U':
10568 if (GET_CODE (x) != REG
10569 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10570 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10571 /* Bad value for wCG register number. */
10572 abort ();
10573 else
10574 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10575 return;
10576
10577 /* Print an iWMMXt control register name. */
10578 case 'w':
10579 if (GET_CODE (x) != CONST_INT
10580 || INTVAL (x) < 0
10581 || INTVAL (x) >= 16)
10582 /* Bad value for wC register number. */
10583 abort ();
10584 else
10585 {
10586 static const char * wc_reg_names [16] =
10587 {
10588 "wCID", "wCon", "wCSSF", "wCASF",
10589 "wC4", "wC5", "wC6", "wC7",
10590 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10591 "wC12", "wC13", "wC14", "wC15"
10592 };
10593
10594 fprintf (stream, wc_reg_names [INTVAL (x)]);
10595 }
10596 return;
10597
10598 /* Print a VFP double precision register name. */
10599 case 'P':
10600 {
10601 int mode = GET_MODE (x);
10602 int num;
10603
10604 if (mode != DImode && mode != DFmode)
10605 abort ();
10606
10607 if (GET_CODE (x) != REG
10608 || !IS_VFP_REGNUM (REGNO (x)))
10609 abort ();
10610
10611 num = REGNO(x) - FIRST_VFP_REGNUM;
10612 if (num & 1)
10613 abort ();
10614
10615 fprintf (stream, "d%d", num >> 1);
10616 }
10617 return;
10618
10619 default:
10620 if (x == 0)
10621 abort ();
10622
10623 if (GET_CODE (x) == REG)
10624 asm_fprintf (stream, "%r", REGNO (x));
10625 else if (GET_CODE (x) == MEM)
10626 {
10627 output_memory_reference_mode = GET_MODE (x);
10628 output_address (XEXP (x, 0));
10629 }
10630 else if (GET_CODE (x) == CONST_DOUBLE)
10631 fprintf (stream, "#%s", fp_immediate_constant (x));
10632 else if (GET_CODE (x) == NEG)
10633 abort (); /* This should never happen now. */
10634 else
10635 {
10636 fputc ('#', stream);
10637 output_addr_const (stream, x);
10638 }
10639 }
10640 }
10641 \f
10642 #ifndef AOF_ASSEMBLER
10643 /* Target hook for assembling integer objects. The ARM version needs to
10644 handle word-sized values specially. */
10645 static bool
10646 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10647 {
10648 if (size == UNITS_PER_WORD && aligned_p)
10649 {
10650 fputs ("\t.word\t", asm_out_file);
10651 output_addr_const (asm_out_file, x);
10652
10653 /* Mark symbols as position independent. We only do this in the
10654 .text segment, not in the .data segment. */
10655 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10656 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10657 {
10658 if (GET_CODE (x) == SYMBOL_REF
10659 && (CONSTANT_POOL_ADDRESS_P (x)
10660 || SYMBOL_REF_LOCAL_P (x)))
10661 fputs ("(GOTOFF)", asm_out_file);
10662 else if (GET_CODE (x) == LABEL_REF)
10663 fputs ("(GOTOFF)", asm_out_file);
10664 else
10665 fputs ("(GOT)", asm_out_file);
10666 }
10667 fputc ('\n', asm_out_file);
10668 return true;
10669 }
10670
10671 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
10672 {
10673 int i, units;
10674
10675 if (GET_CODE (x) != CONST_VECTOR)
10676 abort ();
10677
10678 units = CONST_VECTOR_NUNITS (x);
10679
10680 switch (GET_MODE (x))
10681 {
10682 case V2SImode: size = 4; break;
10683 case V4HImode: size = 2; break;
10684 case V8QImode: size = 1; break;
10685 default:
10686 abort ();
10687 }
10688
10689 for (i = 0; i < units; i++)
10690 {
10691 rtx elt;
10692
10693 elt = CONST_VECTOR_ELT (x, i);
10694 assemble_integer
10695 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10696 }
10697
10698 return true;
10699 }
10700
10701 return default_assemble_integer (x, size, aligned_p);
10702 }
10703 #endif
10704 \f
10705 /* A finite state machine takes care of noticing whether or not instructions
10706 can be conditionally executed, and thus decrease execution time and code
10707 size by deleting branch instructions. The fsm is controlled by
10708 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10709
10710 /* The state of the fsm controlling condition codes are:
10711 0: normal, do nothing special
10712 1: make ASM_OUTPUT_OPCODE not output this instruction
10713 2: make ASM_OUTPUT_OPCODE not output this instruction
10714 3: make instructions conditional
10715 4: make instructions conditional
10716
10717 State transitions (state->state by whom under condition):
10718 0 -> 1 final_prescan_insn if the `target' is a label
10719 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10720 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10721 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10722 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10723 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10724 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10725 (the target insn is arm_target_insn).
10726
10727 If the jump clobbers the conditions then we use states 2 and 4.
10728
10729 A similar thing can be done with conditional return insns.
10730
10731 XXX In case the `target' is an unconditional branch, this conditionalising
10732 of the instructions always reduces code size, but not always execution
10733 time. But then, I want to reduce the code size to somewhere near what
10734 /bin/cc produces. */
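
/* As an illustration (not taken from any particular test case), the fsm
   allows a short branch such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   to be emitted instead as

	cmp	r0, #0
	addne	r1, r1, #1

   by suppressing the branch and conditionalising the skipped insn.  */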
10735
10736 /* Returns the index of the ARM condition code string in
10737 `arm_condition_codes'. COMPARISON should be an rtx like
10738 `(eq (...) (...))'. */
10739 static enum arm_cond_code
10740 get_arm_condition_code (rtx comparison)
10741 {
10742 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10743 int code;
10744 enum rtx_code comp_code = GET_CODE (comparison);
10745
10746 if (GET_MODE_CLASS (mode) != MODE_CC)
10747 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10748 XEXP (comparison, 1));
10749
10750 switch (mode)
10751 {
10752 case CC_DNEmode: code = ARM_NE; goto dominance;
10753 case CC_DEQmode: code = ARM_EQ; goto dominance;
10754 case CC_DGEmode: code = ARM_GE; goto dominance;
10755 case CC_DGTmode: code = ARM_GT; goto dominance;
10756 case CC_DLEmode: code = ARM_LE; goto dominance;
10757 case CC_DLTmode: code = ARM_LT; goto dominance;
10758 case CC_DGEUmode: code = ARM_CS; goto dominance;
10759 case CC_DGTUmode: code = ARM_HI; goto dominance;
10760 case CC_DLEUmode: code = ARM_LS; goto dominance;
10761 case CC_DLTUmode: code = ARM_CC;
10762
10763 dominance:
10764 if (comp_code != EQ && comp_code != NE)
10765 abort ();
10766
10767 if (comp_code == EQ)
10768 return ARM_INVERSE_CONDITION_CODE (code);
10769 return code;
10770
10771 case CC_NOOVmode:
10772 switch (comp_code)
10773 {
10774 case NE: return ARM_NE;
10775 case EQ: return ARM_EQ;
10776 case GE: return ARM_PL;
10777 case LT: return ARM_MI;
10778 default: abort ();
10779 }
10780
10781 case CC_Zmode:
10782 switch (comp_code)
10783 {
10784 case NE: return ARM_NE;
10785 case EQ: return ARM_EQ;
10786 default: abort ();
10787 }
10788
10789 case CC_Nmode:
10790 switch (comp_code)
10791 {
10792 case NE: return ARM_MI;
10793 case EQ: return ARM_PL;
10794 default: abort ();
10795 }
10796
10797 case CCFPEmode:
10798 case CCFPmode:
10799 /* These encodings assume that AC=1 in the FPA system control
10800 byte. This allows us to handle all cases except UNEQ and
10801 LTGT. */
10802 switch (comp_code)
10803 {
10804 case GE: return ARM_GE;
10805 case GT: return ARM_GT;
10806 case LE: return ARM_LS;
10807 case LT: return ARM_MI;
10808 case NE: return ARM_NE;
10809 case EQ: return ARM_EQ;
10810 case ORDERED: return ARM_VC;
10811 case UNORDERED: return ARM_VS;
10812 case UNLT: return ARM_LT;
10813 case UNLE: return ARM_LE;
10814 case UNGT: return ARM_HI;
10815 case UNGE: return ARM_PL;
10816 /* UNEQ and LTGT do not have a representation. */
10817 case UNEQ: /* Fall through. */
10818 case LTGT: /* Fall through. */
10819 default: abort ();
10820 }
10821
10822 case CC_SWPmode:
10823 switch (comp_code)
10824 {
10825 case NE: return ARM_NE;
10826 case EQ: return ARM_EQ;
10827 case GE: return ARM_LE;
10828 case GT: return ARM_LT;
10829 case LE: return ARM_GE;
10830 case LT: return ARM_GT;
10831 case GEU: return ARM_LS;
10832 case GTU: return ARM_CC;
10833 case LEU: return ARM_CS;
10834 case LTU: return ARM_HI;
10835 default: abort ();
10836 }
10837
10838 case CC_Cmode:
10839 switch (comp_code)
10840 {
10841 case LTU: return ARM_CS;
10842 case GEU: return ARM_CC;
10843 default: abort ();
10844 }
10845
10846 case CCmode:
10847 switch (comp_code)
10848 {
10849 case NE: return ARM_NE;
10850 case EQ: return ARM_EQ;
10851 case GE: return ARM_GE;
10852 case GT: return ARM_GT;
10853 case LE: return ARM_LE;
10854 case LT: return ARM_LT;
10855 case GEU: return ARM_CS;
10856 case GTU: return ARM_HI;
10857 case LEU: return ARM_LS;
10858 case LTU: return ARM_CC;
10859 default: abort ();
10860 }
10861
10862 default: abort ();
10863 }
10864
10865 abort ();
10866 }
10867
10868 void
10869 arm_final_prescan_insn (rtx insn)
10870 {
10871 /* BODY will hold the body of INSN. */
10872 rtx body = PATTERN (insn);
10873
10874 /* This will be 1 if trying to repeat the trick, and things need to be
10875 reversed if it appears to fail. */
10876 int reverse = 0;
10877
10878 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
10879 a branch is taken, even if the rtl suggests otherwise. It also
10880 means that we have to grub around within the jump expression to find
10881 out what the conditions are when the jump isn't taken. */
10882 int jump_clobbers = 0;
10883
10884 /* If we start with a return insn, we only succeed if we find another one. */
10885 int seeking_return = 0;
10886
10887 /* START_INSN will hold the insn from where we start looking. This is the
10888 first insn after the following code_label if REVERSE is true. */
10889 rtx start_insn = insn;
10890
10891 /* If in state 4, check if the target branch is reached, in order to
10892 change back to state 0. */
10893 if (arm_ccfsm_state == 4)
10894 {
10895 if (insn == arm_target_insn)
10896 {
10897 arm_target_insn = NULL;
10898 arm_ccfsm_state = 0;
10899 }
10900 return;
10901 }
10902
10903 /* If in state 3, it is possible to repeat the trick, if this insn is an
10904 unconditional branch to a label, and immediately following this branch
10905 is the previous target label which is only used once, and the label this
10906 branch jumps to is not too far off. */
10907 if (arm_ccfsm_state == 3)
10908 {
10909 if (simplejump_p (insn))
10910 {
10911 start_insn = next_nonnote_insn (start_insn);
10912 if (GET_CODE (start_insn) == BARRIER)
10913 {
10914 /* XXX Isn't this always a barrier? */
10915 start_insn = next_nonnote_insn (start_insn);
10916 }
10917 if (GET_CODE (start_insn) == CODE_LABEL
10918 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10919 && LABEL_NUSES (start_insn) == 1)
10920 reverse = TRUE;
10921 else
10922 return;
10923 }
10924 else if (GET_CODE (body) == RETURN)
10925 {
10926 start_insn = next_nonnote_insn (start_insn);
10927 if (GET_CODE (start_insn) == BARRIER)
10928 start_insn = next_nonnote_insn (start_insn);
10929 if (GET_CODE (start_insn) == CODE_LABEL
10930 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10931 && LABEL_NUSES (start_insn) == 1)
10932 {
10933 reverse = TRUE;
10934 seeking_return = 1;
10935 }
10936 else
10937 return;
10938 }
10939 else
10940 return;
10941 }
10942
10943 if (arm_ccfsm_state != 0 && !reverse)
10944 abort ();
10945 if (GET_CODE (insn) != JUMP_INSN)
10946 return;
10947
10948 /* This jump might be paralleled with a clobber of the condition codes;
10949 the jump should always come first. */
10950 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10951 body = XVECEXP (body, 0, 0);
10952
10953 if (reverse
10954 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10955 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10956 {
10957 int insns_skipped;
10958 int fail = FALSE, succeed = FALSE;
10959 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10960 int then_not_else = TRUE;
10961 rtx this_insn = start_insn, label = 0;
10962
10963 /* If the jump cannot be done with one instruction, we cannot
10964 conditionally execute the instruction in the inverse case. */
10965 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10966 {
10967 jump_clobbers = 1;
10968 return;
10969 }
10970
10971 /* Register the insn jumped to. */
10972 if (reverse)
10973 {
10974 if (!seeking_return)
10975 label = XEXP (SET_SRC (body), 0);
10976 }
10977 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10978 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10979 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10980 {
10981 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10982 then_not_else = FALSE;
10983 }
10984 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10985 seeking_return = 1;
10986 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10987 {
10988 seeking_return = 1;
10989 then_not_else = FALSE;
10990 }
10991 else
10992 abort ();
10993
10994 /* See how many insns this branch skips, and what kind of insns. If all
10995 insns are okay, and the label or unconditional branch to the same
10996 label is not too far away, succeed. */
10997 for (insns_skipped = 0;
10998 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10999 {
11000 rtx scanbody;
11001
11002 this_insn = next_nonnote_insn (this_insn);
11003 if (!this_insn)
11004 break;
11005
11006 switch (GET_CODE (this_insn))
11007 {
11008 case CODE_LABEL:
11009 /* Succeed if it is the target label, otherwise fail since
11010 control falls in from somewhere else. */
11011 if (this_insn == label)
11012 {
11013 if (jump_clobbers)
11014 {
11015 arm_ccfsm_state = 2;
11016 this_insn = next_nonnote_insn (this_insn);
11017 }
11018 else
11019 arm_ccfsm_state = 1;
11020 succeed = TRUE;
11021 }
11022 else
11023 fail = TRUE;
11024 break;
11025
11026 case BARRIER:
11027 /* Succeed if the following insn is the target label.
11028 Otherwise fail.
11029 If return insns are used then the last insn in a function
11030 will be a barrier. */
11031 this_insn = next_nonnote_insn (this_insn);
11032 if (this_insn && this_insn == label)
11033 {
11034 if (jump_clobbers)
11035 {
11036 arm_ccfsm_state = 2;
11037 this_insn = next_nonnote_insn (this_insn);
11038 }
11039 else
11040 arm_ccfsm_state = 1;
11041 succeed = TRUE;
11042 }
11043 else
11044 fail = TRUE;
11045 break;
11046
11047 case CALL_INSN:
11048 /* If using 32-bit addresses the cc is not preserved over
11049 calls. */
11050 if (TARGET_APCS_32)
11051 {
11052 /* Succeed if the following insn is the target label,
11053 or if the following two insns are a barrier and
11054 the target label. */
11055 this_insn = next_nonnote_insn (this_insn);
11056 if (this_insn && GET_CODE (this_insn) == BARRIER)
11057 this_insn = next_nonnote_insn (this_insn);
11058
11059 if (this_insn && this_insn == label
11060 && insns_skipped < max_insns_skipped)
11061 {
11062 if (jump_clobbers)
11063 {
11064 arm_ccfsm_state = 2;
11065 this_insn = next_nonnote_insn (this_insn);
11066 }
11067 else
11068 arm_ccfsm_state = 1;
11069 succeed = TRUE;
11070 }
11071 else
11072 fail = TRUE;
11073 }
11074 break;
11075
11076 case JUMP_INSN:
11077 /* If this is an unconditional branch to the same label, succeed.
11078 If it is to another label, do nothing. If it is conditional,
11079 fail. */
11080 /* XXX Probably, the tests for SET and the PC are
11081 unnecessary. */
11082
11083 scanbody = PATTERN (this_insn);
11084 if (GET_CODE (scanbody) == SET
11085 && GET_CODE (SET_DEST (scanbody)) == PC)
11086 {
11087 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11088 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11089 {
11090 arm_ccfsm_state = 2;
11091 succeed = TRUE;
11092 }
11093 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11094 fail = TRUE;
11095 }
11096 /* Fail if a conditional return is undesirable (eg on a
11097 StrongARM), but still allow this if optimizing for size. */
11098 else if (GET_CODE (scanbody) == RETURN
11099 && !use_return_insn (TRUE, NULL)
11100 && !optimize_size)
11101 fail = TRUE;
11102 else if (GET_CODE (scanbody) == RETURN
11103 && seeking_return)
11104 {
11105 arm_ccfsm_state = 2;
11106 succeed = TRUE;
11107 }
11108 else if (GET_CODE (scanbody) == PARALLEL)
11109 {
11110 switch (get_attr_conds (this_insn))
11111 {
11112 case CONDS_NOCOND:
11113 break;
11114 default:
11115 fail = TRUE;
11116 break;
11117 }
11118 }
11119 else
11120 fail = TRUE; /* Unrecognized jump (eg epilogue). */
11121
11122 break;
11123
11124 case INSN:
11125 /* Instructions using or affecting the condition codes make it
11126 fail. */
11127 scanbody = PATTERN (this_insn);
11128 if (!(GET_CODE (scanbody) == SET
11129 || GET_CODE (scanbody) == PARALLEL)
11130 || get_attr_conds (this_insn) != CONDS_NOCOND)
11131 fail = TRUE;
11132
11133 /* A conditional Cirrus instruction must be followed by
11134 a non-Cirrus instruction. However, since we
11135 conditionalize instructions in this function and by
11136 the time we get here we can't add instructions
11137 (nops), because shorten_branches() has already been
11138 called, we will disable conditionalizing Cirrus
11139 instructions to be safe. */
11140 if (GET_CODE (scanbody) != USE
11141 && GET_CODE (scanbody) != CLOBBER
11142 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11143 fail = TRUE;
11144 break;
11145
11146 default:
11147 break;
11148 }
11149 }
11150 if (succeed)
11151 {
11152 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11153 arm_target_label = CODE_LABEL_NUMBER (label);
11154 else if (seeking_return || arm_ccfsm_state == 2)
11155 {
11156 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11157 {
11158 this_insn = next_nonnote_insn (this_insn);
11159 if (this_insn && (GET_CODE (this_insn) == BARRIER
11160 || GET_CODE (this_insn) == CODE_LABEL))
11161 abort ();
11162 }
11163 if (!this_insn)
11164 {
11165 /* Oh, dear! We ran off the end. Give up. */
11166 recog (PATTERN (insn), insn, NULL);
11167 arm_ccfsm_state = 0;
11168 arm_target_insn = NULL;
11169 return;
11170 }
11171 arm_target_insn = this_insn;
11172 }
11173 else
11174 abort ();
11175 if (jump_clobbers)
11176 {
11177 if (reverse)
11178 abort ();
11179 arm_current_cc =
11180 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11181 0), 0), 1));
11182 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11183 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11184 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11185 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11186 }
11187 else
11188 {
11189 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11190 what it was. */
11191 if (!reverse)
11192 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11193 0));
11194 }
11195
11196 if (reverse || then_not_else)
11197 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11198 }
11199
11200 /* Restore recog_data (getting the attributes of other insns can
11201 destroy this array, but final.c assumes that it remains intact
11202 across this call; since the insn has been recognized already we
11203 call recog direct). */
11204 recog (PATTERN (insn), insn, NULL);
11205 }
11206 }
11207
11208 /* Returns true if REGNO is a valid register
11209 for holding a quantity of type MODE. */
11210 int
11211 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11212 {
11213 if (GET_MODE_CLASS (mode) == MODE_CC)
11214 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11215
11216 if (TARGET_THUMB)
11217 /* For the Thumb we only allow values bigger than SImode in
11218 registers 0 - 6, so that there is always a second low
11219 register available to hold the upper part of the value.
11220 We probably ought to ensure that the register is the
11221 start of an even numbered register pair. */
11222 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11223
11224 if (IS_CIRRUS_REGNUM (regno))
11225 /* We have outlawed SI values in Cirrus registers because they
11226 reside in the lower 32 bits, but SF values reside in the
11227 upper 32 bits. This causes gcc all sorts of grief. We can't
11228 even split the registers into pairs because Cirrus SI values
11229 get sign extended to 64bits-- aldyh. */
11230 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11231
11232 if (IS_VFP_REGNUM (regno))
11233 {
11234 if (mode == SFmode || mode == SImode)
11235 return TRUE;
11236
11237 /* DFmode values are only valid in even register pairs. */
11238 if (mode == DFmode)
11239 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11240 return FALSE;
11241 }
11242
11243 if (IS_IWMMXT_GR_REGNUM (regno))
11244 return mode == SImode;
11245
11246 if (IS_IWMMXT_REGNUM (regno))
11247 return VALID_IWMMXT_REG_MODE (mode);
11248
11249 if (regno <= LAST_ARM_REGNUM)
11250 /* We allow any value to be stored in the general registers. */
11251 return 1;
11252
11253 if ( regno == FRAME_POINTER_REGNUM
11254 || regno == ARG_POINTER_REGNUM)
11255 /* We only allow integers in the fake hard registers. */
11256 return GET_MODE_CLASS (mode) == MODE_INT;
11257
11258 /* The only registers left are the FPA registers
11259 which we only allow to hold FP values. */
11260 return GET_MODE_CLASS (mode) == MODE_FLOAT
11261 && regno >= FIRST_FPA_REGNUM
11262 && regno <= LAST_FPA_REGNUM;
11263 }
11264
11265 int
11266 arm_regno_class (int regno)
11267 {
11268 if (TARGET_THUMB)
11269 {
11270 if (regno == STACK_POINTER_REGNUM)
11271 return STACK_REG;
11272 if (regno == CC_REGNUM)
11273 return CC_REG;
11274 if (regno < 8)
11275 return LO_REGS;
11276 return HI_REGS;
11277 }
11278
11279 if ( regno <= LAST_ARM_REGNUM
11280 || regno == FRAME_POINTER_REGNUM
11281 || regno == ARG_POINTER_REGNUM)
11282 return GENERAL_REGS;
11283
11284 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11285 return NO_REGS;
11286
11287 if (IS_CIRRUS_REGNUM (regno))
11288 return CIRRUS_REGS;
11289
11290 if (IS_VFP_REGNUM (regno))
11291 return VFP_REGS;
11292
11293 if (IS_IWMMXT_REGNUM (regno))
11294 return IWMMXT_REGS;
11295
11296 if (IS_IWMMXT_GR_REGNUM (regno))
11297 return IWMMXT_GR_REGS;
11298
11299 return FPA_REGS;
11300 }
11301
11302 /* Handle a special case when computing the offset
11303 of an argument from the frame pointer. */
11304 int
11305 arm_debugger_arg_offset (int value, rtx addr)
11306 {
11307 rtx insn;
11308
11309 /* We are only interested if dbxout_parms() failed to compute the offset. */
11310 if (value != 0)
11311 return 0;
11312
11313 /* We can only cope with the case where the address is held in a register. */
11314 if (GET_CODE (addr) != REG)
11315 return 0;
11316
11317 /* If we are using the frame pointer to point at the argument, then
11318 an offset of 0 is correct. */
11319 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11320 return 0;
11321
11322 /* If we are using the stack pointer to point at the
11323 argument, then an offset of 0 is correct. */
11324 if ((TARGET_THUMB || !frame_pointer_needed)
11325 && REGNO (addr) == SP_REGNUM)
11326 return 0;
11327
11328 /* Oh dear. The argument is pointed to by a register rather
11329 than being held in a register, or being stored at a known
11330 offset from the frame pointer. Since GDB only understands
11331 those two kinds of argument we must translate the address
11332 held in the register into an offset from the frame pointer.
11333 We do this by searching through the insns for the function
11334 looking to see where this register gets its value. If the
11335 register is initialized from the frame pointer plus an offset
11336 then we are in luck and we can continue, otherwise we give up.
11337
11338 This code is exercised by producing debugging information
11339 for a function with arguments like this:
11340
11341 double func (double a, double b, int c, double d) {return d;}
11342
11343 Without this code the stab for parameter 'd' will be set to
11344 an offset of 0 from the frame pointer, rather than 8. */
11345
11346 /* The if() statement says:
11347
11348 If the insn is a normal instruction
11349 and if the insn is setting the value in a register
11350 and if the register being set is the register holding the address of the argument
11351 and if the address is computed by an addition
11352 that involves adding to a register
11353 which is the frame pointer
11354 a constant integer
11355
11356 then... */
11357
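  /* In other words, the loop below looks for an insn of roughly the form

	(set (reg Rn) (plus (reg hard-frame-pointer) (const_int OFFSET)))

     where Rn is the register holding the argument's address; OFFSET is
     then used as the argument's offset from the frame pointer.  */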
11358 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11359 {
11360 if ( GET_CODE (insn) == INSN
11361 && GET_CODE (PATTERN (insn)) == SET
11362 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11363 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11364 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11365 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11366 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11367 )
11368 {
11369 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11370
11371 break;
11372 }
11373 }
11374
11375 if (value == 0)
11376 {
11377 debug_rtx (addr);
11378 warning ("unable to compute real location of stacked parameter");
11379 value = 8; /* XXX magic hack */
11380 }
11381
11382 return value;
11383 }
11384 \f
11385 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11386 do \
11387 { \
11388 if ((MASK) & insn_flags) \
11389 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
11390 } \
11391 while (0)
11392
11393 struct builtin_description
11394 {
11395 const unsigned int mask;
11396 const enum insn_code icode;
11397 const char * const name;
11398 const enum arm_builtins code;
11399 const enum rtx_code comparison;
11400 const unsigned int flag;
11401 };
11402
11403 static const struct builtin_description bdesc_2arg[] =
11404 {
11405 #define IWMMXT_BUILTIN(code, string, builtin) \
11406 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11407 ARM_BUILTIN_##builtin, 0, 0 },
11408
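/* For example (an illustrative expansion, not generated text), the
   entry IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB) below expands to

     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },

   i.e. the named insn pattern, the user-visible builtin name and the
   builtin code are all derived from the three macro arguments.  */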
11409 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11410 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11411 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11412 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11413 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11414 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11415 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11416 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11417 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11418 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11419 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11420 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11421 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11422 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11423 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11424 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11425 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11426 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11427 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11428 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11429 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11430 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11431 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11432 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11433 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11434 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11435 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11436 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11437 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11438 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11439 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11440 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11441 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11442 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11443 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11444 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11445 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11446 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11447 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11448 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11449 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11450 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11451 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11452 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11453 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11454 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11455 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11456 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11457 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11458 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11459 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11460 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11461 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11462 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11463 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11464 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11465 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11466 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11467
11468 #define IWMMXT_BUILTIN2(code, builtin) \
11469 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11470
11471 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11472 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11473 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11474 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11475 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11476 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11477 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11478 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11479 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11480 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11481 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11482 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11483 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11484 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11485 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11486 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11487 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11488 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11489 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11490 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11491 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11492 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11493 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11494 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11495 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11496 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11497 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11498 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11499 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11500 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11501 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11502 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11503 };
11504
11505 static const struct builtin_description bdesc_1arg[] =
11506 {
11507 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11508 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11509 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11510 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11511 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11512 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11513 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11514 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11515 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11516 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11517 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11518 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11519 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11520 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11521 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11522 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11523 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11524 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11525 };
11526
11527 /* Set up all the iWMMXt builtins. This is
11528 not called if TARGET_IWMMXT is zero. */
11529
11530 static void
11531 arm_init_iwmmxt_builtins (void)
11532 {
11533 const struct builtin_description * d;
11534 size_t i;
11535 tree endlink = void_list_node;
11536
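/* Each of the types below follows a RESULT_ftype_ARGS naming
   convention: for example, int_ftype_v8qi is the type of a function
   taking a V8QI vector and returning an int, built as a TREE_LIST of
   argument types terminated by void_list_node.  */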
11537 tree int_ftype_int
11538 = build_function_type (integer_type_node,
11539 tree_cons (NULL_TREE, integer_type_node, endlink));
11540 tree v8qi_ftype_v8qi_v8qi_int
11541 = build_function_type (V8QI_type_node,
11542 tree_cons (NULL_TREE, V8QI_type_node,
11543 tree_cons (NULL_TREE, V8QI_type_node,
11544 tree_cons (NULL_TREE,
11545 integer_type_node,
11546 endlink))));
11547 tree v4hi_ftype_v4hi_int
11548 = build_function_type (V4HI_type_node,
11549 tree_cons (NULL_TREE, V4HI_type_node,
11550 tree_cons (NULL_TREE, integer_type_node,
11551 endlink)));
11552 tree v2si_ftype_v2si_int
11553 = build_function_type (V2SI_type_node,
11554 tree_cons (NULL_TREE, V2SI_type_node,
11555 tree_cons (NULL_TREE, integer_type_node,
11556 endlink)));
11557 tree v2si_ftype_di_di
11558 = build_function_type (V2SI_type_node,
11559 tree_cons (NULL_TREE, long_long_integer_type_node,
11560 tree_cons (NULL_TREE, long_long_integer_type_node,
11561 endlink)));
11562 tree di_ftype_di_int
11563 = build_function_type (long_long_integer_type_node,
11564 tree_cons (NULL_TREE, long_long_integer_type_node,
11565 tree_cons (NULL_TREE, integer_type_node,
11566 endlink)));
11567 tree di_ftype_di_int_int
11568 = build_function_type (long_long_integer_type_node,
11569 tree_cons (NULL_TREE, long_long_integer_type_node,
11570 tree_cons (NULL_TREE, integer_type_node,
11571 tree_cons (NULL_TREE,
11572 integer_type_node,
11573 endlink))));
11574 tree int_ftype_v8qi
11575 = build_function_type (integer_type_node,
11576 tree_cons (NULL_TREE, V8QI_type_node,
11577 endlink));
11578 tree int_ftype_v4hi
11579 = build_function_type (integer_type_node,
11580 tree_cons (NULL_TREE, V4HI_type_node,
11581 endlink));
11582 tree int_ftype_v2si
11583 = build_function_type (integer_type_node,
11584 tree_cons (NULL_TREE, V2SI_type_node,
11585 endlink));
11586 tree int_ftype_v8qi_int
11587 = build_function_type (integer_type_node,
11588 tree_cons (NULL_TREE, V8QI_type_node,
11589 tree_cons (NULL_TREE, integer_type_node,
11590 endlink)));
11591 tree int_ftype_v4hi_int
11592 = build_function_type (integer_type_node,
11593 tree_cons (NULL_TREE, V4HI_type_node,
11594 tree_cons (NULL_TREE, integer_type_node,
11595 endlink)));
11596 tree int_ftype_v2si_int
11597 = build_function_type (integer_type_node,
11598 tree_cons (NULL_TREE, V2SI_type_node,
11599 tree_cons (NULL_TREE, integer_type_node,
11600 endlink)));
11601 tree v8qi_ftype_v8qi_int_int
11602 = build_function_type (V8QI_type_node,
11603 tree_cons (NULL_TREE, V8QI_type_node,
11604 tree_cons (NULL_TREE, integer_type_node,
11605 tree_cons (NULL_TREE,
11606 integer_type_node,
11607 endlink))));
11608 tree v4hi_ftype_v4hi_int_int
11609 = build_function_type (V4HI_type_node,
11610 tree_cons (NULL_TREE, V4HI_type_node,
11611 tree_cons (NULL_TREE, integer_type_node,
11612 tree_cons (NULL_TREE,
11613 integer_type_node,
11614 endlink))));
11615 tree v2si_ftype_v2si_int_int
11616 = build_function_type (V2SI_type_node,
11617 tree_cons (NULL_TREE, V2SI_type_node,
11618 tree_cons (NULL_TREE, integer_type_node,
11619 tree_cons (NULL_TREE,
11620 integer_type_node,
11621 endlink))));
11622 /* Miscellaneous. */
11623 tree v8qi_ftype_v4hi_v4hi
11624 = build_function_type (V8QI_type_node,
11625 tree_cons (NULL_TREE, V4HI_type_node,
11626 tree_cons (NULL_TREE, V4HI_type_node,
11627 endlink)));
11628 tree v4hi_ftype_v2si_v2si
11629 = build_function_type (V4HI_type_node,
11630 tree_cons (NULL_TREE, V2SI_type_node,
11631 tree_cons (NULL_TREE, V2SI_type_node,
11632 endlink)));
11633 tree v2si_ftype_v4hi_v4hi
11634 = build_function_type (V2SI_type_node,
11635 tree_cons (NULL_TREE, V4HI_type_node,
11636 tree_cons (NULL_TREE, V4HI_type_node,
11637 endlink)));
11638 tree v2si_ftype_v8qi_v8qi
11639 = build_function_type (V2SI_type_node,
11640 tree_cons (NULL_TREE, V8QI_type_node,
11641 tree_cons (NULL_TREE, V8QI_type_node,
11642 endlink)));
11643 tree v4hi_ftype_v4hi_di
11644 = build_function_type (V4HI_type_node,
11645 tree_cons (NULL_TREE, V4HI_type_node,
11646 tree_cons (NULL_TREE,
11647 long_long_integer_type_node,
11648 endlink)));
11649 tree v2si_ftype_v2si_di
11650 = build_function_type (V2SI_type_node,
11651 tree_cons (NULL_TREE, V2SI_type_node,
11652 tree_cons (NULL_TREE,
11653 long_long_integer_type_node,
11654 endlink)));
11655 tree void_ftype_int_int
11656 = build_function_type (void_type_node,
11657 tree_cons (NULL_TREE, integer_type_node,
11658 tree_cons (NULL_TREE, integer_type_node,
11659 endlink)));
11660 tree di_ftype_void
11661 = build_function_type (long_long_unsigned_type_node, endlink);
11662 tree di_ftype_v8qi
11663 = build_function_type (long_long_integer_type_node,
11664 tree_cons (NULL_TREE, V8QI_type_node,
11665 endlink));
11666 tree di_ftype_v4hi
11667 = build_function_type (long_long_integer_type_node,
11668 tree_cons (NULL_TREE, V4HI_type_node,
11669 endlink));
11670 tree di_ftype_v2si
11671 = build_function_type (long_long_integer_type_node,
11672 tree_cons (NULL_TREE, V2SI_type_node,
11673 endlink));
11674 tree v2si_ftype_v4hi
11675 = build_function_type (V2SI_type_node,
11676 tree_cons (NULL_TREE, V4HI_type_node,
11677 endlink));
11678 tree v4hi_ftype_v8qi
11679 = build_function_type (V4HI_type_node,
11680 tree_cons (NULL_TREE, V8QI_type_node,
11681 endlink));
11682
11683 tree di_ftype_di_v4hi_v4hi
11684 = build_function_type (long_long_unsigned_type_node,
11685 tree_cons (NULL_TREE,
11686 long_long_unsigned_type_node,
11687 tree_cons (NULL_TREE, V4HI_type_node,
11688 tree_cons (NULL_TREE,
11689 V4HI_type_node,
11690 endlink))));
11691
11692 tree di_ftype_v4hi_v4hi
11693 = build_function_type (long_long_unsigned_type_node,
11694 tree_cons (NULL_TREE, V4HI_type_node,
11695 tree_cons (NULL_TREE, V4HI_type_node,
11696 endlink)));
11697
11698 /* Normal vector binops. */
11699 tree v8qi_ftype_v8qi_v8qi
11700 = build_function_type (V8QI_type_node,
11701 tree_cons (NULL_TREE, V8QI_type_node,
11702 tree_cons (NULL_TREE, V8QI_type_node,
11703 endlink)));
11704 tree v4hi_ftype_v4hi_v4hi
11705 = build_function_type (V4HI_type_node,
11706 tree_cons (NULL_TREE, V4HI_type_node,
11707 tree_cons (NULL_TREE, V4HI_type_node,
11708 endlink)));
11709 tree v2si_ftype_v2si_v2si
11710 = build_function_type (V2SI_type_node,
11711 tree_cons (NULL_TREE, V2SI_type_node,
11712 tree_cons (NULL_TREE, V2SI_type_node,
11713 endlink)));
11714 tree di_ftype_di_di
11715 = build_function_type (long_long_unsigned_type_node,
11716 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11717 tree_cons (NULL_TREE,
11718 long_long_unsigned_type_node,
11719 endlink)));
11720
11721 /* Add all builtins that are more or less simple operations on two
11722 operands. */
11723 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11724 {
11725 /* Use one of the operands; the target can have a different mode for
11726 mask-generating compares. */
11727 enum machine_mode mode;
11728 tree type;
11729
11730 if (d->name == 0)
11731 continue;
11732
11733 mode = insn_data[d->icode].operand[1].mode;
11734
11735 switch (mode)
11736 {
11737 case V8QImode:
11738 type = v8qi_ftype_v8qi_v8qi;
11739 break;
11740 case V4HImode:
11741 type = v4hi_ftype_v4hi_v4hi;
11742 break;
11743 case V2SImode:
11744 type = v2si_ftype_v2si_v2si;
11745 break;
11746 case DImode:
11747 type = di_ftype_di_di;
11748 break;
11749
11750 default:
11751 abort ();
11752 }
11753
11754 def_mbuiltin (d->mask, d->name, type, d->code);
11755 }
11756
11757 /* Add the remaining MMX insns with somewhat more complicated types. */
11758 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11759 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11760 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11761
11762 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11763 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11764 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11765 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11766 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11767 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11768
11769 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11770 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11771 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11772 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11773 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11774 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11775
11776 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11777 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11778 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11779 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11780 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11781 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11782
11783 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11784 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11785 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11786 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11787 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11788 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11789
11790 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11791
11792 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11793 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11794 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11795 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11796
11797 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11798 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11799 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11800 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11801 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11802 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11803 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11804 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11805 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11806
11807 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11808 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11809 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11810
11811 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11812 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11813 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11814
11815 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11816 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11817 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11818 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11819 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11820 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11821
11822 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11823 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11824 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11825 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11826 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11827 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11828 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11829 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11830 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11831 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11832 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11833 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11834
11835 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11836 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11837 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11838 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11839
11840 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11841 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11842 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11843 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11844 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11845 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11846 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
11847 }
11848
11849 static void
11850 arm_init_builtins (void)
11851 {
11852 if (TARGET_REALLY_IWMMXT)
11853 arm_init_iwmmxt_builtins ();
11854 }
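/* A minimal sketch of how one of the two-operand builtins registered
   above can be used from C source compiled for an iWMMXt target.
   The vector typedef here is only an assumption for the example; user
   code would normally get suitable types and wrappers from <mmintrin.h>.

     typedef signed char __v8qi __attribute__ ((vector_size (8)));

     __v8qi add_bytes (__v8qi a, __v8qi b)
     {
       return __builtin_arm_waddb (a, b);
     }

   Such a call is expanded through arm_expand_binop_builtin below,
   using the addv8qi3 insn pattern.  */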
11855
11856 /* Errors in the source file can cause expand_expr to return const0_rtx
11857 where we expect a vector. To avoid crashing, use one of the vector
11858 clear instructions. */
11859
11860 static rtx
11861 safe_vector_operand (rtx x, enum machine_mode mode)
11862 {
11863 if (x != const0_rtx)
11864 return x;
11865 x = gen_reg_rtx (mode);
11866
11867 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11868 : gen_rtx_SUBREG (DImode, x, 0)));
11869 return x;
11870 }
11871
11872 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11873
11874 static rtx
11875 arm_expand_binop_builtin (enum insn_code icode,
11876 tree arglist, rtx target)
11877 {
11878 rtx pat;
11879 tree arg0 = TREE_VALUE (arglist);
11880 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11881 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11882 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11883 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11884 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11885 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11886
11887 if (VECTOR_MODE_P (mode0))
11888 op0 = safe_vector_operand (op0, mode0);
11889 if (VECTOR_MODE_P (mode1))
11890 op1 = safe_vector_operand (op1, mode1);
11891
11892 if (! target
11893 || GET_MODE (target) != tmode
11894 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11895 target = gen_reg_rtx (tmode);
11896
11897 /* In case the insn wants input operands in modes different from
11898 the result, abort. */
11899 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11900 abort ();
11901
11902 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11903 op0 = copy_to_mode_reg (mode0, op0);
11904 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11905 op1 = copy_to_mode_reg (mode1, op1);
11906
11907 pat = GEN_FCN (icode) (target, op0, op1);
11908 if (! pat)
11909 return 0;
11910 emit_insn (pat);
11911 return target;
11912 }
11913
11914 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11915
11916 static rtx
11917 arm_expand_unop_builtin (enum insn_code icode,
11918 tree arglist, rtx target, int do_load)
11919 {
11920 rtx pat;
11921 tree arg0 = TREE_VALUE (arglist);
11922 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11923 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11924 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11925
11926 if (! target
11927 || GET_MODE (target) != tmode
11928 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11929 target = gen_reg_rtx (tmode);
11930 if (do_load)
11931 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11932 else
11933 {
11934 if (VECTOR_MODE_P (mode0))
11935 op0 = safe_vector_operand (op0, mode0);
11936
11937 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11938 op0 = copy_to_mode_reg (mode0, op0);
11939 }
11940
11941 pat = GEN_FCN (icode) (target, op0);
11942 if (! pat)
11943 return 0;
11944 emit_insn (pat);
11945 return target;
11946 }
11947
11948 /* Expand an expression EXP that calls a built-in function,
11949 with result going to TARGET if that's convenient
11950 (and in mode MODE if that's convenient).
11951 SUBTARGET may be used as the target for computing one of EXP's operands.
11952 IGNORE is nonzero if the value is to be ignored. */
11953
11954 static rtx
11955 arm_expand_builtin (tree exp,
11956 rtx target,
11957 rtx subtarget ATTRIBUTE_UNUSED,
11958 enum machine_mode mode ATTRIBUTE_UNUSED,
11959 int ignore ATTRIBUTE_UNUSED)
11960 {
11961 const struct builtin_description * d;
11962 enum insn_code icode;
11963 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11964 tree arglist = TREE_OPERAND (exp, 1);
11965 tree arg0;
11966 tree arg1;
11967 tree arg2;
11968 rtx op0;
11969 rtx op1;
11970 rtx op2;
11971 rtx pat;
11972 int fcode = DECL_FUNCTION_CODE (fndecl);
11973 size_t i;
11974 enum machine_mode tmode;
11975 enum machine_mode mode0;
11976 enum machine_mode mode1;
11977 enum machine_mode mode2;
11978
11979 switch (fcode)
11980 {
11981 case ARM_BUILTIN_TEXTRMSB:
11982 case ARM_BUILTIN_TEXTRMUB:
11983 case ARM_BUILTIN_TEXTRMSH:
11984 case ARM_BUILTIN_TEXTRMUH:
11985 case ARM_BUILTIN_TEXTRMSW:
11986 case ARM_BUILTIN_TEXTRMUW:
11987 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11988 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11989 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11990 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11991 : CODE_FOR_iwmmxt_textrmw);
11992
11993 arg0 = TREE_VALUE (arglist);
11994 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11995 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11996 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11997 tmode = insn_data[icode].operand[0].mode;
11998 mode0 = insn_data[icode].operand[1].mode;
11999 mode1 = insn_data[icode].operand[2].mode;
12000
12001 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12002 op0 = copy_to_mode_reg (mode0, op0);
12003 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12004 {
12005 /* @@@ better error message */
12006 error ("selector must be an immediate");
12007 return gen_reg_rtx (tmode);
12008 }
12009 if (target == 0
12010 || GET_MODE (target) != tmode
12011 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12012 target = gen_reg_rtx (tmode);
12013 pat = GEN_FCN (icode) (target, op0, op1);
12014 if (! pat)
12015 return 0;
12016 emit_insn (pat);
12017 return target;
12018
12019 case ARM_BUILTIN_TINSRB:
12020 case ARM_BUILTIN_TINSRH:
12021 case ARM_BUILTIN_TINSRW:
12022 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12023 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12024 : CODE_FOR_iwmmxt_tinsrw);
12025 arg0 = TREE_VALUE (arglist);
12026 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12027 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12028 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12029 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12030 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12031 tmode = insn_data[icode].operand[0].mode;
12032 mode0 = insn_data[icode].operand[1].mode;
12033 mode1 = insn_data[icode].operand[2].mode;
12034 mode2 = insn_data[icode].operand[3].mode;
12035
12036 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12037 op0 = copy_to_mode_reg (mode0, op0);
12038 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12039 op1 = copy_to_mode_reg (mode1, op1);
12040 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12041 {
12042 /* @@@ better error message */
12043 error ("selector must be an immediate");
12044 return const0_rtx;
12045 }
12046 if (target == 0
12047 || GET_MODE (target) != tmode
12048 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12049 target = gen_reg_rtx (tmode);
12050 pat = GEN_FCN (icode) (target, op0, op1, op2);
12051 if (! pat)
12052 return 0;
12053 emit_insn (pat);
12054 return target;
12055
12056 case ARM_BUILTIN_SETWCX:
12057 arg0 = TREE_VALUE (arglist);
12058 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12059 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12060 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12061 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12062 return 0;
12063
12064 case ARM_BUILTIN_GETWCX:
12065 arg0 = TREE_VALUE (arglist);
12066 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12067 target = gen_reg_rtx (SImode);
12068 emit_insn (gen_iwmmxt_tmrc (target, op0));
12069 return target;
12070
12071 case ARM_BUILTIN_WSHUFH:
12072 icode = CODE_FOR_iwmmxt_wshufh;
12073 arg0 = TREE_VALUE (arglist);
12074 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12075 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12076 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12077 tmode = insn_data[icode].operand[0].mode;
12078 mode1 = insn_data[icode].operand[1].mode;
12079 mode2 = insn_data[icode].operand[2].mode;
12080
12081 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12082 op0 = copy_to_mode_reg (mode1, op0);
12083 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12084 {
12085 /* @@@ better error message */
12086 error ("mask must be an immediate");
12087 return const0_rtx;
12088 }
12089 if (target == 0
12090 || GET_MODE (target) != tmode
12091 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12092 target = gen_reg_rtx (tmode);
12093 pat = GEN_FCN (icode) (target, op0, op1);
12094 if (! pat)
12095 return 0;
12096 emit_insn (pat);
12097 return target;
12098
12099 case ARM_BUILTIN_WSADB:
12100 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12101 case ARM_BUILTIN_WSADH:
12102 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12103 case ARM_BUILTIN_WSADBZ:
12104 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12105 case ARM_BUILTIN_WSADHZ:
12106 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12107
12108 /* Several three-argument builtins. */
12109 case ARM_BUILTIN_WMACS:
12110 case ARM_BUILTIN_WMACU:
12111 case ARM_BUILTIN_WALIGN:
12112 case ARM_BUILTIN_TMIA:
12113 case ARM_BUILTIN_TMIAPH:
12114 case ARM_BUILTIN_TMIATT:
12115 case ARM_BUILTIN_TMIATB:
12116 case ARM_BUILTIN_TMIABT:
12117 case ARM_BUILTIN_TMIABB:
12118 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12119 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12120 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12121 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12122 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12123 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12124 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12125 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12126 : CODE_FOR_iwmmxt_walign);
12127 arg0 = TREE_VALUE (arglist);
12128 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12129 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12130 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12131 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12132 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12133 tmode = insn_data[icode].operand[0].mode;
12134 mode0 = insn_data[icode].operand[1].mode;
12135 mode1 = insn_data[icode].operand[2].mode;
12136 mode2 = insn_data[icode].operand[3].mode;
12137
12138 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12139 op0 = copy_to_mode_reg (mode0, op0);
12140 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12141 op1 = copy_to_mode_reg (mode1, op1);
12142 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12143 op2 = copy_to_mode_reg (mode2, op2);
12144 if (target == 0
12145 || GET_MODE (target) != tmode
12146 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12147 target = gen_reg_rtx (tmode);
12148 pat = GEN_FCN (icode) (target, op0, op1, op2);
12149 if (! pat)
12150 return 0;
12151 emit_insn (pat);
12152 return target;
12153
12154 case ARM_BUILTIN_WZERO:
12155 target = gen_reg_rtx (DImode);
12156 emit_insn (gen_iwmmxt_clrdi (target));
12157 return target;
12158
12159 default:
12160 break;
12161 }
12162
12163 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12164 if (d->code == (const enum arm_builtins) fcode)
12165 return arm_expand_binop_builtin (d->icode, arglist, target);
12166
12167 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12168 if (d->code == (const enum arm_builtins) fcode)
12169 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12170
12171 /* @@@ Should really do something sensible here. */
12172 return NULL_RTX;
12173 }
12174 \f
12175 /* Recursively search through all of the blocks in a function
12176 checking to see if any of the variables created in that
12177 function match the RTX called 'orig'. If they do then
12178 replace them with the RTX called 'new'. */
12179 static void
12180 replace_symbols_in_block (tree block, rtx orig, rtx new)
12181 {
12182 for (; block; block = BLOCK_CHAIN (block))
12183 {
12184 tree sym;
12185
12186 if (!TREE_USED (block))
12187 continue;
12188
12189 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12190 {
12191 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12192 || DECL_IGNORED_P (sym)
12193 || TREE_CODE (sym) != VAR_DECL
12194 || DECL_EXTERNAL (sym)
12195 || !rtx_equal_p (DECL_RTL (sym), orig)
12196 )
12197 continue;
12198
12199 SET_DECL_RTL (sym, new);
12200 }
12201
12202 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12203 }
12204 }
12205
12206 /* Return the number (counting from 0) of
12207 the least significant set bit in MASK. */
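/* For example, number_of_first_bit_set (0x28) is 3: 0x28 is binary
   101000 and bit 3 is its least significant set bit.  MASK is assumed
   to be nonzero; the loop below has no guard for a zero mask.  */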
12208
12209 inline static int
12210 number_of_first_bit_set (int mask)
12211 {
12212 int bit;
12213
12214 for (bit = 0;
12215 (mask & (1 << bit)) == 0;
12216 ++bit)
12217 continue;
12218
12219 return bit;
12220 }
12221
12222 /* Generate code to return from a thumb function.
12223 If 'reg_containing_return_addr' is -1, then the return address is
12224 actually on the stack, at the stack pointer. */
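/* In the simplest case, where nothing needs to be popped and the
   return address is still in a register (typically LR), this emits
   just

     bx  lr

   The more involved paths below first shuffle popped values through
   the argument registers.  */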
12225 static void
12226 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
12227 {
12228 unsigned regs_available_for_popping;
12229 unsigned regs_to_pop;
12230 int pops_needed;
12231 unsigned available;
12232 unsigned required;
12233 int mode;
12234 int size;
12235 int restore_a4 = FALSE;
12236
12237 /* Compute the registers we need to pop. */
12238 regs_to_pop = 0;
12239 pops_needed = 0;
12240
12241 /* There is an assumption here that, if eh_ofs is not NULL, the
12242 normal return address will have been pushed. */
12243 if (reg_containing_return_addr == -1 || eh_ofs)
12244 {
12245 /* When we are generating a return for __builtin_eh_return,
12246 reg_containing_return_addr must specify the return regno. */
12247 if (eh_ofs && reg_containing_return_addr == -1)
12248 abort ();
12249
12250 regs_to_pop |= 1 << LR_REGNUM;
12251 ++pops_needed;
12252 }
12253
12254 if (TARGET_BACKTRACE)
12255 {
12256 /* Restore the (ARM) frame pointer and stack pointer. */
12257 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12258 pops_needed += 2;
12259 }
12260
12261 /* If there is nothing to pop then just emit the BX instruction and
12262 return. */
12263 if (pops_needed == 0)
12264 {
12265 if (eh_ofs)
12266 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12267
12268 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12269 return;
12270 }
12271 /* Otherwise, if we are not supporting interworking, have not created
12272 a backtrace structure, and the function was not entered in ARM mode,
12273 then just pop the return address straight into the PC. */
12274 else if (!TARGET_INTERWORK
12275 && !TARGET_BACKTRACE
12276 && !is_called_in_ARM_mode (current_function_decl))
12277 {
12278 if (eh_ofs)
12279 {
12280 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
12281 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12282 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12283 }
12284 else
12285 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12286
12287 return;
12288 }
12289
12290 /* Find out how many of the (return) argument registers we can corrupt. */
12291 regs_available_for_popping = 0;
12292
12293 /* If returning via __builtin_eh_return, the bottom three registers
12294 all contain information needed for the return. */
12295 if (eh_ofs)
12296 size = 12;
12297 else
12298 {
12299 #ifdef RTX_CODE
12300 /* Deduce the registers used from the function's
12301 return value where possible. This is more reliable than examining
12302 regs_ever_live[] because that will be set if the register is
12303 ever used in the function, not just if the register is used
12304 to hold a return value. */
12305
12306 if (current_function_return_rtx != 0)
12307 mode = GET_MODE (current_function_return_rtx);
12308 else
12309 #endif
12310 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12311
12312 size = GET_MODE_SIZE (mode);
12313
12314 if (size == 0)
12315 {
12316 /* In a void function we can use any argument register.
12317 In a function that returns a structure on the stack
12318 we can use the second and third argument registers. */
12319 if (mode == VOIDmode)
12320 regs_available_for_popping =
12321 (1 << ARG_REGISTER (1))
12322 | (1 << ARG_REGISTER (2))
12323 | (1 << ARG_REGISTER (3));
12324 else
12325 regs_available_for_popping =
12326 (1 << ARG_REGISTER (2))
12327 | (1 << ARG_REGISTER (3));
12328 }
12329 else if (size <= 4)
12330 regs_available_for_popping =
12331 (1 << ARG_REGISTER (2))
12332 | (1 << ARG_REGISTER (3));
12333 else if (size <= 8)
12334 regs_available_for_popping =
12335 (1 << ARG_REGISTER (3));
12336 }
12337
12338 /* Match registers to be popped with registers into which we pop them. */
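/* (In the loop condition updates below, X &= ~(X & -X) clears the
   least significant set bit of X: e.g. with X = 0xC, X & -X is 0x4,
   so X becomes 0x8.  Each iteration therefore matches one required
   register with one available one.)  */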
12339 for (available = regs_available_for_popping,
12340 required = regs_to_pop;
12341 required != 0 && available != 0;
12342 available &= ~(available & - available),
12343 required &= ~(required & - required))
12344 -- pops_needed;
12345
12346 /* If we have any popping registers left over, remove them. */
12347 if (available > 0)
12348 regs_available_for_popping &= ~available;
12349
12350 /* Otherwise if we need another popping register we can use
12351 the fourth argument register. */
12352 else if (pops_needed)
12353 {
12354 /* If we have not found any free argument registers and
12355 reg a4 contains the return address, we must move it. */
12356 if (regs_available_for_popping == 0
12357 && reg_containing_return_addr == LAST_ARG_REGNUM)
12358 {
12359 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12360 reg_containing_return_addr = LR_REGNUM;
12361 }
12362 else if (size > 12)
12363 {
12364 /* Register a4 is being used to hold part of the return value,
12365 but we have dire need of a free, low register. */
12366 restore_a4 = TRUE;
12367
12368 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12369 }
12370
12371 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12372 {
12373 /* The fourth argument register is available. */
12374 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12375
12376 --pops_needed;
12377 }
12378 }
12379
12380 /* Pop as many registers as we can. */
12381 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12382 regs_available_for_popping);
12383
12384 /* Process the registers we popped. */
12385 if (reg_containing_return_addr == -1)
12386 {
12387 /* The return address was popped into the lowest numbered register. */
12388 regs_to_pop &= ~(1 << LR_REGNUM);
12389
12390 reg_containing_return_addr =
12391 number_of_first_bit_set (regs_available_for_popping);
12392
12393 /* Remove this register from the mask of available registers, so that
12394 the return address will not be corrupted by further pops. */
12395 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12396 }
12397
12398 /* If we popped other registers then handle them here. */
12399 if (regs_available_for_popping)
12400 {
12401 int frame_pointer;
12402
12403 /* Work out which register currently contains the frame pointer. */
12404 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12405
12406 /* Move it into the correct place. */
12407 asm_fprintf (f, "\tmov\t%r, %r\n",
12408 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12409
12410 /* (Temporarily) remove it from the mask of popped registers. */
12411 regs_available_for_popping &= ~(1 << frame_pointer);
12412 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12413
12414 if (regs_available_for_popping)
12415 {
12416 int stack_pointer;
12417
12418 /* We popped the stack pointer as well;
12419 find the register that contains it. */
12420 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12421
12422 /* Move it into the stack register. */
12423 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12424
12425 /* At this point we have popped all necessary registers, so
12426 do not worry about restoring regs_available_for_popping
12427 to its correct value:
12428
12429 assert (pops_needed == 0)
12430 assert (regs_available_for_popping == (1 << frame_pointer))
12431 assert (regs_to_pop == (1 << STACK_POINTER)) */
12432 }
12433 else
12434 {
12435 /* Since we have just moved the popped value into the frame
12436 pointer, the popping register is available for reuse, and
12437 we know that we still have the stack pointer left to pop. */
12438 regs_available_for_popping |= (1 << frame_pointer);
12439 }
12440 }
12441
12442 /* If we still have registers left on the stack, but we no longer have
12443 any registers into which we can pop them, then we must move the return
12444 address into the link register and make available the register that
12445 contained it. */
12446 if (regs_available_for_popping == 0 && pops_needed > 0)
12447 {
12448 regs_available_for_popping |= 1 << reg_containing_return_addr;
12449
12450 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12451 reg_containing_return_addr);
12452
12453 reg_containing_return_addr = LR_REGNUM;
12454 }
12455
12456 /* If we have registers left on the stack then pop some more.
12457 We know that at most we will want to pop FP and SP. */
12458 if (pops_needed > 0)
12459 {
12460 int popped_into;
12461 int move_to;
12462
12463 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12464 regs_available_for_popping);
12465
12466 /* We have popped either FP or SP.
12467 Move whichever one it is into the correct register. */
12468 popped_into = number_of_first_bit_set (regs_available_for_popping);
12469 move_to = number_of_first_bit_set (regs_to_pop);
12470
12471 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12472
12473 regs_to_pop &= ~(1 << move_to);
12474
12475 --pops_needed;
12476 }
12477
12478 /* If we still have not popped everything then we must have only
12479 had one register available to us and we are now popping the SP. */
12480 if (pops_needed > 0)
12481 {
12482 int popped_into;
12483
12484 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12485 regs_available_for_popping);
12486
12487 popped_into = number_of_first_bit_set (regs_available_for_popping);
12488
12489 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12490 /*
12491 assert (regs_to_pop == (1 << STACK_POINTER))
12492 assert (pops_needed == 1)
12493 */
12494 }
12495
12496 /* If necessary restore the a4 register. */
12497 if (restore_a4)
12498 {
12499 if (reg_containing_return_addr != LR_REGNUM)
12500 {
12501 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12502 reg_containing_return_addr = LR_REGNUM;
12503 }
12504
12505 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12506 }
12507
12508 if (eh_ofs)
12509 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12510
12511 /* Return to caller. */
12512 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12513 }
12514
12515 /* Emit code to push or pop registers to or from the stack. F is the
12516 assembly file. MASK is the registers to push or pop. PUSH is
12517 nonzero if we should push, and zero if we should pop. For debugging
12518 output, if pushing, adjust CFA_OFFSET by the amount of space added
12519 to the stack. REAL_REGS should have the same number of bits set as
12520 MASK, and will be used instead (in the same order) to describe which
12521 registers were saved - this is used to mark the save slots when we
12522 push high registers after moving them to low registers. */
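/* For illustration (a sketch, not output captured from the compiler),
   a call such as

     thumb_pushpop (f, 0x40F0, 1, &cfa_offset, 0x40F0);

   emits "push {r4, r5, r6, r7, lr}": the low registers named in MASK
   plus LR, with REAL_REGS describing the same registers in the dwarf2
   frame information.  */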
12523 static void
12524 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12525 {
12526 int regno;
12527 int lo_mask = mask & 0xFF;
12528 int pushed_words = 0;
12529
12530 if (lo_mask == 0 && !push && (mask & (1 << 15)))
12531 {
12532 /* Special case. Do not generate a POP PC statement here, do it in
12533 thumb_exit(). */
12534 thumb_exit (f, -1, NULL_RTX);
12535 return;
12536 }
12537
12538 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12539
12540 /* Look at the low registers first. */
12541 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12542 {
12543 if (lo_mask & 1)
12544 {
12545 asm_fprintf (f, "%r", regno);
12546
12547 if ((lo_mask & ~1) != 0)
12548 fprintf (f, ", ");
12549
12550 pushed_words++;
12551 }
12552 }
12553
12554 if (push && (mask & (1 << LR_REGNUM)))
12555 {
12556 /* Catch pushing the LR. */
12557 if (mask & 0xFF)
12558 fprintf (f, ", ");
12559
12560 asm_fprintf (f, "%r", LR_REGNUM);
12561
12562 pushed_words++;
12563 }
12564 else if (!push && (mask & (1 << PC_REGNUM)))
12565 {
12566 /* Catch popping the PC. */
12567 if (TARGET_INTERWORK || TARGET_BACKTRACE)
12568 {
12569 /* The PC is never popped directly; instead
12570 it is popped into r3 and then BX is used. */
12571 fprintf (f, "}\n");
12572
12573 thumb_exit (f, -1, NULL_RTX);
12574
12575 return;
12576 }
12577 else
12578 {
12579 if (mask & 0xFF)
12580 fprintf (f, ", ");
12581
12582 asm_fprintf (f, "%r", PC_REGNUM);
12583 }
12584 }
12585
12586 fprintf (f, "}\n");
12587
12588 if (push && pushed_words && dwarf2out_do_frame ())
12589 {
12590 char *l = dwarf2out_cfi_label ();
12591 int pushed_mask = real_regs;
12592
12593 *cfa_offset += pushed_words * 4;
12594 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12595
12596 pushed_words = 0;
12597 pushed_mask = real_regs;
12598 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12599 {
12600 if (pushed_mask & 1)
12601 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12602 }
12603 }
12604 }
12605 \f
12606 void
12607 thumb_final_prescan_insn (rtx insn)
12608 {
12609 if (flag_print_asm_name)
12610 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12611 INSN_ADDRESSES (INSN_UID (insn)));
12612 }
12613
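/* Return nonzero if all of the set bits in VAL fit within a single
   8-bit window shifted left by 0 to 24 bits, i.e. VAL is an 8-bit
   constant shifted into place.  For example, 0xFF000000 (0xFF << 24)
   qualifies, while 0x101 does not because its set bits span 9 bits.  */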
12614 int
12615 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12616 {
12617 unsigned HOST_WIDE_INT mask = 0xff;
12618 int i;
12619
12620 if (val == 0) /* XXX */
12621 return 0;
12622
12623 for (i = 0; i < 25; i++)
12624 if ((val & (mask << i)) == val)
12625 return 1;
12626
12627 return 0;
12628 }
12629
12630 /* Returns nonzero if the current function contains,
12631 or might contain a far jump. */
12632 int
12633 thumb_far_jump_used_p (int in_prologue)
12634 {
12635 rtx insn;
12636
12637 /* This test is only important for leaf functions. */
12638 /* assert (!leaf_function_p ()); */
12639
12640 /* If we have already decided that far jumps may be used,
12641 do not bother checking again, and always return true even if
12642 it turns out that they are not being used. Once we have made
12643 the decision that far jumps are present (and that hence the link
12644 register will be pushed onto the stack) we cannot go back on it. */
12645 if (cfun->machine->far_jump_used)
12646 return 1;
12647
12648 /* If this function is not being called from the prologue/epilogue
12649 generation code, then it must be being called from the
12650 INITIAL_ELIMINATION_OFFSET macro. */
12651 if (!in_prologue)
12652 {
12653 /* In this case we know that we are being asked about the elimination
12654 of the arg pointer register. If that register is not being used,
12655 then there are no arguments on the stack, and we do not have to
12656 worry that a far jump might force the prologue to push the link
12657 register, changing the stack offsets. In this case we can just
12658 return false, since the presence of far jumps in the function will
12659 not affect stack offsets.
12660
12661 If the arg pointer is live (or if it was live, but has now been
12662 eliminated and so set to dead) then we do have to test to see if
12663 the function might contain a far jump. This test can lead to some
12664 false negatives, since before reload is completed, the length of
12665 branch instructions is not known, so gcc defaults to returning their
12666 longest length, which in turn sets the far jump attribute to true.
12667
12668 A false negative will not result in bad code being generated, but it
12669 will result in a needless push and pop of the link register. We
12670 hope that this does not occur too often. */
12671 if (regs_ever_live [ARG_POINTER_REGNUM])
12672 cfun->machine->arg_pointer_live = 1;
12673 else if (!cfun->machine->arg_pointer_live)
12674 return 0;
12675 }
12676
12677 /* Check to see if the function contains a branch
12678 insn with the far jump attribute set. */
12679 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12680 {
12681 if (GET_CODE (insn) == JUMP_INSN
12682 /* Ignore tablejump patterns. */
12683 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12684 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12685 && get_attr_far_jump (insn) == FAR_JUMP_YES
12686 )
12687 {
12688 /* Record the fact that we have decided that
12689 the function does use far jumps. */
12690 cfun->machine->far_jump_used = 1;
12691 return 1;
12692 }
12693 }
12694
12695 return 0;
12696 }
12697
12698 /* Return nonzero if FUNC must be entered in ARM mode. */
12699 int
12700 is_called_in_ARM_mode (tree func)
12701 {
12702 if (TREE_CODE (func) != FUNCTION_DECL)
12703 abort ();
12704
12705 /* Ignore the problem of functions whose address is taken. */
12706 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12707 return TRUE;
12708
12709 #ifdef ARM_PE
12710 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12711 #else
12712 return FALSE;
12713 #endif
12714 }
12715
12716 /* The bits which aren't usefully expanded as rtl. */
12717 const char *
12718 thumb_unexpanded_epilogue (void)
12719 {
12720 int regno;
12721 int live_regs_mask = 0;
12722 int high_regs_pushed = 0;
12723 int leaf_function = leaf_function_p ();
12724 int had_to_push_lr;
12725 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
12726
12727 if (return_used_this_function)
12728 return "";
12729
12730 if (IS_NAKED (arm_current_func_type ()))
12731 return "";
12732
12733 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12734 if (THUMB_REG_PUSHED_P (regno))
12735 live_regs_mask |= 1 << regno;
12736
12737 for (regno = 8; regno < 13; regno++)
12738 if (THUMB_REG_PUSHED_P (regno))
12739 high_regs_pushed++;
12740
12741 /* The prolog may have pushed some high registers to use as
12742 work registers, e.g. the testsuite file:
12743 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12744 compiles to produce:
12745 push {r4, r5, r6, r7, lr}
12746 mov r7, r9
12747 mov r6, r8
12748 push {r6, r7}
12749 as part of the prolog. We have to undo that pushing here. */
12750
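/* For the same example the code below would emit something like (an
   illustrative sequence, assuming r3 is free and the function can
   return with a pop of the PC):

     pop  {r3, r4}
     mov  r8, r3
     mov  r9, r4

   before the remaining low registers and the return address are
   popped, so that the high registers are restored through free low
   registers.  */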
12751 if (high_regs_pushed)
12752 {
12753 int mask = live_regs_mask;
12754 int next_hi_reg;
12755 int size;
12756 int mode;
12757
12758 #ifdef RTX_CODE
12759 /* Deduce the registers used from the function's return value where possible.
12760 This is more reliable than examining regs_ever_live[] because that
12761 will be set if the register is ever used in the function, not just if
12762 the register is used to hold a return value. */
12763
12764 if (current_function_return_rtx != 0)
12765 mode = GET_MODE (current_function_return_rtx);
12766 else
12767 #endif
12768 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12769
12770 size = GET_MODE_SIZE (mode);
12771
12772 /* Unless we are returning a type of size > 12, register r3 is
12773 available. */
12774 if (size < 13)
12775 mask |= 1 << 3;
12776
12777 if (mask == 0)
12778 /* Oh dear! We have no low registers into which we can pop
12779 high registers! */
12780 internal_error
12781 ("no low registers available for popping high registers");
12782
12783 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12784 if (THUMB_REG_PUSHED_P (next_hi_reg))
12785 break;
12786
12787 while (high_regs_pushed)
12788 {
12789 /* Find lo register(s) into which the high register(s) can
12790 be popped. */
12791 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12792 {
12793 if (mask & (1 << regno))
12794 high_regs_pushed--;
12795 if (high_regs_pushed == 0)
12796 break;
12797 }
12798
12799 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12800
12801 /* Pop the values into the low register(s). */
12802 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12803
12804 /* Move the value(s) into the high registers. */
12805 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12806 {
12807 if (mask & (1 << regno))
12808 {
12809 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12810 regno);
12811
12812 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12813 if (THUMB_REG_PUSHED_P (next_hi_reg))
12814 break;
12815 }
12816 }
12817 }
12818 }
12819
12820 had_to_push_lr = (live_regs_mask || !leaf_function
12821 || thumb_far_jump_used_p (1));
12822
12823 if (TARGET_BACKTRACE
12824 && ((live_regs_mask & 0xFF) == 0)
12825 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12826 {
12827 /* The stack backtrace structure creation code had to
12828 push R7 in order to get a work register, so we pop
12829 it now. */
12830 live_regs_mask |= (1 << LAST_LO_REGNUM);
12831 }
12832
12833 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12834 {
12835 if (had_to_push_lr
12836 && !is_called_in_ARM_mode (current_function_decl)
12837 && !eh_ofs)
12838 live_regs_mask |= 1 << PC_REGNUM;
12839
12840 /* Either no argument registers were pushed or a backtrace
12841 structure was created which includes an adjusted stack
12842 pointer, so just pop everything. */
12843 if (live_regs_mask)
12844 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12845 live_regs_mask);
12846
12847 if (eh_ofs)
12848 thumb_exit (asm_out_file, 2, eh_ofs);
12849 /* We have either just popped the return address into the
12850 PC, or it was kept in LR for the entire function, or
12851 it is still on the stack because we do not want to
12852 return by doing a pop {pc}. */
12853 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12854 thumb_exit (asm_out_file,
12855 (had_to_push_lr
12856 && is_called_in_ARM_mode (current_function_decl)) ?
12857 -1 : LR_REGNUM, NULL_RTX);
12858 }
12859 else
12860 {
12861 /* Pop everything but the return address. */
12862 live_regs_mask &= ~(1 << PC_REGNUM);
12863
12864 if (live_regs_mask)
12865 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12866 live_regs_mask);
12867
12868 if (had_to_push_lr)
12869 /* Get the return address into a temporary register. */
12870 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12871 1 << LAST_ARG_REGNUM);
12872
12873 /* Remove the argument registers that were pushed onto the stack. */
12874 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12875 SP_REGNUM, SP_REGNUM,
12876 current_function_pretend_args_size);
12877
12878 if (eh_ofs)
12879 thumb_exit (asm_out_file, 2, eh_ofs);
12880 else
12881 thumb_exit (asm_out_file,
12882 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
12883 }
12884
12885 return "";
12886 }
12887
12888 /* Functions to save and restore machine-specific function data. */
12889 static struct machine_function *
12890 arm_init_machine_status (void)
12891 {
12892 struct machine_function *machine;
12893 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12894
12895 #if ARM_FT_UNKNOWN != 0
12896 machine->func_type = ARM_FT_UNKNOWN;
12897 #endif
12898 return machine;
12899 }
12900
12901 /* Return an RTX indicating where the return address to the
12902 calling function can be found. */
12903 rtx
12904 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12905 {
12906 if (count != 0)
12907 return NULL_RTX;
12908
12909 if (TARGET_APCS_32)
12910 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12911 else
12912 {
12913 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
12914 GEN_INT (RETURN_ADDR_MASK26));
12915 return get_func_hard_reg_initial_val (cfun, lr);
12916 }
12917 }
12918
12919 /* Do anything needed before RTL is emitted for each function. */
12920 void
12921 arm_init_expanders (void)
12922 {
12923 /* Arrange to initialize and mark the machine per-function status. */
12924 init_machine_status = arm_init_machine_status;
12925 }
12926
12927 HOST_WIDE_INT
12928 thumb_get_frame_size (void)
12929 {
12930 int regno;
12931
12932 int base_size = ROUND_UP_WORD (get_frame_size ());
12933 int count_regs = 0;
12934 int entry_size = 0;
12935 int leaf;
12936
12937 if (! TARGET_THUMB)
12938 abort ();
12939
12940 if (! TARGET_ATPCS)
12941 return base_size;
12942
12943 /* We need to know if we are a leaf function. Unfortunately, it
12944 is possible to be called after start_sequence has been called,
12945 which causes get_insns to return the insns for the sequence,
12946 not the function, which will cause leaf_function_p to return
12947 the incorrect result.
12948
12949 To work around this, we cache the computed frame size. This
12950 works because we will only be calling RTL expanders that need
12951 to know about leaf functions once reload has completed, and the
12952 frame size cannot be changed after that time, so we can safely
12953 use the cached value. */
12954
12955 if (reload_completed)
12956 return cfun->machine->frame_size;
12957
12958 leaf = leaf_function_p ();
12959
12960 /* A leaf function does not need any stack alignment if it has nothing
12961 on the stack. */
12962 if (leaf && base_size == 0)
12963 {
12964 cfun->machine->frame_size = 0;
12965 return 0;
12966 }
12967
12968 /* We know that SP will be word aligned on entry, and we must
12969 preserve that condition at any subroutine call. But those are
12970 the only constraints. */
12971
12972 /* Space for variadic functions. */
12973 if (current_function_pretend_args_size)
12974 entry_size += current_function_pretend_args_size;
12975
12976 /* Space for pushed lo registers. */
12977 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12978 if (THUMB_REG_PUSHED_P (regno))
12979 count_regs++;
12980
12981 /* Space for backtrace structure. */
12982 if (TARGET_BACKTRACE)
12983 {
12984 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
12985 entry_size += 20;
12986 else
12987 entry_size += 16;
12988 }
12989
12990 if (count_regs || !leaf || thumb_far_jump_used_p (1))
12991 count_regs++; /* LR */
12992
12993 entry_size += count_regs * 4;
12994 count_regs = 0;
12995
12996 /* Space for pushed hi regs. */
12997 for (regno = 8; regno < 13; regno++)
12998 if (THUMB_REG_PUSHED_P (regno))
12999 count_regs++;
13000
13001 entry_size += count_regs * 4;
13002
13003 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
13004 base_size += 4;
13005 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
13006 abort ();
13007
13008 cfun->machine->frame_size = base_size;
13009
13010 return base_size;
13011 }
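/* Illustrative numbers for the ATPCS alignment above: a non-leaf
   function that pushes r4, r5 and LR has entry_size == 12; with
   base_size == 8 and no outgoing args the total is 20, which is not a
   multiple of 8, so base_size is bumped to 12 and entry_size +
   base_size comes to 24 bytes.  */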
13012
13013 /* Generate the rest of a function's prologue. */
13014 void
13015 thumb_expand_prologue (void)
13016 {
13017 rtx insn, dwarf;
13018
13019 HOST_WIDE_INT amount = (thumb_get_frame_size ()
13020 + current_function_outgoing_args_size);
13021 unsigned long func_type;
13022
13023 func_type = arm_current_func_type ();
13024
13025 /* Naked functions don't have prologues. */
13026 if (IS_NAKED (func_type))
13027 return;
13028
13029 if (IS_INTERRUPT (func_type))
13030 {
13031 error ("interrupt service routines cannot be coded in Thumb mode");
13032 return;
13033 }
13034
13035 if (frame_pointer_needed)
13036 {
13037 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
13038 RTX_FRAME_RELATED_P (insn) = 1;
13039 }
13040
13041 if (amount)
13042 {
13043 amount = ROUND_UP_WORD (amount);
13044
13045 if (amount < 512)
13046 {
13047 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13048 GEN_INT (- amount)));
13049 RTX_FRAME_RELATED_P (insn) = 1;
13050 }
13051 else
13052 {
13053 int regno;
13054 rtx reg;
13055
13056 /* The stack decrement is too big for an immediate value in a single
13057 insn. In theory we could issue multiple subtracts, but after
13058 three of them it becomes more space efficient to place the full
13059 value in the constant pool and load into a register. (Also the
13060 ARM debugger really likes to see only one stack decrement per
13061 function). So instead we look for a scratch register into which
13062 we can load the decrement, and then we subtract this from the
13063 stack pointer. Unfortunately, on the Thumb the only available
13064 scratch registers are the argument registers, and we cannot use
13065 these as they may hold arguments to the function. Instead we
13066 attempt to locate a call-preserved register which is used by this
13067 function. If we can find one, then we know that it will have
13068 been pushed at the start of the prologue and so we can corrupt
13069 it now. */
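/* Sketch of the resulting code (assuming AMOUNT == 1024 and that r4
   was pushed by this function's prologue): the movsi below becomes a
   literal-pool load, so the decrement expands to roughly

	ldr	r4, .LCx	@ .LCx holds -1024
	add	sp, sp, r4

   with the ip save/restore dance only in the unlikely case that no
   pushed low register could be found.  */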
13070 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13071 if (THUMB_REG_PUSHED_P (regno)
13072 && !(frame_pointer_needed
13073 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13074 break;
13075
13076 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13077 {
13078 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13079
13080 /* Choose an arbitrary, non-argument low register. */
13081 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13082
13083 /* Save it by copying it into a high, scratch register. */
13084 emit_insn (gen_movsi (spare, reg));
13085 /* Add a USE to stop propagate_one_insn() from barfing. */
13086 emit_insn (gen_prologue_use (spare));
13087
13088 /* Decrement the stack. */
13089 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13090 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13091 stack_pointer_rtx, reg));
13092 RTX_FRAME_RELATED_P (insn) = 1;
13093 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13094 plus_constant (stack_pointer_rtx,
13095 GEN_INT (- amount)));
13096 RTX_FRAME_RELATED_P (dwarf) = 1;
13097 REG_NOTES (insn)
13098 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13099 REG_NOTES (insn));
13100
13101 /* Restore the low register's original value. */
13102 emit_insn (gen_movsi (reg, spare));
13103
13104 /* Emit a USE of the restored scratch register, so that flow
13105 analysis will not consider the restore redundant. The
13106 register won't be used again in this function and isn't
13107 restored by the epilogue. */
13108 emit_insn (gen_prologue_use (reg));
13109 }
13110 else
13111 {
13112 reg = gen_rtx_REG (SImode, regno);
13113
13114 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13115
13116 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13117 stack_pointer_rtx, reg));
13118 RTX_FRAME_RELATED_P (insn) = 1;
13119 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13120 plus_constant (stack_pointer_rtx,
13121 GEN_INT (- amount)));
13122 RTX_FRAME_RELATED_P (dwarf) = 1;
13123 REG_NOTES (insn)
13124 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13125 REG_NOTES (insn));
13126 }
13127 }
13128 }
13129
13130 if (current_function_profile || TARGET_NO_SCHED_PRO)
13131 emit_insn (gen_blockage ());
13132 }
13133
13134 void
13135 thumb_expand_epilogue (void)
13136 {
13137 HOST_WIDE_INT amount = (thumb_get_frame_size ()
13138 + current_function_outgoing_args_size);
13139 int regno;
13140
13141 /* Naked functions don't have epilogues. */
13142 if (IS_NAKED (arm_current_func_type ()))
13143 return;
13144
13145 if (frame_pointer_needed)
13146 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13147 else if (amount)
13148 {
13149 amount = ROUND_UP_WORD (amount);
13150
13151 if (amount < 512)
13152 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13153 GEN_INT (amount)));
13154 else
13155 {
13156 /* r3 is always free in the epilogue. */
13157 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13158
13159 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13160 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13161 }
13162 }
13163
13164 /* Emit a USE (stack_pointer_rtx), so that
13165 the stack adjustment will not be deleted. */
13166 emit_insn (gen_prologue_use (stack_pointer_rtx));
13167
13168 if (current_function_profile || TARGET_NO_SCHED_PRO)
13169 emit_insn (gen_blockage ());
13170
13171 /* Emit a clobber for each insn that will be restored in the epilogue,
13172 so that flow2 will get register lifetimes correct. */
13173 for (regno = 0; regno < 13; regno++)
13174 if (regs_ever_live[regno] && !call_used_regs[regno])
13175 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13176
13177 if (! regs_ever_live[LR_REGNUM])
13178 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13179 }
13180
13181 static void
13182 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13183 {
13184 int live_regs_mask = 0;
13185 int high_regs_pushed = 0;
13186 int cfa_offset = 0;
13187 int regno;
13188
13189 if (IS_NAKED (arm_current_func_type ()))
13190 return;
13191
13192 if (is_called_in_ARM_mode (current_function_decl))
13193 {
13194 const char * name;
13195
13196 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13197 abort ();
13198 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13199 abort ();
13200 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13201
13202 /* Generate code sequence to switch us into Thumb mode. */
13203 /* The .code 32 directive has already been emitted by
13204 ASM_DECLARE_FUNCTION_NAME. */
13205 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13206 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13207
13208 /* Generate a label, so that the debugger will notice the
13209 change in instruction sets. This label is also used by
13210 the assembler to bypass the ARM code when this function
13211 is called from a Thumb encoded function elsewhere in the
13212 same file. Hence the definition of STUB_NAME here must
13213 agree with the definition in gas/config/tc-arm.c. */
13214
13215 #define STUB_NAME ".real_start_of"
13216
13217 fprintf (f, "\t.code\t16\n");
13218 #ifdef ARM_PE
13219 if (arm_dllexport_name_p (name))
13220 name = arm_strip_name_encoding (name);
13221 #endif
13222 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13223 fprintf (f, "\t.thumb_func\n");
13224 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13225 }
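/* For a hypothetical -mthumb function `foo' that is also called from
   ARM code, the entry sequence emitted above looks roughly like:

	orr	ip, pc, #1
	bx	ip
	.code	16
	.globl	.real_start_of<foo>
	.thumb_func
   .real_start_of<foo>:

   where <foo> stands for the (possibly prefix-encoded) assembler name.  */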
13226
13227 if (current_function_pretend_args_size)
13228 {
13229 if (cfun->machine->uses_anonymous_args)
13230 {
13231 int num_pushes;
13232
13233 fprintf (f, "\tpush\t{");
13234
13235 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13236
13237 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13238 regno <= LAST_ARG_REGNUM;
13239 regno++)
13240 asm_fprintf (f, "%r%s", regno,
13241 regno == LAST_ARG_REGNUM ? "" : ", ");
13242
13243 fprintf (f, "}\n");
13244 }
13245 else
13246 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13247 SP_REGNUM, SP_REGNUM,
13248 current_function_pretend_args_size);
13249
13250 /* We don't need to record the stores for unwinding (would it
13251 help the debugger any if we did?), but record the change in
13252 the stack pointer. */
13253 if (dwarf2out_do_frame ())
13254 {
13255 char *l = dwarf2out_cfi_label ();
13256 cfa_offset = cfa_offset + current_function_pretend_args_size;
13257 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13258 }
13259 }
13260
13261 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13262 if (THUMB_REG_PUSHED_P (regno))
13263 live_regs_mask |= 1 << regno;
13264
13265 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
13266 live_regs_mask |= 1 << LR_REGNUM;
13267
13268 if (TARGET_BACKTRACE)
13269 {
13270 int offset;
13271 int work_register = 0;
13272 int wr;
13273
13274 /* We have been asked to create a stack backtrace structure.
13275 The code looks like this:
13276
13277 0 .align 2
13278 0 func:
13279 0 sub SP, #16 Reserve space for 4 registers.
13280 2 push {R7} Get a work register.
13281 4 add R7, SP, #20 Get the stack pointer before the push.
13282 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13283 8 mov R7, PC Get hold of the start of this code plus 12.
13284 10 str R7, [SP, #16] Store it.
13285 12 mov R7, FP Get hold of the current frame pointer.
13286 14 str R7, [SP, #4] Store it.
13287 16 mov R7, LR Get hold of the current return address.
13288 18 str R7, [SP, #12] Store it.
13289 20 add R7, SP, #16 Point at the start of the backtrace structure.
13290 22 mov FP, R7 Put this value into the frame pointer. */
13291
13292 if ((live_regs_mask & 0xFF) == 0)
13293 {
13294 /* See if the a4 register is free. */
13295
13296 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
13297 work_register = LAST_ARG_REGNUM;
13298 else /* We must push a register of our own. */
13299 live_regs_mask |= (1 << LAST_LO_REGNUM);
13300 }
13301
13302 if (work_register == 0)
13303 {
13304 /* Select a register from the list that will be pushed to
13305 use as our work register. */
13306 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
13307 if ((1 << work_register) & live_regs_mask)
13308 break;
13309 }
13310
13311 asm_fprintf
13312 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13313 SP_REGNUM, SP_REGNUM);
13314
13315 if (dwarf2out_do_frame ())
13316 {
13317 char *l = dwarf2out_cfi_label ();
13318 cfa_offset = cfa_offset + 16;
13319 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13320 }
13321
13322 if (live_regs_mask)
13323 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13324
13325 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
13326 if (wr & live_regs_mask)
13327 offset += 4;
13328
13329 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13330 offset + 16 + current_function_pretend_args_size);
13331
13332 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13333 offset + 4);
13334
13335 /* Make sure that the instruction fetching the PC is in the right place
13336 to calculate "start of backtrace creation code + 12". */
13337 if (live_regs_mask)
13338 {
13339 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13340 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13341 offset + 12);
13342 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13343 ARM_HARD_FRAME_POINTER_REGNUM);
13344 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13345 offset);
13346 }
13347 else
13348 {
13349 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13350 ARM_HARD_FRAME_POINTER_REGNUM);
13351 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13352 offset);
13353 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13354 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13355 offset + 12);
13356 }
13357
13358 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13359 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13360 offset + 8);
13361 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13362 offset + 12);
13363 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13364 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13365 }
13366 else if (live_regs_mask)
13367 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13368
13369 for (regno = 8; regno < 13; regno++)
13370 if (THUMB_REG_PUSHED_P (regno))
13371 high_regs_pushed++;
13372
13373 if (high_regs_pushed)
13374 {
13375 int pushable_regs = 0;
13376 int mask = live_regs_mask & 0xff;
13377 int next_hi_reg;
13378
13379 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13380 if (THUMB_REG_PUSHED_P (next_hi_reg))
13381 break;
13382
13383 pushable_regs = mask;
13384
13385 if (pushable_regs == 0)
13386 {
13387 /* Desperation time -- this probably will never happen. */
13388 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
13389 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13390 mask = 1 << LAST_ARG_REGNUM;
13391 }
13392
13393 while (high_regs_pushed > 0)
13394 {
13395 int real_regs_mask = 0;
13396
13397 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13398 {
13399 if (mask & (1 << regno))
13400 {
13401 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13402
13403 high_regs_pushed--;
13404 real_regs_mask |= (1 << next_hi_reg);
13405
13406 if (high_regs_pushed)
13407 {
13408 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13409 next_hi_reg--)
13410 if (THUMB_REG_PUSHED_P (next_hi_reg))
13411 break;
13412 }
13413 else
13414 {
13415 mask &= ~((1 << regno) - 1);
13416 break;
13417 }
13418 }
13419 }
13420
13421 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
13422 }
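/* For illustration (hypothetical register assignment): if r8 and r9
   need saving and only r3 is available in MASK, the loop above emits
   roughly:

	mov	r3, r9
	push	{r3}
	mov	r3, r8
	push	{r3}  */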
13423
13424 if (pushable_regs == 0
13425 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
13426 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13427 }
13428 }
13429
13430 /* Handle the case of a double word load into a low register from
13431 a computed memory address. The computed address may involve a
13432 register which is overwritten by the load. */
13433 const char *
13434 thumb_load_double_from_address (rtx *operands)
13435 {
13436 rtx addr;
13437 rtx base;
13438 rtx offset;
13439 rtx arg1;
13440 rtx arg2;
13441
13442 if (GET_CODE (operands[0]) != REG)
13443 abort ();
13444
13445 if (GET_CODE (operands[1]) != MEM)
13446 abort ();
13447
13448 /* Get the memory address. */
13449 addr = XEXP (operands[1], 0);
13450
13451 /* Work out how the memory address is computed. */
13452 switch (GET_CODE (addr))
13453 {
13454 case REG:
13455 operands[2] = gen_rtx_MEM (SImode,
13456 plus_constant (XEXP (operands[1], 0), 4));
13457
13458 if (REGNO (operands[0]) == REGNO (addr))
13459 {
13460 output_asm_insn ("ldr\t%H0, %2", operands);
13461 output_asm_insn ("ldr\t%0, %1", operands);
13462 }
13463 else
13464 {
13465 output_asm_insn ("ldr\t%0, %1", operands);
13466 output_asm_insn ("ldr\t%H0, %2", operands);
13467 }
13468 break;
13469
13470 case CONST:
13471 /* Compute <address> + 4 for the high order load. */
13472 operands[2] = gen_rtx_MEM (SImode,
13473 plus_constant (XEXP (operands[1], 0), 4));
13474
13475 output_asm_insn ("ldr\t%0, %1", operands);
13476 output_asm_insn ("ldr\t%H0, %2", operands);
13477 break;
13478
13479 case PLUS:
13480 arg1 = XEXP (addr, 0);
13481 arg2 = XEXP (addr, 1);
13482
13483 if (CONSTANT_P (arg1))
13484 base = arg2, offset = arg1;
13485 else
13486 base = arg1, offset = arg2;
13487
13488 if (GET_CODE (base) != REG)
13489 abort ();
13490
13491 /* Catch the case of <address> = <reg> + <reg>.  */
13492 if (GET_CODE (offset) == REG)
13493 {
13494 int reg_offset = REGNO (offset);
13495 int reg_base = REGNO (base);
13496 int reg_dest = REGNO (operands[0]);
13497
13498 /* Add the base and offset registers together into the
13499 higher destination register. */
13500 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13501 reg_dest + 1, reg_base, reg_offset);
13502
13503 /* Load the lower destination register from the address in
13504 the higher destination register. */
13505 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13506 reg_dest, reg_dest + 1);
13507
13508 /* Load the higher destination register from its own address
13509 plus 4. */
13510 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13511 reg_dest + 1, reg_dest + 1);
13512 }
13513 else
13514 {
13515 /* Compute <address> + 4 for the high order load. */
13516 operands[2] = gen_rtx_MEM (SImode,
13517 plus_constant (XEXP (operands[1], 0), 4));
13518
13519 /* If the computed address is held in the low order register
13520 then load the high order register first, otherwise always
13521 load the low order register first. */
13522 if (REGNO (operands[0]) == REGNO (base))
13523 {
13524 output_asm_insn ("ldr\t%H0, %2", operands);
13525 output_asm_insn ("ldr\t%0, %1", operands);
13526 }
13527 else
13528 {
13529 output_asm_insn ("ldr\t%0, %1", operands);
13530 output_asm_insn ("ldr\t%H0, %2", operands);
13531 }
13532 }
13533 break;
13534
13535 case LABEL_REF:
13536 /* With no registers to worry about we can just load the value
13537 directly. */
13538 operands[2] = gen_rtx_MEM (SImode,
13539 plus_constant (XEXP (operands[1], 0), 4));
13540
13541 output_asm_insn ("ldr\t%H0, %2", operands);
13542 output_asm_insn ("ldr\t%0, %1", operands);
13543 break;
13544
13545 default:
13546 abort ();
13547 break;
13548 }
13549
13550 return "";
13551 }
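/* Sketch of the overwrite case handled above for a plain REG address:
   loading the pair r2/r3 from [r2] must fetch the high word first,
   e.g.

	ldr	r3, [r2, #4]
	ldr	r2, [r2]

   so that the base register is not clobbered before its final use.  */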
13552
13553 const char *
13554 thumb_output_move_mem_multiple (int n, rtx *operands)
13555 {
13556 rtx tmp;
13557
13558 switch (n)
13559 {
13560 case 2:
13561 if (REGNO (operands[4]) > REGNO (operands[5]))
13562 {
13563 tmp = operands[4];
13564 operands[4] = operands[5];
13565 operands[5] = tmp;
13566 }
13567 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13568 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13569 break;
13570
13571 case 3:
13572 if (REGNO (operands[4]) > REGNO (operands[5]))
13573 {
13574 tmp = operands[4];
13575 operands[4] = operands[5];
13576 operands[5] = tmp;
13577 }
13578 if (REGNO (operands[5]) > REGNO (operands[6]))
13579 {
13580 tmp = operands[5];
13581 operands[5] = operands[6];
13582 operands[6] = tmp;
13583 }
13584 if (REGNO (operands[4]) > REGNO (operands[5]))
13585 {
13586 tmp = operands[4];
13587 operands[4] = operands[5];
13588 operands[5] = tmp;
13589 }
13590
13591 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13592 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13593 break;
13594
13595 default:
13596 abort ();
13597 }
13598
13599 return "";
13600 }
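/* The pairwise swaps above sort the scratch registers into ascending
   order: ldmia/stmia transfer the lowest-numbered register to/from the
   lowest address, and the assembler expects the register list to be
   written in ascending order, e.g. {r2, r4, r5} rather than
   {r5, r2, r4}.  */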
13601
13602 /* Routines for generating rtl. */
13603 void
13604 thumb_expand_movstrqi (rtx *operands)
13605 {
13606 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13607 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13608 HOST_WIDE_INT len = INTVAL (operands[2]);
13609 HOST_WIDE_INT offset = 0;
13610
13611 while (len >= 12)
13612 {
13613 emit_insn (gen_movmem12b (out, in, out, in));
13614 len -= 12;
13615 }
13616
13617 if (len >= 8)
13618 {
13619 emit_insn (gen_movmem8b (out, in, out, in));
13620 len -= 8;
13621 }
13622
13623 if (len >= 4)
13624 {
13625 rtx reg = gen_reg_rtx (SImode);
13626 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13627 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13628 len -= 4;
13629 offset += 4;
13630 }
13631
13632 if (len >= 2)
13633 {
13634 rtx reg = gen_reg_rtx (HImode);
13635 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13636 plus_constant (in, offset))));
13637 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13638 reg));
13639 len -= 2;
13640 offset += 2;
13641 }
13642
13643 if (len)
13644 {
13645 rtx reg = gen_reg_rtx (QImode);
13646 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13647 plus_constant (in, offset))));
13648 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13649 reg));
13650 }
13651 }
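/* Worked example: a 23-byte copy is expanded as one 12-byte block
   move, one 8-byte block move, then a halfword and a byte for the
   remaining 3 bytes.  The 12- and 8-byte patterns update IN and OUT
   themselves, so OFFSET only has to track the sub-word tail.  */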
13652
13653 int
13654 thumb_cmp_operand (rtx op, enum machine_mode mode)
13655 {
13656 return ((GET_CODE (op) == CONST_INT
13657 && INTVAL (op) < 256
13658 && INTVAL (op) >= 0)
13659 || s_register_operand (op, mode));
13660 }
13661
13662 int
13663 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
13664 {
13665 return (GET_CODE (op) == CONST_INT
13666 && INTVAL (op) < 0
13667 && INTVAL (op) > -256);
13668 }
13669
13670 /* Return TRUE if a result can be stored in OP without clobbering the
13671 condition code register. Prior to reload we only accept a
13672 register. After reload we have to be able to handle memory as
13673 well, since a pseudo may not get a hard reg and reload cannot
13674 handle output-reloads on jump insns.
13675
13676 We could possibly handle mem before reload as well, but that might
13677 complicate things with the need to handle increment
13678 side-effects. */
13679
13680 int
13681 thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
13682 {
13683 return (s_register_operand (op, mode)
13684 || ((reload_in_progress || reload_completed)
13685 && memory_operand (op, mode)));
13686 }
13687
13688 /* Handle storing a half-word to memory during reload. */
13689 void
13690 thumb_reload_out_hi (rtx *operands)
13691 {
13692 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13693 }
13694
13695 /* Handle reading a half-word from memory during reload. */
13696 void
13697 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13698 {
13699 abort ();
13700 }
13701
13702 /* Return the length of a function name prefix
13703 that starts with the character 'c'. */
13704 static int
13705 arm_get_strip_length (int c)
13706 {
13707 switch (c)
13708 {
13709 ARM_NAME_ENCODING_LENGTHS
13710 default: return 0;
13711 }
13712 }
13713
13714 /* Return a pointer to a function's name with any
13715 and all prefix encodings stripped from it. */
13716 const char *
13717 arm_strip_name_encoding (const char *name)
13718 {
13719 int skip;
13720
13721 while ((skip = arm_get_strip_length (* name)))
13722 name += skip;
13723
13724 return name;
13725 }
13726
13727 /* If there is a '*' anywhere in the name's prefix, then
13728 emit the stripped name verbatim, otherwise prepend an
13729 underscore if leading underscores are being used. */
13730 void
13731 arm_asm_output_labelref (FILE *stream, const char *name)
13732 {
13733 int skip;
13734 int verbatim = 0;
13735
13736 while ((skip = arm_get_strip_length (* name)))
13737 {
13738 verbatim |= (*name == '*');
13739 name += skip;
13740 }
13741
13742 if (verbatim)
13743 fputs (name, stream);
13744 else
13745 asm_fprintf (stream, "%U%s", name);
13746 }
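/* For example, an encoded name such as "*foo" is emitted verbatim as
   "foo" once the prefix characters have been stripped, whereas an
   ordinary name goes through %U and so picks up the target's user
   label prefix (e.g. "foo" -> "_foo" on targets that use a leading
   underscore).  */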
13747
13748 rtx aof_pic_label;
13749
13750 #ifdef AOF_ASSEMBLER
13751 /* Special functions only needed when producing AOF syntax assembler. */
13752
13753 struct pic_chain
13754 {
13755 struct pic_chain * next;
13756 const char * symname;
13757 };
13758
13759 static struct pic_chain * aof_pic_chain = NULL;
13760
13761 rtx
13762 aof_pic_entry (rtx x)
13763 {
13764 struct pic_chain ** chainp;
13765 int offset;
13766
13767 if (aof_pic_label == NULL_RTX)
13768 {
13769 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13770 }
13771
13772 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13773 offset += 4, chainp = &(*chainp)->next)
13774 if ((*chainp)->symname == XSTR (x, 0))
13775 return plus_constant (aof_pic_label, offset);
13776
13777 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13778 (*chainp)->next = NULL;
13779 (*chainp)->symname = XSTR (x, 0);
13780 return plus_constant (aof_pic_label, offset);
13781 }
13782
13783 void
13784 aof_dump_pic_table (FILE *f)
13785 {
13786 struct pic_chain * chain;
13787
13788 if (aof_pic_chain == NULL)
13789 return;
13790
13791 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13792 PIC_OFFSET_TABLE_REGNUM,
13793 PIC_OFFSET_TABLE_REGNUM);
13794 fputs ("|x$adcons|\n", f);
13795
13796 for (chain = aof_pic_chain; chain; chain = chain->next)
13797 {
13798 fputs ("\tDCD\t", f);
13799 assemble_name (f, chain->symname);
13800 fputs ("\n", f);
13801 }
13802 }
13803
13804 int arm_text_section_count = 1;
13805
13806 char *
13807 aof_text_section (void)
13808 {
13809 static char buf[100];
13810 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13811 arm_text_section_count++);
13812 if (flag_pic)
13813 strcat (buf, ", PIC, REENTRANT");
13814 return buf;
13815 }
13816
13817 static int arm_data_section_count = 1;
13818
13819 char *
13820 aof_data_section (void)
13821 {
13822 static char buf[100];
13823 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13824 return buf;
13825 }
13826
13827 /* The AOF assembler is religiously strict about declarations of
13828 imported and exported symbols, so that it is impossible to declare
13829 a function as imported near the beginning of the file, and then to
13830 export it later on. It is, however, possible to delay the decision
13831 until all the functions in the file have been compiled. To get
13832 around this, we maintain a list of the imports and exports, and
13833 delete from it any that are subsequently defined. At the end of
13834 compilation we spit the remainder of the list out before the END
13835 directive. */
13836
13837 struct import
13838 {
13839 struct import * next;
13840 const char * name;
13841 };
13842
13843 static struct import * imports_list = NULL;
13844
13845 void
13846 aof_add_import (const char *name)
13847 {
13848 struct import * new;
13849
13850 for (new = imports_list; new; new = new->next)
13851 if (new->name == name)
13852 return;
13853
13854 new = (struct import *) xmalloc (sizeof (struct import));
13855 new->next = imports_list;
13856 imports_list = new;
13857 new->name = name;
13858 }
13859
13860 void
13861 aof_delete_import (const char *name)
13862 {
13863 struct import ** old;
13864
13865 for (old = &imports_list; *old; old = & (*old)->next)
13866 {
13867 if ((*old)->name == name)
13868 {
13869 *old = (*old)->next;
13870 return;
13871 }
13872 }
13873 }
13874
13875 int arm_main_function = 0;
13876
13877 static void
13878 aof_dump_imports (FILE *f)
13879 {
13880 /* The AOF assembler needs this to cause the startup code to be extracted
13881 from the library. Bringing in __main causes the whole thing to work
13882 automagically. */
13883 if (arm_main_function)
13884 {
13885 text_section ();
13886 fputs ("\tIMPORT __main\n", f);
13887 fputs ("\tDCD __main\n", f);
13888 }
13889
13890 /* Now dump the remaining imports. */
13891 while (imports_list)
13892 {
13893 fprintf (f, "\tIMPORT\t");
13894 assemble_name (f, imports_list->name);
13895 fputc ('\n', f);
13896 imports_list = imports_list->next;
13897 }
13898 }
13899
13900 static void
13901 aof_globalize_label (FILE *stream, const char *name)
13902 {
13903 default_globalize_label (stream, name);
13904 if (! strcmp (name, "main"))
13905 arm_main_function = 1;
13906 }
13907
13908 static void
13909 aof_file_start (void)
13910 {
13911 fputs ("__r0\tRN\t0\n", asm_out_file);
13912 fputs ("__a1\tRN\t0\n", asm_out_file);
13913 fputs ("__a2\tRN\t1\n", asm_out_file);
13914 fputs ("__a3\tRN\t2\n", asm_out_file);
13915 fputs ("__a4\tRN\t3\n", asm_out_file);
13916 fputs ("__v1\tRN\t4\n", asm_out_file);
13917 fputs ("__v2\tRN\t5\n", asm_out_file);
13918 fputs ("__v3\tRN\t6\n", asm_out_file);
13919 fputs ("__v4\tRN\t7\n", asm_out_file);
13920 fputs ("__v5\tRN\t8\n", asm_out_file);
13921 fputs ("__v6\tRN\t9\n", asm_out_file);
13922 fputs ("__sl\tRN\t10\n", asm_out_file);
13923 fputs ("__fp\tRN\t11\n", asm_out_file);
13924 fputs ("__ip\tRN\t12\n", asm_out_file);
13925 fputs ("__sp\tRN\t13\n", asm_out_file);
13926 fputs ("__lr\tRN\t14\n", asm_out_file);
13927 fputs ("__pc\tRN\t15\n", asm_out_file);
13928 fputs ("__f0\tFN\t0\n", asm_out_file);
13929 fputs ("__f1\tFN\t1\n", asm_out_file);
13930 fputs ("__f2\tFN\t2\n", asm_out_file);
13931 fputs ("__f3\tFN\t3\n", asm_out_file);
13932 fputs ("__f4\tFN\t4\n", asm_out_file);
13933 fputs ("__f5\tFN\t5\n", asm_out_file);
13934 fputs ("__f6\tFN\t6\n", asm_out_file);
13935 fputs ("__f7\tFN\t7\n", asm_out_file);
13936 text_section ();
13937 }
13938
13939 static void
13940 aof_file_end (void)
13941 {
13942 if (flag_pic)
13943 aof_dump_pic_table (asm_out_file);
13944 aof_dump_imports (asm_out_file);
13945 fputs ("\tEND\n", asm_out_file);
13946 }
13947 #endif /* AOF_ASSEMBLER */
13948
13949 #ifdef OBJECT_FORMAT_ELF
13950 /* Switch to an arbitrary section NAME with attributes as specified
13951 by FLAGS. ALIGN specifies any known alignment requirements for
13952 the section; 0 if the default should be used.
13953
13954 Differs from the default elf version only in the prefix character
13955 used before the section type. */
13956
13957 static void
13958 arm_elf_asm_named_section (const char *name, unsigned int flags)
13959 {
13960 char flagchars[10], *f = flagchars;
13961
13962 if (! named_section_first_declaration (name))
13963 {
13964 fprintf (asm_out_file, "\t.section\t%s\n", name);
13965 return;
13966 }
13967
13968 if (!(flags & SECTION_DEBUG))
13969 *f++ = 'a';
13970 if (flags & SECTION_WRITE)
13971 *f++ = 'w';
13972 if (flags & SECTION_CODE)
13973 *f++ = 'x';
13974 if (flags & SECTION_SMALL)
13975 *f++ = 's';
13976 if (flags & SECTION_MERGE)
13977 *f++ = 'M';
13978 if (flags & SECTION_STRINGS)
13979 *f++ = 'S';
13980 if (flags & SECTION_TLS)
13981 *f++ = 'T';
13982 *f = '\0';
13983
13984 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
13985
13986 if (!(flags & SECTION_NOTYPE))
13987 {
13988 const char *type;
13989
13990 if (flags & SECTION_BSS)
13991 type = "nobits";
13992 else
13993 type = "progbits";
13994
13995 fprintf (asm_out_file, ",%%%s", type);
13996
13997 if (flags & SECTION_ENTSIZE)
13998 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
13999 }
14000
14001 putc ('\n', asm_out_file);
14002 }
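/* Example output for a mergeable string section:

	.section .rodata.str1.4,"aMS",%progbits,4

   i.e. identical to the generic ELF handling except that the type is
   introduced with '%' rather than '@', because '@' starts a comment
   in ARM assembler syntax.  */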
14003 #endif
14004
14005 #ifndef ARM_PE
14006 /* Symbols in the text segment can be accessed without indirecting via the
14007 constant pool; it may take an extra binary operation, but this is still
14008 faster than indirecting via memory. Don't do this when not optimizing,
14009 since we won't be calculating all of the offsets necessary to do this
14010 simplification. */
14011
14012 static void
14013 arm_encode_section_info (tree decl, rtx rtl, int first)
14014 {
14015 /* This doesn't work with AOF syntax, since the string table may be in
14016 a different AREA. */
14017 #ifndef AOF_ASSEMBLER
14018 if (optimize > 0 && TREE_CONSTANT (decl))
14019 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14020 #endif
14021
14022 /* If we are referencing a function that is weak then encode a long call
14023 flag in the function name; otherwise, if the function is static
14024 or known to be defined in this file then encode a short call flag. */
14025 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
14026 {
14027 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14028 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14029 else if (! TREE_PUBLIC (decl))
14030 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14031 }
14032 }
14033 #endif /* !ARM_PE */
14034
14035 static void
14036 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14037 {
14038 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14039 && !strcmp (prefix, "L"))
14040 {
14041 arm_ccfsm_state = 0;
14042 arm_target_insn = NULL;
14043 }
14044 default_internal_label (stream, prefix, labelno);
14045 }
14046
14047 /* Output code to add DELTA to the first argument, and then jump
14048 to FUNCTION. Used for C++ multiple inheritance. */
14049 static void
14050 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14051 HOST_WIDE_INT delta,
14052 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14053 tree function)
14054 {
14055 static int thunk_label = 0;
14056 char label[256];
14057 int mi_delta = delta;
14058 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14059 int shift = 0;
14060 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14061 ? 1 : 0);
14062 if (mi_delta < 0)
14063 mi_delta = - mi_delta;
14064 if (TARGET_THUMB)
14065 {
14066 int labelno = thunk_label++;
14067 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14068 fputs ("\tldr\tr12, ", file);
14069 assemble_name (file, label);
14070 fputc ('\n', file);
14071 }
14072 while (mi_delta != 0)
14073 {
14074 if ((mi_delta & (3 << shift)) == 0)
14075 shift += 2;
14076 else
14077 {
14078 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14079 mi_op, this_regno, this_regno,
14080 mi_delta & (0xff << shift));
14081 mi_delta &= ~(0xff << shift);
14082 shift += 8;
14083 }
14084 }
14085 if (TARGET_THUMB)
14086 {
14087 fprintf (file, "\tbx\tr12\n");
14088 ASM_OUTPUT_ALIGN (file, 2);
14089 assemble_name (file, label);
14090 fputs (":\n", file);
14091 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14092 }
14093 else
14094 {
14095 fputs ("\tb\t", file);
14096 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14097 if (NEED_PLT_RELOC)
14098 fputs ("(PLT)", file);
14099 fputc ('\n', file);
14100 }
14101 }
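/* Worked example (ARM mode, non-aggregate return, DELTA == 4): the
   thunk body reduces to

	add	r0, r0, #4
	b	<function>

   (with "(PLT)" appended when NEED_PLT_RELOC).  In Thumb mode the same
   adds are emitted after loading r12 from a literal word placed after
   the code, and the jump is done with "bx r12".  */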
14102
14103 int
14104 arm_emit_vector_const (FILE *file, rtx x)
14105 {
14106 int i;
14107 const char * pattern;
14108
14109 if (GET_CODE (x) != CONST_VECTOR)
14110 abort ();
14111
14112 switch (GET_MODE (x))
14113 {
14114 case V2SImode: pattern = "%08x"; break;
14115 case V4HImode: pattern = "%04x"; break;
14116 case V8QImode: pattern = "%02x"; break;
14117 default: abort ();
14118 }
14119
14120 fprintf (file, "0x");
14121 for (i = CONST_VECTOR_NUNITS (x); i--;)
14122 {
14123 rtx element;
14124
14125 element = CONST_VECTOR_ELT (x, i);
14126 fprintf (file, pattern, INTVAL (element));
14127 }
14128
14129 return 1;
14130 }
14131
14132 const char *
14133 arm_output_load_gr (rtx *operands)
14134 {
14135 rtx reg;
14136 rtx offset;
14137 rtx wcgr;
14138 rtx sum;
14139
14140 if (GET_CODE (operands [1]) != MEM
14141 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14142 || GET_CODE (reg = XEXP (sum, 0)) != REG
14143 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14144 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14145 return "wldrw%?\t%0, %1";
14146
14147 /* Fix up an out-of-range load of a GR register. */
14148 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14149 wcgr = operands[0];
14150 operands[0] = reg;
14151 output_asm_insn ("ldr%?\t%0, %1", operands);
14152
14153 operands[0] = wcgr;
14154 operands[1] = reg;
14155 output_asm_insn ("tmcr%?\t%0, %1", operands);
14156 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14157
14158 return "";
14159 }
14160
14161 static rtx
14162 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14163 int incoming ATTRIBUTE_UNUSED)
14164 {
14165 #if 0
14166 /* FIXME: The ARM backend has special code to handle structure
14167 returns, and will reserve its own hidden first argument. So
14168 if this macro is enabled a *second* hidden argument will be
14169 reserved, which will break binary compatibility with old
14170 toolchains and also thunk handling. One day this should be
14171 fixed. */
14172 return 0;
14173 #else
14174 /* Register in which address to store a structure value
14175 is passed to a function. */
14176 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14177 #endif
14178 }
14179
14180 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14181
14182 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14183 named arg and all anonymous args onto the stack.
14184 XXX I know the prologue shouldn't be pushing registers, but it is faster
14185 that way. */
14186
14187 static void
14188 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14189 enum machine_mode mode ATTRIBUTE_UNUSED,
14190 tree type ATTRIBUTE_UNUSED,
14191 int *pretend_size,
14192 int second_time ATTRIBUTE_UNUSED)
14193 {
14194 cfun->machine->uses_anonymous_args = 1;
14195 if (cum->nregs < NUM_ARG_REGS)
14196 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14197 }
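/* For instance, a variadic function whose named arguments occupy only
   r0 (cum->nregs == 1) gets *pretend_size == 3 * UNITS_PER_WORD == 12,
   so the prologue pushes r1-r3, making the anonymous arguments
   contiguous with any arguments already on the stack.  */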
14198
14199 /* Return nonzero if the CONSUMER instruction (a store) does not need
14200 PRODUCER's value to calculate the address. */
14201
14202 int
14203 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14204 {
14205 rtx value = PATTERN (producer);
14206 rtx addr = PATTERN (consumer);
14207
14208 if (GET_CODE (value) == COND_EXEC)
14209 value = COND_EXEC_CODE (value);
14210 if (GET_CODE (value) == PARALLEL)
14211 value = XVECEXP (value, 0, 0);
14212 value = XEXP (value, 0);
14213 if (GET_CODE (addr) == COND_EXEC)
14214 addr = COND_EXEC_CODE (addr);
14215 if (GET_CODE (addr) == PARALLEL)
14216 addr = XVECEXP (addr, 0, 0);
14217 addr = XEXP (addr, 0);
14218
14219 return !reg_overlap_mentioned_p (value, addr);
14220 }
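/* Example: with PRODUCER "add r1, r2, r3" and CONSUMER
   "str r1, [r4, #8]", only the stored value (not the address) depends
   on the producer's result, so this returns nonzero.  */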
14221
14222 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14223 have an early register shift value or amount dependency on the
14224 result of PRODUCER. */
14225
14226 int
14227 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14228 {
14229 rtx value = PATTERN (producer);
14230 rtx op = PATTERN (consumer);
14231 rtx early_op;
14232
14233 if (GET_CODE (value) == COND_EXEC)
14234 value = COND_EXEC_CODE (value);
14235 if (GET_CODE (value) == PARALLEL)
14236 value = XVECEXP (value, 0, 0);
14237 value = XEXP (value, 0);
14238 if (GET_CODE (op) == COND_EXEC)
14239 op = COND_EXEC_CODE (op);
14240 if (GET_CODE (op) == PARALLEL)
14241 op = XVECEXP (op, 0, 0);
14242 op = XEXP (op, 1);
14243
14244 early_op = XEXP (op, 0);
14245 /* This is either an actual independent shift, or a shift applied to
14246 the first operand of another operation. We want the whole shift
14247 operation. */
14248 if (GET_CODE (early_op) == REG)
14249 early_op = op;
14250
14251 return !reg_overlap_mentioned_p (value, early_op);
14252 }
14253
14254 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14255 have an early register shift value dependency on the result of
14256 PRODUCER. */
14257
14258 int
14259 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14260 {
14261 rtx value = PATTERN (producer);
14262 rtx op = PATTERN (consumer);
14263 rtx early_op;
14264
14265 if (GET_CODE (value) == COND_EXEC)
14266 value = COND_EXEC_CODE (value);
14267 if (GET_CODE (value) == PARALLEL)
14268 value = XVECEXP (value, 0, 0);
14269 value = XEXP (value, 0);
14270 if (GET_CODE (op) == COND_EXEC)
14271 op = COND_EXEC_CODE (op);
14272 if (GET_CODE (op) == PARALLEL)
14273 op = XVECEXP (op, 0, 0);
14274 op = XEXP (op, 1);
14275
14276 early_op = XEXP (op, 0);
14277
14278 /* This is either an actual independent shift, or a shift applied to
14279 the first operand of another operation. We want the value being
14280 shifted, in either case. */
14281 if (GET_CODE (early_op) != REG)
14282 early_op = XEXP (early_op, 0);
14283
14284 return !reg_overlap_mentioned_p (value, early_op);
14285 }
14286
14287 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14288 have an early register mult dependency on the result of
14289 PRODUCER. */
14290
14291 int
14292 arm_no_early_mul_dep (rtx producer, rtx consumer)
14293 {
14294 rtx value = PATTERN (producer);
14295 rtx op = PATTERN (consumer);
14296
14297 if (GET_CODE (value) == COND_EXEC)
14298 value = COND_EXEC_CODE (value);
14299 if (GET_CODE (value) == PARALLEL)
14300 value = XVECEXP (value, 0, 0);
14301 value = XEXP (value, 0);
14302 if (GET_CODE (op) == COND_EXEC)
14303 op = COND_EXEC_CODE (op);
14304 if (GET_CODE (op) == PARALLEL)
14305 op = XVECEXP (op, 0, 0);
14306 op = XEXP (op, 1);
14307
14308 return (GET_CODE (op) == PLUS
14309 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14310 }
14311