1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
23 Boston, MA 02110-1301, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
55 #include "df.h"
56
57 /* Forward definitions of types. */
58 typedef struct minipool_node Mnode;
59 typedef struct minipool_fixup Mfix;
60
61 const struct attribute_spec arm_attribute_table[];
62
63 /* Forward function declarations. */
64 static arm_stack_offsets *arm_get_frame_offsets (void);
65 static void arm_add_gc_roots (void);
66 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
67 HOST_WIDE_INT, rtx, rtx, int, int);
68 static unsigned bit_count (unsigned long);
69 static int arm_address_register_rtx_p (rtx, int);
70 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
71 static int thumb2_legitimate_index_p (enum machine_mode, rtx, int);
72 static int thumb1_base_register_rtx_p (rtx, enum machine_mode, int);
73 inline static int thumb1_index_register_rtx_p (rtx, int);
74 static int thumb_far_jump_used_p (void);
75 static bool thumb_force_lr_save (void);
76 static unsigned long thumb1_compute_save_reg_mask (void);
77 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
78 static rtx emit_sfm (int, int);
79 static int arm_size_return_regs (void);
80 #ifndef AOF_ASSEMBLER
81 static bool arm_assemble_integer (rtx, unsigned int, int);
82 #endif
83 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
84 static arm_cc get_arm_condition_code (rtx);
85 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
86 static rtx is_jump_table (rtx);
87 static const char *output_multi_immediate (rtx *, const char *, const char *,
88 int, HOST_WIDE_INT);
89 static const char *shift_op (rtx, HOST_WIDE_INT *);
90 static struct machine_function *arm_init_machine_status (void);
91 static void thumb_exit (FILE *, int);
92 static rtx is_jump_table (rtx);
93 static HOST_WIDE_INT get_jump_table_size (rtx);
94 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
95 static Mnode *add_minipool_forward_ref (Mfix *);
96 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
97 static Mnode *add_minipool_backward_ref (Mfix *);
98 static void assign_minipool_offsets (Mfix *);
99 static void arm_print_value (FILE *, rtx);
100 static void dump_minipool (rtx);
101 static int arm_barrier_cost (rtx);
102 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
103 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
104 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
105 rtx);
106 static void arm_reorg (void);
107 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
108 static unsigned long arm_compute_save_reg0_reg12_mask (void);
109 static unsigned long arm_compute_save_reg_mask (void);
110 static unsigned long arm_isr_value (tree);
111 static unsigned long arm_compute_func_type (void);
112 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
113 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
114 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
115 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
116 #endif
117 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
118 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
119 static void thumb1_output_function_prologue (FILE *, HOST_WIDE_INT);
120 static int arm_comp_type_attributes (tree, tree);
121 static void arm_set_default_type_attributes (tree);
122 static int arm_adjust_cost (rtx, rtx, rtx, int);
123 static int count_insns_for_constant (HOST_WIDE_INT, int);
124 static int arm_get_strip_length (int);
125 static bool arm_function_ok_for_sibcall (tree, tree);
126 static void arm_internal_label (FILE *, const char *, unsigned long);
127 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
128 tree);
129 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
130 static bool arm_size_rtx_costs (rtx, int, int, int *);
131 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
132 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
133 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
134 static bool arm_9e_rtx_costs (rtx, int, int, int *);
135 static int arm_address_cost (rtx);
136 static bool arm_memory_load_p (rtx);
137 static bool arm_cirrus_insn_p (rtx);
138 static void cirrus_reorg (rtx);
139 static void arm_init_builtins (void);
140 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
141 static void arm_init_iwmmxt_builtins (void);
142 static rtx safe_vector_operand (rtx, enum machine_mode);
143 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
144 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
145 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
146 static void emit_constant_insn (rtx cond, rtx pattern);
147 static rtx emit_set_insn (rtx, rtx);
148 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
149 tree, bool);
150
151 #ifdef OBJECT_FORMAT_ELF
152 static void arm_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
153 static void arm_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
154 #endif
155 #ifndef ARM_PE
156 static void arm_encode_section_info (tree, rtx, int);
157 #endif
158
159 static void arm_file_end (void);
160 static void arm_file_start (void);
161
162 #ifdef AOF_ASSEMBLER
163 static void aof_globalize_label (FILE *, const char *);
164 static void aof_dump_imports (FILE *);
165 static void aof_dump_pic_table (FILE *);
166 static void aof_file_start (void);
167 static void aof_file_end (void);
168 static void aof_asm_init_sections (void);
169 #endif
170 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
171 tree, int *, int);
172 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
173 enum machine_mode, tree, bool);
174 static bool arm_promote_prototypes (tree);
175 static bool arm_default_short_enums (void);
176 static bool arm_align_anon_bitfield (void);
177 static bool arm_return_in_msb (tree);
178 static bool arm_must_pass_in_stack (enum machine_mode, tree);
179 #ifdef TARGET_UNWIND_INFO
180 static void arm_unwind_emit (FILE *, rtx);
181 static bool arm_output_ttype (rtx);
182 #endif
183 static void arm_dwarf_handle_frame_unspec (const char *, rtx, int);
184
185 static tree arm_cxx_guard_type (void);
186 static bool arm_cxx_guard_mask_bit (void);
187 static tree arm_get_cookie_size (tree);
188 static bool arm_cookie_has_size (void);
189 static bool arm_cxx_cdtor_returns_this (void);
190 static bool arm_cxx_key_method_may_be_inline (void);
191 static void arm_cxx_determine_class_data_visibility (tree);
192 static bool arm_cxx_class_data_always_comdat (void);
193 static bool arm_cxx_use_aeabi_atexit (void);
194 static void arm_init_libfuncs (void);
195 static bool arm_handle_option (size_t, const char *, int);
196 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
197 static bool arm_cannot_copy_insn_p (rtx);
198 static bool arm_tls_symbol_p (rtx x);
199 static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
200
201 \f
202 /* Initialize the GCC target structure. */
203 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
204 #undef TARGET_MERGE_DECL_ATTRIBUTES
205 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
206 #endif
207
208 #undef TARGET_ATTRIBUTE_TABLE
209 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
210
211 #undef TARGET_ASM_FILE_START
212 #define TARGET_ASM_FILE_START arm_file_start
213 #undef TARGET_ASM_FILE_END
214 #define TARGET_ASM_FILE_END arm_file_end
215
216 #ifdef AOF_ASSEMBLER
217 #undef TARGET_ASM_BYTE_OP
218 #define TARGET_ASM_BYTE_OP "\tDCB\t"
219 #undef TARGET_ASM_ALIGNED_HI_OP
220 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
221 #undef TARGET_ASM_ALIGNED_SI_OP
222 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
223 #undef TARGET_ASM_GLOBALIZE_LABEL
224 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
225 #undef TARGET_ASM_FILE_START
226 #define TARGET_ASM_FILE_START aof_file_start
227 #undef TARGET_ASM_FILE_END
228 #define TARGET_ASM_FILE_END aof_file_end
229 #else
230 #undef TARGET_ASM_ALIGNED_SI_OP
231 #define TARGET_ASM_ALIGNED_SI_OP NULL
232 #undef TARGET_ASM_INTEGER
233 #define TARGET_ASM_INTEGER arm_assemble_integer
234 #endif
235
236 #undef TARGET_ASM_FUNCTION_PROLOGUE
237 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
238
239 #undef TARGET_ASM_FUNCTION_EPILOGUE
240 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
241
242 #undef TARGET_DEFAULT_TARGET_FLAGS
243 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
244 #undef TARGET_HANDLE_OPTION
245 #define TARGET_HANDLE_OPTION arm_handle_option
246
247 #undef TARGET_COMP_TYPE_ATTRIBUTES
248 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
249
250 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
251 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
252
253 #undef TARGET_SCHED_ADJUST_COST
254 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
255
256 #undef TARGET_ENCODE_SECTION_INFO
257 #ifdef ARM_PE
258 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
259 #else
260 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
261 #endif
262
263 #undef TARGET_STRIP_NAME_ENCODING
264 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
265
266 #undef TARGET_ASM_INTERNAL_LABEL
267 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
268
269 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
270 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
271
272 #undef TARGET_ASM_OUTPUT_MI_THUNK
273 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
274 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
275 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
276
277 /* This will be overridden in arm_override_options. */
278 #undef TARGET_RTX_COSTS
279 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
280 #undef TARGET_ADDRESS_COST
281 #define TARGET_ADDRESS_COST arm_address_cost
282
283 #undef TARGET_SHIFT_TRUNCATION_MASK
284 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
285 #undef TARGET_VECTOR_MODE_SUPPORTED_P
286 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
287
288 #undef TARGET_MACHINE_DEPENDENT_REORG
289 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
290
291 #undef TARGET_INIT_BUILTINS
292 #define TARGET_INIT_BUILTINS arm_init_builtins
293 #undef TARGET_EXPAND_BUILTIN
294 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
295
296 #undef TARGET_INIT_LIBFUNCS
297 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
298
299 #undef TARGET_PROMOTE_FUNCTION_ARGS
300 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
301 #undef TARGET_PROMOTE_FUNCTION_RETURN
302 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
303 #undef TARGET_PROMOTE_PROTOTYPES
304 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
305 #undef TARGET_PASS_BY_REFERENCE
306 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
307 #undef TARGET_ARG_PARTIAL_BYTES
308 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
309
310 #undef TARGET_SETUP_INCOMING_VARARGS
311 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
312
313 #undef TARGET_DEFAULT_SHORT_ENUMS
314 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
315
316 #undef TARGET_ALIGN_ANON_BITFIELD
317 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
318
319 #undef TARGET_NARROW_VOLATILE_BITFIELD
320 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
321
322 #undef TARGET_CXX_GUARD_TYPE
323 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
324
325 #undef TARGET_CXX_GUARD_MASK_BIT
326 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
327
328 #undef TARGET_CXX_GET_COOKIE_SIZE
329 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
330
331 #undef TARGET_CXX_COOKIE_HAS_SIZE
332 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
333
334 #undef TARGET_CXX_CDTOR_RETURNS_THIS
335 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
336
337 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
338 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
339
340 #undef TARGET_CXX_USE_AEABI_ATEXIT
341 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
342
343 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
344 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
345 arm_cxx_determine_class_data_visibility
346
347 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
348 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
349
350 #undef TARGET_RETURN_IN_MSB
351 #define TARGET_RETURN_IN_MSB arm_return_in_msb
352
353 #undef TARGET_MUST_PASS_IN_STACK
354 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
355
356 #ifdef TARGET_UNWIND_INFO
357 #undef TARGET_UNWIND_EMIT
358 #define TARGET_UNWIND_EMIT arm_unwind_emit
359
360 /* EABI unwinding tables use a different format for the typeinfo tables. */
361 #undef TARGET_ASM_TTYPE
362 #define TARGET_ASM_TTYPE arm_output_ttype
363
364 #undef TARGET_ARM_EABI_UNWINDER
365 #define TARGET_ARM_EABI_UNWINDER true
366 #endif /* TARGET_UNWIND_INFO */
367
368 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
369 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec
370
371 #undef TARGET_CANNOT_COPY_INSN_P
372 #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
373
374 #ifdef HAVE_AS_TLS
375 #undef TARGET_HAVE_TLS
376 #define TARGET_HAVE_TLS true
377 #endif
378
379 #undef TARGET_CANNOT_FORCE_CONST_MEM
380 #define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p
381
382 #ifdef HAVE_AS_TLS
383 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
384 #define TARGET_ASM_OUTPUT_DWARF_DTPREL arm_output_dwarf_dtprel
385 #endif
386
387 struct gcc_target targetm = TARGET_INITIALIZER;
388 \f
389 /* Obstack for minipool constant handling. */
390 static struct obstack minipool_obstack;
391 static char * minipool_startobj;
392
393 /* The maximum number of insns skipped which
394 will be conditionalised if possible. */
395 static int max_insns_skipped = 5;
396
397 extern FILE * asm_out_file;
398
399 /* True if we are currently building a constant table. */
400 int making_const_table;
401
402 /* Define the information needed to generate branch insns. This is
403 stored from the compare operation. */
404 rtx arm_compare_op0, arm_compare_op1;
405
406 /* The processor for which instructions should be scheduled. */
407 enum processor_type arm_tune = arm_none;
408
409 /* The default processor used if not overridden by commandline. */
410 static enum processor_type arm_default_cpu = arm_none;
411
412 /* Which floating point model to use. */
413 enum arm_fp_model arm_fp_model;
414
415 /* Which floating point hardware is available. */
416 enum fputype arm_fpu_arch;
417
418 /* Which floating point hardware to schedule for. */
419 enum fputype arm_fpu_tune;
420
421 /* Whether to use floating point hardware. */
422 enum float_abi_type arm_float_abi;
423
424 /* Which ABI to use. */
425 enum arm_abi_type arm_abi;
426
427 /* Which thread pointer model to use. */
428 enum arm_tp_type target_thread_pointer = TP_AUTO;
429
430 /* Used to parse -mstructure_size_boundary command line option. */
431 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
432
433 /* Used for Thumb call_via trampolines. */
434 rtx thumb_call_via_label[14];
435 static int thumb_call_reg_needed;
436
437 /* Bit values used to identify processor capabilities. */
438 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
439 #define FL_ARCH3M (1 << 1) /* Extended multiply */
440 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
441 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
442 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
443 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
444 #define FL_THUMB (1 << 6) /* Thumb aware */
445 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
446 #define FL_STRONG (1 << 8) /* StrongARM */
447 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
448 #define FL_XSCALE (1 << 10) /* XScale */
449 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
450 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
451 media instructions. */
452 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
453 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
454 Note: ARM6 & 7 derivatives only. */
455 #define FL_ARCH6K (1 << 15) /* Architecture rel 6 K extensions. */
456 #define FL_THUMB2 (1 << 16) /* Thumb-2. */
457 #define FL_NOTM (1 << 17) /* Instructions not present in the 'M'
458 profile. */
459 #define FL_DIV (1 << 18) /* Hardware divide. */
460
461 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
462
463 #define FL_FOR_ARCH2 FL_NOTM
464 #define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
465 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
466 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
467 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
468 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
469 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
470 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
471 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
472 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
473 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
474 #define FL_FOR_ARCH6J FL_FOR_ARCH6
475 #define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
476 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
477 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
478 #define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2)
479 #define FL_FOR_ARCH7 (FL_FOR_ARCH6T2 &~ FL_NOTM)
480 #define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM)
481 #define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_DIV)
482 #define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_DIV)
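/* Each FL_FOR_ARCHxx value is the cumulative union of the feature bits
   available at that architecture level.  Expanding the chain above, for
   example, FL_FOR_ARCH5TE works out to

     FL_NOTM | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB

   Note that FL_FOR_ARCH5 builds on FL_FOR_ARCH4 rather than FL_FOR_ARCH4T,
   so FL_THUMB is only picked up by the 'T' variants.  */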
483
484 /* The bits in this mask specify which
485 instructions we are allowed to generate. */
486 static unsigned long insn_flags = 0;
487
488 /* The bits in this mask specify which instruction scheduling options should
489 be used. */
490 static unsigned long tune_flags = 0;
491
492 /* The following are used in the arm.md file as equivalents to bits
493 in the above two flag variables. */
494
495 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
496 int arm_arch3m = 0;
497
498 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
499 int arm_arch4 = 0;
500
501 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
502 int arm_arch4t = 0;
503
504 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
505 int arm_arch5 = 0;
506
507 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
508 int arm_arch5e = 0;
509
510 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
511 int arm_arch6 = 0;
512
513 /* Nonzero if this chip supports the ARM 6K extensions. */
514 int arm_arch6k = 0;
515
516 /* Nonzero if instructions not present in the 'M' profile can be used. */
517 int arm_arch_notm = 0;
518
519 /* Nonzero if this chip can benefit from load scheduling. */
520 int arm_ld_sched = 0;
521
522 /* Nonzero if this chip is a StrongARM. */
523 int arm_tune_strongarm = 0;
524
525 /* Nonzero if this chip is a Cirrus variant. */
526 int arm_arch_cirrus = 0;
527
528 /* Nonzero if this chip supports Intel Wireless MMX technology. */
529 int arm_arch_iwmmxt = 0;
530
531 /* Nonzero if this chip is an XScale. */
532 int arm_arch_xscale = 0;
533
534 /* Nonzero if tuning for XScale */
535 int arm_tune_xscale = 0;
536
537 /* Nonzero if we want to tune for stores that access the write-buffer.
538 This typically means an ARM6 or ARM7 with MMU or MPU. */
539 int arm_tune_wbuf = 0;
540
541 /* Nonzero if generating Thumb instructions. */
542 int thumb_code = 0;
543
544 /* Nonzero if we should define __THUMB_INTERWORK__ in the
545 preprocessor.
546 XXX This is a bit of a hack; it's intended to help work around
547 problems in GLD which doesn't understand that armv5t code is
548 interworking clean. */
549 int arm_cpp_interwork = 0;
550
551 /* Nonzero if chip supports Thumb 2. */
552 int arm_arch_thumb2;
553
554 /* Nonzero if chip supports integer division instruction. */
555 int arm_arch_hwdiv;
556
557 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
558 must report the mode of the memory reference from PRINT_OPERAND to
559 PRINT_OPERAND_ADDRESS. */
560 enum machine_mode output_memory_reference_mode;
561
562 /* The register number to be used for the PIC offset register. */
563 unsigned arm_pic_register = INVALID_REGNUM;
564
565 /* Set to 1 when a return insn is output; this means that the epilogue
566 is not needed. */
567 int return_used_this_function;
568
569 /* Set to 1 after arm_reorg has started.  Reset at the start of the
570 next function. */
571 static int after_arm_reorg = 0;
572
573 /* The maximum number of insns to be used when loading a constant. */
574 static int arm_constant_limit = 3;
575
576 /* For an explanation of these variables, see final_prescan_insn below. */
577 int arm_ccfsm_state;
578 /* arm_current_cc is also used for Thumb-2 cond_exec blocks. */
579 enum arm_cond_code arm_current_cc;
580 rtx arm_target_insn;
581 int arm_target_label;
582 /* The number of conditionally executed insns, including the current insn. */
583 int arm_condexec_count = 0;
584 /* A bitmask specifying the patterns for the IT block.
585 Zero means do not output an IT block before this insn. */
586 int arm_condexec_mask = 0;
587 /* The number of bits used in arm_condexec_mask. */
588 int arm_condexec_masklen = 0;
589
590 /* The condition codes of the ARM, and the inverse function. */
591 static const char * const arm_condition_codes[] =
592 {
593 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
594 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
595 };
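/* The table is laid out so that each condition is adjacent to its logical
   inverse (eq/ne, cs/cc, mi/pl, vs/vc, hi/ls, ge/lt, gt/le, al/nv); the
   inverse of a condition code can therefore be obtained by toggling the
   least significant bit of its index.  */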
596
597 #define ARM_LSL_NAME (TARGET_UNIFIED_ASM ? "lsl" : "asl")
598 #define streq(string1, string2) (strcmp (string1, string2) == 0)
599
600 #define THUMB2_WORK_REGS (0xff & ~( (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
601 | (1 << SP_REGNUM) | (1 << PC_REGNUM) \
602 | (1 << PIC_OFFSET_TABLE_REGNUM)))
603 \f
604 /* Initialization code. */
605
606 struct processors
607 {
608 const char *const name;
609 enum processor_type core;
610 const char *arch;
611 const unsigned long flags;
612 bool (* rtx_costs) (rtx, int, int, int *);
613 };
614
615 /* Not all of these give usefully different compilation alternatives,
616 but there is no simple way of generalizing them. */
617 static const struct processors all_cores[] =
618 {
619 /* ARM Cores */
620 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
621 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
622 #include "arm-cores.def"
623 #undef ARM_CORE
624 {NULL, arm_none, NULL, 0, NULL}
625 };
626
627 static const struct processors all_architectures[] =
628 {
629 /* ARM Architectures */
630 /* We don't specify rtx_costs here as it will be figured out
631 from the core. */
632
633 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
634 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
635 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
636 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
637 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
638 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
639 implementations that support it, so we will leave it out for now. */
640 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
641 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
642 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
643 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
644 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
645 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
646 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
647 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
648 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
649 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
650 {"armv6t2", arm1156t2s, "6T2", FL_CO_PROC | FL_FOR_ARCH6T2, NULL},
651 {"armv7", cortexa8, "7", FL_CO_PROC | FL_FOR_ARCH7, NULL},
652 {"armv7-a", cortexa8, "7A", FL_CO_PROC | FL_FOR_ARCH7A, NULL},
653 {"armv7-r", cortexr4, "7R", FL_CO_PROC | FL_FOR_ARCH7R, NULL},
654 {"armv7-m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL},
655 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
656 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
657 {NULL, arm_none, NULL, 0 , NULL}
658 };
659
660 struct arm_cpu_select
661 {
662 const char * string;
663 const char * name;
664 const struct processors * processors;
665 };
666
667 /* The 'string' field of each entry is filled in by arm_handle_option with
668 a pointer to the value specified by the user on the command line, if
669 the user specified such a value.  */
670
671 static struct arm_cpu_select arm_select[] =
672 {
673 /* string name processors */
674 { NULL, "-mcpu=", all_cores },
675 { NULL, "-march=", all_architectures },
676 { NULL, "-mtune=", all_cores }
677 };
678
679 /* Defines representing the indexes into the above table. */
680 #define ARM_OPT_SET_CPU 0
681 #define ARM_OPT_SET_ARCH 1
682 #define ARM_OPT_SET_TUNE 2
683
684 /* The name of the preprocessor macro to define for this architecture. */
685
686 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
687
688 struct fpu_desc
689 {
690 const char * name;
691 enum fputype fpu;
692 };
693
694
695 /* Available values for -mfpu=. */
696
697 static const struct fpu_desc all_fpus[] =
698 {
699 {"fpa", FPUTYPE_FPA},
700 {"fpe2", FPUTYPE_FPA_EMU2},
701 {"fpe3", FPUTYPE_FPA_EMU2},
702 {"maverick", FPUTYPE_MAVERICK},
703 {"vfp", FPUTYPE_VFP}
704 };
705
706
707 /* Floating point models used by the different hardware.
708 See fputype in arm.h. */
709
710 static const enum fputype fp_model_for_fpu[] =
711 {
712 /* No FP hardware. */
713 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
714 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
715 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
716 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
717 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
718 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
719 };
720
721
722 struct float_abi
723 {
724 const char * name;
725 enum float_abi_type abi_type;
726 };
727
728
729 /* Available values for -mfloat-abi=. */
730
731 static const struct float_abi all_float_abis[] =
732 {
733 {"soft", ARM_FLOAT_ABI_SOFT},
734 {"softfp", ARM_FLOAT_ABI_SOFTFP},
735 {"hard", ARM_FLOAT_ABI_HARD}
736 };
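/* "soft" selects purely software floating point with the soft-float
   calling conventions; "softfp" uses floating-point hardware instructions
   but keeps the soft-float calling conventions (FP values passed in core
   registers); "hard" uses the hardware and passes FP arguments in
   floating-point registers.  */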
737
738
739 struct abi_name
740 {
741 const char *name;
742 enum arm_abi_type abi_type;
743 };
744
745
746 /* Available values for -mabi=. */
747
748 static const struct abi_name arm_all_abis[] =
749 {
750 {"apcs-gnu", ARM_ABI_APCS},
751 {"atpcs", ARM_ABI_ATPCS},
752 {"aapcs", ARM_ABI_AAPCS},
753 {"iwmmxt", ARM_ABI_IWMMXT},
754 {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
755 };
756
757 /* Supported TLS relocations. */
758
759 enum tls_reloc {
760 TLS_GD32,
761 TLS_LDM32,
762 TLS_LDO32,
763 TLS_IE32,
764 TLS_LE32
765 };
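/* These follow the usual ELF TLS access models: TLS_GD32 is general
   dynamic, TLS_LDM32/TLS_LDO32 are the module and offset halves of local
   dynamic, TLS_IE32 is initial exec and TLS_LE32 is local exec, matching
   the R_ARM_TLS_* relocations of the same names.  */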
766
767 /* Emit an insn that's a simple single-set. Both the operands must be known
768 to be valid. */
769 inline static rtx
770 emit_set_insn (rtx x, rtx y)
771 {
772 return emit_insn (gen_rtx_SET (VOIDmode, x, y));
773 }
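/* For example, emit_set_insn (reg, GEN_INT (0)) emits the single insn
   (set (reg) (const_int 0)) and returns it.  */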
774
775 /* Return the number of bits set in VALUE. */
776 static unsigned
777 bit_count (unsigned long value)
778 {
779 unsigned long count = 0;
780
781 while (value)
782 {
783 count++;
784 value &= value - 1; /* Clear the least-significant set bit. */
785 }
786
787 return count;
788 }
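/* The value &= value - 1 step clears exactly one set bit per iteration
   (Kernighan's method), so e.g. value = 0x2c (binary 101100) takes three
   iterations (0x2c -> 0x28 -> 0x20 -> 0) and yields a count of 3.  */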
789
790 /* Set up library functions unique to ARM. */
791
792 static void
793 arm_init_libfuncs (void)
794 {
795 /* There are no special library functions unless we are using the
796 ARM BPABI. */
797 if (!TARGET_BPABI)
798 return;
799
800 /* The functions below are described in Section 4 of the "Run-Time
801 ABI for the ARM architecture", Version 1.0. */
802
803 /* Double-precision floating-point arithmetic. Table 2. */
804 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
805 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
806 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
807 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
808 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
809
810 /* Double-precision comparisons. Table 3. */
811 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
812 set_optab_libfunc (ne_optab, DFmode, NULL);
813 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
814 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
815 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
816 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
817 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
818
819 /* Single-precision floating-point arithmetic. Table 4. */
820 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
821 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
822 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
823 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
824 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
825
826 /* Single-precision comparisons. Table 5. */
827 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
828 set_optab_libfunc (ne_optab, SFmode, NULL);
829 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
830 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
831 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
832 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
833 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
834
835 /* Floating-point to integer conversions. Table 6. */
836 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
837 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
838 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
839 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
840 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
841 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
842 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
843 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
844
845 /* Conversions between floating types. Table 7. */
846 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
847 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
848
849 /* Integer to floating-point conversions. Table 8. */
850 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
851 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
852 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
853 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
854 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
855 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
856 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
857 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
858
859 /* Long long. Table 9. */
860 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
861 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
862 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
863 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
864 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
865 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
866 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
867 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
868
869 /* Integer (32/32->32) division. \S 4.3.1. */
870 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
871 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
872
873 /* The divmod functions are designed so that they can be used for
874 plain division, even though they return both the quotient and the
875 remainder. The quotient is returned in the usual location (i.e.,
876 r0 for SImode, {r0, r1} for DImode), just as would be expected
877 for an ordinary division routine. Because the AAPCS calling
878 conventions specify that all of { r0, r1, r2, r3 } are
879 call-clobbered registers, there is no need to tell the compiler
880 explicitly that those registers are clobbered by these
881 routines. */
882 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
883 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
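/* So, for example, using __aeabi_idivmod as a plain division routine for
   7 / 3 leaves the quotient 2 in r0, where the caller expects it, while
   the remainder 1 in r1 is simply ignored.  */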
884
885 /* For SImode division the ABI provides div-without-mod routines,
886 which are faster. */
887 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
888 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
889
890 /* We don't have mod libcalls. Fortunately gcc knows how to use the
891 divmod libcalls instead. */
892 set_optab_libfunc (smod_optab, DImode, NULL);
893 set_optab_libfunc (umod_optab, DImode, NULL);
894 set_optab_libfunc (smod_optab, SImode, NULL);
895 set_optab_libfunc (umod_optab, SImode, NULL);
896 }
897
898 /* Implement TARGET_HANDLE_OPTION. */
899
900 static bool
901 arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
902 {
903 switch (code)
904 {
905 case OPT_march_:
906 arm_select[1].string = arg;
907 return true;
908
909 case OPT_mcpu_:
910 arm_select[0].string = arg;
911 return true;
912
913 case OPT_mhard_float:
914 target_float_abi_name = "hard";
915 return true;
916
917 case OPT_msoft_float:
918 target_float_abi_name = "soft";
919 return true;
920
921 case OPT_mtune_:
922 arm_select[2].string = arg;
923 return true;
924
925 default:
926 return true;
927 }
928 }
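/* Thus -mcpu= ends up in arm_select[ARM_OPT_SET_CPU].string, -march= in
   arm_select[ARM_OPT_SET_ARCH].string and -mtune= in
   arm_select[ARM_OPT_SET_TUNE].string; arm_override_options below then
   resolves these strings against all_cores and all_architectures.  */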
929
930 /* Fix up any incompatible options that the user has specified.
931 This has now turned into a maze. */
932 void
933 arm_override_options (void)
934 {
935 unsigned i;
936 enum processor_type target_arch_cpu = arm_none;
937
938 /* Set up the flags based on the cpu/architecture selected by the user. */
939 for (i = ARRAY_SIZE (arm_select); i--;)
940 {
941 struct arm_cpu_select * ptr = arm_select + i;
942
943 if (ptr->string != NULL && ptr->string[0] != '\0')
944 {
945 const struct processors * sel;
946
947 for (sel = ptr->processors; sel->name != NULL; sel++)
948 if (streq (ptr->string, sel->name))
949 {
950 /* Set the architecture define. */
951 if (i != ARM_OPT_SET_TUNE)
952 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
953
954 /* Determine the processor core for which we should
955 tune code-generation. */
956 if (/* -mcpu= is a sensible default. */
957 i == ARM_OPT_SET_CPU
958 /* -mtune= overrides -mcpu= and -march=. */
959 || i == ARM_OPT_SET_TUNE)
960 arm_tune = (enum processor_type) (sel - ptr->processors);
961
962 /* Remember the CPU associated with this architecture.
963 If no other option is used to set the CPU type,
964 we'll use this to guess the most suitable tuning
965 options. */
966 if (i == ARM_OPT_SET_ARCH)
967 target_arch_cpu = sel->core;
968
969 if (i != ARM_OPT_SET_TUNE)
970 {
971 /* If we have been given an architecture and a processor
972 make sure that they are compatible. We only generate
973 a warning though, and we prefer the CPU over the
974 architecture. */
975 if (insn_flags != 0 && (insn_flags ^ sel->flags))
976 warning (0, "switch -mcpu=%s conflicts with -march= switch",
977 ptr->string);
978
979 insn_flags = sel->flags;
980 }
981
982 break;
983 }
984
985 if (sel->name == NULL)
986 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
987 }
988 }
989
990 /* Guess the tuning options from the architecture if necessary. */
991 if (arm_tune == arm_none)
992 arm_tune = target_arch_cpu;
993
994 /* If the user did not specify a processor, choose one for them. */
995 if (insn_flags == 0)
996 {
997 const struct processors * sel;
998 unsigned int sought;
999 enum processor_type cpu;
1000
1001 cpu = TARGET_CPU_DEFAULT;
1002 if (cpu == arm_none)
1003 {
1004 #ifdef SUBTARGET_CPU_DEFAULT
1005 /* Use the subtarget default CPU if none was specified by
1006 configure. */
1007 cpu = SUBTARGET_CPU_DEFAULT;
1008 #endif
1009 /* Default to ARM6. */
1010 if (cpu == arm_none)
1011 cpu = arm6;
1012 }
1013 sel = &all_cores[cpu];
1014
1015 insn_flags = sel->flags;
1016
1017 /* Now check to see if the user has specified some command line
1018 switch that requires certain abilities from the cpu. */
1019 sought = 0;
1020
1021 if (TARGET_INTERWORK || TARGET_THUMB)
1022 {
1023 sought |= (FL_THUMB | FL_MODE32);
1024
1025 /* There are no ARM processors that support both APCS-26 and
1026 interworking. Therefore we force FL_MODE26 to be removed
1027 from insn_flags here (if it was set), so that the search
1028 below will always be able to find a compatible processor. */
1029 insn_flags &= ~FL_MODE26;
1030 }
1031
1032 if (sought != 0 && ((sought & insn_flags) != sought))
1033 {
1034 /* Try to locate a CPU type that supports all of the abilities
1035 of the default CPU, plus the extra abilities requested by
1036 the user. */
1037 for (sel = all_cores; sel->name != NULL; sel++)
1038 if ((sel->flags & sought) == (sought | insn_flags))
1039 break;
1040
1041 if (sel->name == NULL)
1042 {
1043 unsigned current_bit_count = 0;
1044 const struct processors * best_fit = NULL;
1045
1046 /* Ideally we would like to issue an error message here
1047 saying that it was not possible to find a CPU compatible
1048 with the default CPU, but which also supports the command
1049 line options specified by the programmer, and so they
1050 ought to use the -mcpu=<name> command line option to
1051 override the default CPU type.
1052
1053 If we cannot find a cpu that has both the
1054 characteristics of the default cpu and the given
1055 command line options we scan the array again looking
1056 for a best match. */
1057 for (sel = all_cores; sel->name != NULL; sel++)
1058 if ((sel->flags & sought) == sought)
1059 {
1060 unsigned count;
1061
1062 count = bit_count (sel->flags & insn_flags);
1063
1064 if (count >= current_bit_count)
1065 {
1066 best_fit = sel;
1067 current_bit_count = count;
1068 }
1069 }
1070
1071 gcc_assert (best_fit);
1072 sel = best_fit;
1073 }
1074
1075 insn_flags = sel->flags;
1076 }
1077 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
1078 arm_default_cpu = (enum processor_type) (sel - all_cores);
1079 if (arm_tune == arm_none)
1080 arm_tune = arm_default_cpu;
1081 }
1082
1083 /* The processor for which we should tune should now have been
1084 chosen. */
1085 gcc_assert (arm_tune != arm_none);
1086
1087 tune_flags = all_cores[(int)arm_tune].flags;
1088 if (optimize_size)
1089 targetm.rtx_costs = arm_size_rtx_costs;
1090 else
1091 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
1092
1093 /* Make sure that the processor choice does not conflict with any of the
1094 other command line choices. */
1095 if (TARGET_ARM && !(insn_flags & FL_NOTM))
1096 error ("target CPU does not support ARM mode");
1097
1098 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
1099 {
1100 warning (0, "target CPU does not support interworking" );
1101 target_flags &= ~MASK_INTERWORK;
1102 }
1103
1104 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
1105 {
1106 warning (0, "target CPU does not support THUMB instructions");
1107 target_flags &= ~MASK_THUMB;
1108 }
1109
1110 if (TARGET_APCS_FRAME && TARGET_THUMB)
1111 {
1112 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
1113 target_flags &= ~MASK_APCS_FRAME;
1114 }
1115
1116 /* Callee super interworking implies thumb interworking. Adding
1117 this to the flags here simplifies the logic elsewhere. */
1118 if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
1119 target_flags |= MASK_INTERWORK;
1120
1121 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
1122 from here where no function is being compiled currently. */
1123 if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
1124 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
1125
1126 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
1127 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
1128
1129 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
1130 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
1131
1132 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
1133 {
1134 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
1135 target_flags |= MASK_APCS_FRAME;
1136 }
1137
1138 if (TARGET_POKE_FUNCTION_NAME)
1139 target_flags |= MASK_APCS_FRAME;
1140
1141 if (TARGET_APCS_REENT && flag_pic)
1142 error ("-fpic and -mapcs-reent are incompatible");
1143
1144 if (TARGET_APCS_REENT)
1145 warning (0, "APCS reentrant code not supported. Ignored");
1146
1147 /* If this target is normally configured to use APCS frames, warn if they
1148 are turned off and debugging is turned on. */
1149 if (TARGET_ARM
1150 && write_symbols != NO_DEBUG
1151 && !TARGET_APCS_FRAME
1152 && (TARGET_DEFAULT & MASK_APCS_FRAME))
1153 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
1154
1155 if (TARGET_APCS_FLOAT)
1156 warning (0, "passing floating point arguments in fp regs not yet supported");
1157
1158 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1159 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
1160 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
1161 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1162 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1163 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1164 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1165 arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
1166 arm_arch_notm = (insn_flags & FL_NOTM) != 0;
1167 arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
1168 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1169 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1170
1171 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1172 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1173 thumb_code = (TARGET_ARM == 0);
1174 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1175 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1176 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1177 arm_arch_hwdiv = (insn_flags & FL_DIV) != 0;
1178
1179 /* V5 code we generate is completely interworking capable, so we turn off
1180 TARGET_INTERWORK here to avoid many tests later on. */
1181
1182 /* XXX However, we must pass the right pre-processor defines to CPP
1183 or GLD can get confused. This is a hack. */
1184 if (TARGET_INTERWORK)
1185 arm_cpp_interwork = 1;
1186
1187 if (arm_arch5)
1188 target_flags &= ~MASK_INTERWORK;
1189
1190 if (target_abi_name)
1191 {
1192 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1193 {
1194 if (streq (arm_all_abis[i].name, target_abi_name))
1195 {
1196 arm_abi = arm_all_abis[i].abi_type;
1197 break;
1198 }
1199 }
1200 if (i == ARRAY_SIZE (arm_all_abis))
1201 error ("invalid ABI option: -mabi=%s", target_abi_name);
1202 }
1203 else
1204 arm_abi = ARM_DEFAULT_ABI;
1205
1206 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1207 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1208
1209 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1210 error ("iwmmxt abi requires an iwmmxt capable cpu");
1211
1212 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1213 if (target_fpu_name == NULL && target_fpe_name != NULL)
1214 {
1215 if (streq (target_fpe_name, "2"))
1216 target_fpu_name = "fpe2";
1217 else if (streq (target_fpe_name, "3"))
1218 target_fpu_name = "fpe3";
1219 else
1220 error ("invalid floating point emulation option: -mfpe=%s",
1221 target_fpe_name);
1222 }
1223 if (target_fpu_name != NULL)
1224 {
1225 /* The user specified a FPU. */
1226 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1227 {
1228 if (streq (all_fpus[i].name, target_fpu_name))
1229 {
1230 arm_fpu_arch = all_fpus[i].fpu;
1231 arm_fpu_tune = arm_fpu_arch;
1232 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1233 break;
1234 }
1235 }
1236 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1237 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1238 }
1239 else
1240 {
1241 #ifdef FPUTYPE_DEFAULT
1242 /* Use the default if it is specified for this platform. */
1243 arm_fpu_arch = FPUTYPE_DEFAULT;
1244 arm_fpu_tune = FPUTYPE_DEFAULT;
1245 #else
1246 /* Pick one based on CPU type. */
1247 /* ??? Some targets assume FPA is the default.
1248 if ((insn_flags & FL_VFP) != 0)
1249 arm_fpu_arch = FPUTYPE_VFP;
1250 else
1251 */
1252 if (arm_arch_cirrus)
1253 arm_fpu_arch = FPUTYPE_MAVERICK;
1254 else
1255 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1256 #endif
1257 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1258 arm_fpu_tune = FPUTYPE_FPA;
1259 else
1260 arm_fpu_tune = arm_fpu_arch;
1261 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1262 gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
1263 }
1264
1265 if (target_float_abi_name != NULL)
1266 {
1267 /* The user specified a FP ABI. */
1268 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1269 {
1270 if (streq (all_float_abis[i].name, target_float_abi_name))
1271 {
1272 arm_float_abi = all_float_abis[i].abi_type;
1273 break;
1274 }
1275 }
1276 if (i == ARRAY_SIZE (all_float_abis))
1277 error ("invalid floating point abi: -mfloat-abi=%s",
1278 target_float_abi_name);
1279 }
1280 else
1281 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1282
1283 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1284 sorry ("-mfloat-abi=hard and VFP");
1285
1286 /* FPA and iWMMXt are incompatible because the insn encodings overlap.
1287 VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
1288 will ever exist. GCC makes no attempt to support this combination. */
1289 if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
1290 sorry ("iWMMXt and hardware floating point");
1291
1292 /* ??? iWMMXt insn patterns need auditing for Thumb-2. */
1293 if (TARGET_THUMB2 && TARGET_IWMMXT)
1294 sorry ("Thumb-2 iWMMXt");
1295
1296 /* If soft-float is specified then don't use FPU. */
1297 if (TARGET_SOFT_FLOAT)
1298 arm_fpu_arch = FPUTYPE_NONE;
1299
1300 /* For arm2/3 there is no need to do any scheduling if there is only
1301 a floating point emulator, or we are doing software floating-point. */
1302 if ((TARGET_SOFT_FLOAT
1303 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1304 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1305 && (tune_flags & FL_MODE32) == 0)
1306 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1307
1308 if (target_thread_switch)
1309 {
1310 if (strcmp (target_thread_switch, "soft") == 0)
1311 target_thread_pointer = TP_SOFT;
1312 else if (strcmp (target_thread_switch, "auto") == 0)
1313 target_thread_pointer = TP_AUTO;
1314 else if (strcmp (target_thread_switch, "cp15") == 0)
1315 target_thread_pointer = TP_CP15;
1316 else
1317 error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
1318 }
1319
1320 /* Use the cp15 method if it is available. */
1321 if (target_thread_pointer == TP_AUTO)
1322 {
1323 if (arm_arch6k && !TARGET_THUMB)
1324 target_thread_pointer = TP_CP15;
1325 else
1326 target_thread_pointer = TP_SOFT;
1327 }
1328
1329 if (TARGET_HARD_TP && TARGET_THUMB1)
1330 error ("can not use -mtp=cp15 with 16-bit Thumb");
1331
1332 /* Override the default structure alignment for AAPCS ABI. */
1333 if (TARGET_AAPCS_BASED)
1334 arm_structure_size_boundary = 8;
1335
1336 if (structure_size_string != NULL)
1337 {
1338 int size = strtol (structure_size_string, NULL, 0);
1339
1340 if (size == 8 || size == 32
1341 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1342 arm_structure_size_boundary = size;
1343 else
1344 warning (0, "structure size boundary can only be set to %s",
1345 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1346 }
1347
1348 if (!TARGET_ARM && TARGET_VXWORKS_RTP && flag_pic)
1349 {
1350 error ("RTP PIC is incompatible with Thumb");
1351 flag_pic = 0;
1352 }
1353
1354 /* If stack checking is disabled, we can use r10 as the PIC register,
1355 which keeps r9 available. The EABI specifies r9 as the PIC register. */
1356 if (flag_pic && TARGET_SINGLE_PIC_BASE)
1357 {
1358 if (TARGET_VXWORKS_RTP)
1359 warning (0, "RTP PIC is incompatible with -msingle-pic-base");
1360 arm_pic_register = (TARGET_APCS_STACK || TARGET_AAPCS_BASED) ? 9 : 10;
1361 }
1362
1363 if (flag_pic && TARGET_VXWORKS_RTP)
1364 arm_pic_register = 9;
1365
1366 if (arm_pic_register_string != NULL)
1367 {
1368 int pic_register = decode_reg_name (arm_pic_register_string);
1369
1370 if (!flag_pic)
1371 warning (0, "-mpic-register= is useless without -fpic");
1372
1373 /* Prevent the user from choosing an obviously stupid PIC register. */
1374 else if (pic_register < 0 || call_used_regs[pic_register]
1375 || pic_register == HARD_FRAME_POINTER_REGNUM
1376 || pic_register == STACK_POINTER_REGNUM
1377 || pic_register >= PC_REGNUM
1378 || (TARGET_VXWORKS_RTP
1379 && (unsigned int) pic_register != arm_pic_register))
1380 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1381 else
1382 arm_pic_register = pic_register;
1383 }
1384
1385 /* ??? We might want scheduling for thumb2. */
1386 if (TARGET_THUMB && flag_schedule_insns)
1387 {
1388 /* Don't warn since it's on by default in -O2. */
1389 flag_schedule_insns = 0;
1390 }
1391
1392 if (optimize_size)
1393 {
1394 arm_constant_limit = 1;
1395
1396 /* If optimizing for size, bump the number of instructions that we
1397 are prepared to conditionally execute (even on a StrongARM). */
1398 max_insns_skipped = 6;
1399 }
1400 else
1401 {
1402 /* For processors with load scheduling, it never costs more than
1403 2 cycles to load a constant, and the load scheduler may well
1404 reduce that to 1. */
1405 if (arm_ld_sched)
1406 arm_constant_limit = 1;
1407
1408 /* On XScale the longer latency of a load makes it more difficult
1409 to achieve a good schedule, so it's faster to synthesize
1410 constants that can be done in two insns. */
1411 if (arm_tune_xscale)
1412 arm_constant_limit = 2;
1413
1414 /* StrongARM has early execution of branches, so a sequence
1415 that is worth skipping is shorter. */
1416 if (arm_tune_strongarm)
1417 max_insns_skipped = 3;
1418 }
1419
1420 /* Register global variables with the garbage collector. */
1421 arm_add_gc_roots ();
1422 }
1423
1424 static void
1425 arm_add_gc_roots (void)
1426 {
1427 gcc_obstack_init(&minipool_obstack);
1428 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1429 }
1430 \f
1431 /* A table of known ARM exception types.
1432 For use with the interrupt function attribute. */
1433
1434 typedef struct
1435 {
1436 const char *const arg;
1437 const unsigned long return_value;
1438 }
1439 isr_attribute_arg;
1440
1441 static const isr_attribute_arg isr_attribute_args [] =
1442 {
1443 { "IRQ", ARM_FT_ISR },
1444 { "irq", ARM_FT_ISR },
1445 { "FIQ", ARM_FT_FIQ },
1446 { "fiq", ARM_FT_FIQ },
1447 { "ABORT", ARM_FT_ISR },
1448 { "abort", ARM_FT_ISR },
1449 { "ABORT", ARM_FT_ISR },
1450 { "abort", ARM_FT_ISR },
1451 { "UNDEF", ARM_FT_EXCEPTION },
1452 { "undef", ARM_FT_EXCEPTION },
1453 { "SWI", ARM_FT_EXCEPTION },
1454 { "swi", ARM_FT_EXCEPTION },
1455 { NULL, ARM_FT_NORMAL }
1456 };
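/* These strings are the argument of the "isr" (or "interrupt") function
   attribute; a typical use, with a hypothetical handler name, would be

     void uart_handler (void) __attribute__ ((interrupt ("IRQ")));

   for which arm_isr_value below returns ARM_FT_ISR.  */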
1457
1458 /* Returns the (interrupt) function type of the current
1459 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1460
1461 static unsigned long
1462 arm_isr_value (tree argument)
1463 {
1464 const isr_attribute_arg * ptr;
1465 const char * arg;
1466
1467 if (!arm_arch_notm)
1468 return ARM_FT_NORMAL | ARM_FT_STACKALIGN;
1469
1470 /* No argument - default to IRQ. */
1471 if (argument == NULL_TREE)
1472 return ARM_FT_ISR;
1473
1474 /* Get the value of the argument. */
1475 if (TREE_VALUE (argument) == NULL_TREE
1476 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1477 return ARM_FT_UNKNOWN;
1478
1479 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1480
1481 /* Check it against the list of known arguments. */
1482 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1483 if (streq (arg, ptr->arg))
1484 return ptr->return_value;
1485
1486 /* An unrecognized interrupt type. */
1487 return ARM_FT_UNKNOWN;
1488 }
1489
1490 /* Computes the type of the current function. */
1491
1492 static unsigned long
1493 arm_compute_func_type (void)
1494 {
1495 unsigned long type = ARM_FT_UNKNOWN;
1496 tree a;
1497 tree attr;
1498
1499 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
1500
1501 /* Decide if the current function is volatile. Such functions
1502 never return, and many memory cycles can be saved by not storing
1503 register values that will never be needed again. This optimization
1504 was added to speed up context switching in a kernel application. */
1505 if (optimize > 0
1506 && (TREE_NOTHROW (current_function_decl)
1507 || !(flag_unwind_tables
1508 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
1509 && TREE_THIS_VOLATILE (current_function_decl))
1510 type |= ARM_FT_VOLATILE;
1511
1512 if (cfun->static_chain_decl != NULL)
1513 type |= ARM_FT_NESTED;
1514
1515 attr = DECL_ATTRIBUTES (current_function_decl);
1516
1517 a = lookup_attribute ("naked", attr);
1518 if (a != NULL_TREE)
1519 type |= ARM_FT_NAKED;
1520
1521 a = lookup_attribute ("isr", attr);
1522 if (a == NULL_TREE)
1523 a = lookup_attribute ("interrupt", attr);
1524
1525 if (a == NULL_TREE)
1526 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1527 else
1528 type |= arm_isr_value (TREE_VALUE (a));
1529
1530 return type;
1531 }
1532
1533 /* Returns the type of the current function. */
1534
1535 unsigned long
1536 arm_current_func_type (void)
1537 {
1538 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1539 cfun->machine->func_type = arm_compute_func_type ();
1540
1541 return cfun->machine->func_type;
1542 }
1543 \f
1544 /* Return 1 if it is possible to return using a single instruction.
1545 If SIBLING is non-null, this is a test for a return before a sibling
1546 call. SIBLING is the call insn, so we can examine its register usage. */
1547
1548 int
1549 use_return_insn (int iscond, rtx sibling)
1550 {
1551 int regno;
1552 unsigned int func_type;
1553 unsigned long saved_int_regs;
1554 unsigned HOST_WIDE_INT stack_adjust;
1555 arm_stack_offsets *offsets;
1556
1557 /* Never use a return instruction before reload has run. */
1558 if (!reload_completed)
1559 return 0;
1560
1561 func_type = arm_current_func_type ();
1562
1563 /* Naked, volatile and stack alignment functions need special
1564 consideration. */
1565 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED | ARM_FT_STACKALIGN))
1566 return 0;
1567
1568 /* So do interrupt functions that use the frame pointer and Thumb
1569 interrupt functions. */
1570 if (IS_INTERRUPT (func_type) && (frame_pointer_needed || TARGET_THUMB))
1571 return 0;
1572
1573 offsets = arm_get_frame_offsets ();
1574 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1575
1576 /* As do variadic functions. */
1577 if (current_function_pretend_args_size
1578 || cfun->machine->uses_anonymous_args
1579 /* Or if the function calls __builtin_eh_return () */
1580 || current_function_calls_eh_return
1581 /* Or if the function calls alloca */
1582 || current_function_calls_alloca
1583 /* Or if there is a stack adjustment. However, if the stack pointer
1584 is saved on the stack, we can use a pre-incrementing stack load. */
1585 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1586 return 0;
1587
1588 saved_int_regs = arm_compute_save_reg_mask ();
1589
1590 /* Unfortunately, the insn
1591
1592 ldmib sp, {..., sp, ...}
1593
1594 triggers a bug on most SA-110 based devices, such that the stack
1595 pointer won't be correctly restored if the instruction takes a
1596 page fault. We work around this problem by popping r3 along with
1597 the other registers, since that is never slower than executing
1598 another instruction.
1599
1600 We test for !arm_arch5 here, because code for any architecture
1601 less than this could potentially be run on one of the buggy
1602 chips. */
1603 if (stack_adjust == 4 && !arm_arch5 && TARGET_ARM)
1604 {
1605 /* Validate that r3 is a call-clobbered register (always true in
1606 the default ABI) ... */
1607 if (!call_used_regs[3])
1608 return 0;
1609
1610 /* ... that it isn't being used for a return value ... */
1611 if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
1612 return 0;
1613
1614 /* ... or for a tail-call argument ... */
1615 if (sibling)
1616 {
1617 gcc_assert (GET_CODE (sibling) == CALL_INSN);
1618
1619 if (find_regno_fusage (sibling, USE, 3))
1620 return 0;
1621 }
1622
1623 /* ... and that there are no call-saved registers in r0-r2
1624 (always true in the default ABI). */
1625 if (saved_int_regs & 0x7)
1626 return 0;
1627 }
1628
1629 /* Can't be done if interworking with Thumb, and any registers have been
1630 stacked. */
1631 if (TARGET_INTERWORK && saved_int_regs != 0 && !IS_INTERRUPT(func_type))
1632 return 0;
1633
1634 /* On StrongARM, conditional returns are expensive if they aren't
1635 taken and multiple registers have been stacked. */
1636 if (iscond && arm_tune_strongarm)
1637 {
1638 /* A conditional return when just the LR is stored is a simple
1639 conditional-load instruction; that's not expensive. */
1640 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1641 return 0;
1642
1643 if (flag_pic
1644 && arm_pic_register != INVALID_REGNUM
1645 && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1646 return 0;
1647 }
1648
1649 /* If there are saved registers but the LR isn't saved, then we need
1650 two instructions for the return. */
1651 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1652 return 0;
1653
1654 /* Can't be done if any of the FPA regs are pushed,
1655 since this also requires an insn. */
1656 if (TARGET_HARD_FLOAT && TARGET_FPA)
1657 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1658 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
1659 return 0;
1660
1661 /* Likewise VFP regs. */
1662 if (TARGET_HARD_FLOAT && TARGET_VFP)
1663 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1664 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
1665 return 0;
1666
1667 if (TARGET_REALLY_IWMMXT)
1668 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1669 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
1670 return 0;
1671
1672 return 1;
1673 }
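
/* As a rough illustration (not exhaustive), a leaf function that saves no
   registers can return with a single "bx lr" (or "mov pc, lr" on older
   cores), and a non-interworking function that pushed {r4, lr} can return
   with a single "ldmfd sp!, {r4, pc}".  The checks above reject the cases
   (interrupt handlers, outstanding stack adjustments, FPA/VFP/iWMMXt
   saves, and so on) where no such single instruction exists.  */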
1674
1675 /* Return TRUE if int I is a valid immediate ARM constant. */
1676
1677 int
1678 const_ok_for_arm (HOST_WIDE_INT i)
1679 {
1680 int lowbit;
1681
1682 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1683 be all zero, or all one. */
1684 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1685 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1686 != ((~(unsigned HOST_WIDE_INT) 0)
1687 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1688 return FALSE;
1689
1690 i &= (unsigned HOST_WIDE_INT) 0xffffffff;
1691
1692 /* Fast return for 0 and small values. We must do this for zero, since
1693 the code below can't handle that one case. */
1694 if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
1695 return TRUE;
1696
1697 /* Get the number of trailing zeros. */
1698 lowbit = ffs((int) i) - 1;
1699
1700 /* Only even shifts are allowed in ARM mode so round down to the
1701 nearest even number. */
1702 if (TARGET_ARM)
1703 lowbit &= ~1;
1704
1705 if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
1706 return TRUE;
1707
1708 if (TARGET_ARM)
1709 {
1710 /* Allow rotated constants in ARM mode. */
1711 if (lowbit <= 4
1712 && ((i & ~0xc000003f) == 0
1713 || (i & ~0xf000000f) == 0
1714 || (i & ~0xfc000003) == 0))
1715 return TRUE;
1716 }
1717 else
1718 {
1719 HOST_WIDE_INT v;
1720
1721 /* Allow repeated pattern. */
1722 v = i & 0xff;
1723 v |= v << 16;
1724 if (i == v || i == (v | (v << 8)))
1725 return TRUE;
1726 }
1727
1728 return FALSE;
1729 }
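
/* Some illustrative cases for the test above (32-bit values):

     0x000000ff   valid in both ARM and Thumb-2 mode.
     0x00000ff0   valid in both: 0xff shifted by an even amount.
     0x000001fe   ARM: invalid (it would need an odd rotation);
                  Thumb-2: valid, since arbitrary shifts are allowed.
     0x00120012   ARM: invalid; Thumb-2: valid (replicated halfwords).
     0x00000101   invalid in both: the set bits span more than 8 bits and
                  form no replicated pattern.  */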
1730
1731 /* Return true if I is a valid constant for the operation CODE. */
1732 static int
1733 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1734 {
1735 if (const_ok_for_arm (i))
1736 return 1;
1737
1738 switch (code)
1739 {
1740 case PLUS:
1741 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1742
1743 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1744 case XOR:
1745 case IOR:
1746 return 0;
1747
1748 case AND:
1749 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1750
1751 default:
1752 gcc_unreachable ();
1753 }
1754 }
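
/* For example (illustrative only): 0xffffff00 is not itself a valid
   immediate, but for PLUS its negation 0x100 is, so "x + 0xffffff00" can
   become "sub rd, rn, #256"; and for AND its complement 0xff is valid, so
   "x & 0xffffff00" can become "bic rd, rn, #255".  */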
1755
1756 /* Emit a sequence of insns to handle a large constant.
1757 CODE is the code of the operation required, it can be any of SET, PLUS,
1758 IOR, AND, XOR, MINUS;
1759 MODE is the mode in which the operation is being performed;
1760 VAL is the integer to operate on;
1761 SOURCE is the other operand (a register, or a null-pointer for SET);
1762 SUBTARGETS means it is safe to create scratch registers if that will
1763 either produce a simpler sequence, or we will want to cse the values.
1764 Return value is the number of insns emitted. */
1765
1766 /* ??? Tweak this for thumb2. */
1767 int
1768 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1769 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1770 {
1771 rtx cond;
1772
1773 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1774 cond = COND_EXEC_TEST (PATTERN (insn));
1775 else
1776 cond = NULL_RTX;
1777
1778 if (subtargets || code == SET
1779 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1780 && REGNO (target) != REGNO (source)))
1781 {
1782 /* After arm_reorg has been called, we can't fix up expensive
1783 constants by pushing them into memory so we must synthesize
1784 them in-line, regardless of the cost. This is only likely to
1785 be more costly on chips that have load delay slots and we are
1786 compiling without running the scheduler (so no splitting
1787 occurred before the final instruction emission).
1788
1789 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1790 */
1791 if (!after_arm_reorg
1792 && !cond
1793 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1794 1, 0)
1795 > arm_constant_limit + (code != SET)))
1796 {
1797 if (code == SET)
1798 {
1799 /* Currently SET is the only monadic value for CODE; all
1800 the rest are dyadic. */
1801 emit_set_insn (target, GEN_INT (val));
1802 return 1;
1803 }
1804 else
1805 {
1806 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1807
1808 emit_set_insn (temp, GEN_INT (val));
1809 /* For MINUS, the constant is the value being subtracted from
1810 (VAL - SOURCE), since we never have subtraction of a constant. */
1811 if (code == MINUS)
1812 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1813 else
1814 emit_set_insn (target,
1815 gen_rtx_fmt_ee (code, mode, source, temp));
1816 return 2;
1817 }
1818 }
1819 }
1820
1821 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1822 1);
1823 }
1824
1825 /* Return the number of ARM instructions required to synthesize the given
1826 constant. */
1827 static int
1828 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1829 {
1830 HOST_WIDE_INT temp1;
1831 int num_insns = 0;
1832 do
1833 {
1834 int end;
1835
1836 if (i <= 0)
1837 i += 32;
1838 if (remainder & (3 << (i - 2)))
1839 {
1840 end = i - 8;
1841 if (end < 0)
1842 end += 32;
1843 temp1 = remainder & ((0x0ff << end)
1844 | ((i < end) ? (0xff >> (32 - end)) : 0));
1845 remainder &= ~temp1;
1846 num_insns++;
1847 i -= 6;
1848 }
1849 i -= 2;
1850 } while (remainder);
1851 return num_insns;
1852 }
1853
1854 /* Emit an instruction with the indicated PATTERN. If COND is
1855 non-NULL, conditionalize the execution of the instruction on COND
1856 being true. */
1857
1858 static void
1859 emit_constant_insn (rtx cond, rtx pattern)
1860 {
1861 if (cond)
1862 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1863 emit_insn (pattern);
1864 }
1865
1866 /* As above, but extra parameter GENERATE which, if clear, suppresses
1867 RTL generation. */
1868 /* ??? This needs more work for thumb2. */
1869
1870 static int
1871 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1872 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1873 int generate)
1874 {
1875 int can_invert = 0;
1876 int can_negate = 0;
1877 int can_negate_initial = 0;
1878 int can_shift = 0;
1879 int i;
1880 int num_bits_set = 0;
1881 int set_sign_bit_copies = 0;
1882 int clear_sign_bit_copies = 0;
1883 int clear_zero_bit_copies = 0;
1884 int set_zero_bit_copies = 0;
1885 int insns = 0;
1886 unsigned HOST_WIDE_INT temp1, temp2;
1887 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1888
1889 /* Find out which operations are safe for a given CODE. Also do a quick
1890 check for degenerate cases; these can occur when DImode operations
1891 are split. */
1892 switch (code)
1893 {
1894 case SET:
1895 can_invert = 1;
1896 can_shift = 1;
1897 can_negate = 1;
1898 break;
1899
1900 case PLUS:
1901 can_negate = 1;
1902 can_negate_initial = 1;
1903 break;
1904
1905 case IOR:
1906 if (remainder == 0xffffffff)
1907 {
1908 if (generate)
1909 emit_constant_insn (cond,
1910 gen_rtx_SET (VOIDmode, target,
1911 GEN_INT (ARM_SIGN_EXTEND (val))));
1912 return 1;
1913 }
1914 if (remainder == 0)
1915 {
1916 if (reload_completed && rtx_equal_p (target, source))
1917 return 0;
1918 if (generate)
1919 emit_constant_insn (cond,
1920 gen_rtx_SET (VOIDmode, target, source));
1921 return 1;
1922 }
1923 break;
1924
1925 case AND:
1926 if (remainder == 0)
1927 {
1928 if (generate)
1929 emit_constant_insn (cond,
1930 gen_rtx_SET (VOIDmode, target, const0_rtx));
1931 return 1;
1932 }
1933 if (remainder == 0xffffffff)
1934 {
1935 if (reload_completed && rtx_equal_p (target, source))
1936 return 0;
1937 if (generate)
1938 emit_constant_insn (cond,
1939 gen_rtx_SET (VOIDmode, target, source));
1940 return 1;
1941 }
1942 can_invert = 1;
1943 break;
1944
1945 case XOR:
1946 if (remainder == 0)
1947 {
1948 if (reload_completed && rtx_equal_p (target, source))
1949 return 0;
1950 if (generate)
1951 emit_constant_insn (cond,
1952 gen_rtx_SET (VOIDmode, target, source));
1953 return 1;
1954 }
1955
1956 /* We don't know how to handle other cases yet. */
1957 gcc_assert (remainder == 0xffffffff);
1958
1959 if (generate)
1960 emit_constant_insn (cond,
1961 gen_rtx_SET (VOIDmode, target,
1962 gen_rtx_NOT (mode, source)));
1963 return 1;
1964
1965 case MINUS:
1966 /* We treat MINUS as (val - source), since (source - val) is always
1967 passed as (source + (-val)). */
1968 if (remainder == 0)
1969 {
1970 if (generate)
1971 emit_constant_insn (cond,
1972 gen_rtx_SET (VOIDmode, target,
1973 gen_rtx_NEG (mode, source)));
1974 return 1;
1975 }
1976 if (const_ok_for_arm (val))
1977 {
1978 if (generate)
1979 emit_constant_insn (cond,
1980 gen_rtx_SET (VOIDmode, target,
1981 gen_rtx_MINUS (mode, GEN_INT (val),
1982 source)));
1983 return 1;
1984 }
1985 can_negate = 1;
1986
1987 break;
1988
1989 default:
1990 gcc_unreachable ();
1991 }
1992
1993 /* If we can do it in one insn get out quickly. */
1994 if (const_ok_for_arm (val)
1995 || (can_negate_initial && const_ok_for_arm (-val))
1996 || (can_invert && const_ok_for_arm (~val)))
1997 {
1998 if (generate)
1999 emit_constant_insn (cond,
2000 gen_rtx_SET (VOIDmode, target,
2001 (source
2002 ? gen_rtx_fmt_ee (code, mode, source,
2003 GEN_INT (val))
2004 : GEN_INT (val))));
2005 return 1;
2006 }
2007
2008 /* Calculate a few attributes that may be useful for specific
2009 optimizations. */
2010 for (i = 31; i >= 0; i--)
2011 {
2012 if ((remainder & (1 << i)) == 0)
2013 clear_sign_bit_copies++;
2014 else
2015 break;
2016 }
2017
2018 for (i = 31; i >= 0; i--)
2019 {
2020 if ((remainder & (1 << i)) != 0)
2021 set_sign_bit_copies++;
2022 else
2023 break;
2024 }
2025
2026 for (i = 0; i <= 31; i++)
2027 {
2028 if ((remainder & (1 << i)) == 0)
2029 clear_zero_bit_copies++;
2030 else
2031 break;
2032 }
2033
2034 for (i = 0; i <= 31; i++)
2035 {
2036 if ((remainder & (1 << i)) != 0)
2037 set_zero_bit_copies++;
2038 else
2039 break;
2040 }
2041
2042 switch (code)
2043 {
2044 case SET:
2045 /* See if we can use movw. */
2046 if (arm_arch_thumb2 && (remainder & 0xffff0000) == 0)
2047 {
2048 if (generate)
2049 emit_constant_insn (cond, gen_rtx_SET (VOIDmode, target,
2050 GEN_INT (val)));
2051 return 1;
2052 }
2053
2054 /* See if we can do this by sign-extending a constant that is known
2055 to be negative. This is a good way of doing it, since the shift
2056 may well merge into a subsequent insn. */
2057 if (set_sign_bit_copies > 1)
2058 {
2059 if (const_ok_for_arm
2060 (temp1 = ARM_SIGN_EXTEND (remainder
2061 << (set_sign_bit_copies - 1))))
2062 {
2063 if (generate)
2064 {
2065 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2066 emit_constant_insn (cond,
2067 gen_rtx_SET (VOIDmode, new_src,
2068 GEN_INT (temp1)));
2069 emit_constant_insn (cond,
2070 gen_ashrsi3 (target, new_src,
2071 GEN_INT (set_sign_bit_copies - 1)));
2072 }
2073 return 2;
2074 }
2075 /* For an inverted constant, we will need to set the low bits;
2076 these will be shifted out of harm's way. */
2077 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
2078 if (const_ok_for_arm (~temp1))
2079 {
2080 if (generate)
2081 {
2082 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2083 emit_constant_insn (cond,
2084 gen_rtx_SET (VOIDmode, new_src,
2085 GEN_INT (temp1)));
2086 emit_constant_insn (cond,
2087 gen_ashrsi3 (target, new_src,
2088 GEN_INT (set_sign_bit_copies - 1)));
2089 }
2090 return 2;
2091 }
2092 }
2093
2094 /* See if we can calculate the value as the difference between two
2095 valid immediates. */
2096 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
2097 {
2098 int topshift = clear_sign_bit_copies & ~1;
2099
2100 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
2101 & (0xff000000 >> topshift));
2102
2103 /* If temp1 is zero, then that means the 9 most significant
2104 bits of remainder were 1 and we've caused it to overflow.
2105 When topshift is 0 we don't need to do anything since we
2106 can borrow from 'bit 32'. */
2107 if (temp1 == 0 && topshift != 0)
2108 temp1 = 0x80000000 >> (topshift - 1);
2109
2110 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
2111
2112 if (const_ok_for_arm (temp2))
2113 {
2114 if (generate)
2115 {
2116 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2117 emit_constant_insn (cond,
2118 gen_rtx_SET (VOIDmode, new_src,
2119 GEN_INT (temp1)));
2120 emit_constant_insn (cond,
2121 gen_addsi3 (target, new_src,
2122 GEN_INT (-temp2)));
2123 }
2124
2125 return 2;
2126 }
2127 }
2128
2129 /* See if we can generate this by setting the bottom (or the top)
2130 16 bits, and then shifting these into the other half of the
2131 word. We only look for the simplest cases; to do more would cost
2132 too much. Be careful, however, not to generate this when the
2133 alternative would take fewer insns. */
2134 if (val & 0xffff0000)
2135 {
2136 temp1 = remainder & 0xffff0000;
2137 temp2 = remainder & 0x0000ffff;
2138
2139 /* Overlaps outside this range are best done using other methods. */
2140 for (i = 9; i < 24; i++)
2141 {
2142 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2143 && !const_ok_for_arm (temp2))
2144 {
2145 rtx new_src = (subtargets
2146 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2147 : target);
2148 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2149 source, subtargets, generate);
2150 source = new_src;
2151 if (generate)
2152 emit_constant_insn
2153 (cond,
2154 gen_rtx_SET
2155 (VOIDmode, target,
2156 gen_rtx_IOR (mode,
2157 gen_rtx_ASHIFT (mode, source,
2158 GEN_INT (i)),
2159 source)));
2160 return insns + 1;
2161 }
2162 }
2163
2164 /* Don't duplicate cases already considered. */
2165 for (i = 17; i < 24; i++)
2166 {
2167 if (((temp1 | (temp1 >> i)) == remainder)
2168 && !const_ok_for_arm (temp1))
2169 {
2170 rtx new_src = (subtargets
2171 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2172 : target);
2173 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2174 source, subtargets, generate);
2175 source = new_src;
2176 if (generate)
2177 emit_constant_insn
2178 (cond,
2179 gen_rtx_SET (VOIDmode, target,
2180 gen_rtx_IOR
2181 (mode,
2182 gen_rtx_LSHIFTRT (mode, source,
2183 GEN_INT (i)),
2184 source)));
2185 return insns + 1;
2186 }
2187 }
2188 }
2189 break;
2190
2191 case IOR:
2192 case XOR:
2193 /* If we have IOR or XOR, and the constant can be loaded in a
2194 single instruction, and we can find a temporary to put it in,
2195 then this can be done in two instructions instead of 3-4. */
2196 if (subtargets
2197 /* TARGET can't be NULL if SUBTARGETS is 0 */
2198 || (reload_completed && !reg_mentioned_p (target, source)))
2199 {
2200 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2201 {
2202 if (generate)
2203 {
2204 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2205
2206 emit_constant_insn (cond,
2207 gen_rtx_SET (VOIDmode, sub,
2208 GEN_INT (val)));
2209 emit_constant_insn (cond,
2210 gen_rtx_SET (VOIDmode, target,
2211 gen_rtx_fmt_ee (code, mode,
2212 source, sub)));
2213 }
2214 return 2;
2215 }
2216 }
2217
2218 if (code == XOR)
2219 break;
2220
2221 if (set_sign_bit_copies > 8
2222 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2223 {
2224 if (generate)
2225 {
2226 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2227 rtx shift = GEN_INT (set_sign_bit_copies);
2228
2229 emit_constant_insn
2230 (cond,
2231 gen_rtx_SET (VOIDmode, sub,
2232 gen_rtx_NOT (mode,
2233 gen_rtx_ASHIFT (mode,
2234 source,
2235 shift))));
2236 emit_constant_insn
2237 (cond,
2238 gen_rtx_SET (VOIDmode, target,
2239 gen_rtx_NOT (mode,
2240 gen_rtx_LSHIFTRT (mode, sub,
2241 shift))));
2242 }
2243 return 2;
2244 }
2245
2246 if (set_zero_bit_copies > 8
2247 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2248 {
2249 if (generate)
2250 {
2251 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2252 rtx shift = GEN_INT (set_zero_bit_copies);
2253
2254 emit_constant_insn
2255 (cond,
2256 gen_rtx_SET (VOIDmode, sub,
2257 gen_rtx_NOT (mode,
2258 gen_rtx_LSHIFTRT (mode,
2259 source,
2260 shift))));
2261 emit_constant_insn
2262 (cond,
2263 gen_rtx_SET (VOIDmode, target,
2264 gen_rtx_NOT (mode,
2265 gen_rtx_ASHIFT (mode, sub,
2266 shift))));
2267 }
2268 return 2;
2269 }
2270
2271 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2272 {
2273 if (generate)
2274 {
2275 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2276 emit_constant_insn (cond,
2277 gen_rtx_SET (VOIDmode, sub,
2278 gen_rtx_NOT (mode, source)));
2279 source = sub;
2280 if (subtargets)
2281 sub = gen_reg_rtx (mode);
2282 emit_constant_insn (cond,
2283 gen_rtx_SET (VOIDmode, sub,
2284 gen_rtx_AND (mode, source,
2285 GEN_INT (temp1))));
2286 emit_constant_insn (cond,
2287 gen_rtx_SET (VOIDmode, target,
2288 gen_rtx_NOT (mode, sub)));
2289 }
2290 return 3;
2291 }
2292 break;
2293
2294 case AND:
2295 /* See if two shifts will do 2 or more insns' worth of work. */
2296 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2297 {
2298 HOST_WIDE_INT shift_mask = ((0xffffffff
2299 << (32 - clear_sign_bit_copies))
2300 & 0xffffffff);
2301
2302 if ((remainder | shift_mask) != 0xffffffff)
2303 {
2304 if (generate)
2305 {
2306 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2307 insns = arm_gen_constant (AND, mode, cond,
2308 remainder | shift_mask,
2309 new_src, source, subtargets, 1);
2310 source = new_src;
2311 }
2312 else
2313 {
2314 rtx targ = subtargets ? NULL_RTX : target;
2315 insns = arm_gen_constant (AND, mode, cond,
2316 remainder | shift_mask,
2317 targ, source, subtargets, 0);
2318 }
2319 }
2320
2321 if (generate)
2322 {
2323 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2324 rtx shift = GEN_INT (clear_sign_bit_copies);
2325
2326 emit_insn (gen_ashlsi3 (new_src, source, shift));
2327 emit_insn (gen_lshrsi3 (target, new_src, shift));
2328 }
2329
2330 return insns + 2;
2331 }
2332
2333 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2334 {
2335 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2336
2337 if ((remainder | shift_mask) != 0xffffffff)
2338 {
2339 if (generate)
2340 {
2341 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2342
2343 insns = arm_gen_constant (AND, mode, cond,
2344 remainder | shift_mask,
2345 new_src, source, subtargets, 1);
2346 source = new_src;
2347 }
2348 else
2349 {
2350 rtx targ = subtargets ? NULL_RTX : target;
2351
2352 insns = arm_gen_constant (AND, mode, cond,
2353 remainder | shift_mask,
2354 targ, source, subtargets, 0);
2355 }
2356 }
2357
2358 if (generate)
2359 {
2360 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2361 rtx shift = GEN_INT (clear_zero_bit_copies);
2362
2363 emit_insn (gen_lshrsi3 (new_src, source, shift));
2364 emit_insn (gen_ashlsi3 (target, new_src, shift));
2365 }
2366
2367 return insns + 2;
2368 }
2369
2370 break;
2371
2372 default:
2373 break;
2374 }
2375
2376 for (i = 0; i < 32; i++)
2377 if (remainder & (1 << i))
2378 num_bits_set++;
2379
2380 if (code == AND || (can_invert && num_bits_set > 16))
2381 remainder = (~remainder) & 0xffffffff;
2382 else if (code == PLUS && num_bits_set > 16)
2383 remainder = (-remainder) & 0xffffffff;
2384 else
2385 {
2386 can_invert = 0;
2387 can_negate = 0;
2388 }
2389
2390 /* Now try to find a way of doing the job in either two or three
2391 instructions.
2392 We start by looking for the largest block of zeros that is aligned on
2393 a 2-bit boundary; we then fill up the temps, wrapping around to the
2394 top of the word when we drop off the bottom.
2395 In the worst case this code should produce no more than four insns.
2396 Thumb-2 constants are shifted, not rotated, so the MSB is always the
2397 best place to start. */
2398
2399 /* ??? Use thumb2 replicated constants when the high and low halfwords are
2400 the same. */
2401 {
2402 int best_start = 0;
2403 if (!TARGET_THUMB2)
2404 {
2405 int best_consecutive_zeros = 0;
2406
2407 for (i = 0; i < 32; i += 2)
2408 {
2409 int consecutive_zeros = 0;
2410
2411 if (!(remainder & (3 << i)))
2412 {
2413 while ((i < 32) && !(remainder & (3 << i)))
2414 {
2415 consecutive_zeros += 2;
2416 i += 2;
2417 }
2418 if (consecutive_zeros > best_consecutive_zeros)
2419 {
2420 best_consecutive_zeros = consecutive_zeros;
2421 best_start = i - consecutive_zeros;
2422 }
2423 i -= 2;
2424 }
2425 }
2426
2427 /* So long as it won't require any more insns to do so, it's
2428 desirable to emit a small constant (in bits 0...9) in the last
2429 insn. This way there is more chance that it can be combined with
2430 a later addressing insn to form a pre-indexed load or store
2431 operation. Consider:
2432
2433 *((volatile int *)0xe0000100) = 1;
2434 *((volatile int *)0xe0000110) = 2;
2435
2436 We want this to wind up as:
2437
2438 mov rA, #0xe0000000
2439 mov rB, #1
2440 str rB, [rA, #0x100]
2441 mov rB, #2
2442 str rB, [rA, #0x110]
2443
2444 rather than having to synthesize both large constants from scratch.
2445
2446 Therefore, we calculate how many insns would be required to emit
2447 the constant starting from `best_start', and also starting from
2448 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2449 yield a shorter sequence, we may as well use zero. */
2450 if (best_start != 0
2451 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2452 && (count_insns_for_constant (remainder, 0) <=
2453 count_insns_for_constant (remainder, best_start)))
2454 best_start = 0;
2455 }
2456
2457 /* Now start emitting the insns. */
2458 i = best_start;
2459 do
2460 {
2461 int end;
2462
2463 if (i <= 0)
2464 i += 32;
2465 if (remainder & (3 << (i - 2)))
2466 {
2467 end = i - 8;
2468 if (end < 0)
2469 end += 32;
2470 temp1 = remainder & ((0x0ff << end)
2471 | ((i < end) ? (0xff >> (32 - end)) : 0));
2472 remainder &= ~temp1;
2473
2474 if (generate)
2475 {
2476 rtx new_src, temp1_rtx;
2477
2478 if (code == SET || code == MINUS)
2479 {
2480 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2481 if (can_invert && code != MINUS)
2482 temp1 = ~temp1;
2483 }
2484 else
2485 {
2486 if (remainder && subtargets)
2487 new_src = gen_reg_rtx (mode);
2488 else
2489 new_src = target;
2490 if (can_invert)
2491 temp1 = ~temp1;
2492 else if (can_negate)
2493 temp1 = -temp1;
2494 }
2495
2496 temp1 = trunc_int_for_mode (temp1, mode);
2497 temp1_rtx = GEN_INT (temp1);
2498
2499 if (code == SET)
2500 ;
2501 else if (code == MINUS)
2502 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2503 else
2504 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2505
2506 emit_constant_insn (cond,
2507 gen_rtx_SET (VOIDmode, new_src,
2508 temp1_rtx));
2509 source = new_src;
2510 }
2511
2512 if (code == SET)
2513 {
2514 can_invert = 0;
2515 code = PLUS;
2516 }
2517 else if (code == MINUS)
2518 code = PLUS;
2519
2520 insns++;
2521 if (TARGET_ARM)
2522 i -= 6;
2523 else
2524 i -= 7;
2525 }
2526 /* ARM allows rotates by a multiple of two. Thumb-2 allows arbitrary
2527 shifts. */
2528 if (TARGET_ARM)
2529 i -= 2;
2530 else
2531 i--;
2532 }
2533 while (remainder);
2534 }
2535
2536 return insns;
2537 }
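
/* A few worked examples of the kind of expansion this produces for
   CODE == SET in ARM state (register choice illustrative; without
   Thumb-2, i.e. !arm_arch_thumb2):

     0x000000fe   mov r0, #254                     (1 insn)
     0xff0000ff   mov r0, #0xff000000
                  add r0, r0, #255                 (2 insns)
     0x0000ffff   mov r0, #0x10000
                  sub r0, r0, #1                   (2 insns, the
                  "difference of two valid immediates" case above)

   With arm_arch_thumb2, constants whose high halfword is zero, such as
   0x0000ffff, are instead emitted as a single movw.  */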
2538
2539 /* Canonicalize a comparison so that we are more likely to recognize it.
2540 This can be done for a few constant compares, where we can make the
2541 immediate value easier to load. */
2542
2543 enum rtx_code
2544 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2545 rtx * op1)
2546 {
2547 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2548 unsigned HOST_WIDE_INT maxval;
2549 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2550
2551 switch (code)
2552 {
2553 case EQ:
2554 case NE:
2555 return code;
2556
2557 case GT:
2558 case LE:
2559 if (i != maxval
2560 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2561 {
2562 *op1 = GEN_INT (i + 1);
2563 return code == GT ? GE : LT;
2564 }
2565 break;
2566
2567 case GE:
2568 case LT:
2569 if (i != ~maxval
2570 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2571 {
2572 *op1 = GEN_INT (i - 1);
2573 return code == GE ? GT : LE;
2574 }
2575 break;
2576
2577 case GTU:
2578 case LEU:
2579 if (i != ~((unsigned HOST_WIDE_INT) 0)
2580 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2581 {
2582 *op1 = GEN_INT (i + 1);
2583 return code == GTU ? GEU : LTU;
2584 }
2585 break;
2586
2587 case GEU:
2588 case LTU:
2589 if (i != 0
2590 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2591 {
2592 *op1 = GEN_INT (i - 1);
2593 return code == GEU ? GTU : LEU;
2594 }
2595 break;
2596
2597 default:
2598 gcc_unreachable ();
2599 }
2600
2601 return code;
2602 }
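
/* For example (illustrative): "x > 0x3ff" would need the constant 0x3ff,
   which is not a valid immediate, but 0x400 is, so GT 0x3ff is rewritten
   as GE 0x400; likewise LE 0x3ff becomes LT 0x400.  */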
2603
2604
2605 /* Define how to find the value returned by a function. */
2606
2607 rtx
2608 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2609 {
2610 enum machine_mode mode;
2611 int unsignedp ATTRIBUTE_UNUSED;
2612 rtx r ATTRIBUTE_UNUSED;
2613
2614 mode = TYPE_MODE (type);
2615 /* Promote integer types. */
2616 if (INTEGRAL_TYPE_P (type))
2617 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2618
2619 /* Promote small structs returned in a register to full-word size
2620 for big-endian AAPCS. */
2621 if (arm_return_in_msb (type))
2622 {
2623 HOST_WIDE_INT size = int_size_in_bytes (type);
2624 if (size % UNITS_PER_WORD != 0)
2625 {
2626 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2627 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2628 }
2629 }
2630
2631 return LIBCALL_VALUE(mode);
2632 }
2633
2634 /* Determine the amount of memory needed to store the possible return
2635 registers of an untyped call. */
2636 int
2637 arm_apply_result_size (void)
2638 {
2639 int size = 16;
2640
2641 if (TARGET_ARM)
2642 {
2643 if (TARGET_HARD_FLOAT_ABI)
2644 {
2645 if (TARGET_FPA)
2646 size += 12;
2647 if (TARGET_MAVERICK)
2648 size += 8;
2649 }
2650 if (TARGET_IWMMXT_ABI)
2651 size += 8;
2652 }
2653
2654 return size;
2655 }
2656
2657 /* Decide whether a type should be returned in memory (true)
2658 or in a register (false). This is called by the macro
2659 RETURN_IN_MEMORY. */
2660 int
2661 arm_return_in_memory (tree type)
2662 {
2663 HOST_WIDE_INT size;
2664
2665 if (!AGGREGATE_TYPE_P (type) &&
2666 (TREE_CODE (type) != VECTOR_TYPE) &&
2667 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2668 /* All simple types are returned in registers.
2669 For AAPCS, complex types are treated the same as aggregates. */
2670 return 0;
2671
2672 size = int_size_in_bytes (type);
2673
2674 if (arm_abi != ARM_ABI_APCS)
2675 {
2676 /* ATPCS and later return aggregate types in memory only if they are
2677 larger than a word (or are variable size). */
2678 return (size < 0 || size > UNITS_PER_WORD);
2679 }
2680
2681 /* To maximize backwards compatibility with previous versions of gcc,
2682 return vectors up to 4 words in registers. */
2683 if (TREE_CODE (type) == VECTOR_TYPE)
2684 return (size < 0 || size > (4 * UNITS_PER_WORD));
2685
2686 /* For the arm-wince targets we choose to be compatible with Microsoft's
2687 ARM and Thumb compilers, which always return aggregates in memory. */
2688 #ifndef ARM_WINCE
2689 /* All structures/unions bigger than one word are returned in memory.
2690 Also catch the case where int_size_in_bytes returns -1. In this case
2691 the aggregate is either huge or of variable size, and in either case
2692 we will want to return it via memory and not in a register. */
2693 if (size < 0 || size > UNITS_PER_WORD)
2694 return 1;
2695
2696 if (TREE_CODE (type) == RECORD_TYPE)
2697 {
2698 tree field;
2699
2700 /* For a struct the APCS says that we only return in a register
2701 if the type is 'integer like' and every addressable element
2702 has an offset of zero. For practical purposes this means
2703 that the structure can have at most one non bit-field element
2704 and that this element must be the first one in the structure. */
2705
2706 /* Find the first field, ignoring non FIELD_DECL things which will
2707 have been created by C++. */
2708 for (field = TYPE_FIELDS (type);
2709 field && TREE_CODE (field) != FIELD_DECL;
2710 field = TREE_CHAIN (field))
2711 continue;
2712
2713 if (field == NULL)
2714 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2715
2716 /* Check that the first field is valid for returning in a register. */
2717
2718 /* ... Floats are not allowed */
2719 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2720 return 1;
2721
2722 /* ... Aggregates that are not themselves valid for returning in
2723 a register are not allowed. */
2724 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2725 return 1;
2726
2727 /* Now check the remaining fields, if any. Only bitfields are allowed,
2728 since they are not addressable. */
2729 for (field = TREE_CHAIN (field);
2730 field;
2731 field = TREE_CHAIN (field))
2732 {
2733 if (TREE_CODE (field) != FIELD_DECL)
2734 continue;
2735
2736 if (!DECL_BIT_FIELD_TYPE (field))
2737 return 1;
2738 }
2739
2740 return 0;
2741 }
2742
2743 if (TREE_CODE (type) == UNION_TYPE)
2744 {
2745 tree field;
2746
2747 /* Unions can be returned in registers if every element is
2748 integral, or can be returned in an integer register. */
2749 for (field = TYPE_FIELDS (type);
2750 field;
2751 field = TREE_CHAIN (field))
2752 {
2753 if (TREE_CODE (field) != FIELD_DECL)
2754 continue;
2755
2756 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2757 return 1;
2758
2759 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2760 return 1;
2761 }
2762
2763 return 0;
2764 }
2765 #endif /* not ARM_WINCE */
2766
2767 /* Return all other types in memory. */
2768 return 1;
2769 }
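
/* A few illustrative consequences of the rules above: under the old APCS
   rules, "struct { int i; }" is integer-like and fits in a word, so it is
   returned in r0, whereas "struct { float f; }" and any two-word struct
   are returned in memory.  For ATPCS/AAPCS, any aggregate larger than
   UNITS_PER_WORD bytes, or of variable size, is returned in memory.  */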
2770
2771 /* Indicate whether or not words of a double are in big-endian order. */
2772
2773 int
2774 arm_float_words_big_endian (void)
2775 {
2776 if (TARGET_MAVERICK)
2777 return 0;
2778
2779 /* For FPA, float words are always big-endian. For VFP, float words
2780 follow the memory system mode. */
2781
2782 if (TARGET_FPA)
2783 {
2784 return 1;
2785 }
2786
2787 if (TARGET_VFP)
2788 return (TARGET_BIG_END ? 1 : 0);
2789
2790 return 1;
2791 }
2792
2793 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2794 for a call to a function whose data type is FNTYPE.
2795 For a library call, FNTYPE is NULL. */
2796 void
2797 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2798 rtx libname ATTRIBUTE_UNUSED,
2799 tree fndecl ATTRIBUTE_UNUSED)
2800 {
2801 /* On the ARM, the offset starts at 0. */
2802 pcum->nregs = 0;
2803 pcum->iwmmxt_nregs = 0;
2804 pcum->can_split = true;
2805
2806 /* Varargs vectors are treated the same as long long.
2807 named_count avoids having to change the way arm handles 'named' */
2808 pcum->named_count = 0;
2809 pcum->nargs = 0;
2810
2811 if (TARGET_REALLY_IWMMXT && fntype)
2812 {
2813 tree fn_arg;
2814
2815 for (fn_arg = TYPE_ARG_TYPES (fntype);
2816 fn_arg;
2817 fn_arg = TREE_CHAIN (fn_arg))
2818 pcum->named_count += 1;
2819
2820 if (! pcum->named_count)
2821 pcum->named_count = INT_MAX;
2822 }
2823 }
2824
2825
2826 /* Return true if mode/type need doubleword alignment. */
2827 bool
2828 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2829 {
2830 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2831 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2832 }
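
/* For example (illustrative), under AAPCS "long long" and "double" have
   64-bit alignment, which exceeds PARM_BOUNDARY, so such arguments must
   start in an even-numbered core register: in "f (int a, long long b)"
   the value B goes in r2/r3 and r1 is left unused (see the nregs
   adjustment in arm_function_arg below).  */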
2833
2834
2835 /* Determine where to put an argument to a function.
2836 Value is zero to push the argument on the stack,
2837 or a hard register in which to store the argument.
2838
2839 MODE is the argument's machine mode.
2840 TYPE is the data type of the argument (as a tree).
2841 This is null for libcalls where that information may
2842 not be available.
2843 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2844 the preceding args and about the function being called.
2845 NAMED is nonzero if this argument is a named parameter
2846 (otherwise it is an extra parameter matching an ellipsis). */
2847
2848 rtx
2849 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2850 tree type, int named)
2851 {
2852 int nregs;
2853
2854 /* Varargs vectors are treated the same as long long.
2855 named_count avoids having to change the way arm handles 'named' */
2856 if (TARGET_IWMMXT_ABI
2857 && arm_vector_mode_supported_p (mode)
2858 && pcum->named_count > pcum->nargs + 1)
2859 {
2860 if (pcum->iwmmxt_nregs <= 9)
2861 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2862 else
2863 {
2864 pcum->can_split = false;
2865 return NULL_RTX;
2866 }
2867 }
2868
2869 /* Put doubleword aligned quantities in even register pairs. */
2870 if (pcum->nregs & 1
2871 && ARM_DOUBLEWORD_ALIGN
2872 && arm_needs_doubleword_align (mode, type))
2873 pcum->nregs++;
2874
2875 if (mode == VOIDmode)
2876 /* Pick an arbitrary value for operand 2 of the call insn. */
2877 return const0_rtx;
2878
2879 /* Only allow splitting an arg between regs and memory if all preceding
2880 args were allocated to regs. For args passed by reference we only count
2881 the reference pointer. */
2882 if (pcum->can_split)
2883 nregs = 1;
2884 else
2885 nregs = ARM_NUM_REGS2 (mode, type);
2886
2887 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2888 return NULL_RTX;
2889
2890 return gen_rtx_REG (mode, pcum->nregs);
2891 }
2892
2893 static int
2894 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2895 tree type, bool named ATTRIBUTE_UNUSED)
2896 {
2897 int nregs = pcum->nregs;
2898
2899 if (arm_vector_mode_supported_p (mode))
2900 return 0;
2901
2902 if (NUM_ARG_REGS > nregs
2903 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2904 && pcum->can_split)
2905 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2906
2907 return 0;
2908 }
2909
2910 /* Variable sized types are passed by reference. This is a GCC
2911 extension to the ARM ABI. */
2912
2913 static bool
2914 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2915 enum machine_mode mode ATTRIBUTE_UNUSED,
2916 tree type, bool named ATTRIBUTE_UNUSED)
2917 {
2918 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2919 }
2920 \f
2921 /* Encode the current state of the #pragma [no_]long_calls. */
2922 typedef enum
2923 {
2924 OFF, /* No #pragma [no_]long_calls is in effect. */
2925 LONG, /* #pragma long_calls is in effect. */
2926 SHORT /* #pragma no_long_calls is in effect. */
2927 } arm_pragma_enum;
2928
2929 static arm_pragma_enum arm_pragma_long_calls = OFF;
2930
2931 void
2932 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2933 {
2934 arm_pragma_long_calls = LONG;
2935 }
2936
2937 void
2938 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2939 {
2940 arm_pragma_long_calls = SHORT;
2941 }
2942
2943 void
2944 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2945 {
2946 arm_pragma_long_calls = OFF;
2947 }
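
/* Typical use in user code (illustrative):

       #pragma long_calls
       void far_away (void);    (calls to this use a full 32-bit address)
       #pragma no_long_calls
       void near_by (void);     (calls to this may use a plain BL)
       #pragma long_calls_off

   Function types declared while one of these pragmas is active pick up
   the corresponding long_call/short_call attribute via
   arm_set_default_type_attributes below.  */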
2948 \f
2949 /* Table of machine attributes. */
2950 const struct attribute_spec arm_attribute_table[] =
2951 {
2952 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2953 /* Function calls made to this symbol must be done indirectly, because
2954 it may lie outside of the 26 bit addressing range of a normal function
2955 call. */
2956 { "long_call", 0, 0, false, true, true, NULL },
2957 /* These functions, by contrast, are always known to reside within the
2958 26 bit addressing range. */
2959 { "short_call", 0, 0, false, true, true, NULL },
2960 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2961 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2962 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2963 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2964 #ifdef ARM_PE
2965 /* ARM/PE has three new attributes:
2966 interfacearm - ?
2967 dllexport - for exporting a function/variable that will live in a dll
2968 dllimport - for importing a function/variable from a dll
2969
2970 Microsoft allows multiple declspecs in one __declspec, separating
2971 them with spaces. We do NOT support this. Instead, use __declspec
2972 multiple times.
2973 */
2974 { "dllimport", 0, 0, true, false, false, NULL },
2975 { "dllexport", 0, 0, true, false, false, NULL },
2976 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2977 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2978 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2979 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2980 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2981 #endif
2982 { NULL, 0, 0, false, false, false, NULL }
2983 };
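
/* Illustrative uses of the attributes above (the string arguments accepted
   by "isr"/"interrupt" come from the isr_attribute_args table earlier in
   this file):

       void fiq_handler (void) __attribute__ ((interrupt ("FIQ")));
       void far_func (void) __attribute__ ((long_call));
       void asm_stub (void) __attribute__ ((naked));  */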
2984
2985 /* Handle an attribute requiring a FUNCTION_DECL;
2986 arguments as in struct attribute_spec.handler. */
2987 static tree
2988 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2989 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2990 {
2991 if (TREE_CODE (*node) != FUNCTION_DECL)
2992 {
2993 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2994 IDENTIFIER_POINTER (name));
2995 *no_add_attrs = true;
2996 }
2997
2998 return NULL_TREE;
2999 }
3000
3001 /* Handle an "interrupt" or "isr" attribute;
3002 arguments as in struct attribute_spec.handler. */
3003 static tree
3004 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
3005 bool *no_add_attrs)
3006 {
3007 if (DECL_P (*node))
3008 {
3009 if (TREE_CODE (*node) != FUNCTION_DECL)
3010 {
3011 warning (OPT_Wattributes, "%qs attribute only applies to functions",
3012 IDENTIFIER_POINTER (name));
3013 *no_add_attrs = true;
3014 }
3015 /* FIXME: the argument, if any, is checked for type attributes;
3016 should it be checked for decl ones? */
3017 }
3018 else
3019 {
3020 if (TREE_CODE (*node) == FUNCTION_TYPE
3021 || TREE_CODE (*node) == METHOD_TYPE)
3022 {
3023 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
3024 {
3025 warning (OPT_Wattributes, "%qs attribute ignored",
3026 IDENTIFIER_POINTER (name));
3027 *no_add_attrs = true;
3028 }
3029 }
3030 else if (TREE_CODE (*node) == POINTER_TYPE
3031 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
3032 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
3033 && arm_isr_value (args) != ARM_FT_UNKNOWN)
3034 {
3035 *node = build_variant_type_copy (*node);
3036 TREE_TYPE (*node) = build_type_attribute_variant
3037 (TREE_TYPE (*node),
3038 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
3039 *no_add_attrs = true;
3040 }
3041 else
3042 {
3043 /* Possibly pass this attribute on from the type to a decl. */
3044 if (flags & ((int) ATTR_FLAG_DECL_NEXT
3045 | (int) ATTR_FLAG_FUNCTION_NEXT
3046 | (int) ATTR_FLAG_ARRAY_NEXT))
3047 {
3048 *no_add_attrs = true;
3049 return tree_cons (name, args, NULL_TREE);
3050 }
3051 else
3052 {
3053 warning (OPT_Wattributes, "%qs attribute ignored",
3054 IDENTIFIER_POINTER (name));
3055 }
3056 }
3057 }
3058
3059 return NULL_TREE;
3060 }
3061
3062 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
3063 /* Handle the "notshared" attribute. This attribute is another way of
3064 requesting hidden visibility. ARM's compiler supports
3065 "__declspec(notshared)"; we support the same thing via an
3066 attribute. */
3067
3068 static tree
3069 arm_handle_notshared_attribute (tree *node,
3070 tree name ATTRIBUTE_UNUSED,
3071 tree args ATTRIBUTE_UNUSED,
3072 int flags ATTRIBUTE_UNUSED,
3073 bool *no_add_attrs)
3074 {
3075 tree decl = TYPE_NAME (*node);
3076
3077 if (decl)
3078 {
3079 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
3080 DECL_VISIBILITY_SPECIFIED (decl) = 1;
3081 *no_add_attrs = false;
3082 }
3083 return NULL_TREE;
3084 }
3085 #endif
3086
3087 /* Return 0 if the attributes for two types are incompatible, 1 if they
3088 are compatible, and 2 if they are nearly compatible (which causes a
3089 warning to be generated). */
3090 static int
3091 arm_comp_type_attributes (tree type1, tree type2)
3092 {
3093 int l1, l2, s1, s2;
3094
3095 /* Check for mismatch of non-default calling convention. */
3096 if (TREE_CODE (type1) != FUNCTION_TYPE)
3097 return 1;
3098
3099 /* Check for mismatched call attributes. */
3100 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
3101 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
3102 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
3103 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
3104
3105 /* Only bother to check if an attribute is defined. */
3106 if (l1 | l2 | s1 | s2)
3107 {
3108 /* If one type has an attribute, the other must have the same attribute. */
3109 if ((l1 != l2) || (s1 != s2))
3110 return 0;
3111
3112 /* Disallow mixed attributes. */
3113 if ((l1 & s2) || (l2 & s1))
3114 return 0;
3115 }
3116
3117 /* Check for mismatched ISR attribute. */
3118 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3119 if (! l1)
3120 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3121 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3122 if (! l2)
3123 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3124 if (l1 != l2)
3125 return 0;
3126
3127 return 1;
3128 }
3129
3130 /* Assign default attributes to a newly defined type. This is used to
3131 set short_call/long_call attributes for function types of
3132 functions defined inside corresponding #pragma scopes. */
3133 static void
3134 arm_set_default_type_attributes (tree type)
3135 {
3136 /* Add __attribute__ ((long_call)) to all functions when inside
3137 #pragma long_calls, or __attribute__ ((short_call)) when inside
3138 #pragma no_long_calls. */
3139 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3140 {
3141 tree type_attr_list, attr_name;
3142 type_attr_list = TYPE_ATTRIBUTES (type);
3143
3144 if (arm_pragma_long_calls == LONG)
3145 attr_name = get_identifier ("long_call");
3146 else if (arm_pragma_long_calls == SHORT)
3147 attr_name = get_identifier ("short_call");
3148 else
3149 return;
3150
3151 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3152 TYPE_ATTRIBUTES (type) = type_attr_list;
3153 }
3154 }
3155 \f
3156 /* Return true if DECL is known to be linked into section SECTION. */
3157
3158 static bool
3159 arm_function_in_section_p (tree decl, section *section)
3160 {
3161 /* We can only be certain about functions defined in the same
3162 compilation unit. */
3163 if (!TREE_STATIC (decl))
3164 return false;
3165
3166 /* Make sure that SYMBOL always binds to the definition in this
3167 compilation unit. */
3168 if (!targetm.binds_local_p (decl))
3169 return false;
3170
3171 /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
3172 if (!DECL_SECTION_NAME (decl))
3173 {
3174 /* Only cater for unit-at-a-time mode, where we know that the user
3175 cannot later specify a section for DECL. */
3176 if (!flag_unit_at_a_time)
3177 return false;
3178
3179 /* Make sure that we will not create a unique section for DECL. */
3180 if (flag_function_sections || DECL_ONE_ONLY (decl))
3181 return false;
3182 }
3183
3184 return function_section (decl) == section;
3185 }
3186
3187 /* Return nonzero if a 32-bit "long_call" should be generated for
3188 a call from the current function to DECL. We generate a long_call
3189 if the function:
3190
3191 a. has an __attribute__ ((long_call))
3192 or b. is within the scope of a #pragma long_calls
3193 or c. the -mlong-calls command line switch has been specified
3194
3195 However we do not generate a long call if the function:
3196
3197 d. has an __attribute__ ((short_call))
3198 or e. is inside the scope of a #pragma no_long_calls
3199 or f. is defined in the same section as the current function. */
3200
3201 bool
3202 arm_is_long_call_p (tree decl)
3203 {
3204 tree attrs;
3205
3206 if (!decl)
3207 return TARGET_LONG_CALLS;
3208
3209 attrs = TYPE_ATTRIBUTES (TREE_TYPE (decl));
3210 if (lookup_attribute ("short_call", attrs))
3211 return false;
3212
3213 /* For "f", be conservative, and only cater for cases in which the
3214 whole of the current function is placed in the same section. */
3215 if (!flag_reorder_blocks_and_partition
3216 && arm_function_in_section_p (decl, current_function_section ()))
3217 return false;
3218
3219 if (lookup_attribute ("long_call", attrs))
3220 return true;
3221
3222 return TARGET_LONG_CALLS;
3223 }
3224
3225 /* Return nonzero if it is ok to make a tail-call to DECL. */
3226 static bool
3227 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3228 {
3229 unsigned long func_type;
3230
3231 if (cfun->machine->sibcall_blocked)
3232 return false;
3233
3234 /* Never tailcall something for which we have no decl, or if we
3235 are in Thumb mode. */
3236 if (decl == NULL || TARGET_THUMB)
3237 return false;
3238
3239 /* The PIC register is live on entry to VxWorks PLT entries, so we
3240 must make the call before restoring the PIC register. */
3241 if (TARGET_VXWORKS_RTP && flag_pic && !targetm.binds_local_p (decl))
3242 return false;
3243
3244 /* Cannot tail-call to long calls, since these are out of range of
3245 a branch instruction. */
3246 if (arm_is_long_call_p (decl))
3247 return false;
3248
3249 /* If we are interworking and the function is not declared static
3250 then we can't tail-call it unless we know that it exists in this
3251 compilation unit (since it might be a Thumb routine). */
3252 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3253 return false;
3254
3255 func_type = arm_current_func_type ();
3256 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3257 if (IS_INTERRUPT (func_type))
3258 return false;
3259
3260 /* Never tailcall if function may be called with a misaligned SP. */
3261 if (IS_STACKALIGN (func_type))
3262 return false;
3263
3264 /* Everything else is ok. */
3265 return true;
3266 }
3267
3268 \f
3269 /* Addressing mode support functions. */
3270
3271 /* Return nonzero if X is a legitimate immediate operand when compiling
3272 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3273 int
3274 legitimate_pic_operand_p (rtx x)
3275 {
3276 if (GET_CODE (x) == SYMBOL_REF
3277 || (GET_CODE (x) == CONST
3278 && GET_CODE (XEXP (x, 0)) == PLUS
3279 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3280 return 0;
3281
3282 return 1;
3283 }
3284
3285 /* Record that the current function needs a PIC register. Initialize
3286 cfun->machine->pic_reg if we have not already done so. */
3287
3288 static void
3289 require_pic_register (void)
3290 {
3291 /* A lot of the logic here is made obscure by the fact that this
3292 routine gets called as part of the rtx cost estimation process.
3293 We don't want those calls to affect any assumptions about the real
3294 function; and further, we can't call entry_of_function() until we
3295 start the real expansion process. */
3296 if (!current_function_uses_pic_offset_table)
3297 {
3298 gcc_assert (!no_new_pseudos);
3299 if (arm_pic_register != INVALID_REGNUM)
3300 {
3301 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3302
3303 /* Play games to avoid marking the function as needing pic
3304 if we are being called as part of the cost-estimation
3305 process. */
3306 if (current_ir_type () != IR_GIMPLE)
3307 current_function_uses_pic_offset_table = 1;
3308 }
3309 else
3310 {
3311 rtx seq;
3312
3313 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3314
3315 /* Play games to avoid marking the function as needing pic
3316 if we are being called as part of the cost-estimation
3317 process. */
3318 if (current_ir_type () != IR_GIMPLE)
3319 {
3320 current_function_uses_pic_offset_table = 1;
3321 start_sequence ();
3322
3323 arm_load_pic_register (0UL);
3324
3325 seq = get_insns ();
3326 end_sequence ();
3327 emit_insn_after (seq, entry_of_function ());
3328 }
3329 }
3330 }
3331 }
3332
3333 rtx
3334 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3335 {
3336 if (GET_CODE (orig) == SYMBOL_REF
3337 || GET_CODE (orig) == LABEL_REF)
3338 {
3339 #ifndef AOF_ASSEMBLER
3340 rtx pic_ref, address;
3341 #endif
3342 rtx insn;
3343 int subregs = 0;
3344
3345 /* If this function doesn't have a pic register, create one now. */
3346 require_pic_register ();
3347
3348 if (reg == 0)
3349 {
3350 gcc_assert (!no_new_pseudos);
3351 reg = gen_reg_rtx (Pmode);
3352
3353 subregs = 1;
3354 }
3355
3356 #ifdef AOF_ASSEMBLER
3357 /* The AOF assembler can generate relocations for these directly, and
3358 understands that the PIC register has to be added into the offset. */
3359 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3360 #else
3361 if (subregs)
3362 address = gen_reg_rtx (Pmode);
3363 else
3364 address = reg;
3365
3366 if (TARGET_ARM)
3367 emit_insn (gen_pic_load_addr_arm (address, orig));
3368 else if (TARGET_THUMB2)
3369 emit_insn (gen_pic_load_addr_thumb2 (address, orig));
3370 else /* TARGET_THUMB1 */
3371 emit_insn (gen_pic_load_addr_thumb1 (address, orig));
3372
3373 /* VxWorks does not impose a fixed gap between segments; the run-time
3374 gap can be different from the object-file gap. We therefore can't
3375 use GOTOFF unless we are absolutely sure that the symbol is in the
3376 same segment as the GOT. Unfortunately, the flexibility of linker
3377 scripts means that we can't be sure of that in general, so assume
3378 that GOTOFF is never valid on VxWorks. */
3379 if ((GET_CODE (orig) == LABEL_REF
3380 || (GET_CODE (orig) == SYMBOL_REF &&
3381 SYMBOL_REF_LOCAL_P (orig)))
3382 && NEED_GOT_RELOC
3383 && !TARGET_VXWORKS_RTP)
3384 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3385 else
3386 {
3387 pic_ref = gen_const_mem (Pmode,
3388 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3389 address));
3390 }
3391
3392 insn = emit_move_insn (reg, pic_ref);
3393 #endif
3394 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3395 by the loop optimizer. */
3396 set_unique_reg_note (insn, REG_EQUAL, orig);
3397
3398 return reg;
3399 }
3400 else if (GET_CODE (orig) == CONST)
3401 {
3402 rtx base, offset;
3403
3404 if (GET_CODE (XEXP (orig, 0)) == PLUS
3405 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3406 return orig;
3407
3408 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3409 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3410 return orig;
3411
3412 if (reg == 0)
3413 {
3414 gcc_assert (!no_new_pseudos);
3415 reg = gen_reg_rtx (Pmode);
3416 }
3417
3418 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3419
3420 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3421 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3422 base == reg ? 0 : reg);
3423
3424 if (GET_CODE (offset) == CONST_INT)
3425 {
3426 /* The base register doesn't really matter; we only want to
3427 test the index for the appropriate mode. */
3428 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3429 {
3430 gcc_assert (!no_new_pseudos);
3431 offset = force_reg (Pmode, offset);
3432 }
3433
3434 if (GET_CODE (offset) == CONST_INT)
3435 return plus_constant (base, INTVAL (offset));
3436 }
3437
3438 if (GET_MODE_SIZE (mode) > 4
3439 && (GET_MODE_CLASS (mode) == MODE_INT
3440 || TARGET_SOFT_FLOAT))
3441 {
3442 emit_insn (gen_addsi3 (reg, base, offset));
3443 return reg;
3444 }
3445
3446 return gen_rtx_PLUS (Pmode, base, offset);
3447 }
3448
3449 return orig;
3450 }
3451
3452
3453 /* Find a spare register to use during the prologue of a function. */
3454
3455 static int
3456 thumb_find_work_register (unsigned long pushed_regs_mask)
3457 {
3458 int reg;
3459
3460 /* Check the argument registers first as these are call-used. The
3461 register allocation order means that sometimes r3 might be used
3462 but earlier argument registers might not, so check them all. */
3463 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3464 if (!df_regs_ever_live_p (reg))
3465 return reg;
3466
3467 /* Before going on to check the call-saved registers we can try a couple
3468 more ways of deducing that r3 is available. The first is when we are
3469 pushing anonymous arguments onto the stack and we have fewer than 4
3470 registers' worth of fixed arguments (*). In this case r3 will be part of
3471 the variable argument list and so we can be sure that it will be
3472 pushed right at the start of the function. Hence it will be available
3473 for the rest of the prologue.
3474 (*): i.e. current_function_pretend_args_size is greater than 0. */
3475 if (cfun->machine->uses_anonymous_args
3476 && current_function_pretend_args_size > 0)
3477 return LAST_ARG_REGNUM;
3478
3479 /* The other case is when we have fixed arguments but fewer than 4 registers'
3480 worth. In this case r3 might be used in the body of the function, but
3481 it is not being used to convey an argument into the function. In theory
3482 we could just check current_function_args_size to see how many bytes are
3483 being passed in argument registers, but it seems that it is unreliable.
3484 Sometimes it will have the value 0 when in fact arguments are being
3485 passed. (See testcase execute/20021111-1.c for an example.) So we
3486 also check the args_info.nregs field. The problem with this field is
3487 that it makes no allowances for arguments that are passed to the
3488 function but which are not used. Hence we could miss an opportunity
3489 when a function has an unused argument in r3. But it is better to be
3490 safe than sorry. */
3491 if (! cfun->machine->uses_anonymous_args
3492 && current_function_args_size >= 0
3493 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3494 && cfun->args_info.nregs < 4)
3495 return LAST_ARG_REGNUM;
3496
3497 /* Otherwise look for a call-saved register that is going to be pushed. */
3498 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3499 if (pushed_regs_mask & (1 << reg))
3500 return reg;
3501
3502 if (TARGET_THUMB2)
3503 {
3504 /* Thumb-2 can use high regs. */
3505 for (reg = FIRST_HI_REGNUM; reg < 15; reg ++)
3506 if (pushed_regs_mask & (1 << reg))
3507 return reg;
3508 }
3509 /* Something went wrong - thumb_compute_save_reg_mask()
3510 should have arranged for a suitable register to be pushed. */
3511 gcc_unreachable ();
3512 }
3513
3514 static GTY(()) int pic_labelno;
3515
3516 /* Generate code to load the PIC register. SAVED_REGS is the prologue's
3517 register mask, used in Thumb mode to find a low work register. */
3518
3519 void
3520 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3521 {
3522 #ifndef AOF_ASSEMBLER
3523 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx, pic_reg;
3524 rtx global_offset_table;
3525
3526 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3527 return;
3528
3529 gcc_assert (flag_pic);
3530
3531 pic_reg = cfun->machine->pic_reg;
3532 if (TARGET_VXWORKS_RTP)
3533 {
3534 pic_rtx = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE);
3535 pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
3536 emit_insn (gen_pic_load_addr_arm (pic_reg, pic_rtx));
3537
3538 emit_insn (gen_rtx_SET (Pmode, pic_reg, gen_rtx_MEM (Pmode, pic_reg)));
3539
3540 pic_tmp = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
3541 emit_insn (gen_pic_offset_arm (pic_reg, pic_reg, pic_tmp));
3542 }
3543 else
3544 {
3545 /* We use an UNSPEC rather than a LABEL_REF because this label
3546 never appears in the code stream. */
3547
3548 labelno = GEN_INT (pic_labelno++);
3549 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3550 l1 = gen_rtx_CONST (VOIDmode, l1);
3551
3552 global_offset_table
3553 = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3554 /* On the ARM the PC register contains 'dot + 8' at the time of the
3555 addition; on the Thumb it is 'dot + 4'. */
3556 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3557 if (GOT_PCREL)
3558 {
3559 pic_tmp2 = gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx);
3560 pic_tmp2 = gen_rtx_CONST (VOIDmode, pic_tmp2);
3561 }
3562 else
3563 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3564
3565 pic_rtx = gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp);
3566 pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
3567
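/* In outline: pic_rtx now holds the displacement from the point at which
   the pc will be read (the label plus 8 in ARM state, or plus 4 in Thumb
   state) to the GOT, so adding the pc value sampled at that label leaves
   the address of the GOT in PIC_REG.  */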
3568 if (TARGET_ARM)
3569 {
3570 emit_insn (gen_pic_load_addr_arm (pic_reg, pic_rtx));
3571 emit_insn (gen_pic_add_dot_plus_eight (pic_reg, pic_reg, labelno));
3572 }
3573 else if (TARGET_THUMB2)
3574 {
3575 /* Thumb-2 only allows very limited access to the PC. Calculate the
3576 address in a temporary register. */
3577 if (arm_pic_register != INVALID_REGNUM)
3578 {
3579 pic_tmp = gen_rtx_REG (SImode,
3580 thumb_find_work_register (saved_regs));
3581 }
3582 else
3583 {
3584 gcc_assert (!no_new_pseudos);
3585 pic_tmp = gen_reg_rtx (Pmode);
3586 }
3587
3588 emit_insn (gen_pic_load_addr_thumb2 (pic_reg, pic_rtx));
3589 emit_insn (gen_pic_load_dot_plus_four (pic_tmp, labelno));
3590 emit_insn (gen_addsi3 (pic_reg, pic_reg, pic_tmp));
3591 }
3592 else /* TARGET_THUMB1 */
3593 {
3594 if (arm_pic_register != INVALID_REGNUM
3595 && REGNO (pic_reg) > LAST_LO_REGNUM)
3596 {
3597 /* We will have pushed the pic register, so we should always be
3598 able to find a work register. */
3599 pic_tmp = gen_rtx_REG (SImode,
3600 thumb_find_work_register (saved_regs));
3601 emit_insn (gen_pic_load_addr_thumb1 (pic_tmp, pic_rtx));
3602 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3603 }
3604 else
3605 emit_insn (gen_pic_load_addr_thumb1 (pic_reg, pic_rtx));
3606 emit_insn (gen_pic_add_dot_plus_four (pic_reg, pic_reg, labelno));
3607 }
3608 }
3609
3610 /* Need to emit this whether or not we obey regdecls,
3611 since setjmp/longjmp can corrupt the liveness information. */
3612 emit_insn (gen_rtx_USE (VOIDmode, pic_reg));
3613 #endif /* AOF_ASSEMBLER */
3614 }
3615
3616
3617 /* Return nonzero if X is valid as an ARM state addressing register. */
3618 static int
3619 arm_address_register_rtx_p (rtx x, int strict_p)
3620 {
3621 int regno;
3622
3623 if (GET_CODE (x) != REG)
3624 return 0;
3625
3626 regno = REGNO (x);
3627
3628 if (strict_p)
3629 return ARM_REGNO_OK_FOR_BASE_P (regno);
3630
3631 return (regno <= LAST_ARM_REGNUM
3632 || regno >= FIRST_PSEUDO_REGISTER
3633 || regno == FRAME_POINTER_REGNUM
3634 || regno == ARG_POINTER_REGNUM);
3635 }
3636
3637 /* Return TRUE if this rtx is the difference of a symbol and a label,
3638 and will reduce to a PC-relative relocation in the object file.
3639 Expressions like this can be left alone when generating PIC, rather
3640 than forced through the GOT. */
3641 static int
3642 pcrel_constant_p (rtx x)
3643 {
3644 if (GET_CODE (x) == MINUS)
3645 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3646
3647 return FALSE;
3648 }
3649
3650 /* Return nonzero if X is a valid ARM state address operand. */
3651 int
3652 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3653 int strict_p)
3654 {
3655 bool use_ldrd;
3656 enum rtx_code code = GET_CODE (x);
3657
3658 if (arm_address_register_rtx_p (x, strict_p))
3659 return 1;
3660
3661 use_ldrd = (TARGET_LDRD
3662 && (mode == DImode
3663 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3664
3665 if (code == POST_INC || code == PRE_DEC
3666 || ((code == PRE_INC || code == POST_DEC)
3667 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3668 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3669
3670 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3671 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3672 && GET_CODE (XEXP (x, 1)) == PLUS
3673 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3674 {
3675 rtx addend = XEXP (XEXP (x, 1), 1);
3676
3677 /* Don't allow ldrd post-increment by register because it's hard
3678 to fix up invalid register choices. */
3679 if (use_ldrd
3680 && GET_CODE (x) == POST_MODIFY
3681 && GET_CODE (addend) == REG)
3682 return 0;
3683
3684 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3685 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3686 }
3687
3688 /* After reload, constants split into minipools will have addresses
3689 from a LABEL_REF. */
3690 else if (reload_completed
3691 && (code == LABEL_REF
3692 || (code == CONST
3693 && GET_CODE (XEXP (x, 0)) == PLUS
3694 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3695 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3696 return 1;
3697
3698 else if (mode == TImode)
3699 return 0;
3700
3701 else if (code == PLUS)
3702 {
3703 rtx xop0 = XEXP (x, 0);
3704 rtx xop1 = XEXP (x, 1);
3705
3706 return ((arm_address_register_rtx_p (xop0, strict_p)
3707 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3708 || (arm_address_register_rtx_p (xop1, strict_p)
3709 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3710 }
3711
3712 #if 0
3713 /* Reload currently can't handle MINUS, so disable this for now */
3714 else if (GET_CODE (x) == MINUS)
3715 {
3716 rtx xop0 = XEXP (x, 0);
3717 rtx xop1 = XEXP (x, 1);
3718
3719 return (arm_address_register_rtx_p (xop0, strict_p)
3720 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3721 }
3722 #endif
3723
3724 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3725 && code == SYMBOL_REF
3726 && CONSTANT_POOL_ADDRESS_P (x)
3727 && ! (flag_pic
3728 && symbol_mentioned_p (get_pool_constant (x))
3729 && ! pcrel_constant_p (get_pool_constant (x))))
3730 return 1;
3731
3732 return 0;
3733 }
3734
3735 /* Return nonzero if X is a valid Thumb-2 address operand. */
3736 int
3737 thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3738 {
3739 bool use_ldrd;
3740 enum rtx_code code = GET_CODE (x);
3741
3742 if (arm_address_register_rtx_p (x, strict_p))
3743 return 1;
3744
3745 use_ldrd = (TARGET_LDRD
3746 && (mode == DImode
3747 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3748
3749 if (code == POST_INC || code == PRE_DEC
3750 || ((code == PRE_INC || code == POST_DEC)
3751 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3752 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3753
3754 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3755 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3756 && GET_CODE (XEXP (x, 1)) == PLUS
3757 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3758 {
3759 /* Thumb-2 only has autoincrement by constant. */
3760 rtx addend = XEXP (XEXP (x, 1), 1);
3761 HOST_WIDE_INT offset;
3762
3763 if (GET_CODE (addend) != CONST_INT)
3764 return 0;
3765
3766 offset = INTVAL (addend);
3767 if (GET_MODE_SIZE (mode) <= 4)
3768 return (offset > -256 && offset < 256);
3769
3770 return (use_ldrd && offset > -1024 && offset < 1024
3771 && (offset & 3) == 0);
3772 }
3773
3774 /* After reload, constants split into minipools will have addresses
3775 from a LABEL_REF. */
3776 else if (reload_completed
3777 && (code == LABEL_REF
3778 || (code == CONST
3779 && GET_CODE (XEXP (x, 0)) == PLUS
3780 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3781 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3782 return 1;
3783
3784 else if (mode == TImode)
3785 return 0;
3786
3787 else if (code == PLUS)
3788 {
3789 rtx xop0 = XEXP (x, 0);
3790 rtx xop1 = XEXP (x, 1);
3791
3792 return ((arm_address_register_rtx_p (xop0, strict_p)
3793 && thumb2_legitimate_index_p (mode, xop1, strict_p))
3794 || (arm_address_register_rtx_p (xop1, strict_p)
3795 && thumb2_legitimate_index_p (mode, xop0, strict_p)));
3796 }
3797
3798 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3799 && code == SYMBOL_REF
3800 && CONSTANT_POOL_ADDRESS_P (x)
3801 && ! (flag_pic
3802 && symbol_mentioned_p (get_pool_constant (x))
3803 && ! pcrel_constant_p (get_pool_constant (x))))
3804 return 1;
3805
3806 return 0;
3807 }
3808
3809 /* Return nonzero if INDEX is valid for an address index operand in
3810 ARM state. */
3811 static int
3812 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3813 int strict_p)
3814 {
3815 HOST_WIDE_INT range;
3816 enum rtx_code code = GET_CODE (index);
3817
3818 /* Standard coprocessor addressing modes. */
3819 if (TARGET_HARD_FLOAT
3820 && (TARGET_FPA || TARGET_MAVERICK)
3821 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3822 || (TARGET_MAVERICK && mode == DImode)))
3823 return (code == CONST_INT && INTVAL (index) < 1024
3824 && INTVAL (index) > -1024
3825 && (INTVAL (index) & 3) == 0);
3826
3827 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3828 return (code == CONST_INT
3829 && INTVAL (index) < 1024
3830 && INTVAL (index) > -1024
3831 && (INTVAL (index) & 3) == 0);
3832
3833 if (arm_address_register_rtx_p (index, strict_p)
3834 && (GET_MODE_SIZE (mode) <= 4))
3835 return 1;
3836
3837 if (mode == DImode || mode == DFmode)
3838 {
3839 if (code == CONST_INT)
3840 {
3841 HOST_WIDE_INT val = INTVAL (index);
3842
3843 if (TARGET_LDRD)
3844 return val > -256 && val < 256;
3845 else
3846 return val > -4096 && val < 4092;
3847 }
3848
3849 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3850 }
3851
3852 if (GET_MODE_SIZE (mode) <= 4
3853 && ! (arm_arch4
3854 && (mode == HImode
3855 || (mode == QImode && outer == SIGN_EXTEND))))
3856 {
3857 if (code == MULT)
3858 {
3859 rtx xiop0 = XEXP (index, 0);
3860 rtx xiop1 = XEXP (index, 1);
3861
3862 return ((arm_address_register_rtx_p (xiop0, strict_p)
3863 && power_of_two_operand (xiop1, SImode))
3864 || (arm_address_register_rtx_p (xiop1, strict_p)
3865 && power_of_two_operand (xiop0, SImode)));
3866 }
3867 else if (code == LSHIFTRT || code == ASHIFTRT
3868 || code == ASHIFT || code == ROTATERT)
3869 {
3870 rtx op = XEXP (index, 1);
3871
3872 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3873 && GET_CODE (op) == CONST_INT
3874 && INTVAL (op) > 0
3875 && INTVAL (op) <= 31);
3876 }
3877 }
3878
3879 /* For ARM v4 we may be doing a sign-extend operation during the
3880 load. */
3881 if (arm_arch4)
3882 {
3883 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3884 range = 256;
3885 else
3886 range = 4096;
3887 }
3888 else
3889 range = (mode == HImode) ? 4095 : 4096;
3890
3891 return (code == CONST_INT
3892 && INTVAL (index) < range
3893 && INTVAL (index) > -range);
3894 }
3895
3896 /* Return true if OP is a valid index scaling factor for a Thumb-2 address
3897 index operand, i.e. 1, 2, 4 or 8. */
3898 static bool
3899 thumb2_index_mul_operand (rtx op)
3900 {
3901 HOST_WIDE_INT val;
3902
3903 if (GET_CODE (op) != CONST_INT)
3904 return false;
3905
3906 val = INTVAL (op);
3907 return (val == 1 || val == 2 || val == 4 || val == 8);
3908 }
3909
3910 /* Return nonzero if INDEX is a valid Thumb-2 address index operand. */
3911 static int
3912 thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
3913 {
3914 enum rtx_code code = GET_CODE (index);
3915
3916 /* ??? Combine arm and thumb2 coprocessor addressing modes. */
3917 /* Standard coprocessor addressing modes. */
3918 if (TARGET_HARD_FLOAT
3919 && (TARGET_FPA || TARGET_MAVERICK)
3920 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3921 || (TARGET_MAVERICK && mode == DImode)))
3922 return (code == CONST_INT && INTVAL (index) < 1024
3923 && INTVAL (index) > -1024
3924 && (INTVAL (index) & 3) == 0);
3925
3926 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3927 {
3928 /* For DImode assume values will usually live in core regs
3929 and only allow LDRD addressing modes. */
3930 if (!TARGET_LDRD || mode != DImode)
3931 return (code == CONST_INT
3932 && INTVAL (index) < 1024
3933 && INTVAL (index) > -1024
3934 && (INTVAL (index) & 3) == 0);
3935 }
3936
3937 if (arm_address_register_rtx_p (index, strict_p)
3938 && (GET_MODE_SIZE (mode) <= 4))
3939 return 1;
3940
3941 if (mode == DImode || mode == DFmode)
3942 {
3943 HOST_WIDE_INT val;
3944 /* ??? Can we assume ldrd for thumb2? */
3945 /* Thumb-2 ldrd only has reg+const addressing modes. */
3946 if (code != CONST_INT)
3947 return 0;
3948 val = INTVAL (index);
3949 /* ldrd supports offsets of +-1020.
3950 However the ldr fallback does not. */
3951 return val > -256 && val < 256 && (val & 3) == 0;
3952 }
3953
3954 if (code == MULT)
3955 {
3956 rtx xiop0 = XEXP (index, 0);
3957 rtx xiop1 = XEXP (index, 1);
3958
3959 return ((arm_address_register_rtx_p (xiop0, strict_p)
3960 && thumb2_index_mul_operand (xiop1))
3961 || (arm_address_register_rtx_p (xiop1, strict_p)
3962 && thumb2_index_mul_operand (xiop0)));
3963 }
3964 else if (code == ASHIFT)
3965 {
3966 rtx op = XEXP (index, 1);
3967
3968 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3969 && GET_CODE (op) == CONST_INT
3970 && INTVAL (op) > 0
3971 && INTVAL (op) <= 3);
3972 }
3973
3974 return (code == CONST_INT
3975 && INTVAL (index) < 4096
3976 && INTVAL (index) > -256);
3977 }
3978
3979 /* Return nonzero if X is valid as a 16-bit Thumb state base register. */
3980 static int
3981 thumb1_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3982 {
3983 int regno;
3984
3985 if (GET_CODE (x) != REG)
3986 return 0;
3987
3988 regno = REGNO (x);
3989
3990 if (strict_p)
3991 return THUMB1_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3992
3993 return (regno <= LAST_LO_REGNUM
3994 || regno > LAST_VIRTUAL_REGISTER
3995 || regno == FRAME_POINTER_REGNUM
3996 || (GET_MODE_SIZE (mode) >= 4
3997 && (regno == STACK_POINTER_REGNUM
3998 || regno >= FIRST_PSEUDO_REGISTER
3999 || x == hard_frame_pointer_rtx
4000 || x == arg_pointer_rtx)));
4001 }
4002
4003 /* Return nonzero if x is a legitimate index register. This is the case
4004 for any base register that can access a QImode object. */
4005 inline static int
4006 thumb1_index_register_rtx_p (rtx x, int strict_p)
4007 {
4008 return thumb1_base_register_rtx_p (x, QImode, strict_p);
4009 }
4010
4011 /* Return nonzero if x is a legitimate 16-bit Thumb-state address.
4012
4013 The AP may be eliminated to either the SP or the FP, so we use the
4014 least common denominator, e.g. SImode, and offsets from 0 to 64.
4015
4016 ??? Verify whether the above is the right approach.
4017
4018 ??? Also, the FP may be eliminated to the SP, so perhaps that
4019 needs special handling also.
4020
4021 ??? Look at how the mips16 port solves this problem. It probably uses
4022 better ways to solve some of these problems.
4023
4024 Although it is not incorrect, we don't accept QImode and HImode
4025 addresses based on the frame pointer or arg pointer until the
4026 reload pass starts. This is so that eliminating such addresses
4027 into stack based ones won't produce impossible code. */
4028 int
4029 thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
4030 {
4031 /* ??? Not clear if this is right. Experiment. */
4032 if (GET_MODE_SIZE (mode) < 4
4033 && !(reload_in_progress || reload_completed)
4034 && (reg_mentioned_p (frame_pointer_rtx, x)
4035 || reg_mentioned_p (arg_pointer_rtx, x)
4036 || reg_mentioned_p (virtual_incoming_args_rtx, x)
4037 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
4038 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
4039 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
4040 return 0;
4041
4042 /* Accept any base register. SP only in SImode or larger. */
4043 else if (thumb1_base_register_rtx_p (x, mode, strict_p))
4044 return 1;
4045
4046 /* This is PC relative data before arm_reorg runs. */
4047 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
4048 && GET_CODE (x) == SYMBOL_REF
4049 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
4050 return 1;
4051
4052 /* This is PC relative data after arm_reorg runs. */
4053 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
4054 && (GET_CODE (x) == LABEL_REF
4055 || (GET_CODE (x) == CONST
4056 && GET_CODE (XEXP (x, 0)) == PLUS
4057 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
4058 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
4059 return 1;
4060
4061 /* Post-inc indexing only supported for SImode and larger. */
4062 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
4063 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p))
4064 return 1;
4065
4066 else if (GET_CODE (x) == PLUS)
4067 {
4068 /* REG+REG address can be any two index registers. */
4069 /* We disallow FRAME+REG addressing since we know that FRAME
4070 will be replaced with STACK, and SP relative addressing only
4071 permits SP+OFFSET. */
4072 if (GET_MODE_SIZE (mode) <= 4
4073 && XEXP (x, 0) != frame_pointer_rtx
4074 && XEXP (x, 1) != frame_pointer_rtx
4075 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
4076 && thumb1_index_register_rtx_p (XEXP (x, 1), strict_p))
4077 return 1;
4078
4079 /* REG+const has a 5- to 7-bit offset for non-SP registers. */
4080 else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
4081 || XEXP (x, 0) == arg_pointer_rtx)
4082 && GET_CODE (XEXP (x, 1)) == CONST_INT
4083 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4084 return 1;
4085
4086 /* REG+const has a 10-bit offset for SP, but only SImode and
4087 larger are supported. */
4088 /* ??? Should probably check for DI/DFmode overflow here
4089 just like GO_IF_LEGITIMATE_OFFSET does. */
4090 else if (GET_CODE (XEXP (x, 0)) == REG
4091 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
4092 && GET_MODE_SIZE (mode) >= 4
4093 && GET_CODE (XEXP (x, 1)) == CONST_INT
4094 && INTVAL (XEXP (x, 1)) >= 0
4095 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
4096 && (INTVAL (XEXP (x, 1)) & 3) == 0)
4097 return 1;
4098
4099 else if (GET_CODE (XEXP (x, 0)) == REG
4100 && (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
4101 || REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
4102 || (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
4103 && REGNO (XEXP (x, 0)) <= LAST_VIRTUAL_REGISTER))
4104 && GET_MODE_SIZE (mode) >= 4
4105 && GET_CODE (XEXP (x, 1)) == CONST_INT
4106 && (INTVAL (XEXP (x, 1)) & 3) == 0)
4107 return 1;
4108 }
4109
4110 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
4111 && GET_MODE_SIZE (mode) == 4
4112 && GET_CODE (x) == SYMBOL_REF
4113 && CONSTANT_POOL_ADDRESS_P (x)
4114 && ! (flag_pic
4115 && symbol_mentioned_p (get_pool_constant (x))
4116 && ! pcrel_constant_p (get_pool_constant (x))))
4117 return 1;
4118
4119 return 0;
4120 }
4121
4122 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
4123 instruction of mode MODE. */
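/* For example: byte accesses may use offsets 0..31, halfword accesses
   even offsets 0..62, and word (or larger) accesses word-aligned offsets
   that keep the whole value within the first 128 bytes, i.e. 0..124 for
   SImode.  */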
4124 int
4125 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
4126 {
4127 switch (GET_MODE_SIZE (mode))
4128 {
4129 case 1:
4130 return val >= 0 && val < 32;
4131
4132 case 2:
4133 return val >= 0 && val < 64 && (val & 1) == 0;
4134
4135 default:
4136 return (val >= 0
4137 && (val + GET_MODE_SIZE (mode)) <= 128
4138 && (val & 3) == 0);
4139 }
4140 }
4141
4142 /* Build the SYMBOL_REF for __tls_get_addr. */
4143
4144 static GTY(()) rtx tls_get_addr_libfunc;
4145
4146 static rtx
4147 get_tls_get_addr (void)
4148 {
4149 if (!tls_get_addr_libfunc)
4150 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
4151 return tls_get_addr_libfunc;
4152 }
4153
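/* Load the thread pointer into TARGET, or into a fresh pseudo if TARGET
   is null.  Depending on the thread-pointer model this is either a
   coprocessor register read or a call to a helper routine that returns
   the value in r0.  */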
4154 static rtx
4155 arm_load_tp (rtx target)
4156 {
4157 if (!target)
4158 target = gen_reg_rtx (SImode);
4159
4160 if (TARGET_HARD_TP)
4161 {
4162 /* Can return in any reg. */
4163 emit_insn (gen_load_tp_hard (target));
4164 }
4165 else
4166 {
4167 /* Always returned in r0. Immediately copy the result into a pseudo,
4168 otherwise other uses of r0 (e.g. setting up function arguments) may
4169 clobber the value. */
4170
4171 rtx tmp;
4172
4173 emit_insn (gen_load_tp_soft ());
4174
4175 tmp = gen_rtx_REG (SImode, 0);
4176 emit_move_insn (target, tmp);
4177 }
4178 return target;
4179 }
4180
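/* Copy the UNSPEC-wrapped TLS operand X into REG, allocating a fresh
   pseudo if REG is null, and return the register used.  */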
4181 static rtx
4182 load_tls_operand (rtx x, rtx reg)
4183 {
4184 rtx tmp;
4185
4186 if (reg == NULL_RTX)
4187 reg = gen_reg_rtx (SImode);
4188
4189 tmp = gen_rtx_CONST (SImode, x);
4190
4191 emit_move_insn (reg, tmp);
4192
4193 return reg;
4194 }
4195
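/* Emit, as a detached sequence, the pc-relative address calculation for
   the TLS symbol X under relocation RELOC followed by the call to
   __tls_get_addr.  Return the insns; the rtx holding the call's result
   is stored in *VALUEP.  */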
4196 static rtx
4197 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
4198 {
4199 rtx insns, label, labelno, sum;
4200
4201 start_sequence ();
4202
4203 labelno = GEN_INT (pic_labelno++);
4204 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
4205 label = gen_rtx_CONST (VOIDmode, label);
4206
4207 sum = gen_rtx_UNSPEC (Pmode,
4208 gen_rtvec (4, x, GEN_INT (reloc), label,
4209 GEN_INT (TARGET_ARM ? 8 : 4)),
4210 UNSPEC_TLS);
4211 reg = load_tls_operand (sum, reg);
4212
4213 if (TARGET_ARM)
4214 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
4215 else if (TARGET_THUMB2)
4216 {
4217 rtx tmp;
4218 /* Thumb-2 only allows very limited access to the PC. Calculate
4219 the address in a temporary register. */
4220 tmp = gen_reg_rtx (SImode);
4221 emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
4222 emit_insn (gen_addsi3 (reg, reg, tmp));
4223 }
4224 else /* TARGET_THUMB1 */
4225 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
4226
4227 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
4228 Pmode, 1, reg, Pmode);
4229
4230 insns = get_insns ();
4231 end_sequence ();
4232
4233 return insns;
4234 }
4235
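/* Expand a reference to the thread-local symbol X into a legitimate
   address, using the access sequence dictated by the symbol's TLS model
   (global dynamic, local dynamic, initial exec or local exec).  REG, if
   non-null, is a register that may be used to build the address.  */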
4236 rtx
4237 legitimize_tls_address (rtx x, rtx reg)
4238 {
4239 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
4240 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
4241
4242 switch (model)
4243 {
4244 case TLS_MODEL_GLOBAL_DYNAMIC:
4245 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
4246 dest = gen_reg_rtx (Pmode);
4247 emit_libcall_block (insns, dest, ret, x);
4248 return dest;
4249
4250 case TLS_MODEL_LOCAL_DYNAMIC:
4251 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
4252
4253 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
4254 share the LDM result with other LD model accesses. */
4255 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
4256 UNSPEC_TLS);
4257 dest = gen_reg_rtx (Pmode);
4258 emit_libcall_block (insns, dest, ret, eqv);
4259
4260 /* Load the addend. */
4261 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
4262 UNSPEC_TLS);
4263 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
4264 return gen_rtx_PLUS (Pmode, dest, addend);
4265
4266 case TLS_MODEL_INITIAL_EXEC:
4267 labelno = GEN_INT (pic_labelno++);
4268 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
4269 label = gen_rtx_CONST (VOIDmode, label);
4270 sum = gen_rtx_UNSPEC (Pmode,
4271 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
4272 GEN_INT (TARGET_ARM ? 8 : 4)),
4273 UNSPEC_TLS);
4274 reg = load_tls_operand (sum, reg);
4275
4276 if (TARGET_ARM)
4277 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
4278 else if (TARGET_THUMB2)
4279 {
4280 rtx tmp;
4281 /* Thumb-2 only allows very limited access to the PC. Calculate
4282 the address in a temporary register. */
4283 tmp = gen_reg_rtx (SImode);
4284 emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
4285 emit_insn (gen_addsi3 (reg, reg, tmp));
4286 emit_move_insn (reg, gen_const_mem (SImode, reg));
4287 }
4288 else
4289 {
4290 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
4291 emit_move_insn (reg, gen_const_mem (SImode, reg));
4292 }
4293
4294 tp = arm_load_tp (NULL_RTX);
4295
4296 return gen_rtx_PLUS (Pmode, tp, reg);
4297
4298 case TLS_MODEL_LOCAL_EXEC:
4299 tp = arm_load_tp (NULL_RTX);
4300
4301 reg = gen_rtx_UNSPEC (Pmode,
4302 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
4303 UNSPEC_TLS);
4304 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
4305
4306 return gen_rtx_PLUS (Pmode, tp, reg);
4307
4308 default:
4309 abort ();
4310 }
4311 }
4312
4313 /* Try machine-dependent ways of modifying an illegitimate address
4314 to be legitimate. If we find one, return the new, valid address. */
4315 rtx
4316 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4317 {
4318 if (arm_tls_symbol_p (x))
4319 return legitimize_tls_address (x, NULL_RTX);
4320
4321 if (GET_CODE (x) == PLUS)
4322 {
4323 rtx xop0 = XEXP (x, 0);
4324 rtx xop1 = XEXP (x, 1);
4325
4326 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
4327 xop0 = force_reg (SImode, xop0);
4328
4329 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4330 xop1 = force_reg (SImode, xop1);
4331
4332 if (ARM_BASE_REGISTER_RTX_P (xop0)
4333 && GET_CODE (xop1) == CONST_INT)
4334 {
4335 HOST_WIDE_INT n, low_n;
4336 rtx base_reg, val;
4337 n = INTVAL (xop1);
4338
4339 /* VFP addressing modes actually allow greater offsets, but for
4340 now we just stick with the lowest common denominator. */
4341 if (mode == DImode
4342 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4343 {
4344 low_n = n & 0x0f;
4345 n &= ~0x0f;
4346 if (low_n > 4)
4347 {
4348 n += 16;
4349 low_n -= 16;
4350 }
4351 }
4352 else
4353 {
4354 low_n = ((mode) == TImode ? 0
4355 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4356 n -= low_n;
4357 }
4358
4359 base_reg = gen_reg_rtx (SImode);
4360 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4361 emit_move_insn (base_reg, val);
4362 x = plus_constant (base_reg, low_n);
4363 }
4364 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4365 x = gen_rtx_PLUS (SImode, xop0, xop1);
4366 }
4367
4368 /* XXX We don't allow MINUS any more -- see comment in
4369 arm_legitimate_address_p (). */
4370 else if (GET_CODE (x) == MINUS)
4371 {
4372 rtx xop0 = XEXP (x, 0);
4373 rtx xop1 = XEXP (x, 1);
4374
4375 if (CONSTANT_P (xop0))
4376 xop0 = force_reg (SImode, xop0);
4377
4378 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4379 xop1 = force_reg (SImode, xop1);
4380
4381 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4382 x = gen_rtx_MINUS (SImode, xop0, xop1);
4383 }
4384
4385 /* Make sure to take full advantage of the pre-indexed addressing mode
4386 with absolute addresses, which often allows the base register to be
4387 shared between multiple adjacent memory references, and might even
4388 allow the minipool to be avoided entirely. */
4389 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4390 {
4391 unsigned int bits;
4392 HOST_WIDE_INT mask, base, index;
4393 rtx base_reg;
4394
4395 /* ldr and ldrb can use a 12-bit index; ldrsb and the rest can only
4396 use an 8-bit index. So let's use a 12-bit index for SImode only and
4397 hope that arm_gen_constant will enable ldrb to use more bits. */
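/* A worked example: addressing SImode data at the absolute address
   0x12345678 loads 0x12345000 into a base register and then uses
   [base, #0x678], so nearby absolute references can share the same
   base register.  */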
4398 bits = (mode == SImode) ? 12 : 8;
4399 mask = (1 << bits) - 1;
4400 base = INTVAL (x) & ~mask;
4401 index = INTVAL (x) & mask;
4402 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4403 {
4404 /* It'll most probably be more efficient to generate the base
4405 with more bits set and use a negative index instead. */
4406 base |= mask;
4407 index -= mask;
4408 }
4409 base_reg = force_reg (SImode, GEN_INT (base));
4410 x = plus_constant (base_reg, index);
4411 }
4412
4413 if (flag_pic)
4414 {
4415 /* We need to find and carefully transform any SYMBOL and LABEL
4416 references; so go back to the original address expression. */
4417 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4418
4419 if (new_x != orig_x)
4420 x = new_x;
4421 }
4422
4423 return x;
4424 }
4425
4426
4427 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4428 to be legitimate. If we find one, return the new, valid address. */
4429 rtx
4430 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4431 {
4432 if (arm_tls_symbol_p (x))
4433 return legitimize_tls_address (x, NULL_RTX);
4434
4435 if (GET_CODE (x) == PLUS
4436 && GET_CODE (XEXP (x, 1)) == CONST_INT
4437 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4438 || INTVAL (XEXP (x, 1)) < 0))
4439 {
4440 rtx xop0 = XEXP (x, 0);
4441 rtx xop1 = XEXP (x, 1);
4442 HOST_WIDE_INT offset = INTVAL (xop1);
4443
4444 /* Try to fold the offset into a biasing of the base register and
4445 then offsetting that. Don't do this when optimizing for space
4446 since it can cause too many CSEs. */
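/* For instance, when this transformation applies, an SImode access at
   base+260 is rewritten as (base+252)+8: the 252 is folded into a new
   base register and the residual offset of 8 is small enough for the
   load itself.  */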
4447 if (optimize_size && offset >= 0
4448 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4449 {
4450 HOST_WIDE_INT delta;
4451
4452 if (offset >= 256)
4453 delta = offset - (256 - GET_MODE_SIZE (mode));
4454 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4455 delta = 31 * GET_MODE_SIZE (mode);
4456 else
4457 delta = offset & (~31 * GET_MODE_SIZE (mode));
4458
4459 xop0 = force_operand (plus_constant (xop0, offset - delta),
4460 NULL_RTX);
4461 x = plus_constant (xop0, delta);
4462 }
4463 else if (offset < 0 && offset > -256)
4464 /* Small negative offsets are best done with a subtract before the
4465 dereference, since forcing them into a register normally takes two
4466 instructions. */
4467 x = force_operand (x, NULL_RTX);
4468 else
4469 {
4470 /* For the remaining cases, force the constant into a register. */
4471 xop1 = force_reg (SImode, xop1);
4472 x = gen_rtx_PLUS (SImode, xop0, xop1);
4473 }
4474 }
4475 else if (GET_CODE (x) == PLUS
4476 && s_register_operand (XEXP (x, 1), SImode)
4477 && !s_register_operand (XEXP (x, 0), SImode))
4478 {
4479 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4480
4481 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4482 }
4483
4484 if (flag_pic)
4485 {
4486 /* We need to find and carefully transform any SYMBOL and LABEL
4487 references; so go back to the original address expression. */
4488 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4489
4490 if (new_x != orig_x)
4491 x = new_x;
4492 }
4493
4494 return x;
4495 }
4496
4497 rtx
4498 thumb_legitimize_reload_address (rtx *x_p,
4499 enum machine_mode mode,
4500 int opnum, int type,
4501 int ind_levels ATTRIBUTE_UNUSED)
4502 {
4503 rtx x = *x_p;
4504
4505 if (GET_CODE (x) == PLUS
4506 && GET_MODE_SIZE (mode) < 4
4507 && REG_P (XEXP (x, 0))
4508 && XEXP (x, 0) == stack_pointer_rtx
4509 && GET_CODE (XEXP (x, 1)) == CONST_INT
4510 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4511 {
4512 rtx orig_x = x;
4513
4514 x = copy_rtx (x);
4515 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4516 Pmode, VOIDmode, 0, 0, opnum, type);
4517 return x;
4518 }
4519
4520 /* If both registers are hi-regs, then it's better to reload the
4521 entire expression rather than each register individually. That
4522 only requires one reload register rather than two. */
4523 if (GET_CODE (x) == PLUS
4524 && REG_P (XEXP (x, 0))
4525 && REG_P (XEXP (x, 1))
4526 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4527 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4528 {
4529 rtx orig_x = x;
4530
4531 x = copy_rtx (x);
4532 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4533 Pmode, VOIDmode, 0, 0, opnum, type);
4534 return x;
4535 }
4536
4537 return NULL;
4538 }
4539
4540 /* Test for various thread-local symbols. */
4541
4542 /* Return TRUE if X is a thread-local symbol. */
4543
4544 static bool
4545 arm_tls_symbol_p (rtx x)
4546 {
4547 if (! TARGET_HAVE_TLS)
4548 return false;
4549
4550 if (GET_CODE (x) != SYMBOL_REF)
4551 return false;
4552
4553 return SYMBOL_REF_TLS_MODEL (x) != 0;
4554 }
4555
4556 /* Helper for arm_tls_referenced_p. */
4557
4558 static int
4559 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4560 {
4561 if (GET_CODE (*x) == SYMBOL_REF)
4562 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4563
4564 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4565 TLS offsets, not real symbol references. */
4566 if (GET_CODE (*x) == UNSPEC
4567 && XINT (*x, 1) == UNSPEC_TLS)
4568 return -1;
4569
4570 return 0;
4571 }
4572
4573 /* Return TRUE if X contains any TLS symbol references. */
4574
4575 bool
4576 arm_tls_referenced_p (rtx x)
4577 {
4578 if (! TARGET_HAVE_TLS)
4579 return false;
4580
4581 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4582 }
4583 \f
4584 #define REG_OR_SUBREG_REG(X) \
4585 (GET_CODE (X) == REG \
4586 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4587
4588 #define REG_OR_SUBREG_RTX(X) \
4589 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4590
4591 #ifndef COSTS_N_INSNS
4592 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4593 #endif
4594 static inline int
4595 thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4596 {
4597 enum machine_mode mode = GET_MODE (x);
4598
4599 switch (code)
4600 {
4601 case ASHIFT:
4602 case ASHIFTRT:
4603 case LSHIFTRT:
4604 case ROTATERT:
4605 case PLUS:
4606 case MINUS:
4607 case COMPARE:
4608 case NEG:
4609 case NOT:
4610 return COSTS_N_INSNS (1);
4611
4612 case MULT:
4613 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4614 {
4615 int cycles = 0;
4616 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4617
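/* Assume roughly one multiplier cycle for every two significant bits
   of the constant, in the spirit of the iterative multiply on the
   older Thumb cores.  */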
4618 while (i)
4619 {
4620 i >>= 2;
4621 cycles++;
4622 }
4623 return COSTS_N_INSNS (2) + cycles;
4624 }
4625 return COSTS_N_INSNS (1) + 16;
4626
4627 case SET:
4628 return (COSTS_N_INSNS (1)
4629 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4630 + (GET_CODE (SET_DEST (x)) == MEM)));
4631
4632 case CONST_INT:
4633 if (outer == SET)
4634 {
4635 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4636 return 0;
4637 if (thumb_shiftable_const (INTVAL (x)))
4638 return COSTS_N_INSNS (2);
4639 return COSTS_N_INSNS (3);
4640 }
4641 else if ((outer == PLUS || outer == COMPARE)
4642 && INTVAL (x) < 256 && INTVAL (x) > -256)
4643 return 0;
4644 else if (outer == AND
4645 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4646 return COSTS_N_INSNS (1);
4647 else if (outer == ASHIFT || outer == ASHIFTRT
4648 || outer == LSHIFTRT)
4649 return 0;
4650 return COSTS_N_INSNS (2);
4651
4652 case CONST:
4653 case CONST_DOUBLE:
4654 case LABEL_REF:
4655 case SYMBOL_REF:
4656 return COSTS_N_INSNS (3);
4657
4658 case UDIV:
4659 case UMOD:
4660 case DIV:
4661 case MOD:
4662 return 100;
4663
4664 case TRUNCATE:
4665 return 99;
4666
4667 case AND:
4668 case XOR:
4669 case IOR:
4670 /* XXX guess. */
4671 return 8;
4672
4673 case MEM:
4674 /* XXX another guess. */
4675 /* Memory costs quite a lot for the first word, but subsequent words
4676 load at the equivalent of a single insn each. */
4677 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4678 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4679 ? 4 : 0));
4680
4681 case IF_THEN_ELSE:
4682 /* XXX a guess. */
4683 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4684 return 14;
4685 return 2;
4686
4687 case ZERO_EXTEND:
4688 /* XXX still guessing. */
4689 switch (GET_MODE (XEXP (x, 0)))
4690 {
4691 case QImode:
4692 return (1 + (mode == DImode ? 4 : 0)
4693 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4694
4695 case HImode:
4696 return (4 + (mode == DImode ? 4 : 0)
4697 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4698
4699 case SImode:
4700 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4701
4702 default:
4703 return 99;
4704 }
4705
4706 default:
4707 return 99;
4708 }
4709 }
4710
4711
4712 /* Worker routine for arm_rtx_costs. */
4713 /* ??? This needs updating for thumb2. */
4714 static inline int
4715 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4716 {
4717 enum machine_mode mode = GET_MODE (x);
4718 enum rtx_code subcode;
4719 int extra_cost;
4720
4721 switch (code)
4722 {
4723 case MEM:
4724 /* Memory costs quite a lot for the first word, but subsequent words
4725 load at the equivalent of a single insn each. */
4726 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4727 + (GET_CODE (x) == SYMBOL_REF
4728 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4729
4730 case DIV:
4731 case MOD:
4732 case UDIV:
4733 case UMOD:
4734 return optimize_size ? COSTS_N_INSNS (2) : 100;
4735
4736 case ROTATE:
4737 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4738 return 4;
4739 /* Fall through */
4740 case ROTATERT:
4741 if (mode != SImode)
4742 return 8;
4743 /* Fall through */
4744 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4745 if (mode == DImode)
4746 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4747 + ((GET_CODE (XEXP (x, 0)) == REG
4748 || (GET_CODE (XEXP (x, 0)) == SUBREG
4749 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4750 ? 0 : 8));
4751 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4752 || (GET_CODE (XEXP (x, 0)) == SUBREG
4753 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4754 ? 0 : 4)
4755 + ((GET_CODE (XEXP (x, 1)) == REG
4756 || (GET_CODE (XEXP (x, 1)) == SUBREG
4757 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4758 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4759 ? 0 : 4));
4760
4761 case MINUS:
4762 if (GET_CODE (XEXP (x, 1)) == MULT && mode == SImode && arm_arch_thumb2)
4763 {
4764 extra_cost = rtx_cost (XEXP (x, 1), code);
4765 if (!REG_OR_SUBREG_REG (XEXP (x, 0)))
4766 extra_cost += 4 * ARM_NUM_REGS (mode);
4767 return extra_cost;
4768 }
4769
4770 if (mode == DImode)
4771 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4772 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4773 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4774 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4775 ? 0 : 8));
4776
4777 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4778 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4779 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4780 && arm_const_double_rtx (XEXP (x, 1))))
4781 ? 0 : 8)
4782 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4783 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4784 && arm_const_double_rtx (XEXP (x, 0))))
4785 ? 0 : 8));
4786
4787 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4788 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4789 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4790 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4791 || subcode == ASHIFTRT || subcode == LSHIFTRT
4792 || subcode == ROTATE || subcode == ROTATERT
4793 || (subcode == MULT
4794 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4795 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4796 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4797 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4798 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4799 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4800 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4801 return 1;
4802 /* Fall through */
4803
4804 case PLUS:
4805 if (GET_CODE (XEXP (x, 0)) == MULT)
4806 {
4807 extra_cost = rtx_cost (XEXP (x, 0), code);
4808 if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
4809 extra_cost += 4 * ARM_NUM_REGS (mode);
4810 return extra_cost;
4811 }
4812
4813 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4814 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4815 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4816 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4817 && arm_const_double_rtx (XEXP (x, 1))))
4818 ? 0 : 8));
4819
4820 /* Fall through */
4821 case AND: case XOR: case IOR:
4822 extra_cost = 0;
4823
4824 /* Normally the frame registers will be split into reg+const during
4825 reload, so it is a bad idea to combine them with other instructions,
4826 since then they might not be moved outside of loops. As a compromise
4827 we allow integration with ops that have a constant as their second
4828 operand. */
4829 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4830 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4831 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4832 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4833 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4834 extra_cost = 4;
4835
4836 if (mode == DImode)
4837 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4838 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4839 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4840 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4841 ? 0 : 8));
4842
4843 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4844 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4845 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4846 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4847 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4848 ? 0 : 4));
4849
4850 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4851 return (1 + extra_cost
4852 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4853 || subcode == LSHIFTRT || subcode == ASHIFTRT
4854 || subcode == ROTATE || subcode == ROTATERT
4855 || (subcode == MULT
4856 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4857 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4858 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4859 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4860 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4861 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4862 ? 0 : 4));
4863
4864 return 8;
4865
4866 case MULT:
4867 /* This should have been handled by the CPU specific routines. */
4868 gcc_unreachable ();
4869
4870 case TRUNCATE:
4871 if (arm_arch3m && mode == SImode
4872 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4873 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4874 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4875 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4876 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4877 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4878 return 8;
4879 return 99;
4880
4881 case NEG:
4882 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4883 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4884 /* Fall through */
4885 case NOT:
4886 if (mode == DImode)
4887 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4888
4889 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4890
4891 case IF_THEN_ELSE:
4892 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4893 return 14;
4894 return 2;
4895
4896 case COMPARE:
4897 return 1;
4898
4899 case ABS:
4900 return 4 + (mode == DImode ? 4 : 0);
4901
4902 case SIGN_EXTEND:
4903 /* ??? value extensions are cheaper on armv6. */
4904 if (GET_MODE (XEXP (x, 0)) == QImode)
4905 return (4 + (mode == DImode ? 4 : 0)
4906 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4907 /* Fall through */
4908 case ZERO_EXTEND:
4909 switch (GET_MODE (XEXP (x, 0)))
4910 {
4911 case QImode:
4912 return (1 + (mode == DImode ? 4 : 0)
4913 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4914
4915 case HImode:
4916 return (4 + (mode == DImode ? 4 : 0)
4917 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4918
4919 case SImode:
4920 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4921
4922 case V8QImode:
4923 case V4HImode:
4924 case V2SImode:
4925 case V4QImode:
4926 case V2HImode:
4927 return 1;
4928
4929 default:
4930 gcc_unreachable ();
4931 }
4932 gcc_unreachable ();
4933
4934 case CONST_INT:
4935 if (const_ok_for_arm (INTVAL (x)))
4936 return outer == SET ? 2 : -1;
4937 else if (outer == AND
4938 && const_ok_for_arm (~INTVAL (x)))
4939 return -1;
4940 else if ((outer == COMPARE
4941 || outer == PLUS || outer == MINUS)
4942 && const_ok_for_arm (-INTVAL (x)))
4943 return -1;
4944 else
4945 return 5;
4946
4947 case CONST:
4948 case LABEL_REF:
4949 case SYMBOL_REF:
4950 return 6;
4951
4952 case CONST_DOUBLE:
4953 if (arm_const_double_rtx (x))
4954 return outer == SET ? 2 : -1;
4955 else if ((outer == COMPARE || outer == PLUS)
4956 && neg_const_double_rtx_ok_for_fpa (x))
4957 return -1;
4958 return 7;
4959
4960 default:
4961 return 99;
4962 }
4963 }
4964
4965 /* RTX costs when optimizing for size. */
4966 static bool
4967 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4968 {
4969 enum machine_mode mode = GET_MODE (x);
4970
4971 if (TARGET_THUMB)
4972 {
4973 /* XXX TBD. For now, use the standard costs. */
4974 *total = thumb1_rtx_costs (x, code, outer_code);
4975 return true;
4976 }
4977
4978 switch (code)
4979 {
4980 case MEM:
4981 /* A memory access costs 1 insn if the mode is small, or the address is
4982 a single register; otherwise it costs one insn per word. */
4983 if (REG_P (XEXP (x, 0)))
4984 *total = COSTS_N_INSNS (1);
4985 else
4986 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4987 return true;
4988
4989 case DIV:
4990 case MOD:
4991 case UDIV:
4992 case UMOD:
4993 /* Needs a libcall, so it costs about this. */
4994 *total = COSTS_N_INSNS (2);
4995 return false;
4996
4997 case ROTATE:
4998 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4999 {
5000 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
5001 return true;
5002 }
5003 /* Fall through */
5004 case ROTATERT:
5005 case ASHIFT:
5006 case LSHIFTRT:
5007 case ASHIFTRT:
5008 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
5009 {
5010 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
5011 return true;
5012 }
5013 else if (mode == SImode)
5014 {
5015 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
5016 /* Slightly disparage register shifts, but not by much. */
5017 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5018 *total += 1 + rtx_cost (XEXP (x, 1), code);
5019 return true;
5020 }
5021
5022 /* Needs a libcall. */
5023 *total = COSTS_N_INSNS (2);
5024 return false;
5025
5026 case MINUS:
5027 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5028 {
5029 *total = COSTS_N_INSNS (1);
5030 return false;
5031 }
5032
5033 if (mode == SImode)
5034 {
5035 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
5036 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
5037
5038 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
5039 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
5040 || subcode1 == ROTATE || subcode1 == ROTATERT
5041 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
5042 || subcode1 == ASHIFTRT)
5043 {
5044 /* It's just the cost of the two operands. */
5045 *total = 0;
5046 return false;
5047 }
5048
5049 *total = COSTS_N_INSNS (1);
5050 return false;
5051 }
5052
5053 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5054 return false;
5055
5056 case PLUS:
5057 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5058 {
5059 *total = COSTS_N_INSNS (1);
5060 return false;
5061 }
5062
5063 /* Fall through */
5064 case AND: case XOR: case IOR:
5065 if (mode == SImode)
5066 {
5067 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
5068
5069 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
5070 || subcode == LSHIFTRT || subcode == ASHIFTRT
5071 || (code == AND && subcode == NOT))
5072 {
5073 /* It's just the cost of the two operands. */
5074 *total = 0;
5075 return false;
5076 }
5077 }
5078
5079 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5080 return false;
5081
5082 case MULT:
5083 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5084 return false;
5085
5086 case NEG:
5087 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5088 *total = COSTS_N_INSNS (1);
5089 /* Fall through */
5090 case NOT:
5091 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5092
5093 return false;
5094
5095 case IF_THEN_ELSE:
5096 *total = 0;
5097 return false;
5098
5099 case COMPARE:
5100 if (cc_register (XEXP (x, 0), VOIDmode))
5101 *total = 0;
5102 else
5103 *total = COSTS_N_INSNS (1);
5104 return false;
5105
5106 case ABS:
5107 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5108 *total = COSTS_N_INSNS (1);
5109 else
5110 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
5111 return false;
5112
5113 case SIGN_EXTEND:
5114 *total = 0;
5115 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
5116 {
5117 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
5118 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
5119 }
5120 if (mode == DImode)
5121 *total += COSTS_N_INSNS (1);
5122 return false;
5123
5124 case ZERO_EXTEND:
5125 *total = 0;
5126 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
5127 {
5128 switch (GET_MODE (XEXP (x, 0)))
5129 {
5130 case QImode:
5131 *total += COSTS_N_INSNS (1);
5132 break;
5133
5134 case HImode:
5135 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
5136
5137 case SImode:
5138 break;
5139
5140 default:
5141 *total += COSTS_N_INSNS (2);
5142 }
5143 }
5144
5145 if (mode == DImode)
5146 *total += COSTS_N_INSNS (1);
5147
5148 return false;
5149
5150 case CONST_INT:
5151 if (const_ok_for_arm (INTVAL (x)))
5152 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
5153 else if (const_ok_for_arm (~INTVAL (x)))
5154 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
5155 else if (const_ok_for_arm (-INTVAL (x)))
5156 {
5157 if (outer_code == COMPARE || outer_code == PLUS
5158 || outer_code == MINUS)
5159 *total = 0;
5160 else
5161 *total = COSTS_N_INSNS (1);
5162 }
5163 else
5164 *total = COSTS_N_INSNS (2);
5165 return true;
5166
5167 case CONST:
5168 case LABEL_REF:
5169 case SYMBOL_REF:
5170 *total = COSTS_N_INSNS (2);
5171 return true;
5172
5173 case CONST_DOUBLE:
5174 *total = COSTS_N_INSNS (4);
5175 return true;
5176
5177 default:
5178 if (mode != VOIDmode)
5179 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5180 else
5181 *total = COSTS_N_INSNS (4); /* Who knows? */
5182 return false;
5183 }
5184 }
5185
5186 /* RTX costs for cores with a slow MUL implementation. Thumb-2 is not
5187 supported on any "slowmul" cores, so it can be ignored. */
5188
5189 static bool
5190 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
5191 {
5192 enum machine_mode mode = GET_MODE (x);
5193
5194 if (TARGET_THUMB)
5195 {
5196 *total = thumb1_rtx_costs (x, code, outer_code);
5197 return true;
5198 }
5199
5200 switch (code)
5201 {
5202 case MULT:
5203 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5204 || mode == DImode)
5205 {
5206 *total = 30;
5207 return true;
5208 }
5209
5210 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5211 {
5212 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5213 & (unsigned HOST_WIDE_INT) 0xffffffff);
5214 int cost, const_ok = const_ok_for_arm (i);
5215 int j, booth_unit_size;
5216
5217 /* Tune as appropriate. */
5218 cost = const_ok ? 4 : 8;
5219 booth_unit_size = 2;
5220 for (j = 0; i && j < 32; j += booth_unit_size)
5221 {
5222 i >>= booth_unit_size;
5223 cost += 2;
5224 }
5225
5226 *total = cost;
5227 return true;
5228 }
5229
5230 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5231 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5232 return true;
5233
5234 default:
5235 *total = arm_rtx_costs_1 (x, code, outer_code);
5236 return true;
5237 }
5238 }
5239
5240
5241 /* RTX cost for cores with a fast multiply unit (M variants). */
5242
5243 static bool
5244 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
5245 {
5246 enum machine_mode mode = GET_MODE (x);
5247
5248 if (TARGET_THUMB1)
5249 {
5250 *total = thumb1_rtx_costs (x, code, outer_code);
5251 return true;
5252 }
5253
5254 /* ??? should thumb2 use different costs? */
5255 switch (code)
5256 {
5257 case MULT:
5258 /* There is no point basing this on the tuning, since it is always the
5259 fast variant if it exists at all. */
5260 if (mode == DImode
5261 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5262 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5263 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5264 {
5265 *total = 8;
5266 return true;
5267 }
5268
5269
5270 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5271 || mode == DImode)
5272 {
5273 *total = 30;
5274 return true;
5275 }
5276
5277 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5278 {
5279 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5280 & (unsigned HOST_WIDE_INT) 0xffffffff);
5281 int cost, const_ok = const_ok_for_arm (i);
5282 int j, booth_unit_size;
5283
5284 /* Tune as appropriate. */
5285 cost = const_ok ? 4 : 8;
5286 booth_unit_size = 8;
5287 for (j = 0; i && j < 32; j += booth_unit_size)
5288 {
5289 i >>= booth_unit_size;
5290 cost += 2;
5291 }
5292
5293 *total = cost;
5294 return true;
5295 }
5296
5297 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5298 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5299 return true;
5300
5301 default:
5302 *total = arm_rtx_costs_1 (x, code, outer_code);
5303 return true;
5304 }
5305 }
5306
5307
5308 /* RTX cost for XScale CPUs. Thumb-2 is not supported on any xscale cores,
5309 so it can be ignored. */
5310
5311 static bool
5312 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
5313 {
5314 enum machine_mode mode = GET_MODE (x);
5315
5316 if (TARGET_THUMB)
5317 {
5318 *total = thumb1_rtx_costs (x, code, outer_code);
5319 return true;
5320 }
5321
5322 switch (code)
5323 {
5324 case MULT:
5325 /* There is no point basing this on the tuning, since it is always the
5326 fast variant if it exists at all. */
5327 if (mode == DImode
5328 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5329 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5330 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5331 {
5332 *total = 8;
5333 return true;
5334 }
5335
5336
5337 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5338 || mode == DImode)
5339 {
5340 *total = 30;
5341 return true;
5342 }
5343
5344 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5345 {
5346 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5347 & (unsigned HOST_WIDE_INT) 0xffffffff);
5348 int cost, const_ok = const_ok_for_arm (i);
5349 unsigned HOST_WIDE_INT masked_const;
5350
5351 /* The cost will be related to two insns.
5352 First a load of the constant (MOV or LDR), then a multiply. */
5353 cost = 2;
5354 if (! const_ok)
5355 cost += 1; /* LDR is probably more expensive because
5356 of longer result latency. */
5357 masked_const = i & 0xffff8000;
5358 if (masked_const != 0 && masked_const != 0xffff8000)
5359 {
5360 masked_const = i & 0xf8000000;
5361 if (masked_const == 0 || masked_const == 0xf8000000)
5362 cost += 1;
5363 else
5364 cost += 2;
5365 }
5366 *total = cost;
5367 return true;
5368 }
5369
5370 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5371 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5372 return true;
5373
5374 case COMPARE:
5375 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5376 will stall until the multiplication is complete. */
5377 if (GET_CODE (XEXP (x, 0)) == MULT)
5378 *total = 4 + rtx_cost (XEXP (x, 0), code);
5379 else
5380 *total = arm_rtx_costs_1 (x, code, outer_code);
5381 return true;
5382
5383 default:
5384 *total = arm_rtx_costs_1 (x, code, outer_code);
5385 return true;
5386 }
5387 }
5388
5389
5390 /* RTX costs for 9e (and later) cores. */
5391
5392 static bool
5393 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5394 {
5395 enum machine_mode mode = GET_MODE (x);
5396 int nonreg_cost;
5397 int cost;
5398
5399 if (TARGET_THUMB1)
5400 {
5401 switch (code)
5402 {
5403 case MULT:
5404 *total = COSTS_N_INSNS (3);
5405 return true;
5406
5407 default:
5408 *total = thumb1_rtx_costs (x, code, outer_code);
5409 return true;
5410 }
5411 }
5412
5413 switch (code)
5414 {
5415 case MULT:
5416 /* There is no point basing this on the tuning, since it is always the
5417 fast variant if it exists at all. */
5418 if (mode == DImode
5419 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5420 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5421 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5422 {
5423 *total = 3;
5424 return true;
5425 }
5426
5427
5428 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5429 {
5430 *total = 30;
5431 return true;
5432 }
5433 if (mode == DImode)
5434 {
5435 cost = 7;
5436 nonreg_cost = 8;
5437 }
5438 else
5439 {
5440 cost = 2;
5441 nonreg_cost = 4;
5442 }
5443
5444
5445 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5446 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5447 return true;
5448
5449 default:
5450 *total = arm_rtx_costs_1 (x, code, outer_code);
5451 return true;
5452 }
5453 }
5454 /* All address computations that can be done are free, but rtx_cost returns
5455 the same for practically all of them. So we weight the different types
5456 of address here in the order (most preferred first):
5457 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
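/* As a rough illustration of the weights below: a post-increment address
   costs 0, a register plus a shifted register 3, a register plus a
   constant 4, a bare register 6, and a symbol or constant-pool reference
   10.  */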
5458 static inline int
5459 arm_arm_address_cost (rtx x)
5460 {
5461 enum rtx_code c = GET_CODE (x);
5462
5463 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5464 return 0;
5465 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5466 return 10;
5467
5468 if (c == PLUS || c == MINUS)
5469 {
5470 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5471 return 2;
5472
5473 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5474 return 3;
5475
5476 return 4;
5477 }
5478
5479 return 6;
5480 }
5481
5482 static inline int
5483 arm_thumb_address_cost (rtx x)
5484 {
5485 enum rtx_code c = GET_CODE (x);
5486
5487 if (c == REG)
5488 return 1;
5489 if (c == PLUS
5490 && GET_CODE (XEXP (x, 0)) == REG
5491 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5492 return 1;
5493
5494 return 2;
5495 }
5496
5497 static int
5498 arm_address_cost (rtx x)
5499 {
5500 return TARGET_32BIT ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5501 }
5502
5503 static int
5504 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5505 {
5506 rtx i_pat, d_pat;
5507
5508 /* Some true dependencies can have a higher cost depending
5509 on precisely how certain input operands are used. */
5510 if (arm_tune_xscale
5511 && REG_NOTE_KIND (link) == 0
5512 && recog_memoized (insn) >= 0
5513 && recog_memoized (dep) >= 0)
5514 {
5515 int shift_opnum = get_attr_shift (insn);
5516 enum attr_type attr_type = get_attr_type (dep);
5517
5518 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5519 operand for INSN. If we have a shifted input operand and the
5520 instruction we depend on is another ALU instruction, then we may
5521 have to account for an additional stall. */
5522 if (shift_opnum != 0
5523 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5524 {
5525 rtx shifted_operand;
5526 int opno;
5527
5528 /* Get the shifted operand. */
5529 extract_insn (insn);
5530 shifted_operand = recog_data.operand[shift_opnum];
5531
5532 /* Iterate over all the operands in DEP. If we write an operand
5533 that overlaps with SHIFTED_OPERAND, then we have to increase the
5534 cost of this dependency. */
5535 extract_insn (dep);
5536 preprocess_constraints ();
5537 for (opno = 0; opno < recog_data.n_operands; opno++)
5538 {
5539 /* We can ignore strict inputs. */
5540 if (recog_data.operand_type[opno] == OP_IN)
5541 continue;
5542
5543 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5544 shifted_operand))
5545 return 2;
5546 }
5547 }
5548 }
5549
5550 /* XXX This is not strictly true for the FPA. */
5551 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5552 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5553 return 0;
5554
5555 /* Call insns don't incur a stall, even if they follow a load. */
5556 if (REG_NOTE_KIND (link) == 0
5557 && GET_CODE (insn) == CALL_INSN)
5558 return 1;
5559
5560 if ((i_pat = single_set (insn)) != NULL
5561 && GET_CODE (SET_SRC (i_pat)) == MEM
5562 && (d_pat = single_set (dep)) != NULL
5563 && GET_CODE (SET_DEST (d_pat)) == MEM)
5564 {
5565 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5566 /* This is a load after a store; there is no conflict if the load reads
5567 from a cached area. Assume that loads from the stack, and from the
5568 constant pool are cached, and that others will miss. This is a
5569 hack. */
5570
5571 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5572 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5573 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5574 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5575 return 1;
5576 }
5577
5578 return cost;
5579 }
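
/* Illustrative note, not part of the original source: the XScale hazard
   handled above arises in sequences such as

	add	r1, r2, r3
	orr	r0, r4, r1, lsl #2

   where the result of one ALU instruction is consumed as the shifted
   operand of the next ALU instruction; the code charges that dependency
   an extra cycle.  Register numbers are hypothetical.  */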
5580
5581 static int fp_consts_inited = 0;
5582
5583 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5584 static const char * const strings_fp[8] =
5585 {
5586 "0", "1", "2", "3",
5587 "4", "5", "0.5", "10"
5588 };
5589
5590 static REAL_VALUE_TYPE values_fp[8];
5591
5592 static void
5593 init_fp_table (void)
5594 {
5595 int i;
5596 REAL_VALUE_TYPE r;
5597
5598 if (TARGET_VFP)
5599 fp_consts_inited = 1;
5600 else
5601 fp_consts_inited = 8;
5602
5603 for (i = 0; i < fp_consts_inited; i++)
5604 {
5605 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5606 values_fp[i] = r;
5607 }
5608 }
5609
5610 /* Return TRUE if rtx X is a valid immediate FP constant. */
5611 int
5612 arm_const_double_rtx (rtx x)
5613 {
5614 REAL_VALUE_TYPE r;
5615 int i;
5616
5617 if (!fp_consts_inited)
5618 init_fp_table ();
5619
5620 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5621 if (REAL_VALUE_MINUS_ZERO (r))
5622 return 0;
5623
5624 for (i = 0; i < fp_consts_inited; i++)
5625 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5626 return 1;
5627
5628 return 0;
5629 }
5630
5631 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
5632 int
5633 neg_const_double_rtx_ok_for_fpa (rtx x)
5634 {
5635 REAL_VALUE_TYPE r;
5636 int i;
5637
5638 if (!fp_consts_inited)
5639 init_fp_table ();
5640
5641 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5642 r = REAL_VALUE_NEGATE (r);
5643 if (REAL_VALUE_MINUS_ZERO (r))
5644 return 0;
5645
5646 for (i = 0; i < 8; i++)
5647 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5648 return 1;
5649
5650 return 0;
5651 }
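
/* Illustrative note, not part of the original source: the eight values
   in strings_fp are the immediates the FPA can encode directly, so for
   example a value of 0.5 can be used as an immediate operand while a
   value such as 7.0 has to be loaded from the constant pool.  VFP only
   accepts the value 0 here, as the comment above strings_fp notes.  */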
5652 \f
5653 /* Predicates for `match_operand' and `match_operator'. */
5654
5655 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5656 int
5657 cirrus_memory_offset (rtx op)
5658 {
5659 /* Reject eliminable registers. */
5660 if (! (reload_in_progress || reload_completed)
5661 && ( reg_mentioned_p (frame_pointer_rtx, op)
5662 || reg_mentioned_p (arg_pointer_rtx, op)
5663 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5664 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5665 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5666 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5667 return 0;
5668
5669 if (GET_CODE (op) == MEM)
5670 {
5671 rtx ind;
5672
5673 ind = XEXP (op, 0);
5674
5675 /* Match: (mem (reg)). */
5676 if (GET_CODE (ind) == REG)
5677 return 1;
5678
5679 /* Match:
5680 (mem (plus (reg)
5681 (const))). */
5682 if (GET_CODE (ind) == PLUS
5683 && GET_CODE (XEXP (ind, 0)) == REG
5684 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5685 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5686 return 1;
5687 }
5688
5689 return 0;
5690 }
5691
5692 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5693 WB is true if full writeback address modes are allowed and is false
5694 if limited writeback address modes (POST_INC and PRE_DEC) are
5695 allowed. */
5696
5697 int
5698 arm_coproc_mem_operand (rtx op, bool wb)
5699 {
5700 rtx ind;
5701
5702 /* Reject eliminable registers. */
5703 if (! (reload_in_progress || reload_completed)
5704 && ( reg_mentioned_p (frame_pointer_rtx, op)
5705 || reg_mentioned_p (arg_pointer_rtx, op)
5706 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5707 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5708 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5709 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5710 return FALSE;
5711
5712 /* Constants are converted into offsets from labels. */
5713 if (GET_CODE (op) != MEM)
5714 return FALSE;
5715
5716 ind = XEXP (op, 0);
5717
5718 if (reload_completed
5719 && (GET_CODE (ind) == LABEL_REF
5720 || (GET_CODE (ind) == CONST
5721 && GET_CODE (XEXP (ind, 0)) == PLUS
5722 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5723 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5724 return TRUE;
5725
5726 /* Match: (mem (reg)). */
5727 if (GET_CODE (ind) == REG)
5728 return arm_address_register_rtx_p (ind, 0);
5729
5730 /* Autoincrement addressing modes. POST_INC and PRE_DEC are
5731 acceptable in any case (subject to verification by
5732 arm_address_register_rtx_p). We need WB to be true to accept
5733 PRE_INC and POST_DEC. */
5734 if (GET_CODE (ind) == POST_INC
5735 || GET_CODE (ind) == PRE_DEC
5736 || (wb
5737 && (GET_CODE (ind) == PRE_INC
5738 || GET_CODE (ind) == POST_DEC)))
5739 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5740
5741 if (wb
5742 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5743 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5744 && GET_CODE (XEXP (ind, 1)) == PLUS
5745 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5746 ind = XEXP (ind, 1);
5747
5748 /* Match:
5749 (plus (reg)
5750 (const)). */
5751 if (GET_CODE (ind) == PLUS
5752 && GET_CODE (XEXP (ind, 0)) == REG
5753 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5754 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5755 && INTVAL (XEXP (ind, 1)) > -1024
5756 && INTVAL (XEXP (ind, 1)) < 1024
5757 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5758 return TRUE;
5759
5760 return FALSE;
5761 }
5762
5763 /* Return true if X is a register that will be eliminated later on. */
5764 int
5765 arm_eliminable_register (rtx x)
5766 {
5767 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5768 || REGNO (x) == ARG_POINTER_REGNUM
5769 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5770 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5771 }
5772
5773 /* Return GENERAL_REGS if a scratch register is required to reload X to/from
5774 coprocessor registers. Otherwise return NO_REGS. */
5775
5776 enum reg_class
5777 coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
5778 {
5779 if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
5780 return NO_REGS;
5781
5782 return GENERAL_REGS;
5783 }
5784
5785 /* Values which must be returned in the most-significant end of the return
5786 register. */
5787
5788 static bool
5789 arm_return_in_msb (tree valtype)
5790 {
5791 return (TARGET_AAPCS_BASED
5792 && BYTES_BIG_ENDIAN
5793 && (AGGREGATE_TYPE_P (valtype)
5794 || TREE_CODE (valtype) == COMPLEX_TYPE));
5795 }
5796
5797 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5798 Used by the Cirrus Maverick code, which has to work around
5799 a hardware bug triggered by such instructions. */
5800 static bool
5801 arm_memory_load_p (rtx insn)
5802 {
5803 rtx body, lhs, rhs;
5804
5805 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5806 return false;
5807
5808 body = PATTERN (insn);
5809
5810 if (GET_CODE (body) != SET)
5811 return false;
5812
5813 lhs = XEXP (body, 0);
5814 rhs = XEXP (body, 1);
5815
5816 lhs = REG_OR_SUBREG_RTX (lhs);
5817
5818 /* If the destination is not a general purpose
5819 register we do not have to worry. */
5820 if (GET_CODE (lhs) != REG
5821 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5822 return false;
5823
5824 /* As well as loads from memory we also have to react
5825 to loads of invalid constants which will be turned
5826 into loads from the minipool. */
5827 return (GET_CODE (rhs) == MEM
5828 || GET_CODE (rhs) == SYMBOL_REF
5829 || note_invalid_constants (insn, -1, false));
5830 }
5831
5832 /* Return TRUE if INSN is a Cirrus instruction. */
5833 static bool
5834 arm_cirrus_insn_p (rtx insn)
5835 {
5836 enum attr_cirrus attr;
5837
5838 /* get_attr cannot accept USE or CLOBBER. */
5839 if (!insn
5840 || GET_CODE (insn) != INSN
5841 || GET_CODE (PATTERN (insn)) == USE
5842 || GET_CODE (PATTERN (insn)) == CLOBBER)
5843 return 0;
5844
5845 attr = get_attr_cirrus (insn);
5846
5847 return attr != CIRRUS_NOT;
5848 }
5849
5850 /* Cirrus reorg for invalid instruction combinations. */
5851 static void
5852 cirrus_reorg (rtx first)
5853 {
5854 enum attr_cirrus attr;
5855 rtx body = PATTERN (first);
5856 rtx t;
5857 int nops;
5858
5859 /* Any branch must be followed by 2 non Cirrus instructions. */
5860 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5861 {
5862 nops = 0;
5863 t = next_nonnote_insn (first);
5864
5865 if (arm_cirrus_insn_p (t))
5866 ++ nops;
5867
5868 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5869 ++ nops;
5870
5871 while (nops --)
5872 emit_insn_after (gen_nop (), first);
5873
5874 return;
5875 }
5876
5877 /* (float (blah)) is in parallel with a clobber. */
5878 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5879 body = XVECEXP (body, 0, 0);
5880
5881 if (GET_CODE (body) == SET)
5882 {
5883 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5884
5885 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5886 be followed by a non Cirrus insn. */
5887 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5888 {
5889 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5890 emit_insn_after (gen_nop (), first);
5891
5892 return;
5893 }
5894 else if (arm_memory_load_p (first))
5895 {
5896 unsigned int arm_regno;
5897
5898 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5899 ldr/cfmv64hr combination where the Rd field is the same
5900 in both instructions must be split with a non Cirrus
5901 insn. Example:
5902
5903 ldr r0, blah
5904 nop
5905 cfmvsr mvf0, r0. */
5906
5907 /* Get Arm register number for ldr insn. */
5908 if (GET_CODE (lhs) == REG)
5909 arm_regno = REGNO (lhs);
5910 else
5911 {
5912 gcc_assert (GET_CODE (rhs) == REG);
5913 arm_regno = REGNO (rhs);
5914 }
5915
5916 /* Next insn. */
5917 first = next_nonnote_insn (first);
5918
5919 if (! arm_cirrus_insn_p (first))
5920 return;
5921
5922 body = PATTERN (first);
5923
5924 /* (float (blah)) is in parallel with a clobber. */
5925 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5926 body = XVECEXP (body, 0, 0);
5927
5928 if (GET_CODE (body) == FLOAT)
5929 body = XEXP (body, 0);
5930
5931 if (get_attr_cirrus (first) == CIRRUS_MOVE
5932 && GET_CODE (XEXP (body, 1)) == REG
5933 && arm_regno == REGNO (XEXP (body, 1)))
5934 emit_insn_after (gen_nop (), first);
5935
5936 return;
5937 }
5938 }
5939
5940 /* get_attr cannot accept USE or CLOBBER. */
5941 if (!first
5942 || GET_CODE (first) != INSN
5943 || GET_CODE (PATTERN (first)) == USE
5944 || GET_CODE (PATTERN (first)) == CLOBBER)
5945 return;
5946
5947 attr = get_attr_cirrus (first);
5948
5949 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5950 must be followed by a non-coprocessor instruction. */
5951 if (attr == CIRRUS_COMPARE)
5952 {
5953 nops = 0;
5954
5955 t = next_nonnote_insn (first);
5956
5957 if (arm_cirrus_insn_p (t))
5958 ++ nops;
5959
5960 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5961 ++ nops;
5962
5963 while (nops --)
5964 emit_insn_after (gen_nop (), first);
5965
5966 return;
5967 }
5968 }
5969
5970 /* Return TRUE if X references a SYMBOL_REF. */
5971 int
5972 symbol_mentioned_p (rtx x)
5973 {
5974 const char * fmt;
5975 int i;
5976
5977 if (GET_CODE (x) == SYMBOL_REF)
5978 return 1;
5979
5980 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5981 are constant offsets, not symbols. */
5982 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5983 return 0;
5984
5985 fmt = GET_RTX_FORMAT (GET_CODE (x));
5986
5987 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5988 {
5989 if (fmt[i] == 'E')
5990 {
5991 int j;
5992
5993 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5994 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5995 return 1;
5996 }
5997 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5998 return 1;
5999 }
6000
6001 return 0;
6002 }
6003
6004 /* Return TRUE if X references a LABEL_REF. */
6005 int
6006 label_mentioned_p (rtx x)
6007 {
6008 const char * fmt;
6009 int i;
6010
6011 if (GET_CODE (x) == LABEL_REF)
6012 return 1;
6013
6014 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
6015 instruction, but they are constant offsets, not symbols. */
6016 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
6017 return 0;
6018
6019 fmt = GET_RTX_FORMAT (GET_CODE (x));
6020 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6021 {
6022 if (fmt[i] == 'E')
6023 {
6024 int j;
6025
6026 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6027 if (label_mentioned_p (XVECEXP (x, i, j)))
6028 return 1;
6029 }
6030 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
6031 return 1;
6032 }
6033
6034 return 0;
6035 }
6036
6037 int
6038 tls_mentioned_p (rtx x)
6039 {
6040 switch (GET_CODE (x))
6041 {
6042 case CONST:
6043 return tls_mentioned_p (XEXP (x, 0));
6044
6045 case UNSPEC:
6046 if (XINT (x, 1) == UNSPEC_TLS)
6047 return 1;
6048
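      /* Not a TLS unspec; fall through to return 0.  */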
6049 default:
6050 return 0;
6051 }
6052 }
6053
6054 /* Must not copy a SET whose source operand is PC-relative. */
6055
6056 static bool
6057 arm_cannot_copy_insn_p (rtx insn)
6058 {
6059 rtx pat = PATTERN (insn);
6060
6061 if (GET_CODE (pat) == PARALLEL
6062 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
6063 {
6064 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
6065
6066 if (GET_CODE (rhs) == UNSPEC
6067 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
6068 return TRUE;
6069
6070 if (GET_CODE (rhs) == MEM
6071 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
6072 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
6073 return TRUE;
6074 }
6075
6076 return FALSE;
6077 }
6078
6079 enum rtx_code
6080 minmax_code (rtx x)
6081 {
6082 enum rtx_code code = GET_CODE (x);
6083
6084 switch (code)
6085 {
6086 case SMAX:
6087 return GE;
6088 case SMIN:
6089 return LE;
6090 case UMIN:
6091 return LEU;
6092 case UMAX:
6093 return GEU;
6094 default:
6095 gcc_unreachable ();
6096 }
6097 }
6098
6099 /* Return 1 if memory locations are adjacent. */
6100 int
6101 adjacent_mem_locations (rtx a, rtx b)
6102 {
6103 /* We don't guarantee to preserve the order of these memory refs. */
6104 if (volatile_refs_p (a) || volatile_refs_p (b))
6105 return 0;
6106
6107 if ((GET_CODE (XEXP (a, 0)) == REG
6108 || (GET_CODE (XEXP (a, 0)) == PLUS
6109 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
6110 && (GET_CODE (XEXP (b, 0)) == REG
6111 || (GET_CODE (XEXP (b, 0)) == PLUS
6112 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
6113 {
6114 HOST_WIDE_INT val0 = 0, val1 = 0;
6115 rtx reg0, reg1;
6116 int val_diff;
6117
6118 if (GET_CODE (XEXP (a, 0)) == PLUS)
6119 {
6120 reg0 = XEXP (XEXP (a, 0), 0);
6121 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
6122 }
6123 else
6124 reg0 = XEXP (a, 0);
6125
6126 if (GET_CODE (XEXP (b, 0)) == PLUS)
6127 {
6128 reg1 = XEXP (XEXP (b, 0), 0);
6129 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
6130 }
6131 else
6132 reg1 = XEXP (b, 0);
6133
6134 /* Don't accept any offset that will require multiple
6135 instructions to handle, since this would cause the
6136 arith_adjacentmem pattern to output an overlong sequence. */
6137 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
6138 return 0;
6139
6140 /* Don't allow an eliminable register: register elimination can make
6141 the offset too large. */
6142 if (arm_eliminable_register (reg0))
6143 return 0;
6144
6145 val_diff = val1 - val0;
6146
6147 if (arm_ld_sched)
6148 {
6149 /* If the target has load delay slots, then there's no benefit
6150 to using an ldm instruction unless the offset is zero and
6151 we are optimizing for size. */
6152 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
6153 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
6154 && (val_diff == 4 || val_diff == -4));
6155 }
6156
6157 return ((REGNO (reg0) == REGNO (reg1))
6158 && (val_diff == 4 || val_diff == -4));
6159 }
6160
6161 return 0;
6162 }
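
/* Illustrative note, not part of the original source: two references
   such as [r4, #0] and [r4, #4] (same base register, offsets differing
   by exactly 4) are "adjacent" in the sense above and can feed the
   arith_adjacentmem / ldm-stm peepholes; [r4, #0] and [r4, #8], or
   references off two different base registers, are not.  Register
   numbers are hypothetical.  */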
6163
6164 int
6165 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6166 HOST_WIDE_INT *load_offset)
6167 {
6168 int unsorted_regs[4];
6169 HOST_WIDE_INT unsorted_offsets[4];
6170 int order[4];
6171 int base_reg = -1;
6172 int i;
6173
6174 /* Can only handle 2, 3, or 4 insns at present,
6175 though could be easily extended if required. */
6176 gcc_assert (nops >= 2 && nops <= 4);
6177
6178 /* Loop over the operands and check that the memory references are
6179 suitable (i.e. immediate offsets from the same base register). At
6180 the same time, extract the target register, and the memory
6181 offsets. */
6182 for (i = 0; i < nops; i++)
6183 {
6184 rtx reg;
6185 rtx offset;
6186
6187 /* Convert a subreg of a mem into the mem itself. */
6188 if (GET_CODE (operands[nops + i]) == SUBREG)
6189 operands[nops + i] = alter_subreg (operands + (nops + i));
6190
6191 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6192
6193 /* Don't reorder volatile memory references; it doesn't seem worth
6194 looking for the case where the order is ok anyway. */
6195 if (MEM_VOLATILE_P (operands[nops + i]))
6196 return 0;
6197
6198 offset = const0_rtx;
6199
6200 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6201 || (GET_CODE (reg) == SUBREG
6202 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6203 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6204 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6205 == REG)
6206 || (GET_CODE (reg) == SUBREG
6207 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6208 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6209 == CONST_INT)))
6210 {
6211 if (i == 0)
6212 {
6213 base_reg = REGNO (reg);
6214 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6215 ? REGNO (operands[i])
6216 : REGNO (SUBREG_REG (operands[i])));
6217 order[0] = 0;
6218 }
6219 else
6220 {
6221 if (base_reg != (int) REGNO (reg))
6222 /* Not addressed from the same base register. */
6223 return 0;
6224
6225 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6226 ? REGNO (operands[i])
6227 : REGNO (SUBREG_REG (operands[i])));
6228 if (unsorted_regs[i] < unsorted_regs[order[0]])
6229 order[0] = i;
6230 }
6231
6232 /* If it isn't an integer register, or if it overwrites the
6233 base register but isn't the last insn in the list, then
6234 we can't do this. */
6235 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
6236 || (i != nops - 1 && unsorted_regs[i] == base_reg))
6237 return 0;
6238
6239 unsorted_offsets[i] = INTVAL (offset);
6240 }
6241 else
6242 /* Not a suitable memory address. */
6243 return 0;
6244 }
6245
6246 /* All the useful information has now been extracted from the
6247 operands into unsorted_regs and unsorted_offsets; additionally,
6248 order[0] has been set to the lowest numbered register in the
6249 list. Sort the registers into order, and check that the memory
6250 offsets are ascending and adjacent. */
6251
6252 for (i = 1; i < nops; i++)
6253 {
6254 int j;
6255
6256 order[i] = order[i - 1];
6257 for (j = 0; j < nops; j++)
6258 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6259 && (order[i] == order[i - 1]
6260 || unsorted_regs[j] < unsorted_regs[order[i]]))
6261 order[i] = j;
6262
6263 /* Have we found a suitable register? If not, one must be used more
6264 than once. */
6265 if (order[i] == order[i - 1])
6266 return 0;
6267
6268 /* Is the memory address adjacent and ascending? */
6269 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6270 return 0;
6271 }
6272
6273 if (base)
6274 {
6275 *base = base_reg;
6276
6277 for (i = 0; i < nops; i++)
6278 regs[i] = unsorted_regs[order[i]];
6279
6280 *load_offset = unsorted_offsets[order[0]];
6281 }
6282
6283 if (unsorted_offsets[order[0]] == 0)
6284 return 1; /* ldmia */
6285
6286 if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
6287 return 2; /* ldmib */
6288
6289 if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
6290 return 3; /* ldmda */
6291
6292 if (unsorted_offsets[order[nops - 1]] == -4)
6293 return 4; /* ldmdb */
6294
6295 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
6296 if the offset isn't small enough. The reason 2 ldrs are faster
6297 is because these ARMs are able to do more than one cache access
6298 in a single cycle. The ARM9 and StrongARM have Harvard caches,
6299 whilst the ARM8 has a double bandwidth cache. This means that
6300 these cores can do both an instruction fetch and a data fetch in
6301 a single cycle, so the trick of calculating the address into a
6302 scratch register (one of the result regs) and then doing a load
6303 multiple actually becomes slower (and no smaller in code size).
6304 That is the transformation
6305
6306 ldr rd1, [rbase + offset]
6307 ldr rd2, [rbase + offset + 4]
6308
6309 to
6310
6311 add rd1, rbase, offset
6312 ldmia rd1, {rd1, rd2}
6313
6314 produces worse code -- '3 cycles + any stalls on rd2' instead of
6315 '2 cycles + any stalls on rd2'. On ARMs with only one cache
6316 access per cycle, the first sequence could never complete in less
6317 than 6 cycles, whereas the ldm sequence would only take 5 and
6318 would make better use of sequential accesses if not hitting the
6319 cache.
6320
6321 We cheat here and test 'arm_ld_sched' which we currently know to
6322 only be true for the ARM8, ARM9 and StrongARM. If this ever
6323 changes, then the test below needs to be reworked. */
6324 if (nops == 2 && arm_ld_sched)
6325 return 0;
6326
6327 /* Can't do it without setting up the offset; only do this if it takes
6328 no more than one insn. */
6329 return (const_ok_for_arm (unsorted_offsets[order[0]])
6330 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
6331 }
6332
6333 const char *
6334 emit_ldm_seq (rtx *operands, int nops)
6335 {
6336 int regs[4];
6337 int base_reg;
6338 HOST_WIDE_INT offset;
6339 char buf[100];
6340 int i;
6341
6342 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6343 {
6344 case 1:
6345 strcpy (buf, "ldm%(ia%)\t");
6346 break;
6347
6348 case 2:
6349 strcpy (buf, "ldm%(ib%)\t");
6350 break;
6351
6352 case 3:
6353 strcpy (buf, "ldm%(da%)\t");
6354 break;
6355
6356 case 4:
6357 strcpy (buf, "ldm%(db%)\t");
6358 break;
6359
6360 case 5:
6361 if (offset >= 0)
6362 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6363 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6364 (long) offset);
6365 else
6366 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6367 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6368 (long) -offset);
6369 output_asm_insn (buf, operands);
6370 base_reg = regs[0];
6371 strcpy (buf, "ldm%(ia%)\t");
6372 break;
6373
6374 default:
6375 gcc_unreachable ();
6376 }
6377
6378 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6379 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6380
6381 for (i = 1; i < nops; i++)
6382 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6383 reg_names[regs[i]]);
6384
6385 strcat (buf, "}\t%@ phole ldm");
6386
6387 output_asm_insn (buf, operands);
6388 return "";
6389 }
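
/* Illustrative note, not part of the original source: for a pair of
   loads from [r5, #0] and [r5, #4] into r0 and r1, the classification
   above is case 1 and the emitted text is roughly

	ldmia	r5, {r0, r1}	@ phole ldm

   (the exact mnemonic spelling depends on how the %(...%) assembler
   dialect markers are expanded).  Register numbers are hypothetical.  */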
6390
6391 int
6392 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6393 HOST_WIDE_INT * load_offset)
6394 {
6395 int unsorted_regs[4];
6396 HOST_WIDE_INT unsorted_offsets[4];
6397 int order[4];
6398 int base_reg = -1;
6399 int i;
6400
6401 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6402 extended if required. */
6403 gcc_assert (nops >= 2 && nops <= 4);
6404
6405 /* Loop over the operands and check that the memory references are
6406 suitable (i.e. immediate offsets from the same base register). At
6407 the same time, extract the target register, and the memory
6408 offsets. */
6409 for (i = 0; i < nops; i++)
6410 {
6411 rtx reg;
6412 rtx offset;
6413
6414 /* Convert a subreg of a mem into the mem itself. */
6415 if (GET_CODE (operands[nops + i]) == SUBREG)
6416 operands[nops + i] = alter_subreg (operands + (nops + i));
6417
6418 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6419
6420 /* Don't reorder volatile memory references; it doesn't seem worth
6421 looking for the case where the order is ok anyway. */
6422 if (MEM_VOLATILE_P (operands[nops + i]))
6423 return 0;
6424
6425 offset = const0_rtx;
6426
6427 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6428 || (GET_CODE (reg) == SUBREG
6429 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6430 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6431 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6432 == REG)
6433 || (GET_CODE (reg) == SUBREG
6434 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6435 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6436 == CONST_INT)))
6437 {
6438 if (i == 0)
6439 {
6440 base_reg = REGNO (reg);
6441 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6442 ? REGNO (operands[i])
6443 : REGNO (SUBREG_REG (operands[i])));
6444 order[0] = 0;
6445 }
6446 else
6447 {
6448 if (base_reg != (int) REGNO (reg))
6449 /* Not addressed from the same base register. */
6450 return 0;
6451
6452 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6453 ? REGNO (operands[i])
6454 : REGNO (SUBREG_REG (operands[i])));
6455 if (unsorted_regs[i] < unsorted_regs[order[0]])
6456 order[0] = i;
6457 }
6458
6459 /* If it isn't an integer register, then we can't do this. */
6460 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6461 return 0;
6462
6463 unsorted_offsets[i] = INTVAL (offset);
6464 }
6465 else
6466 /* Not a suitable memory address. */
6467 return 0;
6468 }
6469
6470 /* All the useful information has now been extracted from the
6471 operands into unsorted_regs and unsorted_offsets; additionally,
6472 order[0] has been set to the lowest numbered register in the
6473 list. Sort the registers into order, and check that the memory
6474 offsets are ascending and adjacent. */
6475
6476 for (i = 1; i < nops; i++)
6477 {
6478 int j;
6479
6480 order[i] = order[i - 1];
6481 for (j = 0; j < nops; j++)
6482 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6483 && (order[i] == order[i - 1]
6484 || unsorted_regs[j] < unsorted_regs[order[i]]))
6485 order[i] = j;
6486
6487 /* Have we found a suitable register? If not, one must be used more
6488 than once. */
6489 if (order[i] == order[i - 1])
6490 return 0;
6491
6492 /* Is the memory address adjacent and ascending? */
6493 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6494 return 0;
6495 }
6496
6497 if (base)
6498 {
6499 *base = base_reg;
6500
6501 for (i = 0; i < nops; i++)
6502 regs[i] = unsorted_regs[order[i]];
6503
6504 *load_offset = unsorted_offsets[order[0]];
6505 }
6506
6507 if (unsorted_offsets[order[0]] == 0)
6508 return 1; /* stmia */
6509
6510 if (unsorted_offsets[order[0]] == 4)
6511 return 2; /* stmib */
6512
6513 if (unsorted_offsets[order[nops - 1]] == 0)
6514 return 3; /* stmda */
6515
6516 if (unsorted_offsets[order[nops - 1]] == -4)
6517 return 4; /* stmdb */
6518
6519 return 0;
6520 }
6521
6522 const char *
6523 emit_stm_seq (rtx *operands, int nops)
6524 {
6525 int regs[4];
6526 int base_reg;
6527 HOST_WIDE_INT offset;
6528 char buf[100];
6529 int i;
6530
6531 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6532 {
6533 case 1:
6534 strcpy (buf, "stm%(ia%)\t");
6535 break;
6536
6537 case 2:
6538 strcpy (buf, "stm%(ib%)\t");
6539 break;
6540
6541 case 3:
6542 strcpy (buf, "stm%(da%)\t");
6543 break;
6544
6545 case 4:
6546 strcpy (buf, "stm%(db%)\t");
6547 break;
6548
6549 default:
6550 gcc_unreachable ();
6551 }
6552
6553 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6554 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6555
6556 for (i = 1; i < nops; i++)
6557 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6558 reg_names[regs[i]]);
6559
6560 strcat (buf, "}\t%@ phole stm");
6561
6562 output_asm_insn (buf, operands);
6563 return "";
6564 }
6565 \f
6566 /* Routines for use in generating RTL. */
6567
6568 rtx
6569 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6570 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6571 {
6572 HOST_WIDE_INT offset = *offsetp;
6573 int i = 0, j;
6574 rtx result;
6575 int sign = up ? 1 : -1;
6576 rtx mem, addr;
6577
6578 /* XScale has load-store double instructions, but they have stricter
6579 alignment requirements than load-store multiple, so we cannot
6580 use them.
6581
6582 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6583 the pipeline until completion.
6584
6585 NREGS CYCLES
6586 1 3
6587 2 4
6588 3 5
6589 4 6
6590
6591 An ldr instruction takes 1-3 cycles, but does not block the
6592 pipeline.
6593
6594 NREGS CYCLES
6595 1 1-3
6596 2 2-6
6597 3 3-9
6598 4 4-12
6599
6600 Best case ldr will always win. However, the more ldr instructions
6601 we issue, the less likely we are to be able to schedule them well.
6602 Using ldr instructions also increases code size.
6603
6604 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6605 for counts of 3 or 4 regs. */
6606 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6607 {
6608 rtx seq;
6609
6610 start_sequence ();
6611
6612 for (i = 0; i < count; i++)
6613 {
6614 addr = plus_constant (from, i * 4 * sign);
6615 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6616 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6617 offset += 4 * sign;
6618 }
6619
6620 if (write_back)
6621 {
6622 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6623 *offsetp = offset;
6624 }
6625
6626 seq = get_insns ();
6627 end_sequence ();
6628
6629 return seq;
6630 }
6631
6632 result = gen_rtx_PARALLEL (VOIDmode,
6633 rtvec_alloc (count + (write_back ? 1 : 0)));
6634 if (write_back)
6635 {
6636 XVECEXP (result, 0, 0)
6637 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6638 i = 1;
6639 count++;
6640 }
6641
6642 for (j = 0; i < count; i++, j++)
6643 {
6644 addr = plus_constant (from, j * 4 * sign);
6645 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6646 XVECEXP (result, 0, i)
6647 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6648 offset += 4 * sign;
6649 }
6650
6651 if (write_back)
6652 *offsetp = offset;
6653
6654 return result;
6655 }
6656
6657 rtx
6658 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6659 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6660 {
6661 HOST_WIDE_INT offset = *offsetp;
6662 int i = 0, j;
6663 rtx result;
6664 int sign = up ? 1 : -1;
6665 rtx mem, addr;
6666
6667 /* See arm_gen_load_multiple for discussion of
6668 the pros/cons of ldm/stm usage for XScale. */
6669 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6670 {
6671 rtx seq;
6672
6673 start_sequence ();
6674
6675 for (i = 0; i < count; i++)
6676 {
6677 addr = plus_constant (to, i * 4 * sign);
6678 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6679 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6680 offset += 4 * sign;
6681 }
6682
6683 if (write_back)
6684 {
6685 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6686 *offsetp = offset;
6687 }
6688
6689 seq = get_insns ();
6690 end_sequence ();
6691
6692 return seq;
6693 }
6694
6695 result = gen_rtx_PARALLEL (VOIDmode,
6696 rtvec_alloc (count + (write_back ? 1 : 0)));
6697 if (write_back)
6698 {
6699 XVECEXP (result, 0, 0)
6700 = gen_rtx_SET (VOIDmode, to,
6701 plus_constant (to, count * 4 * sign));
6702 i = 1;
6703 count++;
6704 }
6705
6706 for (j = 0; i < count; i++, j++)
6707 {
6708 addr = plus_constant (to, j * 4 * sign);
6709 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6710 XVECEXP (result, 0, i)
6711 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6712 offset += 4 * sign;
6713 }
6714
6715 if (write_back)
6716 *offsetp = offset;
6717
6718 return result;
6719 }
6720
6721 int
6722 arm_gen_movmemqi (rtx *operands)
6723 {
6724 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6725 HOST_WIDE_INT srcoffset, dstoffset;
6726 int i;
6727 rtx src, dst, srcbase, dstbase;
6728 rtx part_bytes_reg = NULL;
6729 rtx mem;
6730
6731 if (GET_CODE (operands[2]) != CONST_INT
6732 || GET_CODE (operands[3]) != CONST_INT
6733 || INTVAL (operands[2]) > 64
6734 || INTVAL (operands[3]) & 3)
6735 return 0;
6736
6737 dstbase = operands[0];
6738 srcbase = operands[1];
6739
6740 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6741 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6742
6743 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6744 out_words_to_go = INTVAL (operands[2]) / 4;
6745 last_bytes = INTVAL (operands[2]) & 3;
6746 dstoffset = srcoffset = 0;
6747
6748 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6749 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6750
6751 for (i = 0; in_words_to_go >= 2; i+=4)
6752 {
6753 if (in_words_to_go > 4)
6754 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6755 srcbase, &srcoffset));
6756 else
6757 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6758 FALSE, srcbase, &srcoffset));
6759
6760 if (out_words_to_go)
6761 {
6762 if (out_words_to_go > 4)
6763 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6764 dstbase, &dstoffset));
6765 else if (out_words_to_go != 1)
6766 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6767 dst, TRUE,
6768 (last_bytes == 0
6769 ? FALSE : TRUE),
6770 dstbase, &dstoffset));
6771 else
6772 {
6773 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6774 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6775 if (last_bytes != 0)
6776 {
6777 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6778 dstoffset += 4;
6779 }
6780 }
6781 }
6782
6783 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6784 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6785 }
6786
6787 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6788 if (out_words_to_go)
6789 {
6790 rtx sreg;
6791
6792 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6793 sreg = copy_to_reg (mem);
6794
6795 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6796 emit_move_insn (mem, sreg);
6797 in_words_to_go--;
6798
6799 gcc_assert (!in_words_to_go); /* Sanity check */
6800 }
6801
6802 if (in_words_to_go)
6803 {
6804 gcc_assert (in_words_to_go > 0);
6805
6806 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6807 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6808 }
6809
6810 gcc_assert (!last_bytes || part_bytes_reg);
6811
6812 if (BYTES_BIG_ENDIAN && last_bytes)
6813 {
6814 rtx tmp = gen_reg_rtx (SImode);
6815
6816 /* The bytes we want are in the top end of the word. */
6817 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6818 GEN_INT (8 * (4 - last_bytes))));
6819 part_bytes_reg = tmp;
6820
6821 while (last_bytes)
6822 {
6823 mem = adjust_automodify_address (dstbase, QImode,
6824 plus_constant (dst, last_bytes - 1),
6825 dstoffset + last_bytes - 1);
6826 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6827
6828 if (--last_bytes)
6829 {
6830 tmp = gen_reg_rtx (SImode);
6831 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6832 part_bytes_reg = tmp;
6833 }
6834 }
6835
6836 }
6837 else
6838 {
6839 if (last_bytes > 1)
6840 {
6841 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6842 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6843 last_bytes -= 2;
6844 if (last_bytes)
6845 {
6846 rtx tmp = gen_reg_rtx (SImode);
6847 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6848 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6849 part_bytes_reg = tmp;
6850 dstoffset += 2;
6851 }
6852 }
6853
6854 if (last_bytes)
6855 {
6856 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6857 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6858 }
6859 }
6860
6861 return 1;
6862 }
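
/* Illustrative note, not part of the original source: for a 12-byte,
   word-aligned copy the code above reduces to a single

	ldmia	r_src, {r0, r1, r2}
	stmia	r_dst, {r0, r1, r2}

   pair, while a trailing 1-3 bytes would be handled by the halfword
   and byte stores at the end of the function.  Register names are
   hypothetical.  */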
6863
6864 /* Select a dominance comparison mode if possible for a test of the general
6865 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6866 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6867 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6868 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6869 In all cases OP will be either EQ or NE, but we don't need to know which
6870 here. If we are unable to support a dominance comparison we return
6871 CC mode. This will then fail to match for the RTL expressions that
6872 generate this call. */
6873 enum machine_mode
6874 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6875 {
6876 enum rtx_code cond1, cond2;
6877 int swapped = 0;
6878
6879 /* Currently we will probably get the wrong result if the individual
6880 comparisons are not simple. This also ensures that it is safe to
6881 reverse a comparison if necessary. */
6882 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6883 != CCmode)
6884 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6885 != CCmode))
6886 return CCmode;
6887
6888 /* The if_then_else variant of this tests the second condition if the
6889 first passes, but is true if the first fails. Reverse the first
6890 condition to get a true "inclusive-or" expression. */
6891 if (cond_or == DOM_CC_NX_OR_Y)
6892 cond1 = reverse_condition (cond1);
6893
6894 /* If the comparisons are not equal, and one doesn't dominate the other,
6895 then we can't do this. */
6896 if (cond1 != cond2
6897 && !comparison_dominates_p (cond1, cond2)
6898 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6899 return CCmode;
6900
6901 if (swapped)
6902 {
6903 enum rtx_code temp = cond1;
6904 cond1 = cond2;
6905 cond2 = temp;
6906 }
6907
6908 switch (cond1)
6909 {
6910 case EQ:
6911 if (cond_or == DOM_CC_X_AND_Y)
6912 return CC_DEQmode;
6913
6914 switch (cond2)
6915 {
6916 case EQ: return CC_DEQmode;
6917 case LE: return CC_DLEmode;
6918 case LEU: return CC_DLEUmode;
6919 case GE: return CC_DGEmode;
6920 case GEU: return CC_DGEUmode;
6921 default: gcc_unreachable ();
6922 }
6923
6924 case LT:
6925 if (cond_or == DOM_CC_X_AND_Y)
6926 return CC_DLTmode;
6927
6928 switch (cond2)
6929 {
6930 case LT:
6931 return CC_DLTmode;
6932 case LE:
6933 return CC_DLEmode;
6934 case NE:
6935 return CC_DNEmode;
6936 default:
6937 gcc_unreachable ();
6938 }
6939
6940 case GT:
6941 if (cond_or == DOM_CC_X_AND_Y)
6942 return CC_DGTmode;
6943
6944 switch (cond2)
6945 {
6946 case GT:
6947 return CC_DGTmode;
6948 case GE:
6949 return CC_DGEmode;
6950 case NE:
6951 return CC_DNEmode;
6952 default:
6953 gcc_unreachable ();
6954 }
6955
6956 case LTU:
6957 if (cond_or == DOM_CC_X_AND_Y)
6958 return CC_DLTUmode;
6959
6960 switch (cond2)
6961 {
6962 case LTU:
6963 return CC_DLTUmode;
6964 case LEU:
6965 return CC_DLEUmode;
6966 case NE:
6967 return CC_DNEmode;
6968 default:
6969 gcc_unreachable ();
6970 }
6971
6972 case GTU:
6973 if (cond_or == DOM_CC_X_AND_Y)
6974 return CC_DGTUmode;
6975
6976 switch (cond2)
6977 {
6978 case GTU:
6979 return CC_DGTUmode;
6980 case GEU:
6981 return CC_DGEUmode;
6982 case NE:
6983 return CC_DNEmode;
6984 default:
6985 gcc_unreachable ();
6986 }
6987
6988 /* The remaining cases only occur when both comparisons are the
6989 same. */
6990 case NE:
6991 gcc_assert (cond1 == cond2);
6992 return CC_DNEmode;
6993
6994 case LE:
6995 gcc_assert (cond1 == cond2);
6996 return CC_DLEmode;
6997
6998 case GE:
6999 gcc_assert (cond1 == cond2);
7000 return CC_DGEmode;
7001
7002 case LEU:
7003 gcc_assert (cond1 == cond2);
7004 return CC_DLEUmode;
7005
7006 case GEU:
7007 gcc_assert (cond1 == cond2);
7008 return CC_DGEUmode;
7009
7010 default:
7011 gcc_unreachable ();
7012 }
7013 }
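
/* Illustrative note, not part of the original source: a jump condition
   such as (x == 0 && y == 0) combines two EQ comparisons, so the code
   above hands back CC_DEQmode and the pair can be implemented as a
   compare followed by a conditional compare rather than two separate
   branches.  */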
7014
7015 enum machine_mode
7016 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
7017 {
7018 /* All floating point compares return CCFP if it is an equality
7019 comparison, and CCFPE otherwise. */
7020 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
7021 {
7022 switch (op)
7023 {
7024 case EQ:
7025 case NE:
7026 case UNORDERED:
7027 case ORDERED:
7028 case UNLT:
7029 case UNLE:
7030 case UNGT:
7031 case UNGE:
7032 case UNEQ:
7033 case LTGT:
7034 return CCFPmode;
7035
7036 case LT:
7037 case LE:
7038 case GT:
7039 case GE:
7040 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
7041 return CCFPmode;
7042 return CCFPEmode;
7043
7044 default:
7045 gcc_unreachable ();
7046 }
7047 }
7048
7049 /* A compare with a shifted operand. Because of canonicalization, the
7050 comparison will have to be swapped when we emit the assembler. */
7051 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
7052 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
7053 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
7054 || GET_CODE (x) == ROTATERT))
7055 return CC_SWPmode;
7056
7057 /* This operation is performed swapped, but since we only rely on the Z
7058 flag we don't need an additional mode. */
7059 if (GET_MODE (y) == SImode && REG_P (y)
7060 && GET_CODE (x) == NEG
7061 && (op == EQ || op == NE))
7062 return CC_Zmode;
7063
7064 /* This is a special case that is used by combine to allow a
7065 comparison of a shifted byte load to be split into a zero-extend
7066 followed by a comparison of the shifted integer (only valid for
7067 equalities and unsigned inequalities). */
7068 if (GET_MODE (x) == SImode
7069 && GET_CODE (x) == ASHIFT
7070 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
7071 && GET_CODE (XEXP (x, 0)) == SUBREG
7072 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
7073 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
7074 && (op == EQ || op == NE
7075 || op == GEU || op == GTU || op == LTU || op == LEU)
7076 && GET_CODE (y) == CONST_INT)
7077 return CC_Zmode;
7078
7079 /* A construct for a conditional compare: if the false arm contains
7080 0, then both conditions must be true; otherwise either condition
7081 must be true. Not all conditions are possible, so CCmode is
7082 returned if it can't be done. */
7083 if (GET_CODE (x) == IF_THEN_ELSE
7084 && (XEXP (x, 2) == const0_rtx
7085 || XEXP (x, 2) == const1_rtx)
7086 && COMPARISON_P (XEXP (x, 0))
7087 && COMPARISON_P (XEXP (x, 1)))
7088 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7089 INTVAL (XEXP (x, 2)));
7090
7091 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
7092 if (GET_CODE (x) == AND
7093 && COMPARISON_P (XEXP (x, 0))
7094 && COMPARISON_P (XEXP (x, 1)))
7095 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7096 DOM_CC_X_AND_Y);
7097
7098 if (GET_CODE (x) == IOR
7099 && COMPARISON_P (XEXP (x, 0))
7100 && COMPARISON_P (XEXP (x, 1)))
7101 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7102 DOM_CC_X_OR_Y);
7103
7104 /* An operation (on Thumb) where we want to test for a single bit.
7105 This is done by shifting that bit up into the top bit of a
7106 scratch register; we can then branch on the sign bit. */
7107 if (TARGET_THUMB1
7108 && GET_MODE (x) == SImode
7109 && (op == EQ || op == NE)
7110 && GET_CODE (x) == ZERO_EXTRACT
7111 && XEXP (x, 1) == const1_rtx)
7112 return CC_Nmode;
7113
7114 /* For an operation that sets the condition codes as a side-effect, the
7115 V flag is not set correctly, so we can only use comparisons where
7116 this doesn't matter. (For LT and GE we can use "mi" and "pl"
7117 instead.) */
7118 /* ??? Does the ZERO_EXTRACT case really apply to thumb2? */
7119 if (GET_MODE (x) == SImode
7120 && y == const0_rtx
7121 && (op == EQ || op == NE || op == LT || op == GE)
7122 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
7123 || GET_CODE (x) == AND || GET_CODE (x) == IOR
7124 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
7125 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
7126 || GET_CODE (x) == LSHIFTRT
7127 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
7128 || GET_CODE (x) == ROTATERT
7129 || (TARGET_32BIT && GET_CODE (x) == ZERO_EXTRACT)))
7130 return CC_NOOVmode;
7131
7132 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
7133 return CC_Zmode;
7134
7135 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
7136 && GET_CODE (x) == PLUS
7137 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
7138 return CC_Cmode;
7139
7140 return CCmode;
7141 }
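
/* Illustrative note, not part of the original source: the final test
   above is what lets an unsigned overflow check written as
   "if (a + b < a)" use CC_Cmode, i.e. branch directly on the carry
   flag of the addition instead of performing a separate compare.  */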
7142
7143 /* X and Y are two things to compare using CODE. Emit the compare insn and
7144 return the rtx for register 0 in the proper mode. FP means this is a
7145 floating point compare: I don't think that it is needed on the arm. */
7146 rtx
7147 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
7148 {
7149 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
7150 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
7151
7152 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
7153
7154 return cc_reg;
7155 }
7156
7157 /* Generate a sequence of insns that will generate the correct return
7158 address mask depending on the physical architecture that the program
7159 is running on. */
7160 rtx
7161 arm_gen_return_addr_mask (void)
7162 {
7163 rtx reg = gen_reg_rtx (Pmode);
7164
7165 emit_insn (gen_return_addr_mask (reg));
7166 return reg;
7167 }
7168
7169 void
7170 arm_reload_in_hi (rtx *operands)
7171 {
7172 rtx ref = operands[1];
7173 rtx base, scratch;
7174 HOST_WIDE_INT offset = 0;
7175
7176 if (GET_CODE (ref) == SUBREG)
7177 {
7178 offset = SUBREG_BYTE (ref);
7179 ref = SUBREG_REG (ref);
7180 }
7181
7182 if (GET_CODE (ref) == REG)
7183 {
7184 /* We have a pseudo which has been spilt onto the stack; there
7185 are two cases here: the first where there is a simple
7186 stack-slot replacement and a second where the stack-slot is
7187 out of range, or is used as a subreg. */
7188 if (reg_equiv_mem[REGNO (ref)])
7189 {
7190 ref = reg_equiv_mem[REGNO (ref)];
7191 base = find_replacement (&XEXP (ref, 0));
7192 }
7193 else
7194 /* The slot is out of range, or was dressed up in a SUBREG. */
7195 base = reg_equiv_address[REGNO (ref)];
7196 }
7197 else
7198 base = find_replacement (&XEXP (ref, 0));
7199
7200 /* Handle the case where the address is too complex to be offset by 1. */
7201 if (GET_CODE (base) == MINUS
7202 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
7203 {
7204 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7205
7206 emit_set_insn (base_plus, base);
7207 base = base_plus;
7208 }
7209 else if (GET_CODE (base) == PLUS)
7210 {
7211 /* The addend must be CONST_INT, or we would have dealt with it above. */
7212 HOST_WIDE_INT hi, lo;
7213
7214 offset += INTVAL (XEXP (base, 1));
7215 base = XEXP (base, 0);
7216
7217 /* Rework the address into a legal sequence of insns. */
7218 /* Valid range for lo is -4095 -> 4095 */
7219 lo = (offset >= 0
7220 ? (offset & 0xfff)
7221 : -((-offset) & 0xfff));
7222
7223 /* Corner case: if lo is the max offset then we would be out of range
7224 once we have added the additional 1 below, so bump the msb into the
7225 pre-loading insn(s). */
7226 if (lo == 4095)
7227 lo &= 0x7ff;
7228
7229 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7230 ^ (HOST_WIDE_INT) 0x80000000)
7231 - (HOST_WIDE_INT) 0x80000000);
7232
7233 gcc_assert (hi + lo == offset);
7234
7235 if (hi != 0)
7236 {
7237 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7238
7239 /* Get the base address; addsi3 knows how to handle constants
7240 that require more than one insn. */
7241 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7242 base = base_plus;
7243 offset = lo;
7244 }
7245 }
7246
7247 /* Operands[2] may overlap operands[0] (though it won't overlap
7248 operands[1]), that's why we asked for a DImode reg -- so we can
7249 use the bit that does not overlap. */
7250 if (REGNO (operands[2]) == REGNO (operands[0]))
7251 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7252 else
7253 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
7254
7255 emit_insn (gen_zero_extendqisi2 (scratch,
7256 gen_rtx_MEM (QImode,
7257 plus_constant (base,
7258 offset))));
7259 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
7260 gen_rtx_MEM (QImode,
7261 plus_constant (base,
7262 offset + 1))));
7263 if (!BYTES_BIG_ENDIAN)
7264 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
7265 gen_rtx_IOR (SImode,
7266 gen_rtx_ASHIFT
7267 (SImode,
7268 gen_rtx_SUBREG (SImode, operands[0], 0),
7269 GEN_INT (8)),
7270 scratch));
7271 else
7272 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
7273 gen_rtx_IOR (SImode,
7274 gen_rtx_ASHIFT (SImode, scratch,
7275 GEN_INT (8)),
7276 gen_rtx_SUBREG (SImode, operands[0], 0)));
7277 }
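
/* Illustrative note, not part of the original source: on a little-endian
   target the sequence emitted above for an awkward HImode reload is
   essentially

	ldrb	r_scratch, [r_base, #off]
	ldrb	r_dest, [r_base, #off + 1]
	orr	r_dest, r_scratch, r_dest, lsl #8

   while on big-endian the orr shifts the other byte instead.  Register
   names and the offset are hypothetical.  */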
7278
7279 /* Handle storing a half-word to memory during reload by synthesizing as two
7280 byte stores. Take care not to clobber the input values until after we
7281 have moved them somewhere safe. This code assumes that if the DImode
7282 scratch in operands[2] overlaps either the input value or output address
7283 in some way, then that value must die in this insn (we absolutely need
7284 two scratch registers for some corner cases). */
7285 void
7286 arm_reload_out_hi (rtx *operands)
7287 {
7288 rtx ref = operands[0];
7289 rtx outval = operands[1];
7290 rtx base, scratch;
7291 HOST_WIDE_INT offset = 0;
7292
7293 if (GET_CODE (ref) == SUBREG)
7294 {
7295 offset = SUBREG_BYTE (ref);
7296 ref = SUBREG_REG (ref);
7297 }
7298
7299 if (GET_CODE (ref) == REG)
7300 {
7301 /* We have a pseudo which has been spilt onto the stack; there
7302 are two cases here: the first where there is a simple
7303 stack-slot replacement and a second where the stack-slot is
7304 out of range, or is used as a subreg. */
7305 if (reg_equiv_mem[REGNO (ref)])
7306 {
7307 ref = reg_equiv_mem[REGNO (ref)];
7308 base = find_replacement (&XEXP (ref, 0));
7309 }
7310 else
7311 /* The slot is out of range, or was dressed up in a SUBREG. */
7312 base = reg_equiv_address[REGNO (ref)];
7313 }
7314 else
7315 base = find_replacement (&XEXP (ref, 0));
7316
7317 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
7318
7319 /* Handle the case where the address is too complex to be offset by 1. */
7320 if (GET_CODE (base) == MINUS
7321 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
7322 {
7323 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7324
7325 /* Be careful not to destroy OUTVAL. */
7326 if (reg_overlap_mentioned_p (base_plus, outval))
7327 {
7328 /* Updating base_plus might destroy outval, see if we can
7329 swap the scratch and base_plus. */
7330 if (!reg_overlap_mentioned_p (scratch, outval))
7331 {
7332 rtx tmp = scratch;
7333 scratch = base_plus;
7334 base_plus = tmp;
7335 }
7336 else
7337 {
7338 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7339
7340 /* Be conservative and copy OUTVAL into the scratch now,
7341 this should only be necessary if outval is a subreg
7342 of something larger than a word. */
7343 /* XXX Might this clobber base? I can't see how it can,
7344 since scratch is known to overlap with OUTVAL, and
7345 must be wider than a word. */
7346 emit_insn (gen_movhi (scratch_hi, outval));
7347 outval = scratch_hi;
7348 }
7349 }
7350
7351 emit_set_insn (base_plus, base);
7352 base = base_plus;
7353 }
7354 else if (GET_CODE (base) == PLUS)
7355 {
7356 /* The addend must be CONST_INT, or we would have dealt with it above. */
7357 HOST_WIDE_INT hi, lo;
7358
7359 offset += INTVAL (XEXP (base, 1));
7360 base = XEXP (base, 0);
7361
7362 /* Rework the address into a legal sequence of insns. */
7363 /* Valid range for lo is -4095 -> 4095 */
7364 lo = (offset >= 0
7365 ? (offset & 0xfff)
7366 : -((-offset) & 0xfff));
7367
7368 /* Corner case: if lo is the max offset then we would be out of range
7369 once we have added the additional 1 below, so bump the msb into the
7370 pre-loading insn(s). */
7371 if (lo == 4095)
7372 lo &= 0x7ff;
7373
7374 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7375 ^ (HOST_WIDE_INT) 0x80000000)
7376 - (HOST_WIDE_INT) 0x80000000);
7377
7378 gcc_assert (hi + lo == offset);
7379
7380 if (hi != 0)
7381 {
7382 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7383
7384 /* Be careful not to destroy OUTVAL. */
7385 if (reg_overlap_mentioned_p (base_plus, outval))
7386 {
7387 /* Updating base_plus might destroy outval, see if we
7388 can swap the scratch and base_plus. */
7389 if (!reg_overlap_mentioned_p (scratch, outval))
7390 {
7391 rtx tmp = scratch;
7392 scratch = base_plus;
7393 base_plus = tmp;
7394 }
7395 else
7396 {
7397 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7398
7399 /* Be conservative and copy outval into scratch now,
7400 this should only be necessary if outval is a
7401 subreg of something larger than a word. */
7402 /* XXX Might this clobber base? I can't see how it
7403 can, since scratch is known to overlap with
7404 outval. */
7405 emit_insn (gen_movhi (scratch_hi, outval));
7406 outval = scratch_hi;
7407 }
7408 }
7409
7410 /* Get the base address; addsi3 knows how to handle constants
7411 that require more than one insn. */
7412 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7413 base = base_plus;
7414 offset = lo;
7415 }
7416 }
7417
7418 if (BYTES_BIG_ENDIAN)
7419 {
7420 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7421 plus_constant (base, offset + 1)),
7422 gen_lowpart (QImode, outval)));
7423 emit_insn (gen_lshrsi3 (scratch,
7424 gen_rtx_SUBREG (SImode, outval, 0),
7425 GEN_INT (8)));
7426 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7427 gen_lowpart (QImode, scratch)));
7428 }
7429 else
7430 {
7431 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7432 gen_lowpart (QImode, outval)));
7433 emit_insn (gen_lshrsi3 (scratch,
7434 gen_rtx_SUBREG (SImode, outval, 0),
7435 GEN_INT (8)));
7436 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7437 plus_constant (base, offset + 1)),
7438 gen_lowpart (QImode, scratch)));
7439 }
7440 }
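
/* Illustrative note, not part of the original source: on a little-endian
   target the store synthesized above is essentially

	strb	r_val, [r_base, #off]
	mov	r_scratch, r_val, lsr #8
	strb	r_scratch, [r_base, #off + 1]

   with the two byte addresses swapped for big-endian.  Register names
   and the offset are hypothetical.  */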
7441
7442 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7443 (padded to the size of a word) should be passed in a register. */
7444
7445 static bool
7446 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7447 {
7448 if (TARGET_AAPCS_BASED)
7449 return must_pass_in_stack_var_size (mode, type);
7450 else
7451 return must_pass_in_stack_var_size_or_pad (mode, type);
7452 }
7453
7454
7455 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7456 Return true if an argument passed on the stack should be padded upwards,
7457 i.e. if the least-significant byte has useful data.
7458 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7459 aggregate types are placed in the lowest memory address. */
7460
7461 bool
7462 arm_pad_arg_upward (enum machine_mode mode, tree type)
7463 {
7464 if (!TARGET_AAPCS_BASED)
7465 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7466
7467 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7468 return false;
7469
7470 return true;
7471 }
7472
7473
7474 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7475 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7476 byte of the register has useful data, and return the opposite if the
7477 most significant byte does.
7478 For AAPCS, small aggregates and small complex types are always padded
7479 upwards. */
7480
7481 bool
7482 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7483 tree type, int first ATTRIBUTE_UNUSED)
7484 {
7485 if (TARGET_AAPCS_BASED
7486 && BYTES_BIG_ENDIAN
7487 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7488 && int_size_in_bytes (type) <= 4)
7489 return true;
7490
7491 /* Otherwise, use default padding. */
7492 return !BYTES_BIG_ENDIAN;
7493 }
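/* Illustrative behaviour (not from the original sources), assuming a
   big-endian AAPCS target: a 3-byte struct or a small complex value makes
   arm_pad_reg_upward return true, so the bytes are laid out from the most
   significant end of the register just as they would be in memory, with the
   padding byte at the least significant end; a char or short argument makes
   arm_pad_arg_upward return false, keeping the usual big-endian downward
   padding for integral stack arguments.  On a little-endian target both
   functions return true, matching the default upward padding.  */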
7494
7495 \f
7496 /* Print a symbolic form of X to the debug file, F. */
7497 static void
7498 arm_print_value (FILE *f, rtx x)
7499 {
7500 switch (GET_CODE (x))
7501 {
7502 case CONST_INT:
7503 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7504 return;
7505
7506 case CONST_DOUBLE:
7507 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7508 return;
7509
7510 case CONST_VECTOR:
7511 {
7512 int i;
7513
7514 fprintf (f, "<");
7515 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7516 {
7517 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7518 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7519 fputc (',', f);
7520 }
7521 fprintf (f, ">");
7522 }
7523 return;
7524
7525 case CONST_STRING:
7526 fprintf (f, "\"%s\"", XSTR (x, 0));
7527 return;
7528
7529 case SYMBOL_REF:
7530 fprintf (f, "`%s'", XSTR (x, 0));
7531 return;
7532
7533 case LABEL_REF:
7534 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7535 return;
7536
7537 case CONST:
7538 arm_print_value (f, XEXP (x, 0));
7539 return;
7540
7541 case PLUS:
7542 arm_print_value (f, XEXP (x, 0));
7543 fprintf (f, "+");
7544 arm_print_value (f, XEXP (x, 1));
7545 return;
7546
7547 case PC:
7548 fprintf (f, "pc");
7549 return;
7550
7551 default:
7552 fprintf (f, "????");
7553 return;
7554 }
7555 }
7556 \f
7557 /* Routines for manipulation of the constant pool. */
7558
7559 /* Arm instructions cannot load a large constant directly into a
7560 register; they have to come from a pc relative load. The constant
7561 must therefore be placed in the addressable range of the pc
7562 relative load. Depending on the precise pc relative load
7563 instruction the range is somewhere between 256 bytes and 4k. This
7564 means that we often have to dump a constant inside a function, and
7565 generate code to branch around it.
7566
7567 It is important to minimize this, since the branches will slow
7568 things down and make the code larger.
7569
7570 Normally we can hide the table after an existing unconditional
7571 branch so that there is no interruption of the flow, but in the
7572 worst case the code looks like this:
7573
7574 ldr rn, L1
7575 ...
7576 b L2
7577 align
7578 L1: .long value
7579 L2:
7580 ...
7581
7582 ldr rn, L3
7583 ...
7584 b L4
7585 align
7586 L3: .long value
7587 L4:
7588 ...
7589
7590 We fix this by performing a scan after scheduling, which notices
7591 which instructions need to have their operands fetched from the
7592 constant table and builds the table.
7593
7594 The algorithm starts by building a table of all the constants that
7595 need fixing up and all the natural barriers in the function (places
7596 where a constant table can be dropped without breaking the flow).
7597 For each fixup we note how far the pc-relative replacement will be
7598 able to reach and the offset of the instruction into the function.
7599
7600 Having built the table we then group the fixes together to form
7601 tables that are as large as possible (subject to addressing
7602 constraints) and emit each table of constants after the last
7603 barrier that is within range of all the instructions in the group.
7604 If a group does not contain a barrier, then we forcibly create one
7605 by inserting a jump instruction into the flow. Once the table has
7606 been inserted, the insns are then modified to reference the
7607 relevant entry in the pool.
7608
7609 Possible enhancements to the algorithm (not implemented) are:
7610
7611 1) For some processors and object formats, there may be benefit in
7612 aligning the pools to the start of cache lines; this alignment
7613 would need to be taken into account when calculating addressability
7614 of a pool. */
7615
7616 /* These typedefs are located at the start of this file, so that
7617 they can be used in the prototypes there. This comment is to
7618 remind readers of that fact so that the following structures
7619 can be understood more easily.
7620
7621 typedef struct minipool_node Mnode;
7622 typedef struct minipool_fixup Mfix; */
7623
7624 struct minipool_node
7625 {
7626 /* Doubly linked chain of entries. */
7627 Mnode * next;
7628 Mnode * prev;
7629 /* The maximum offset into the code at which this entry can be placed. While
7630 pushing fixes for forward references, all entries are sorted in order
7631 of increasing max_address. */
7632 HOST_WIDE_INT max_address;
7633 /* Similarly for an entry inserted for a backwards ref. */
7634 HOST_WIDE_INT min_address;
7635 /* The number of fixes referencing this entry. This can become zero
7636 if we "unpush" an entry. In this case we ignore the entry when we
7637 come to emit the code. */
7638 int refcount;
7639 /* The offset from the start of the minipool. */
7640 HOST_WIDE_INT offset;
7641 /* The value in the table. */
7642 rtx value;
7643 /* The mode of value. */
7644 enum machine_mode mode;
7645 /* The size of the value. With iWMMXt enabled
7646 sizes > 4 also imply an alignment of 8 bytes. */
7647 int fix_size;
7648 };
7649
7650 struct minipool_fixup
7651 {
7652 Mfix * next;
7653 rtx insn;
7654 HOST_WIDE_INT address;
7655 rtx * loc;
7656 enum machine_mode mode;
7657 int fix_size;
7658 rtx value;
7659 Mnode * minipool;
7660 HOST_WIDE_INT forwards;
7661 HOST_WIDE_INT backwards;
7662 };
7663
7664 /* Fixes less than a word need padding out to a word boundary. */
7665 #define MINIPOOL_FIX_SIZE(mode) \
7666 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
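/* Illustrative values (not from the original sources):
   MINIPOOL_FIX_SIZE (HImode) == 4   (a 2-byte value is padded to a word),
   MINIPOOL_FIX_SIZE (SImode) == 4,
   MINIPOOL_FIX_SIZE (DImode) == 8.  */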
7667
7668 static Mnode * minipool_vector_head;
7669 static Mnode * minipool_vector_tail;
7670 static rtx minipool_vector_label;
7671 static int minipool_pad;
7672
7673 /* The linked list of all minipool fixes required for this function. */
7674 Mfix * minipool_fix_head;
7675 Mfix * minipool_fix_tail;
7676 /* The fix entry for the current minipool, once it has been placed. */
7677 Mfix * minipool_barrier;
7678
7679 /* Determines if INSN is the start of a jump table. Returns the end
7680 of the TABLE or NULL_RTX. */
7681 static rtx
7682 is_jump_table (rtx insn)
7683 {
7684 rtx table;
7685
7686 if (GET_CODE (insn) == JUMP_INSN
7687 && JUMP_LABEL (insn) != NULL
7688 && ((table = next_real_insn (JUMP_LABEL (insn)))
7689 == next_real_insn (insn))
7690 && table != NULL
7691 && GET_CODE (table) == JUMP_INSN
7692 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7693 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7694 return table;
7695
7696 return NULL_RTX;
7697 }
7698
7699 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7700 #define JUMP_TABLES_IN_TEXT_SECTION 0
7701 #endif
7702
7703 static HOST_WIDE_INT
7704 get_jump_table_size (rtx insn)
7705 {
7706 /* ADDR_VECs only take room if read-only data goes into the text
7707 section. */
7708 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7709 {
7710 rtx body = PATTERN (insn);
7711 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7712 HOST_WIDE_INT size;
7713 HOST_WIDE_INT modesize;
7714
7715 modesize = GET_MODE_SIZE (GET_MODE (body));
7716 size = modesize * XVECLEN (body, elt);
7717 switch (modesize)
7718 {
7719 case 1:
7720 /* Round up size of TBB table to a halfword boundary. */
7721 size = (size + 1) & ~(HOST_WIDE_INT)1;
7722 break;
7723 case 2:
7724 /* No padding necessary for TBH. */
7725 break;
7726 case 4:
7727 /* Add two bytes for alignment on Thumb. */
7728 if (TARGET_THUMB)
7729 size += 2;
7730 break;
7731 default:
7732 gcc_unreachable ();
7733 }
7734 return size;
7735 }
7736
7737 return 0;
7738 }
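/* Illustrative worked example (not from the original sources), assuming jump
   tables are emitted into the text section: an ADDR_DIFF_VEC with five
   QImode entries (Thumb-2 TBB) gives size 5, rounded up to 6; the same table
   in HImode (TBH) gives 10 with no padding; an SImode table with five
   entries on Thumb gives 20 + 2 = 22 bytes because of the alignment padding
   added above.  */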
7739
7740 /* Move a minipool fix MP from its current location to before MAX_MP.
7741 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7742 constraints may need updating. */
7743 static Mnode *
7744 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7745 HOST_WIDE_INT max_address)
7746 {
7747 /* The code below assumes these are different. */
7748 gcc_assert (mp != max_mp);
7749
7750 if (max_mp == NULL)
7751 {
7752 if (max_address < mp->max_address)
7753 mp->max_address = max_address;
7754 }
7755 else
7756 {
7757 if (max_address > max_mp->max_address - mp->fix_size)
7758 mp->max_address = max_mp->max_address - mp->fix_size;
7759 else
7760 mp->max_address = max_address;
7761
7762 /* Unlink MP from its current position. Since max_mp is non-null,
7763 mp->prev must be non-null. */
7764 mp->prev->next = mp->next;
7765 if (mp->next != NULL)
7766 mp->next->prev = mp->prev;
7767 else
7768 minipool_vector_tail = mp->prev;
7769
7770 /* Re-insert it before MAX_MP. */
7771 mp->next = max_mp;
7772 mp->prev = max_mp->prev;
7773 max_mp->prev = mp;
7774
7775 if (mp->prev != NULL)
7776 mp->prev->next = mp;
7777 else
7778 minipool_vector_head = mp;
7779 }
7780
7781 /* Save the new entry. */
7782 max_mp = mp;
7783
7784 /* Scan over the preceding entries and adjust their addresses as
7785 required. */
7786 while (mp->prev != NULL
7787 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7788 {
7789 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7790 mp = mp->prev;
7791 }
7792
7793 return max_mp;
7794 }
7795
7796 /* Add a constant to the minipool for a forward reference. Returns the
7797 node added or NULL if the constant will not fit in this pool. */
7798 static Mnode *
7799 add_minipool_forward_ref (Mfix *fix)
7800 {
7801 /* If set, max_mp is the first pool_entry that has a lower
7802 constraint than the one we are trying to add. */
7803 Mnode * max_mp = NULL;
7804 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7805 Mnode * mp;
7806
7807 /* If the minipool starts before the end of FIX->INSN then this FIX
7808 cannot be placed into the current pool. Furthermore, adding the
7809 new constant pool entry may cause the pool to start FIX_SIZE bytes
7810 earlier. */
7811 if (minipool_vector_head &&
7812 (fix->address + get_attr_length (fix->insn)
7813 >= minipool_vector_head->max_address - fix->fix_size))
7814 return NULL;
7815
7816 /* Scan the pool to see if a constant with the same value has
7817 already been added. While we are doing this, also note the
7818 location where we must insert the constant if it doesn't already
7819 exist. */
7820 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7821 {
7822 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7823 && fix->mode == mp->mode
7824 && (GET_CODE (fix->value) != CODE_LABEL
7825 || (CODE_LABEL_NUMBER (fix->value)
7826 == CODE_LABEL_NUMBER (mp->value)))
7827 && rtx_equal_p (fix->value, mp->value))
7828 {
7829 /* More than one fix references this entry. */
7830 mp->refcount++;
7831 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7832 }
7833
7834 /* Note the insertion point if necessary. */
7835 if (max_mp == NULL
7836 && mp->max_address > max_address)
7837 max_mp = mp;
7838
7839 /* If we are inserting an 8-byte aligned quantity and
7840 we have not already found an insertion point, then
7841 make sure that all such 8-byte aligned quantities are
7842 placed at the start of the pool. */
7843 if (ARM_DOUBLEWORD_ALIGN
7844 && max_mp == NULL
7845 && fix->fix_size == 8
7846 && mp->fix_size != 8)
7847 {
7848 max_mp = mp;
7849 max_address = mp->max_address;
7850 }
7851 }
7852
7853 /* The value is not currently in the minipool, so we need to create
7854 a new entry for it. If MAX_MP is NULL, the entry will be put on
7855 the end of the list since the placement is less constrained than
7856 any existing entry. Otherwise, we insert the new fix before
7857 MAX_MP and, if necessary, adjust the constraints on the other
7858 entries. */
7859 mp = XNEW (Mnode);
7860 mp->fix_size = fix->fix_size;
7861 mp->mode = fix->mode;
7862 mp->value = fix->value;
7863 mp->refcount = 1;
7864 /* Not yet required for a backwards ref. */
7865 mp->min_address = -65536;
7866
7867 if (max_mp == NULL)
7868 {
7869 mp->max_address = max_address;
7870 mp->next = NULL;
7871 mp->prev = minipool_vector_tail;
7872
7873 if (mp->prev == NULL)
7874 {
7875 minipool_vector_head = mp;
7876 minipool_vector_label = gen_label_rtx ();
7877 }
7878 else
7879 mp->prev->next = mp;
7880
7881 minipool_vector_tail = mp;
7882 }
7883 else
7884 {
7885 if (max_address > max_mp->max_address - mp->fix_size)
7886 mp->max_address = max_mp->max_address - mp->fix_size;
7887 else
7888 mp->max_address = max_address;
7889
7890 mp->next = max_mp;
7891 mp->prev = max_mp->prev;
7892 max_mp->prev = mp;
7893 if (mp->prev != NULL)
7894 mp->prev->next = mp;
7895 else
7896 minipool_vector_head = mp;
7897 }
7898
7899 /* Save the new entry. */
7900 max_mp = mp;
7901
7902 /* Scan over the preceding entries and adjust their addresses as
7903 required. */
7904 while (mp->prev != NULL
7905 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7906 {
7907 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7908 mp = mp->prev;
7909 }
7910
7911 return max_mp;
7912 }
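/* Illustrative worked example (not from the original sources): for a fix at
   address 0x400 whose instruction can reach, say, 4096 bytes forward and
   with minipool_pad == 0, max_address is 0x400 + 4096 = 0x1400, so the new
   entry must not be pushed beyond that address.  If an 8-byte entry has
   already forced minipool_pad to 4, the limit drops to 0x13fc.  */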
7913
7914 static Mnode *
7915 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7916 HOST_WIDE_INT min_address)
7917 {
7918 HOST_WIDE_INT offset;
7919
7920 /* The code below assumes these are different. */
7921 gcc_assert (mp != min_mp);
7922
7923 if (min_mp == NULL)
7924 {
7925 if (min_address > mp->min_address)
7926 mp->min_address = min_address;
7927 }
7928 else
7929 {
7930 /* We will adjust this below if it is too loose. */
7931 mp->min_address = min_address;
7932
7933 /* Unlink MP from its current position. Since min_mp is non-null,
7934 mp->next must be non-null. */
7935 mp->next->prev = mp->prev;
7936 if (mp->prev != NULL)
7937 mp->prev->next = mp->next;
7938 else
7939 minipool_vector_head = mp->next;
7940
7941 /* Reinsert it after MIN_MP. */
7942 mp->prev = min_mp;
7943 mp->next = min_mp->next;
7944 min_mp->next = mp;
7945 if (mp->next != NULL)
7946 mp->next->prev = mp;
7947 else
7948 minipool_vector_tail = mp;
7949 }
7950
7951 min_mp = mp;
7952
7953 offset = 0;
7954 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7955 {
7956 mp->offset = offset;
7957 if (mp->refcount > 0)
7958 offset += mp->fix_size;
7959
7960 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7961 mp->next->min_address = mp->min_address + mp->fix_size;
7962 }
7963
7964 return min_mp;
7965 }
7966
7967 /* Add a constant to the minipool for a backward reference. Returns the
7968 node added or NULL if the constant will not fit in this pool.
7969
7970 Note that the code for insertion for a backwards reference can be
7971 somewhat confusing because the calculated offsets for each fix do
7972 not take into account the size of the pool (which is still under
7973 construction). */
7974 static Mnode *
7975 add_minipool_backward_ref (Mfix *fix)
7976 {
7977 /* If set, min_mp is the last pool_entry that has a lower constraint
7978 than the one we are trying to add. */
7979 Mnode *min_mp = NULL;
7980 /* This can be negative, since it is only a constraint. */
7981 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7982 Mnode *mp;
7983
7984 /* If we can't reach the current pool from this insn, or if we can't
7985 insert this entry at the end of the pool without pushing other
7986 fixes out of range, then we don't try. This ensures that we
7987 can't fail later on. */
7988 if (min_address >= minipool_barrier->address
7989 || (minipool_vector_tail->min_address + fix->fix_size
7990 >= minipool_barrier->address))
7991 return NULL;
7992
7993 /* Scan the pool to see if a constant with the same value has
7994 already been added. While we are doing this, also note the
7995 location where we must insert the constant if it doesn't already
7996 exist. */
7997 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7998 {
7999 if (GET_CODE (fix->value) == GET_CODE (mp->value)
8000 && fix->mode == mp->mode
8001 && (GET_CODE (fix->value) != CODE_LABEL
8002 || (CODE_LABEL_NUMBER (fix->value)
8003 == CODE_LABEL_NUMBER (mp->value)))
8004 && rtx_equal_p (fix->value, mp->value)
8005 /* Check that there is enough slack to move this entry to the
8006 end of the table (this is conservative). */
8007 && (mp->max_address
8008 > (minipool_barrier->address
8009 + minipool_vector_tail->offset
8010 + minipool_vector_tail->fix_size)))
8011 {
8012 mp->refcount++;
8013 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
8014 }
8015
8016 if (min_mp != NULL)
8017 mp->min_address += fix->fix_size;
8018 else
8019 {
8020 /* Note the insertion point if necessary. */
8021 if (mp->min_address < min_address)
8022 {
8023 /* For now, we do not allow the insertion of 8-byte alignment
8024 requiring nodes anywhere but at the start of the pool. */
8025 if (ARM_DOUBLEWORD_ALIGN
8026 && fix->fix_size == 8 && mp->fix_size != 8)
8027 return NULL;
8028 else
8029 min_mp = mp;
8030 }
8031 else if (mp->max_address
8032 < minipool_barrier->address + mp->offset + fix->fix_size)
8033 {
8034 /* Inserting before this entry would push the fix beyond
8035 its maximum address (which can happen if we have
8036 re-located a forwards fix); force the new fix to come
8037 after it. */
8038 min_mp = mp;
8039 min_address = mp->min_address + fix->fix_size;
8040 }
8041 /* If we are inserting an 8-byte aligned quantity and
8042 we have not already found an insertion point, then
8043 make sure that all such 8-byte aligned quantities are
8044 placed at the start of the pool. */
8045 else if (ARM_DOUBLEWORD_ALIGN
8046 && min_mp == NULL
8047 && fix->fix_size == 8
8048 && mp->fix_size < 8)
8049 {
8050 min_mp = mp;
8051 min_address = mp->min_address + fix->fix_size;
8052 }
8053 }
8054 }
8055
8056 /* We need to create a new entry. */
8057 mp = XNEW (Mnode);
8058 mp->fix_size = fix->fix_size;
8059 mp->mode = fix->mode;
8060 mp->value = fix->value;
8061 mp->refcount = 1;
8062 mp->max_address = minipool_barrier->address + 65536;
8063
8064 mp->min_address = min_address;
8065
8066 if (min_mp == NULL)
8067 {
8068 mp->prev = NULL;
8069 mp->next = minipool_vector_head;
8070
8071 if (mp->next == NULL)
8072 {
8073 minipool_vector_tail = mp;
8074 minipool_vector_label = gen_label_rtx ();
8075 }
8076 else
8077 mp->next->prev = mp;
8078
8079 minipool_vector_head = mp;
8080 }
8081 else
8082 {
8083 mp->next = min_mp->next;
8084 mp->prev = min_mp;
8085 min_mp->next = mp;
8086
8087 if (mp->next != NULL)
8088 mp->next->prev = mp;
8089 else
8090 minipool_vector_tail = mp;
8091 }
8092
8093 /* Save the new entry. */
8094 min_mp = mp;
8095
8096 if (mp->prev)
8097 mp = mp->prev;
8098 else
8099 mp->offset = 0;
8100
8101 /* Scan over the following entries and adjust their offsets. */
8102 while (mp->next != NULL)
8103 {
8104 if (mp->next->min_address < mp->min_address + mp->fix_size)
8105 mp->next->min_address = mp->min_address + mp->fix_size;
8106
8107 if (mp->refcount)
8108 mp->next->offset = mp->offset + mp->fix_size;
8109 else
8110 mp->next->offset = mp->offset;
8111
8112 mp = mp->next;
8113 }
8114
8115 return min_mp;
8116 }
8117
8118 static void
8119 assign_minipool_offsets (Mfix *barrier)
8120 {
8121 HOST_WIDE_INT offset = 0;
8122 Mnode *mp;
8123
8124 minipool_barrier = barrier;
8125
8126 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8127 {
8128 mp->offset = offset;
8129
8130 if (mp->refcount > 0)
8131 offset += mp->fix_size;
8132 }
8133 }
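/* Illustrative worked example (not from the original sources): for a pool
   containing entries with fix_size 8, 4 and 4 where the middle entry has
   refcount == 0, the assigned offsets are 0, 8 and 8; the unreferenced entry
   keeps an offset but contributes no space, and dump_minipool below skips it
   when the pool is emitted.  */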
8134
8135 /* Output the literal table. */
8136 static void
8137 dump_minipool (rtx scan)
8138 {
8139 Mnode * mp;
8140 Mnode * nmp;
8141 int align64 = 0;
8142
8143 if (ARM_DOUBLEWORD_ALIGN)
8144 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8145 if (mp->refcount > 0 && mp->fix_size == 8)
8146 {
8147 align64 = 1;
8148 break;
8149 }
8150
8151 if (dump_file)
8152 fprintf (dump_file,
8153 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
8154 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
8155
8156 scan = emit_label_after (gen_label_rtx (), scan);
8157 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
8158 scan = emit_label_after (minipool_vector_label, scan);
8159
8160 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
8161 {
8162 if (mp->refcount > 0)
8163 {
8164 if (dump_file)
8165 {
8166 fprintf (dump_file,
8167 ";; Offset %u, min %ld, max %ld ",
8168 (unsigned) mp->offset, (unsigned long) mp->min_address,
8169 (unsigned long) mp->max_address);
8170 arm_print_value (dump_file, mp->value);
8171 fputc ('\n', dump_file);
8172 }
8173
8174 switch (mp->fix_size)
8175 {
8176 #ifdef HAVE_consttable_1
8177 case 1:
8178 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
8179 break;
8180
8181 #endif
8182 #ifdef HAVE_consttable_2
8183 case 2:
8184 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
8185 break;
8186
8187 #endif
8188 #ifdef HAVE_consttable_4
8189 case 4:
8190 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
8191 break;
8192
8193 #endif
8194 #ifdef HAVE_consttable_8
8195 case 8:
8196 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
8197 break;
8198
8199 #endif
8200 default:
8201 gcc_unreachable ();
8202 }
8203 }
8204
8205 nmp = mp->next;
8206 free (mp);
8207 }
8208
8209 minipool_vector_head = minipool_vector_tail = NULL;
8210 scan = emit_insn_after (gen_consttable_end (), scan);
8211 scan = emit_barrier_after (scan);
8212 }
8213
8214 /* Return the cost of forcibly inserting a barrier after INSN. */
8215 static int
8216 arm_barrier_cost (rtx insn)
8217 {
8218 /* Basing the location of the pool on the loop depth is preferable,
8219 but at the moment, the basic block information seems to be
8220 corrupted by this stage of the compilation. */
8221 int base_cost = 50;
8222 rtx next = next_nonnote_insn (insn);
8223
8224 if (next != NULL && GET_CODE (next) == CODE_LABEL)
8225 base_cost -= 20;
8226
8227 switch (GET_CODE (insn))
8228 {
8229 case CODE_LABEL:
8230 /* It will always be better to place the table before the label, rather
8231 than after it. */
8232 return 50;
8233
8234 case INSN:
8235 case CALL_INSN:
8236 return base_cost;
8237
8238 case JUMP_INSN:
8239 return base_cost - 10;
8240
8241 default:
8242 return base_cost + 10;
8243 }
8244 }
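/* Illustrative costs (not from the original sources): an ordinary INSN costs
   50 and a JUMP_INSN 40; either is reduced by a further 20 when the next
   non-note insn is a CODE_LABEL, so a jump immediately followed by a label
   costs 20, the most attractive spot.  A CODE_LABEL itself always costs 50,
   since the pool should go before the label rather than after it.  Lower
   values are preferred by create_fix_barrier below.  */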
8245
8246 /* Find the best place in the insn stream in the range
8247 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
8248 Create the barrier by inserting a jump and add a new fix entry for
8249 it. */
8250 static Mfix *
8251 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
8252 {
8253 HOST_WIDE_INT count = 0;
8254 rtx barrier;
8255 rtx from = fix->insn;
8256 /* The instruction after which we will insert the jump. */
8257 rtx selected = NULL;
8258 int selected_cost;
8259 /* The address at which the jump instruction will be placed. */
8260 HOST_WIDE_INT selected_address;
8261 Mfix * new_fix;
8262 HOST_WIDE_INT max_count = max_address - fix->address;
8263 rtx label = gen_label_rtx ();
8264
8265 selected_cost = arm_barrier_cost (from);
8266 selected_address = fix->address;
8267
8268 while (from && count < max_count)
8269 {
8270 rtx tmp;
8271 int new_cost;
8272
8273 /* This code shouldn't have been called if there was a natural barrier
8274 within range. */
8275 gcc_assert (GET_CODE (from) != BARRIER);
8276
8277 /* Count the length of this insn. */
8278 count += get_attr_length (from);
8279
8280 /* If there is a jump table, add its length. */
8281 tmp = is_jump_table (from);
8282 if (tmp != NULL)
8283 {
8284 count += get_jump_table_size (tmp);
8285
8286 /* Jump tables aren't in a basic block, so base the cost on
8287 the dispatch insn. If we select this location, we will
8288 still put the pool after the table. */
8289 new_cost = arm_barrier_cost (from);
8290
8291 if (count < max_count
8292 && (!selected || new_cost <= selected_cost))
8293 {
8294 selected = tmp;
8295 selected_cost = new_cost;
8296 selected_address = fix->address + count;
8297 }
8298
8299 /* Continue after the dispatch table. */
8300 from = NEXT_INSN (tmp);
8301 continue;
8302 }
8303
8304 new_cost = arm_barrier_cost (from);
8305
8306 if (count < max_count
8307 && (!selected || new_cost <= selected_cost))
8308 {
8309 selected = from;
8310 selected_cost = new_cost;
8311 selected_address = fix->address + count;
8312 }
8313
8314 from = NEXT_INSN (from);
8315 }
8316
8317 /* Make sure that we found a place to insert the jump. */
8318 gcc_assert (selected);
8319
8320 /* Create a new JUMP_INSN that branches around a barrier. */
8321 from = emit_jump_insn_after (gen_jump (label), selected);
8322 JUMP_LABEL (from) = label;
8323 barrier = emit_barrier_after (from);
8324 emit_label_after (label, barrier);
8325
8326 /* Create a minipool barrier entry for the new barrier. */
8327 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
8328 new_fix->insn = barrier;
8329 new_fix->address = selected_address;
8330 new_fix->next = fix->next;
8331 fix->next = new_fix;
8332
8333 return new_fix;
8334 }
8335
8336 /* Record that there is a natural barrier in the insn stream at
8337 ADDRESS. */
8338 static void
8339 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
8340 {
8341 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8342
8343 fix->insn = insn;
8344 fix->address = address;
8345
8346 fix->next = NULL;
8347 if (minipool_fix_head != NULL)
8348 minipool_fix_tail->next = fix;
8349 else
8350 minipool_fix_head = fix;
8351
8352 minipool_fix_tail = fix;
8353 }
8354
8355 /* Record INSN, which will need fixing up to load a value from the
8356 minipool. ADDRESS is the offset of the insn since the start of the
8357 function; LOC is a pointer to the part of the insn which requires
8358 fixing; VALUE is the constant that must be loaded, which is of type
8359 MODE. */
8360 static void
8361 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
8362 enum machine_mode mode, rtx value)
8363 {
8364 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8365
8366 #ifdef AOF_ASSEMBLER
8367 /* PIC symbol references need to be converted into offsets into the
8368 based area. */
8369 /* XXX This shouldn't be done here. */
8370 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
8371 value = aof_pic_entry (value);
8372 #endif /* AOF_ASSEMBLER */
8373
8374 fix->insn = insn;
8375 fix->address = address;
8376 fix->loc = loc;
8377 fix->mode = mode;
8378 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8379 fix->value = value;
8380 fix->forwards = get_attr_pool_range (insn);
8381 fix->backwards = get_attr_neg_pool_range (insn);
8382 fix->minipool = NULL;
8383
8384 /* If an insn doesn't have a range defined for it, then it isn't
8385 expecting to be reworked by this code. Better to stop now than
8386 to generate duff assembly code. */
8387 gcc_assert (fix->forwards || fix->backwards);
8388
8389 /* If an entry requires 8-byte alignment then assume all constant pools
8390 require 4 bytes of padding. Trying to do this later on a per-pool
8391 basis is awkward because existing pool entries have to be modified. */
8392 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8393 minipool_pad = 4;
8394
8395 if (dump_file)
8396 {
8397 fprintf (dump_file,
8398 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8399 GET_MODE_NAME (mode),
8400 INSN_UID (insn), (unsigned long) address,
8401 -1 * (long)fix->backwards, (long)fix->forwards);
8402 arm_print_value (dump_file, fix->value);
8403 fprintf (dump_file, "\n");
8404 }
8405
8406 /* Add it to the chain of fixes. */
8407 fix->next = NULL;
8408
8409 if (minipool_fix_head != NULL)
8410 minipool_fix_tail->next = fix;
8411 else
8412 minipool_fix_head = fix;
8413
8414 minipool_fix_tail = fix;
8415 }
8416
8417 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8418 Returns the number of insns needed, or 99 if we don't know how to
8419 do it. */
8420 int
8421 arm_const_double_inline_cost (rtx val)
8422 {
8423 rtx lowpart, highpart;
8424 enum machine_mode mode;
8425
8426 mode = GET_MODE (val);
8427
8428 if (mode == VOIDmode)
8429 mode = DImode;
8430
8431 gcc_assert (GET_MODE_SIZE (mode) == 8);
8432
8433 lowpart = gen_lowpart (SImode, val);
8434 highpart = gen_highpart_mode (SImode, mode, val);
8435
8436 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8437 gcc_assert (GET_CODE (highpart) == CONST_INT);
8438
8439 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8440 NULL_RTX, NULL_RTX, 0, 0)
8441 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8442 NULL_RTX, NULL_RTX, 0, 0));
8443 }
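/* Illustrative worked example (not from the original sources): for the
   64-bit value 0x0000000100000001 each 32-bit half is the constant 1, which
   arm_gen_constant can emit with a single instruction, so the returned cost
   is 2.  A value whose halves each need several data-processing instructions
   costs correspondingly more, making a literal-pool load more attractive.  */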
8444
8445 /* Return true if it is worthwhile to split a 64-bit constant into two
8446 32-bit operations. This is the case if optimizing for size, or
8447 if we have load delay slots, or if one 32-bit part can be done with
8448 a single data operation. */
8449 bool
8450 arm_const_double_by_parts (rtx val)
8451 {
8452 enum machine_mode mode = GET_MODE (val);
8453 rtx part;
8454
8455 if (optimize_size || arm_ld_sched)
8456 return true;
8457
8458 if (mode == VOIDmode)
8459 mode = DImode;
8460
8461 part = gen_highpart_mode (SImode, mode, val);
8462
8463 gcc_assert (GET_CODE (part) == CONST_INT);
8464
8465 if (const_ok_for_arm (INTVAL (part))
8466 || const_ok_for_arm (~INTVAL (part)))
8467 return true;
8468
8469 part = gen_lowpart (SImode, val);
8470
8471 gcc_assert (GET_CODE (part) == CONST_INT);
8472
8473 if (const_ok_for_arm (INTVAL (part))
8474 || const_ok_for_arm (~INTVAL (part)))
8475 return true;
8476
8477 return false;
8478 }
8479
8480 /* Scan INSN and note any of its operands that need fixing.
8481 If DO_PUSHES is false we do not actually push any of the fixups
8482 needed. The function returns TRUE if any fixups were needed/pushed.
8483 This is used by arm_memory_load_p() which needs to know about loads
8484 of constants that will be converted into minipool loads. */
8485 static bool
8486 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8487 {
8488 bool result = false;
8489 int opno;
8490
8491 extract_insn (insn);
8492
8493 if (!constrain_operands (1))
8494 fatal_insn_not_found (insn);
8495
8496 if (recog_data.n_alternatives == 0)
8497 return false;
8498
8499 /* Fill in recog_op_alt with information about the constraints of
8500 this insn. */
8501 preprocess_constraints ();
8502
8503 for (opno = 0; opno < recog_data.n_operands; opno++)
8504 {
8505 /* Things we need to fix can only occur in inputs. */
8506 if (recog_data.operand_type[opno] != OP_IN)
8507 continue;
8508
8509 /* If this alternative is a memory reference, then any mention
8510 of constants in this alternative is really to fool reload
8511 into allowing us to accept one there. We need to fix them up
8512 now so that we output the right code. */
8513 if (recog_op_alt[opno][which_alternative].memory_ok)
8514 {
8515 rtx op = recog_data.operand[opno];
8516
8517 if (CONSTANT_P (op))
8518 {
8519 if (do_pushes)
8520 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8521 recog_data.operand_mode[opno], op);
8522 result = true;
8523 }
8524 else if (GET_CODE (op) == MEM
8525 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8526 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8527 {
8528 if (do_pushes)
8529 {
8530 rtx cop = avoid_constant_pool_reference (op);
8531
8532 /* Casting the address of something to a mode narrower
8533 than a word can cause avoid_constant_pool_reference()
8534 to return the pool reference itself. That's no good to
8535 us here. Let's just hope that we can use the
8536 constant pool value directly. */
8537 if (op == cop)
8538 cop = get_pool_constant (XEXP (op, 0));
8539
8540 push_minipool_fix (insn, address,
8541 recog_data.operand_loc[opno],
8542 recog_data.operand_mode[opno], cop);
8543 }
8544
8545 result = true;
8546 }
8547 }
8548 }
8549
8550 return result;
8551 }
8552
8553 /* Gcc puts the pool in the wrong place for ARM, since we can only
8554 load addresses a limited distance around the pc. We do some
8555 special munging to move the constant pool values to the correct
8556 point in the code. */
8557 static void
8558 arm_reorg (void)
8559 {
8560 rtx insn;
8561 HOST_WIDE_INT address = 0;
8562 Mfix * fix;
8563
8564 minipool_fix_head = minipool_fix_tail = NULL;
8565
8566 /* The first insn must always be a note, or the code below won't
8567 scan it properly. */
8568 insn = get_insns ();
8569 gcc_assert (GET_CODE (insn) == NOTE);
8570 minipool_pad = 0;
8571
8572 /* Scan all the insns and record the operands that will need fixing. */
8573 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8574 {
8575 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8576 && (arm_cirrus_insn_p (insn)
8577 || GET_CODE (insn) == JUMP_INSN
8578 || arm_memory_load_p (insn)))
8579 cirrus_reorg (insn);
8580
8581 if (GET_CODE (insn) == BARRIER)
8582 push_minipool_barrier (insn, address);
8583 else if (INSN_P (insn))
8584 {
8585 rtx table;
8586
8587 note_invalid_constants (insn, address, true);
8588 address += get_attr_length (insn);
8589
8590 /* If the insn is a vector jump, add the size of the table
8591 and skip the table. */
8592 if ((table = is_jump_table (insn)) != NULL)
8593 {
8594 address += get_jump_table_size (table);
8595 insn = table;
8596 }
8597 }
8598 }
8599
8600 fix = minipool_fix_head;
8601
8602 /* Now scan the fixups and perform the required changes. */
8603 while (fix)
8604 {
8605 Mfix * ftmp;
8606 Mfix * fdel;
8607 Mfix * last_added_fix;
8608 Mfix * last_barrier = NULL;
8609 Mfix * this_fix;
8610
8611 /* Skip any further barriers before the next fix. */
8612 while (fix && GET_CODE (fix->insn) == BARRIER)
8613 fix = fix->next;
8614
8615 /* No more fixes. */
8616 if (fix == NULL)
8617 break;
8618
8619 last_added_fix = NULL;
8620
8621 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8622 {
8623 if (GET_CODE (ftmp->insn) == BARRIER)
8624 {
8625 if (ftmp->address >= minipool_vector_head->max_address)
8626 break;
8627
8628 last_barrier = ftmp;
8629 }
8630 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8631 break;
8632
8633 last_added_fix = ftmp; /* Keep track of the last fix added. */
8634 }
8635
8636 /* If we found a barrier, drop back to that; any fixes that we
8637 could have reached but come after the barrier will now go in
8638 the next mini-pool. */
8639 if (last_barrier != NULL)
8640 {
8641 /* Reduce the refcount for those fixes that won't go into this
8642 pool after all. */
8643 for (fdel = last_barrier->next;
8644 fdel && fdel != ftmp;
8645 fdel = fdel->next)
8646 {
8647 fdel->minipool->refcount--;
8648 fdel->minipool = NULL;
8649 }
8650
8651 ftmp = last_barrier;
8652 }
8653 else
8654 {
8655 /* ftmp is the first fix that we can't fit into this pool and
8656 there are no natural barriers that we could use. Insert a
8657 new barrier in the code somewhere between the previous
8658 fix and this one, and arrange to jump around it. */
8659 HOST_WIDE_INT max_address;
8660
8661 /* The last item on the list of fixes must be a barrier, so
8662 we can never run off the end of the list of fixes without
8663 last_barrier being set. */
8664 gcc_assert (ftmp);
8665
8666 max_address = minipool_vector_head->max_address;
8667 /* Check that there isn't another fix that is in range that
8668 we couldn't fit into this pool because the pool was
8669 already too large: we need to put the pool before such an
8670 instruction. The pool itself may come just after the
8671 fix because create_fix_barrier also allows space for a
8672 jump instruction. */
8673 if (ftmp->address < max_address)
8674 max_address = ftmp->address + 1;
8675
8676 last_barrier = create_fix_barrier (last_added_fix, max_address);
8677 }
8678
8679 assign_minipool_offsets (last_barrier);
8680
8681 while (ftmp)
8682 {
8683 if (GET_CODE (ftmp->insn) != BARRIER
8684 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8685 == NULL))
8686 break;
8687
8688 ftmp = ftmp->next;
8689 }
8690
8691 /* Scan over the fixes we have identified for this pool, fixing them
8692 up and adding the constants to the pool itself. */
8693 for (this_fix = fix; this_fix && ftmp != this_fix;
8694 this_fix = this_fix->next)
8695 if (GET_CODE (this_fix->insn) != BARRIER)
8696 {
8697 rtx addr
8698 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8699 minipool_vector_label),
8700 this_fix->minipool->offset);
8701 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8702 }
8703
8704 dump_minipool (last_barrier->insn);
8705 fix = ftmp;
8706 }
8707
8708 /* From now on we must synthesize any constants that we can't handle
8709 directly. This can happen if the RTL gets split during final
8710 instruction generation. */
8711 after_arm_reorg = 1;
8712
8713 /* Free the minipool memory. */
8714 obstack_free (&minipool_obstack, minipool_startobj);
8715 }
8716 \f
8717 /* Routines to output assembly language. */
8718
8719 /* If the rtx is the correct value then return the string of the number.
8720 In this way we can ensure that valid double constants are generated even
8721 when cross compiling. */
8722 const char *
8723 fp_immediate_constant (rtx x)
8724 {
8725 REAL_VALUE_TYPE r;
8726 int i;
8727
8728 if (!fp_consts_inited)
8729 init_fp_table ();
8730
8731 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8732 for (i = 0; i < 8; i++)
8733 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8734 return strings_fp[i];
8735
8736 gcc_unreachable ();
8737 }
8738
8739 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8740 static const char *
8741 fp_const_from_val (REAL_VALUE_TYPE *r)
8742 {
8743 int i;
8744
8745 if (!fp_consts_inited)
8746 init_fp_table ();
8747
8748 for (i = 0; i < 8; i++)
8749 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8750 return strings_fp[i];
8751
8752 gcc_unreachable ();
8753 }
8754
8755 /* Output the operands of a LDM/STM instruction to STREAM.
8756 MASK is the ARM register set mask of which only bits 0-15 are important.
8757 REG is the base register, either the frame pointer or the stack pointer.
8758 INSTR is the possibly suffixed load or store instruction.
8759 RFE is nonzero if the instruction should also copy spsr to cpsr. */
8760
8761 static void
8762 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8763 unsigned long mask, int rfe)
8764 {
8765 unsigned i;
8766 bool not_first = FALSE;
8767
8768 gcc_assert (!rfe || (mask & (1 << PC_REGNUM)));
8769 fputc ('\t', stream);
8770 asm_fprintf (stream, instr, reg);
8771 fputc ('{', stream);
8772
8773 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8774 if (mask & (1 << i))
8775 {
8776 if (not_first)
8777 fprintf (stream, ", ");
8778
8779 asm_fprintf (stream, "%r", i);
8780 not_first = TRUE;
8781 }
8782
8783 if (rfe)
8784 fprintf (stream, "}^\n");
8785 else
8786 fprintf (stream, "}\n");
8787 }
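/* Illustrative output (not from the original sources), using a hypothetical
   INSTR of "ldmfd\t%r!, ", REG == SP_REGNUM and a MASK with bits set for r4,
   r5 and pc:

        ldmfd   sp!, {r4, r5, pc}

   With RFE nonzero the closing brace becomes "}^" so that spsr is copied to
   cpsr on return.  */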
8788
8789
8790 /* Output a FLDMD instruction to STREAM.
8791 BASE is the register containing the address.
8792 REG and COUNT specify the register range.
8793 Extra registers may be added to avoid hardware bugs.
8794
8795 We output FLDMD even for ARMv5 VFP implementations. Although
8796 FLDMD is technically not supported until ARMv6, it is believed
8797 that all VFP implementations support its use in this context. */
8798
8799 static void
8800 vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count)
8801 {
8802 int i;
8803
8804 /* Workaround ARM10 VFPr1 bug. */
8805 if (count == 2 && !arm_arch6)
8806 {
8807 if (reg == 15)
8808 reg--;
8809 count++;
8810 }
8811
8812 fputc ('\t', stream);
8813 asm_fprintf (stream, "fldmfdd\t%r!, {", base);
8814
8815 for (i = reg; i < reg + count; i++)
8816 {
8817 if (i > reg)
8818 fputs (", ", stream);
8819 asm_fprintf (stream, "d%d", i);
8820 }
8821 fputs ("}\n", stream);
8822
8823 }
8824
8825
8826 /* Output the assembly for a store multiple. */
8827
8828 const char *
8829 vfp_output_fstmd (rtx * operands)
8830 {
8831 char pattern[100];
8832 int p;
8833 int base;
8834 int i;
8835
8836 strcpy (pattern, "fstmfdd\t%m0!, {%P1");
8837 p = strlen (pattern);
8838
8839 gcc_assert (GET_CODE (operands[1]) == REG);
8840
8841 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8842 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8843 {
8844 p += sprintf (&pattern[p], ", d%d", base + i);
8845 }
8846 strcpy (&pattern[p], "}");
8847
8848 output_asm_insn (pattern, operands);
8849 return "";
8850 }
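/* Illustrative output (not from the original sources): if operands[1] is d8
   and the PARALLEL in operands[2] has three elements, the pattern built
   above expands to roughly

        fstmfdd sp!, {d8, d9, d10}

   assuming the %m0 modifier prints the stack-pointer base of the memory
   operand and %P1 prints the double-precision register.  */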
8851
8852
8853 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8854 number of bytes pushed. */
8855
8856 static int
8857 vfp_emit_fstmd (int base_reg, int count)
8858 {
8859 rtx par;
8860 rtx dwarf;
8861 rtx tmp, reg;
8862 int i;
8863
8864 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8865 register pairs are stored by a store multiple insn. We avoid this
8866 by pushing an extra pair. */
8867 if (count == 2 && !arm_arch6)
8868 {
8869 if (base_reg == LAST_VFP_REGNUM - 3)
8870 base_reg -= 2;
8871 count++;
8872 }
8873
8874 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8875 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8876
8877 reg = gen_rtx_REG (DFmode, base_reg);
8878 base_reg += 2;
8879
8880 XVECEXP (par, 0, 0)
8881 = gen_rtx_SET (VOIDmode,
8882 gen_frame_mem (BLKmode,
8883 gen_rtx_PRE_DEC (BLKmode,
8884 stack_pointer_rtx)),
8885 gen_rtx_UNSPEC (BLKmode,
8886 gen_rtvec (1, reg),
8887 UNSPEC_PUSH_MULT));
8888
8889 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8890 plus_constant (stack_pointer_rtx, -(count * 8)));
8891 RTX_FRAME_RELATED_P (tmp) = 1;
8892 XVECEXP (dwarf, 0, 0) = tmp;
8893
8894 tmp = gen_rtx_SET (VOIDmode,
8895 gen_frame_mem (DFmode, stack_pointer_rtx),
8896 reg);
8897 RTX_FRAME_RELATED_P (tmp) = 1;
8898 XVECEXP (dwarf, 0, 1) = tmp;
8899
8900 for (i = 1; i < count; i++)
8901 {
8902 reg = gen_rtx_REG (DFmode, base_reg);
8903 base_reg += 2;
8904 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8905
8906 tmp = gen_rtx_SET (VOIDmode,
8907 gen_frame_mem (DFmode,
8908 plus_constant (stack_pointer_rtx,
8909 i * 8)),
8910 reg);
8911 RTX_FRAME_RELATED_P (tmp) = 1;
8912 XVECEXP (dwarf, 0, i + 1) = tmp;
8913 }
8914
8915 par = emit_insn (par);
8916 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8917 REG_NOTES (par));
8918 RTX_FRAME_RELATED_P (par) = 1;
8919
8920 return count * 8;
8921 }
8922
8923 /* Emit a call instruction with pattern PAT. ADDR is the address of
8924 the call target. */
8925
8926 void
8927 arm_emit_call_insn (rtx pat, rtx addr)
8928 {
8929 rtx insn;
8930
8931 insn = emit_call_insn (pat);
8932
8933 /* The PIC register is live on entry to VxWorks PIC PLT entries.
8934 If the call might use such an entry, add a use of the PIC register
8935 to the instruction's CALL_INSN_FUNCTION_USAGE. */
8936 if (TARGET_VXWORKS_RTP
8937 && flag_pic
8938 && GET_CODE (addr) == SYMBOL_REF
8939 && (SYMBOL_REF_DECL (addr)
8940 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
8941 : !SYMBOL_REF_LOCAL_P (addr)))
8942 {
8943 require_pic_register ();
8944 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
8945 }
8946 }
8947
8948 /* Output a 'call' insn. */
8949 const char *
8950 output_call (rtx *operands)
8951 {
8952 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8953
8954 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8955 if (REGNO (operands[0]) == LR_REGNUM)
8956 {
8957 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8958 output_asm_insn ("mov%?\t%0, %|lr", operands);
8959 }
8960
8961 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8962
8963 if (TARGET_INTERWORK || arm_arch4t)
8964 output_asm_insn ("bx%?\t%0", operands);
8965 else
8966 output_asm_insn ("mov%?\t%|pc, %0", operands);
8967
8968 return "";
8969 }
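/* Illustrative output (not from the original sources): for a call through r2
   this emits roughly

        mov     lr, pc
        bx      r2

   on an arm_arch4t or interworking target, or "mov pc, r2" in place of the
   bx otherwise; a call through lr first copies lr into ip and calls through
   ip instead, as the mov at the top of the function shows.  */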
8970
8971 /* Output a 'call' insn that is a reference in memory. */
8972 const char *
8973 output_call_mem (rtx *operands)
8974 {
8975 if (TARGET_INTERWORK && !arm_arch5)
8976 {
8977 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8978 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8979 output_asm_insn ("bx%?\t%|ip", operands);
8980 }
8981 else if (regno_use_in (LR_REGNUM, operands[0]))
8982 {
8983 /* LR is used in the memory address. We load the address in the
8984 first instruction. It's safe to use IP as the target of the
8985 load since the call will kill it anyway. */
8986 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8987 if (arm_arch5)
8988 output_asm_insn ("blx%?\t%|ip", operands);
8989 else
8990 {
8991 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8992 if (arm_arch4t)
8993 output_asm_insn ("bx%?\t%|ip", operands);
8994 else
8995 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8996 }
8997 }
8998 else
8999 {
9000 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
9001 output_asm_insn ("ldr%?\t%|pc, %0", operands);
9002 }
9003
9004 return "";
9005 }
9006
9007
9008 /* Output a move from arm registers to an fpa register.
9009 OPERANDS[0] is an fpa register.
9010 OPERANDS[1] is the first register of an arm register pair. */
9011 const char *
9012 output_mov_long_double_fpa_from_arm (rtx *operands)
9013 {
9014 int arm_reg0 = REGNO (operands[1]);
9015 rtx ops[3];
9016
9017 gcc_assert (arm_reg0 != IP_REGNUM);
9018
9019 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9020 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9021 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
9022
9023 output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
9024 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
9025
9026 return "";
9027 }
9028
9029 /* Output a move from an fpa register to arm registers.
9030 OPERANDS[0] is the first register of an arm register pair.
9031 OPERANDS[1] is an fpa register. */
9032 const char *
9033 output_mov_long_double_arm_from_fpa (rtx *operands)
9034 {
9035 int arm_reg0 = REGNO (operands[0]);
9036 rtx ops[3];
9037
9038 gcc_assert (arm_reg0 != IP_REGNUM);
9039
9040 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9041 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9042 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
9043
9044 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
9045 output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
9046 return "";
9047 }
9048
9049 /* Output a move from arm registers to arm registers of a long double.
9050 OPERANDS[0] is the destination.
9051 OPERANDS[1] is the source. */
9052 const char *
9053 output_mov_long_double_arm_from_arm (rtx *operands)
9054 {
9055 /* We have to be careful here because the two might overlap. */
9056 int dest_start = REGNO (operands[0]);
9057 int src_start = REGNO (operands[1]);
9058 rtx ops[2];
9059 int i;
9060
9061 if (dest_start < src_start)
9062 {
9063 for (i = 0; i < 3; i++)
9064 {
9065 ops[0] = gen_rtx_REG (SImode, dest_start + i);
9066 ops[1] = gen_rtx_REG (SImode, src_start + i);
9067 output_asm_insn ("mov%?\t%0, %1", ops);
9068 }
9069 }
9070 else
9071 {
9072 for (i = 2; i >= 0; i--)
9073 {
9074 ops[0] = gen_rtx_REG (SImode, dest_start + i);
9075 ops[1] = gen_rtx_REG (SImode, src_start + i);
9076 output_asm_insn ("mov%?\t%0, %1", ops);
9077 }
9078 }
9079
9080 return "";
9081 }
9082
9083
9084 /* Output a move from arm registers to an fpa register.
9085 OPERANDS[0] is an fpa register.
9086 OPERANDS[1] is the first register of an arm register pair. */
9087 const char *
9088 output_mov_double_fpa_from_arm (rtx *operands)
9089 {
9090 int arm_reg0 = REGNO (operands[1]);
9091 rtx ops[2];
9092
9093 gcc_assert (arm_reg0 != IP_REGNUM);
9094
9095 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9096 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9097 output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1}", ops);
9098 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
9099 return "";
9100 }
9101
9102 /* Output a move from an fpa register to arm registers.
9103 OPERANDS[0] is the first register of an arm register pair.
9104 OPERANDS[1] is an fpa register. */
9105 const char *
9106 output_mov_double_arm_from_fpa (rtx *operands)
9107 {
9108 int arm_reg0 = REGNO (operands[0]);
9109 rtx ops[2];
9110
9111 gcc_assert (arm_reg0 != IP_REGNUM);
9112
9113 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9114 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9115 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
9116 output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1}", ops);
9117 return "";
9118 }
9119
9120 /* Output a move between double words.
9121 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
9122 or MEM<-REG and all MEMs must be offsettable addresses. */
9123 const char *
9124 output_move_double (rtx *operands)
9125 {
9126 enum rtx_code code0 = GET_CODE (operands[0]);
9127 enum rtx_code code1 = GET_CODE (operands[1]);
9128 rtx otherops[3];
9129
9130 if (code0 == REG)
9131 {
9132 int reg0 = REGNO (operands[0]);
9133
9134 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9135
9136 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
9137
9138 switch (GET_CODE (XEXP (operands[1], 0)))
9139 {
9140 case REG:
9141 output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
9142 break;
9143
9144 case PRE_INC:
9145 gcc_assert (TARGET_LDRD);
9146 output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
9147 break;
9148
9149 case PRE_DEC:
9150 if (TARGET_LDRD)
9151 output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
9152 else
9153 output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
9154 break;
9155
9156 case POST_INC:
9157 output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
9158 break;
9159
9160 case POST_DEC:
9161 gcc_assert (TARGET_LDRD);
9162 output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
9163 break;
9164
9165 case PRE_MODIFY:
9166 case POST_MODIFY:
9167 otherops[0] = operands[0];
9168 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
9169 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
9170
9171 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
9172 {
9173 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
9174 {
9175 /* Registers overlap so split out the increment. */
9176 output_asm_insn ("add%?\t%1, %1, %2", otherops);
9177 output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
9178 }
9179 else
9180 {
9181 /* IWMMXT allows offsets larger than ldrd can handle,
9182 so fix these up with a pair of ldr. */
9183 if (GET_CODE (otherops[2]) == CONST_INT
9184 && (INTVAL(otherops[2]) <= -256
9185 || INTVAL(otherops[2]) >= 256))
9186 {
9187 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
9188 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9189 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9190 }
9191 else
9192 output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
9193 }
9194 }
9195 else
9196 {
9197 /* IWMMXT allows offsets larger than ldrd can handle,
9198 so fix these up with a pair of ldr. */
9199 if (GET_CODE (otherops[2]) == CONST_INT
9200 && (INTVAL(otherops[2]) <= -256
9201 || INTVAL(otherops[2]) >= 256))
9202 {
9203 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9204 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9205 otherops[0] = operands[0];
9206 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
9207 }
9208 else
9209 /* We only allow constant increments, so this is safe. */
9210 output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
9211 }
9212 break;
9213
9214 case LABEL_REF:
9215 case CONST:
9216 output_asm_insn ("adr%?\t%0, %1", operands);
9217 output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
9218 break;
9219
9220 /* ??? This needs checking for thumb2. */
9221 default:
9222 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
9223 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
9224 {
9225 otherops[0] = operands[0];
9226 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
9227 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
9228
9229 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
9230 {
9231 if (GET_CODE (otherops[2]) == CONST_INT)
9232 {
9233 switch ((int) INTVAL (otherops[2]))
9234 {
9235 case -8:
9236 output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
9237 return "";
9238 case -4:
9239 if (TARGET_THUMB2)
9240 break;
9241 output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
9242 return "";
9243 case 4:
9244 if (TARGET_THUMB2)
9245 break;
9246 output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
9247 return "";
9248 }
9249 }
9250 if (TARGET_LDRD
9251 && (GET_CODE (otherops[2]) == REG
9252 || (GET_CODE (otherops[2]) == CONST_INT
9253 && INTVAL (otherops[2]) > -256
9254 && INTVAL (otherops[2]) < 256)))
9255 {
9256 if (reg_overlap_mentioned_p (otherops[0],
9257 otherops[2]))
9258 {
9259 /* Swap base and index registers over to
9260 avoid a conflict. */
9261 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
9262 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
9263 }
9264 /* If both registers conflict, it will usually
9265 have been fixed by a splitter. */
9266 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
9267 {
9268 output_asm_insn ("add%?\t%1, %1, %2", otherops);
9269 output_asm_insn ("ldr%(d%)\t%0, [%1]",
9270 otherops);
9271 }
9272 else
9273 output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
9274 return "";
9275 }
9276
9277 if (GET_CODE (otherops[2]) == CONST_INT)
9278 {
9279 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
9280 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
9281 else
9282 output_asm_insn ("add%?\t%0, %1, %2", otherops);
9283 }
9284 else
9285 output_asm_insn ("add%?\t%0, %1, %2", otherops);
9286 }
9287 else
9288 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
9289
9290 return "ldm%(ia%)\t%0, %M0";
9291 }
9292 else
9293 {
9294 otherops[1] = adjust_address (operands[1], SImode, 4);
9295 /* Take care of overlapping base/data reg. */
9296 if (reg_mentioned_p (operands[0], operands[1]))
9297 {
9298 output_asm_insn ("ldr%?\t%0, %1", otherops);
9299 output_asm_insn ("ldr%?\t%0, %1", operands);
9300 }
9301 else
9302 {
9303 output_asm_insn ("ldr%?\t%0, %1", operands);
9304 output_asm_insn ("ldr%?\t%0, %1", otherops);
9305 }
9306 }
9307 }
9308 }
9309 else
9310 {
9311 /* Constraints should ensure this. */
9312 gcc_assert (code0 == MEM && code1 == REG);
9313 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
9314
9315 switch (GET_CODE (XEXP (operands[0], 0)))
9316 {
9317 case REG:
9318 output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
9319 break;
9320
9321 case PRE_INC:
9322 gcc_assert (TARGET_LDRD);
9323 output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
9324 break;
9325
9326 case PRE_DEC:
9327 if (TARGET_LDRD)
9328 output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
9329 else
9330 output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
9331 break;
9332
9333 case POST_INC:
9334 output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
9335 break;
9336
9337 case POST_DEC:
9338 gcc_assert (TARGET_LDRD);
9339 output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
9340 break;
9341
9342 case PRE_MODIFY:
9343 case POST_MODIFY:
9344 otherops[0] = operands[1];
9345 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
9346 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
9347
9348 /* IWMMXT allows offsets larger than strd can handle,
9349 so fix these up with a pair of str. */
9350 if (GET_CODE (otherops[2]) == CONST_INT
9351 && (INTVAL(otherops[2]) <= -256
9352 || INTVAL(otherops[2]) >= 256))
9353 {
9354 rtx reg1;
9355 reg1 = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9356 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
9357 {
9358 output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
9359 otherops[0] = reg1;
9360 output_asm_insn ("str%?\t%0, [%1, #4]", otherops);
9361 }
9362 else
9363 {
9364 otherops[0] = reg1;
9365 output_asm_insn ("str%?\t%0, [%1, #4]", otherops);
9366 otherops[0] = operands[1];
9367 output_asm_insn ("str%?\t%0, [%1], %2", otherops);
9368 }
9369 }
9370 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
9371 output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
9372 else
9373 output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
9374 break;
9375
9376 case PLUS:
9377 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
9378 if (GET_CODE (otherops[2]) == CONST_INT)
9379 {
9380 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
9381 {
9382 case -8:
9383 output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
9384 return "";
9385
9386 case -4:
9387 if (TARGET_THUMB2)
9388 break;
9389 output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
9390 return "";
9391
9392 case 4:
9393 if (TARGET_THUMB2)
9394 break;
9395 output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
9396 return "";
9397 }
9398 }
9399 if (TARGET_LDRD
9400 && (GET_CODE (otherops[2]) == REG
9401 || (GET_CODE (otherops[2]) == CONST_INT
9402 && INTVAL (otherops[2]) > -256
9403 && INTVAL (otherops[2]) < 256)))
9404 {
9405 otherops[0] = operands[1];
9406 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
9407 output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
9408 return "";
9409 }
9410 /* Fall through */
9411
9412 default:
9413 otherops[0] = adjust_address (operands[0], SImode, 4);
9414 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9415 output_asm_insn ("str%?\t%1, %0", operands);
9416 output_asm_insn ("str%?\t%1, %0", otherops);
9417 }
9418 }
9419
9420 return "";
9421 }
9422
9423 /* Output a VFP load or store instruction. */
9424
9425 const char *
9426 output_move_vfp (rtx *operands)
9427 {
9428 rtx reg, mem, addr, ops[2];
9429 int load = REG_P (operands[0]);
9430 int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
9431 int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
9432 const char *template;
9433 char buff[50];
9434
9435 reg = operands[!load];
9436 mem = operands[load];
9437
9438 gcc_assert (REG_P (reg));
9439 gcc_assert (IS_VFP_REGNUM (REGNO (reg)));
9440 gcc_assert (GET_MODE (reg) == SFmode
9441 || GET_MODE (reg) == DFmode
9442 || GET_MODE (reg) == SImode
9443 || GET_MODE (reg) == DImode);
9444 gcc_assert (MEM_P (mem));
9445
9446 addr = XEXP (mem, 0);
9447
9448 switch (GET_CODE (addr))
9449 {
9450 case PRE_DEC:
9451 template = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
9452 ops[0] = XEXP (addr, 0);
9453 ops[1] = reg;
9454 break;
9455
9456 case POST_INC:
9457 template = "f%smia%c%%?\t%%0!, {%%%s1}%s";
9458 ops[0] = XEXP (addr, 0);
9459 ops[1] = reg;
9460 break;
9461
9462 default:
9463 template = "f%s%c%%?\t%%%s0, %%1%s";
9464 ops[0] = reg;
9465 ops[1] = mem;
9466 break;
9467 }
9468
9469 sprintf (buff, template,
9470 load ? "ld" : "st",
9471 dp ? 'd' : 's',
9472 dp ? "P" : "",
9473 integer_p ? "\t%@ int" : "");
9474 output_asm_insn (buff, ops);
9475
9476 return "";
9477 }
9478
9479 /* Output an ADD r, s, #n where n may be too big for one instruction.
9480 If adding zero to one register, output nothing. */
9481 const char *
9482 output_add_immediate (rtx *operands)
9483 {
9484 HOST_WIDE_INT n = INTVAL (operands[2]);
9485
9486 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
9487 {
9488 if (n < 0)
9489 output_multi_immediate (operands,
9490 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
9491 -n);
9492 else
9493 output_multi_immediate (operands,
9494 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
9495 n);
9496 }
9497
9498 return "";
9499 }
9500
9501 /* Output a multiple immediate operation.
9502 OPERANDS is the vector of operands referred to in the output patterns.
9503 INSTR1 is the output pattern to use for the first constant.
9504 INSTR2 is the output pattern to use for subsequent constants.
9505 IMMED_OP is the index of the constant slot in OPERANDS.
9506 N is the constant value. */
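/* Illustrative example (hypothetical operands): with
   INSTR1 = "add%?\t%0, %1, %2", INSTR2 = "add%?\t%0, %0, %2" and
   N = 0x00f000ff, the loop below finds two 8-bit chunks and emits

       add     rD, rS, #255            @ 0x000000ff
       add     rD, rD, #15728640       @ 0x00f00000

   Each chunk of the form (255 << i) with i even is a valid ARM rotated
   immediate, so any 32-bit constant needs at most four such insns.  */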
9507 static const char *
9508 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
9509 int immed_op, HOST_WIDE_INT n)
9510 {
9511 #if HOST_BITS_PER_WIDE_INT > 32
9512 n &= 0xffffffff;
9513 #endif
9514
9515 if (n == 0)
9516 {
9517 /* Quick and easy output. */
9518 operands[immed_op] = const0_rtx;
9519 output_asm_insn (instr1, operands);
9520 }
9521 else
9522 {
9523 int i;
9524 const char * instr = instr1;
9525
9526 /* Note that n is never zero here (which would give no output). */
9527 for (i = 0; i < 32; i += 2)
9528 {
9529 if (n & (3 << i))
9530 {
9531 operands[immed_op] = GEN_INT (n & (255 << i));
9532 output_asm_insn (instr, operands);
9533 instr = instr2;
9534 i += 6;
9535 }
9536 }
9537 }
9538
9539 return "";
9540 }
9541
9542 /* Return the name of a shifter operation. */
9543 static const char *
9544 arm_shift_nmem(enum rtx_code code)
9545 {
9546 switch (code)
9547 {
9548 case ASHIFT:
9549 return ARM_LSL_NAME;
9550
9551 case ASHIFTRT:
9552 return "asr";
9553
9554 case LSHIFTRT:
9555 return "lsr";
9556
9557 case ROTATERT:
9558 return "ror";
9559
9560 default:
9561 abort();
9562 }
9563 }
9564
9565 /* Return the appropriate ARM instruction for the operation code.
9566 The returned result should not be overwritten. OP is the rtx of the
9567 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9568 was shifted. */
9569 const char *
9570 arithmetic_instr (rtx op, int shift_first_arg)
9571 {
9572 switch (GET_CODE (op))
9573 {
9574 case PLUS:
9575 return "add";
9576
9577 case MINUS:
9578 return shift_first_arg ? "rsb" : "sub";
9579
9580 case IOR:
9581 return "orr";
9582
9583 case XOR:
9584 return "eor";
9585
9586 case AND:
9587 return "and";
9588
9589 case ASHIFT:
9590 case ASHIFTRT:
9591 case LSHIFTRT:
9592 case ROTATERT:
9593 return arm_shift_nmem(GET_CODE(op));
9594
9595 default:
9596 gcc_unreachable ();
9597 }
9598 }
9599
9600 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9601 for the operation code. The returned result should not be overwritten.
9602 OP is the rtx code of the shift.
9603    On exit, *AMOUNTP will be -1 if the shift is by a register, otherwise
9604    it will be the constant shift amount.  */
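/* Some illustrative inputs (RTL sketches, not from real insns):

     (ashiftrt:SI (reg) (const_int 3))   -> "asr",        *AMOUNTP = 3
     (lshiftrt:SI (reg) (reg))           -> "lsr",        *AMOUNTP = -1
     (mult:SI (reg) (const_int 8))       -> ARM_LSL_NAME, *AMOUNTP = 3
     (rotate:SI (reg) (const_int 10))    -> "ror",        *AMOUNTP = 22
     (ashift:SI (reg) (const_int 0))     -> NULL (a shift by zero is a no-op)  */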
9605 static const char *
9606 shift_op (rtx op, HOST_WIDE_INT *amountp)
9607 {
9608 const char * mnem;
9609 enum rtx_code code = GET_CODE (op);
9610
9611 switch (GET_CODE (XEXP (op, 1)))
9612 {
9613 case REG:
9614 case SUBREG:
9615 *amountp = -1;
9616 break;
9617
9618 case CONST_INT:
9619 *amountp = INTVAL (XEXP (op, 1));
9620 break;
9621
9622 default:
9623 gcc_unreachable ();
9624 }
9625
9626 switch (code)
9627 {
9628 case ROTATE:
9629 gcc_assert (*amountp != -1);
9630 *amountp = 32 - *amountp;
9631 code = ROTATERT;
9632
9633 /* Fall through. */
9634
9635 case ASHIFT:
9636 case ASHIFTRT:
9637 case LSHIFTRT:
9638 case ROTATERT:
9639 mnem = arm_shift_nmem(code);
9640 break;
9641
9642 case MULT:
9643 /* We never have to worry about the amount being other than a
9644 power of 2, since this case can never be reloaded from a reg. */
9645 gcc_assert (*amountp != -1);
9646 *amountp = int_log2 (*amountp);
9647 return ARM_LSL_NAME;
9648
9649 default:
9650 gcc_unreachable ();
9651 }
9652
9653 if (*amountp != -1)
9654 {
9655 /* This is not 100% correct, but follows from the desire to merge
9656 multiplication by a power of 2 with the recognizer for a
9657 shift. >=32 is not a valid shift for "lsl", so we must try and
9658 output a shift that produces the correct arithmetical result.
9659 Using lsr #32 is identical except for the fact that the carry bit
9660 is not set correctly if we set the flags; but we never use the
9661 carry bit from such an operation, so we can ignore that. */
9662 if (code == ROTATERT)
9663 /* Rotate is just modulo 32. */
9664 *amountp &= 31;
9665 else if (*amountp != (*amountp & 31))
9666 {
9667 if (code == ASHIFT)
9668 mnem = "lsr";
9669 *amountp = 32;
9670 }
9671
9672 /* Shifts of 0 are no-ops. */
9673 if (*amountp == 0)
9674 return NULL;
9675 }
9676
9677 return mnem;
9678 }
9679
9680 /* Return the shift count for POWER, which must be a power of two
        (i.e. its base-2 logarithm).  */
9681
9682 static HOST_WIDE_INT
9683 int_log2 (HOST_WIDE_INT power)
9684 {
9685 HOST_WIDE_INT shift = 0;
9686
9687 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9688 {
9689 gcc_assert (shift <= 31);
9690 shift++;
9691 }
9692
9693 return shift;
9694 }
9695
9696 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9697 because /bin/as is horribly restrictive. The judgement about
9698 whether or not each character is 'printable' (and can be output as
9699 is) or not (and must be printed with an octal escape) must be made
9700 with reference to the *host* character set -- the situation is
9701 similar to that discussed in the comments above pp_c_char in
9702 c-pretty-print.c. */
9703
9704 #define MAX_ASCII_LEN 51
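/* For example (illustrative input), for the 4-byte string { 'o', 'k', '"', '\n' }
   this emits a single directive, escaping the quote and printing the
   non-printable newline in octal:

       .ascii  "ok\"\012"

   Longer strings are split into several .ascii directives once
   MAX_ASCII_LEN characters have been written on one line.  */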
9705
9706 void
9707 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9708 {
9709 int i;
9710 int len_so_far = 0;
9711
9712 fputs ("\t.ascii\t\"", stream);
9713
9714 for (i = 0; i < len; i++)
9715 {
9716 int c = p[i];
9717
9718 if (len_so_far >= MAX_ASCII_LEN)
9719 {
9720 fputs ("\"\n\t.ascii\t\"", stream);
9721 len_so_far = 0;
9722 }
9723
9724 if (ISPRINT (c))
9725 {
9726 if (c == '\\' || c == '\"')
9727 {
9728 putc ('\\', stream);
9729 len_so_far++;
9730 }
9731 putc (c, stream);
9732 len_so_far++;
9733 }
9734 else
9735 {
9736 fprintf (stream, "\\%03o", c);
9737 len_so_far += 4;
9738 }
9739 }
9740
9741 fputs ("\"\n", stream);
9742 }
9743 \f
9744 /* Compute the register save mask for registers 0 through 12
9745 inclusive. This code is used by arm_compute_save_reg_mask. */
9746
9747 static unsigned long
9748 arm_compute_save_reg0_reg12_mask (void)
9749 {
9750 unsigned long func_type = arm_current_func_type ();
9751 unsigned long save_reg_mask = 0;
9752 unsigned int reg;
9753
9754 if (IS_INTERRUPT (func_type))
9755 {
9756 unsigned int max_reg;
9757 /* Interrupt functions must not corrupt any registers,
9758 even call clobbered ones. If this is a leaf function
9759 we can just examine the registers used by the RTL, but
9760 otherwise we have to assume that whatever function is
9761 called might clobber anything, and so we have to save
9762 all the call-clobbered registers as well. */
9763 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9764 /* FIQ handlers have registers r8 - r12 banked, so
9765 	   we only need to check r0 - r7.  Normal ISRs only
9766 bank r14 and r15, so we must check up to r12.
9767 r13 is the stack pointer which is always preserved,
9768 so we do not need to consider it here. */
9769 max_reg = 7;
9770 else
9771 max_reg = 12;
9772
9773 for (reg = 0; reg <= max_reg; reg++)
9774 if (df_regs_ever_live_p (reg)
9775 || (! current_function_is_leaf && call_used_regs[reg]))
9776 save_reg_mask |= (1 << reg);
9777
9778 /* Also save the pic base register if necessary. */
9779 if (flag_pic
9780 && !TARGET_SINGLE_PIC_BASE
9781 && arm_pic_register != INVALID_REGNUM
9782 && current_function_uses_pic_offset_table)
9783 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9784 }
9785 else
9786 {
9787 /* In arm mode we handle r11 (FP) as a special case. */
9788 unsigned last_reg = TARGET_ARM ? 10 : 11;
9789
9790 /* In the normal case we only need to save those registers
9791 which are call saved and which are used by this function. */
9792 for (reg = 0; reg <= last_reg; reg++)
9793 if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
9794 save_reg_mask |= (1 << reg);
9795
9796 /* Handle the frame pointer as a special case. */
9797 if (! TARGET_APCS_FRAME
9798 && ! frame_pointer_needed
9799 && df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM)
9800 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9801 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9807
9808 /* If we aren't loading the PIC register,
9809 don't stack it even though it may be live. */
9810 if (flag_pic
9811 && !TARGET_SINGLE_PIC_BASE
9812 && arm_pic_register != INVALID_REGNUM
9813 && (df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM)
9814 || current_function_uses_pic_offset_table))
9815 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9816
9817 /* The prologue will copy SP into R0, so save it. */
9818 if (IS_STACKALIGN (func_type))
9819 save_reg_mask |= 1;
9820 }
9821
9822 /* Save registers so the exception handler can modify them. */
9823 if (current_function_calls_eh_return)
9824 {
9825 unsigned int i;
9826
9827 for (i = 0; ; i++)
9828 {
9829 reg = EH_RETURN_DATA_REGNO (i);
9830 if (reg == INVALID_REGNUM)
9831 break;
9832 save_reg_mask |= 1 << reg;
9833 }
9834 }
9835
9836 return save_reg_mask;
9837 }
9838
9839
9840 /* Compute a bit mask of which registers need to be
9841 saved on the stack for the current function. */
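/* For instance (an illustrative case, not a requirement): a normal
   ARM-mode function that needs a frame pointer and uses the call-saved
   registers r4 and r5 ends up with a mask covering r4, r5, fp, ip, lr
   and pc, which is the register list the prologue pushes.  */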
9842
9843 static unsigned long
9844 arm_compute_save_reg_mask (void)
9845 {
9846 unsigned int save_reg_mask = 0;
9847 unsigned long func_type = arm_current_func_type ();
9848 unsigned int reg;
9849
9850 if (IS_NAKED (func_type))
9851 /* This should never really happen. */
9852 return 0;
9853
9854 /* If we are creating a stack frame, then we must save the frame pointer,
9855 IP (which will hold the old stack pointer), LR and the PC. */
9856 if (frame_pointer_needed && TARGET_ARM)
9857 save_reg_mask |=
9858 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9859 | (1 << IP_REGNUM)
9860 | (1 << LR_REGNUM)
9861 | (1 << PC_REGNUM);
9862
9863 /* Volatile functions do not return, so there
9864 is no need to save any other registers. */
9865 if (IS_VOLATILE (func_type))
9866 return save_reg_mask;
9867
9868 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9869
9870 /* Decide if we need to save the link register.
9871 Interrupt routines have their own banked link register,
9872 so they never need to save it.
9873 Otherwise if we do not use the link register we do not need to save
9874 it. If we are pushing other registers onto the stack however, we
9875 can save an instruction in the epilogue by pushing the link register
9876 now and then popping it back into the PC. This incurs extra memory
9877 accesses though, so we only do it when optimizing for size, and only
9878 if we know that we will not need a fancy return sequence. */
9879 if (df_regs_ever_live_p (LR_REGNUM)
9880 || (save_reg_mask
9881 && optimize_size
9882 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9883 && !current_function_calls_eh_return))
9884 save_reg_mask |= 1 << LR_REGNUM;
9885
9886 if (cfun->machine->lr_save_eliminated)
9887 save_reg_mask &= ~ (1 << LR_REGNUM);
9888
9889 if (TARGET_REALLY_IWMMXT
9890 && ((bit_count (save_reg_mask)
9891 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9892 {
9893 /* The total number of registers that are going to be pushed
9894 onto the stack is odd. We need to ensure that the stack
9895 is 64-bit aligned before we start to save iWMMXt registers,
9896 and also before we start to create locals. (A local variable
9897 might be a double or long long which we will load/store using
9898 an iWMMXt instruction). Therefore we need to push another
9899 ARM register, so that the stack will be 64-bit aligned. We
9900 	 try to avoid using the arg registers (r0 - r3) as they might be
9901 used to pass values in a tail call. */
9902 for (reg = 4; reg <= 12; reg++)
9903 if ((save_reg_mask & (1 << reg)) == 0)
9904 break;
9905
9906 if (reg <= 12)
9907 save_reg_mask |= (1 << reg);
9908 else
9909 {
9910 cfun->machine->sibcall_blocked = 1;
9911 save_reg_mask |= (1 << 3);
9912 }
9913 }
9914
9915 /* We may need to push an additional register for use initializing the
9916 PIC base register. */
9917 if (TARGET_THUMB2 && IS_NESTED (func_type) && flag_pic
9918 && (save_reg_mask & THUMB2_WORK_REGS) == 0)
9919 {
9920 reg = thumb_find_work_register (1 << 4);
9921 if (!call_used_regs[reg])
9922 save_reg_mask |= (1 << reg);
9923 }
9924
9925 return save_reg_mask;
9926 }
9927
9928
9929 /* Compute a bit mask of which registers need to be
9930 saved on the stack for the current function. */
9931 static unsigned long
9932 thumb1_compute_save_reg_mask (void)
9933 {
9934 unsigned long mask;
9935 unsigned reg;
9936
9937 mask = 0;
9938 for (reg = 0; reg < 12; reg ++)
9939 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
9940 mask |= 1 << reg;
9941
9942 if (flag_pic
9943 && !TARGET_SINGLE_PIC_BASE
9944 && arm_pic_register != INVALID_REGNUM
9945 && current_function_uses_pic_offset_table)
9946 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9947
9948 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9949 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9950 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9951
9952 /* LR will also be pushed if any lo regs are pushed. */
9953 if (mask & 0xff || thumb_force_lr_save ())
9954 mask |= (1 << LR_REGNUM);
9955
9956 /* Make sure we have a low work register if we need one.
9957 We will need one if we are going to push a high register,
9958 but we are not currently intending to push a low register. */
9959 if ((mask & 0xff) == 0
9960 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9961 {
9962 /* Use thumb_find_work_register to choose which register
9963 we will use. If the register is live then we will
9964 have to push it. Use LAST_LO_REGNUM as our fallback
9965 choice for the register to select. */
9966 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9967
9968 if (! call_used_regs[reg])
9969 mask |= 1 << reg;
9970 }
9971
9972 return mask;
9973 }
9974
9975
9976 /* Return the number of bytes required to save VFP registers. */
9977 static int
9978 arm_get_vfp_saved_size (void)
9979 {
9980 unsigned int regno;
9981 int count;
9982 int saved;
9983
9984 saved = 0;
9985 /* Space for saved VFP registers. */
9986 if (TARGET_HARD_FLOAT && TARGET_VFP)
9987 {
9988 count = 0;
9989 for (regno = FIRST_VFP_REGNUM;
9990 regno < LAST_VFP_REGNUM;
9991 regno += 2)
9992 {
9993 if ((!df_regs_ever_live_p (regno) || call_used_regs[regno])
9994 && (!df_regs_ever_live_p (regno + 1) || call_used_regs[regno + 1]))
9995 {
9996 if (count > 0)
9997 {
9998 /* Workaround ARM10 VFPr1 bug. */
9999 if (count == 2 && !arm_arch6)
10000 count++;
10001 saved += count * 8;
10002 }
10003 count = 0;
10004 }
10005 else
10006 count++;
10007 }
10008 if (count > 0)
10009 {
10010 if (count == 2 && !arm_arch6)
10011 count++;
10012 saved += count * 8;
10013 }
10014 }
10015 return saved;
10016 }
10017
10018
10019 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
10020 everything bar the final return instruction. */
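/* A sketch of typical output (hypothetical register usage, assuming no
   interworking and a normal function type): if { r4, r5, lr } were saved,
   the registers are reloaded and the return is folded into the load of
   the PC:

       ldmfd   sp!, {r4, r5, pc}

   and if only lr was saved, the cheaper single-register form is used:

       ldr     pc, [sp], #4  */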
10021 const char *
10022 output_return_instruction (rtx operand, int really_return, int reverse)
10023 {
10024 char conditional[10];
10025 char instr[100];
10026 unsigned reg;
10027 unsigned long live_regs_mask;
10028 unsigned long func_type;
10029 arm_stack_offsets *offsets;
10030
10031 func_type = arm_current_func_type ();
10032
10033 if (IS_NAKED (func_type))
10034 return "";
10035
10036 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
10037 {
10038 /* If this function was declared non-returning, and we have
10039 found a tail call, then we have to trust that the called
10040 function won't return. */
10041 if (really_return)
10042 {
10043 rtx ops[2];
10044
10045 /* Otherwise, trap an attempted return by aborting. */
10046 ops[0] = operand;
10047 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
10048 : "abort");
10049 assemble_external_libcall (ops[1]);
10050 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
10051 }
10052
10053 return "";
10054 }
10055
10056 gcc_assert (!current_function_calls_alloca || really_return);
10057
10058 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
10059
10060 return_used_this_function = 1;
10061
10062 live_regs_mask = arm_compute_save_reg_mask ();
10063
10064 if (live_regs_mask)
10065 {
10066 const char * return_reg;
10067
10068 /* If we do not have any special requirements for function exit
10069 (e.g. interworking) then we can load the return address
10070 directly into the PC. Otherwise we must load it into LR. */
10071 if (really_return
10072 && (IS_INTERRUPT (func_type) || !TARGET_INTERWORK))
10073 return_reg = reg_names[PC_REGNUM];
10074 else
10075 return_reg = reg_names[LR_REGNUM];
10076
10077 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
10078 {
10079 /* There are three possible reasons for the IP register
10080 being saved. 1) a stack frame was created, in which case
10081 IP contains the old stack pointer, or 2) an ISR routine
10082 corrupted it, or 3) it was saved to align the stack on
10083 iWMMXt. In case 1, restore IP into SP, otherwise just
10084 restore IP. */
10085 if (frame_pointer_needed)
10086 {
10087 live_regs_mask &= ~ (1 << IP_REGNUM);
10088 live_regs_mask |= (1 << SP_REGNUM);
10089 }
10090 else
10091 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
10092 }
10093
10094 /* On some ARM architectures it is faster to use LDR rather than
10095 LDM to load a single register. On other architectures, the
10096 cost is the same. In 26 bit mode, or for exception handlers,
10097 we have to use LDM to load the PC so that the CPSR is also
10098 restored. */
10099 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
10100 if (live_regs_mask == (1U << reg))
10101 break;
10102
10103 if (reg <= LAST_ARM_REGNUM
10104 && (reg != LR_REGNUM
10105 || ! really_return
10106 || ! IS_INTERRUPT (func_type)))
10107 {
10108 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
10109 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
10110 }
10111 else
10112 {
10113 char *p;
10114 int first = 1;
10115
10116 /* Generate the load multiple instruction to restore the
10117 registers. Note we can get here, even if
10118 frame_pointer_needed is true, but only if sp already
10119 points to the base of the saved core registers. */
10120 if (live_regs_mask & (1 << SP_REGNUM))
10121 {
10122 unsigned HOST_WIDE_INT stack_adjust;
10123
10124 offsets = arm_get_frame_offsets ();
10125 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
10126 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
10127
10128 if (stack_adjust && arm_arch5 && TARGET_ARM)
10129 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
10130 else
10131 {
10132 /* If we can't use ldmib (SA110 bug),
10133 then try to pop r3 instead. */
10134 if (stack_adjust)
10135 live_regs_mask |= 1 << 3;
10136 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
10137 }
10138 }
10139 else
10140 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
10141
10142 p = instr + strlen (instr);
10143
10144 for (reg = 0; reg <= SP_REGNUM; reg++)
10145 if (live_regs_mask & (1 << reg))
10146 {
10147 int l = strlen (reg_names[reg]);
10148
10149 if (first)
10150 first = 0;
10151 else
10152 {
10153 memcpy (p, ", ", 2);
10154 p += 2;
10155 }
10156
10157 memcpy (p, "%|", 2);
10158 memcpy (p + 2, reg_names[reg], l);
10159 p += l + 2;
10160 }
10161
10162 if (live_regs_mask & (1 << LR_REGNUM))
10163 {
10164 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
10165 /* If returning from an interrupt, restore the CPSR. */
10166 if (IS_INTERRUPT (func_type))
10167 strcat (p, "^");
10168 }
10169 else
10170 strcpy (p, "}");
10171 }
10172
10173 output_asm_insn (instr, & operand);
10174
10175 /* See if we need to generate an extra instruction to
10176 perform the actual function return. */
10177 if (really_return
10178 && func_type != ARM_FT_INTERWORKED
10179 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
10180 {
10181 /* The return has already been handled
10182 by loading the LR into the PC. */
10183 really_return = 0;
10184 }
10185 }
10186
10187 if (really_return)
10188 {
10189 switch ((int) ARM_FUNC_TYPE (func_type))
10190 {
10191 case ARM_FT_ISR:
10192 case ARM_FT_FIQ:
10193 /* ??? This is wrong for unified assembly syntax. */
10194 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
10195 break;
10196
10197 case ARM_FT_INTERWORKED:
10198 sprintf (instr, "bx%s\t%%|lr", conditional);
10199 break;
10200
10201 case ARM_FT_EXCEPTION:
10202 /* ??? This is wrong for unified assembly syntax. */
10203 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
10204 break;
10205
10206 default:
10207 /* Use bx if it's available. */
10208 if (arm_arch5 || arm_arch4t)
10209 sprintf (instr, "bx%s\t%%|lr", conditional);
10210 else
10211 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
10212 break;
10213 }
10214
10215 output_asm_insn (instr, & operand);
10216 }
10217
10218 return "";
10219 }
10220
10221 /* Write the function name into the code section, directly preceding
10222 the function prologue.
10223
10224 Code will be output similar to this:
10225 t0
10226 .ascii "arm_poke_function_name", 0
10227 .align
10228 t1
10229 .word 0xff000000 + (t1 - t0)
10230 arm_poke_function_name
10231 mov ip, sp
10232 stmfd sp!, {fp, ip, lr, pc}
10233 sub fp, ip, #4
10234
10235 When performing a stack backtrace, code can inspect the value
10236 of 'pc' stored at 'fp' + 0. If the trace function then looks
10237 at location pc - 12 and the top 8 bits are set, then we know
10238 that there is a function name embedded immediately preceding this
10239    location, whose length is given by (pc[-3] & ~0xff000000).
10240
10241 We assume that pc is declared as a pointer to an unsigned long.
10242
10243 It is of no benefit to output the function name if we are assembling
10244 a leaf function. These function types will not contain a stack
10245 backtrace structure, therefore it is not possible to determine the
10246 function name. */
10247 void
10248 arm_poke_function_name (FILE *stream, const char *name)
10249 {
10250 unsigned long alignlength;
10251 unsigned long length;
10252 rtx x;
10253
10254 length = strlen (name) + 1;
10255 alignlength = ROUND_UP_WORD (length);
10256
10257 ASM_OUTPUT_ASCII (stream, name, length);
10258 ASM_OUTPUT_ALIGN (stream, 2);
10259 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
10260 assemble_aligned_integer (UNITS_PER_WORD, x);
10261 }
10262
10263 /* Place some comments into the assembler stream
10264 describing the current function. */
10265 static void
10266 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
10267 {
10268 unsigned long func_type;
10269
10270 if (TARGET_THUMB1)
10271 {
10272 thumb1_output_function_prologue (f, frame_size);
10273 return;
10274 }
10275
10276 /* Sanity check. */
10277 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
10278
10279 func_type = arm_current_func_type ();
10280
10281 switch ((int) ARM_FUNC_TYPE (func_type))
10282 {
10283 default:
10284 case ARM_FT_NORMAL:
10285 break;
10286 case ARM_FT_INTERWORKED:
10287 asm_fprintf (f, "\t%@ Function supports interworking.\n");
10288 break;
10289 case ARM_FT_ISR:
10290 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
10291 break;
10292 case ARM_FT_FIQ:
10293 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
10294 break;
10295 case ARM_FT_EXCEPTION:
10296 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
10297 break;
10298 }
10299
10300 if (IS_NAKED (func_type))
10301 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
10302
10303 if (IS_VOLATILE (func_type))
10304 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
10305
10306 if (IS_NESTED (func_type))
10307 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
10308 if (IS_STACKALIGN (func_type))
10309 asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");
10310
10311 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
10312 current_function_args_size,
10313 current_function_pretend_args_size, frame_size);
10314
10315 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
10316 frame_pointer_needed,
10317 cfun->machine->uses_anonymous_args);
10318
10319 if (cfun->machine->lr_save_eliminated)
10320 asm_fprintf (f, "\t%@ link register save eliminated.\n");
10321
10322 if (current_function_calls_eh_return)
10323 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
10324
10325 #ifdef AOF_ASSEMBLER
10326 if (flag_pic)
10327 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
10328 #endif
10329
10330 return_used_this_function = 0;
10331 }
10332
10333 const char *
10334 arm_output_epilogue (rtx sibling)
10335 {
10336 int reg;
10337 unsigned long saved_regs_mask;
10338 unsigned long func_type;
10339 /* Floats_offset is the offset from the "virtual" frame. In an APCS
10340 frame that is $fp + 4 for a non-variadic function. */
10341 int floats_offset = 0;
10342 rtx operands[3];
10343 FILE * f = asm_out_file;
10344 unsigned int lrm_count = 0;
10345 int really_return = (sibling == NULL);
10346 int start_reg;
10347 arm_stack_offsets *offsets;
10348
10349 /* If we have already generated the return instruction
10350 then it is futile to generate anything else. */
10351 if (use_return_insn (FALSE, sibling) && return_used_this_function)
10352 return "";
10353
10354 func_type = arm_current_func_type ();
10355
10356 if (IS_NAKED (func_type))
10357 /* Naked functions don't have epilogues. */
10358 return "";
10359
10360 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
10361 {
10362 rtx op;
10363
10364 /* A volatile function should never return. Call abort. */
10365 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
10366 assemble_external_libcall (op);
10367 output_asm_insn ("bl\t%a0", &op);
10368
10369 return "";
10370 }
10371
10372 /* If we are throwing an exception, then we really must be doing a
10373 return, so we can't tail-call. */
10374 gcc_assert (!current_function_calls_eh_return || really_return);
10375
10376 offsets = arm_get_frame_offsets ();
10377 saved_regs_mask = arm_compute_save_reg_mask ();
10378
10379 if (TARGET_IWMMXT)
10380 lrm_count = bit_count (saved_regs_mask);
10381
10382 floats_offset = offsets->saved_args;
10383 /* Compute how far away the floats will be. */
10384 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
10385 if (saved_regs_mask & (1 << reg))
10386 floats_offset += 4;
10387
10388 if (frame_pointer_needed && TARGET_ARM)
10389 {
10390 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
10391 int vfp_offset = offsets->frame;
10392
10393 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10394 {
10395 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10396 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10397 {
10398 floats_offset += 12;
10399 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
10400 reg, FP_REGNUM, floats_offset - vfp_offset);
10401 }
10402 }
10403 else
10404 {
10405 start_reg = LAST_FPA_REGNUM;
10406
10407 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10408 {
10409 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10410 {
10411 floats_offset += 12;
10412
10413 /* We can't unstack more than four registers at once. */
10414 if (start_reg - reg == 3)
10415 {
10416 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
10417 reg, FP_REGNUM, floats_offset - vfp_offset);
10418 start_reg = reg - 1;
10419 }
10420 }
10421 else
10422 {
10423 if (reg != start_reg)
10424 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
10425 reg + 1, start_reg - reg,
10426 FP_REGNUM, floats_offset - vfp_offset);
10427 start_reg = reg - 1;
10428 }
10429 }
10430
10431 /* Just in case the last register checked also needs unstacking. */
10432 if (reg != start_reg)
10433 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
10434 reg + 1, start_reg - reg,
10435 FP_REGNUM, floats_offset - vfp_offset);
10436 }
10437
10438 if (TARGET_HARD_FLOAT && TARGET_VFP)
10439 {
10440 int saved_size;
10441
10442 /* The fldmd insns do not have base+offset addressing
10443 modes, so we use IP to hold the address. */
10444 saved_size = arm_get_vfp_saved_size ();
10445
10446 if (saved_size > 0)
10447 {
10448 floats_offset += saved_size;
10449 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
10450 FP_REGNUM, floats_offset - vfp_offset);
10451 }
10452 start_reg = FIRST_VFP_REGNUM;
10453 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10454 {
10455 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
10456 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
10457 {
10458 if (start_reg != reg)
10459 vfp_output_fldmd (f, IP_REGNUM,
10460 (start_reg - FIRST_VFP_REGNUM) / 2,
10461 (reg - start_reg) / 2);
10462 start_reg = reg + 2;
10463 }
10464 }
10465 if (start_reg != reg)
10466 vfp_output_fldmd (f, IP_REGNUM,
10467 (start_reg - FIRST_VFP_REGNUM) / 2,
10468 (reg - start_reg) / 2);
10469 }
10470
10471 if (TARGET_IWMMXT)
10472 {
10473 /* The frame pointer is guaranteed to be non-double-word aligned.
10474 This is because it is set to (old_stack_pointer - 4) and the
10475 old_stack_pointer was double word aligned. Thus the offset to
10476 the iWMMXt registers to be loaded must also be non-double-word
10477 sized, so that the resultant address *is* double-word aligned.
10478 We can ignore floats_offset since that was already included in
10479 the live_regs_mask. */
10480 lrm_count += (lrm_count % 2 ? 2 : 1);
10481
10482 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10483 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10484 {
10485 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
10486 reg, FP_REGNUM, lrm_count * 4);
10487 lrm_count += 2;
10488 }
10489 }
10490
10491 /* saved_regs_mask should contain the IP, which at the time of stack
10492 frame generation actually contains the old stack pointer. So a
10493 quick way to unwind the stack is just pop the IP register directly
10494 into the stack pointer. */
10495 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
10496 saved_regs_mask &= ~ (1 << IP_REGNUM);
10497 saved_regs_mask |= (1 << SP_REGNUM);
10498
10499 /* There are two registers left in saved_regs_mask - LR and PC. We
10500 only need to restore the LR register (the return address), but to
10501 save time we can load it directly into the PC, unless we need a
10502 special function exit sequence, or we are not really returning. */
10503 if (really_return
10504 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10505 && !current_function_calls_eh_return)
10506 /* Delete the LR from the register mask, so that the LR on
10507 the stack is loaded into the PC in the register mask. */
10508 saved_regs_mask &= ~ (1 << LR_REGNUM);
10509 else
10510 saved_regs_mask &= ~ (1 << PC_REGNUM);
10511
10512 /* We must use SP as the base register, because SP is one of the
10513 registers being restored. If an interrupt or page fault
10514 happens in the ldm instruction, the SP might or might not
10515 have been restored. That would be bad, as then SP will no
10516 longer indicate the safe area of stack, and we can get stack
10517 corruption. Using SP as the base register means that it will
10518 be reset correctly to the original value, should an interrupt
10519 occur. If the stack pointer already points at the right
10520 place, then omit the subtraction. */
10521 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
10522 || current_function_calls_alloca)
10523 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
10524 4 * bit_count (saved_regs_mask));
10525 print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask, 0);
10526
10527 if (IS_INTERRUPT (func_type))
10528 /* Interrupt handlers will have pushed the
10529 IP onto the stack, so restore it now. */
10530 print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, 1 << IP_REGNUM, 0);
10531 }
10532 else
10533 {
10534 HOST_WIDE_INT amount;
10535 int rfe;
10536 /* Restore stack pointer if necessary. */
10537 if (frame_pointer_needed)
10538 {
10539 /* For Thumb-2 restore sp from the frame pointer.
10540 Operand restrictions mean we have to increment FP, then copy
10541 to SP. */
10542 amount = offsets->locals_base - offsets->saved_regs;
10543 operands[0] = hard_frame_pointer_rtx;
10544 }
10545 else
10546 {
10547 operands[0] = stack_pointer_rtx;
10548 amount = offsets->outgoing_args - offsets->saved_regs;
10549 }
10550
10551 if (amount)
10552 {
10553 operands[1] = operands[0];
10554 operands[2] = GEN_INT (amount);
10555 output_add_immediate (operands);
10556 }
10557 if (frame_pointer_needed)
10558 asm_fprintf (f, "\tmov\t%r, %r\n",
10559 SP_REGNUM, HARD_FRAME_POINTER_REGNUM);
10560
10561 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10562 {
10563 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10564 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10565 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
10566 reg, SP_REGNUM);
10567 }
10568 else
10569 {
10570 start_reg = FIRST_FPA_REGNUM;
10571
10572 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10573 {
10574 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10575 {
10576 if (reg - start_reg == 3)
10577 {
10578 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
10579 start_reg, SP_REGNUM);
10580 start_reg = reg + 1;
10581 }
10582 }
10583 else
10584 {
10585 if (reg != start_reg)
10586 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10587 start_reg, reg - start_reg,
10588 SP_REGNUM);
10589
10590 start_reg = reg + 1;
10591 }
10592 }
10593
10594 /* Just in case the last register checked also needs unstacking. */
10595 if (reg != start_reg)
10596 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10597 start_reg, reg - start_reg, SP_REGNUM);
10598 }
10599
10600 if (TARGET_HARD_FLOAT && TARGET_VFP)
10601 {
10602 start_reg = FIRST_VFP_REGNUM;
10603 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10604 {
10605 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
10606 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
10607 {
10608 if (start_reg != reg)
10609 vfp_output_fldmd (f, SP_REGNUM,
10610 (start_reg - FIRST_VFP_REGNUM) / 2,
10611 (reg - start_reg) / 2);
10612 start_reg = reg + 2;
10613 }
10614 }
10615 if (start_reg != reg)
10616 vfp_output_fldmd (f, SP_REGNUM,
10617 (start_reg - FIRST_VFP_REGNUM) / 2,
10618 (reg - start_reg) / 2);
10619 }
10620 if (TARGET_IWMMXT)
10621 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10622 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10623 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10624
10625 /* If we can, restore the LR into the PC. */
10626 if (ARM_FUNC_TYPE (func_type) != ARM_FT_INTERWORKED
10627 && (TARGET_ARM || ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
10628 && !IS_STACKALIGN (func_type)
10629 && really_return
10630 && current_function_pretend_args_size == 0
10631 && saved_regs_mask & (1 << LR_REGNUM)
10632 && !current_function_calls_eh_return)
10633 {
10634 saved_regs_mask &= ~ (1 << LR_REGNUM);
10635 saved_regs_mask |= (1 << PC_REGNUM);
10636 rfe = IS_INTERRUPT (func_type);
10637 }
10638 else
10639 rfe = 0;
10640
10641 /* Load the registers off the stack. If we only have one register
10642 to load use the LDR instruction - it is faster. For Thumb-2
10643 	 always use pop and the assembler will pick the best instruction.  */
10644 if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM)
10645 && !IS_INTERRUPT(func_type))
10646 {
10647 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10648 }
10649 else if (saved_regs_mask)
10650 {
10651 if (saved_regs_mask & (1 << SP_REGNUM))
10652 /* Note - write back to the stack register is not enabled
10653 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10654 in the list of registers and if we add writeback the
10655 instruction becomes UNPREDICTABLE. */
10656 print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask,
10657 rfe);
10658 else if (TARGET_ARM)
10659 print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask,
10660 rfe);
10661 else
10662 print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask, 0);
10663 }
10664
10665 if (current_function_pretend_args_size)
10666 {
10667 /* Unwind the pre-pushed regs. */
10668 operands[0] = operands[1] = stack_pointer_rtx;
10669 operands[2] = GEN_INT (current_function_pretend_args_size);
10670 output_add_immediate (operands);
10671 }
10672 }
10673
10674 /* We may have already restored PC directly from the stack. */
10675 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10676 return "";
10677
10678 /* Stack adjustment for exception handler. */
10679 if (current_function_calls_eh_return)
10680 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10681 ARM_EH_STACKADJ_REGNUM);
10682
10683 /* Generate the return instruction. */
10684 switch ((int) ARM_FUNC_TYPE (func_type))
10685 {
10686 case ARM_FT_ISR:
10687 case ARM_FT_FIQ:
10688 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10689 break;
10690
10691 case ARM_FT_EXCEPTION:
10692 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10693 break;
10694
10695 case ARM_FT_INTERWORKED:
10696 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10697 break;
10698
10699 default:
10700 if (IS_STACKALIGN (func_type))
10701 {
10702 /* See comment in arm_expand_prologue. */
10703 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, 0);
10704 }
10705 if (arm_arch5 || arm_arch4t)
10706 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10707 else
10708 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10709 break;
10710 }
10711
10712 return "";
10713 }
10714
10715 static void
10716 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10717 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10718 {
10719 arm_stack_offsets *offsets;
10720
10721 if (TARGET_THUMB1)
10722 {
10723 int regno;
10724
10725 /* Emit any call-via-reg trampolines that are needed for v4t support
10726 of call_reg and call_value_reg type insns. */
10727 for (regno = 0; regno < LR_REGNUM; regno++)
10728 {
10729 rtx label = cfun->machine->call_via[regno];
10730
10731 if (label != NULL)
10732 {
10733 switch_to_section (function_section (current_function_decl));
10734 targetm.asm_out.internal_label (asm_out_file, "L",
10735 CODE_LABEL_NUMBER (label));
10736 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10737 }
10738 }
10739
10740 /* ??? Probably not safe to set this here, since it assumes that a
10741 function will be emitted as assembly immediately after we generate
10742 RTL for it. This does not happen for inline functions. */
10743 return_used_this_function = 0;
10744 }
10745 else /* TARGET_32BIT */
10746 {
10747 /* We need to take into account any stack-frame rounding. */
10748 offsets = arm_get_frame_offsets ();
10749
10750 gcc_assert (!use_return_insn (FALSE, NULL)
10751 || !return_used_this_function
10752 || offsets->saved_regs == offsets->outgoing_args
10753 || frame_pointer_needed);
10754
10755 /* Reset the ARM-specific per-function variables. */
10756 after_arm_reorg = 0;
10757 }
10758 }
10759
10760 /* Generate and emit an insn that we will recognize as a push_multi.
10761 Unfortunately, since this insn does not reflect very well the actual
10762 semantics of the operation, we need to annotate the insn for the benefit
10763 of DWARF2 frame unwind information. */
10764 static rtx
10765 emit_multi_reg_push (unsigned long mask)
10766 {
10767 int num_regs = 0;
10768 int num_dwarf_regs;
10769 int i, j;
10770 rtx par;
10771 rtx dwarf;
10772 int dwarf_par_index;
10773 rtx tmp, reg;
10774
10775 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10776 if (mask & (1 << i))
10777 num_regs++;
10778
10779 gcc_assert (num_regs && num_regs <= 16);
10780
10781 /* We don't record the PC in the dwarf frame information. */
10782 num_dwarf_regs = num_regs;
10783 if (mask & (1 << PC_REGNUM))
10784 num_dwarf_regs--;
10785
10786 /* For the body of the insn we are going to generate an UNSPEC in
10787 parallel with several USEs. This allows the insn to be recognized
10788 by the push_multi pattern in the arm.md file. The insn looks
10789 something like this:
10790
10791 (parallel [
10792 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10793 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10794 (use (reg:SI 11 fp))
10795 (use (reg:SI 12 ip))
10796 (use (reg:SI 14 lr))
10797 (use (reg:SI 15 pc))
10798 ])
10799
10800 For the frame note however, we try to be more explicit and actually
10801 show each register being stored into the stack frame, plus a (single)
10802 decrement of the stack pointer. We do it this way in order to be
10803 friendly to the stack unwinding code, which only wants to see a single
10804 stack decrement per instruction. The RTL we generate for the note looks
10805 something like this:
10806
10807 (sequence [
10808 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10809 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10810 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10811 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10812 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10813 ])
10814
10815 This sequence is used both by the code to support stack unwinding for
10816 exceptions handlers and the code to generate dwarf2 frame debugging. */
10817
10818 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10819 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10820 dwarf_par_index = 1;
10821
10822 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10823 {
10824 if (mask & (1 << i))
10825 {
10826 reg = gen_rtx_REG (SImode, i);
10827
10828 XVECEXP (par, 0, 0)
10829 = gen_rtx_SET (VOIDmode,
10830 gen_frame_mem (BLKmode,
10831 gen_rtx_PRE_DEC (BLKmode,
10832 stack_pointer_rtx)),
10833 gen_rtx_UNSPEC (BLKmode,
10834 gen_rtvec (1, reg),
10835 UNSPEC_PUSH_MULT));
10836
10837 if (i != PC_REGNUM)
10838 {
10839 tmp = gen_rtx_SET (VOIDmode,
10840 gen_frame_mem (SImode, stack_pointer_rtx),
10841 reg);
10842 RTX_FRAME_RELATED_P (tmp) = 1;
10843 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10844 dwarf_par_index++;
10845 }
10846
10847 break;
10848 }
10849 }
10850
10851 for (j = 1, i++; j < num_regs; i++)
10852 {
10853 if (mask & (1 << i))
10854 {
10855 reg = gen_rtx_REG (SImode, i);
10856
10857 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10858
10859 if (i != PC_REGNUM)
10860 {
10861 tmp
10862 = gen_rtx_SET (VOIDmode,
10863 gen_frame_mem (SImode,
10864 plus_constant (stack_pointer_rtx,
10865 4 * j)),
10866 reg);
10867 RTX_FRAME_RELATED_P (tmp) = 1;
10868 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10869 }
10870
10871 j++;
10872 }
10873 }
10874
10875 par = emit_insn (par);
10876
10877 tmp = gen_rtx_SET (VOIDmode,
10878 stack_pointer_rtx,
10879 plus_constant (stack_pointer_rtx, -4 * num_regs));
10880 RTX_FRAME_RELATED_P (tmp) = 1;
10881 XVECEXP (dwarf, 0, 0) = tmp;
10882
10883 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10884 REG_NOTES (par));
10885 return par;
10886 }
10887
10888 /* Calculate the size of the return value that is passed in registers. */
10889 static int
10890 arm_size_return_regs (void)
10891 {
10892 enum machine_mode mode;
10893
10894 if (current_function_return_rtx != 0)
10895 mode = GET_MODE (current_function_return_rtx);
10896 else
10897 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10898
10899 return GET_MODE_SIZE (mode);
10900 }
10901
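/* Emit an insn pushing COUNT FPA registers, starting at BASE_REG, onto
   the stack with a single SFM store-multiple.  As with
   emit_multi_reg_push above, the insn itself is an UNSPEC_PUSH_MULT
   parallel, and a REG_FRAME_RELATED_EXPR note spelling out the
   individual 12-byte stores and the stack decrement is attached for
   the benefit of the DWARF unwinder.  Returns the emitted insn.  */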
10902 static rtx
10903 emit_sfm (int base_reg, int count)
10904 {
10905 rtx par;
10906 rtx dwarf;
10907 rtx tmp, reg;
10908 int i;
10909
10910 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10911 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10912
10913 reg = gen_rtx_REG (XFmode, base_reg++);
10914
10915 XVECEXP (par, 0, 0)
10916 = gen_rtx_SET (VOIDmode,
10917 gen_frame_mem (BLKmode,
10918 gen_rtx_PRE_DEC (BLKmode,
10919 stack_pointer_rtx)),
10920 gen_rtx_UNSPEC (BLKmode,
10921 gen_rtvec (1, reg),
10922 UNSPEC_PUSH_MULT));
10923 tmp = gen_rtx_SET (VOIDmode,
10924 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10925 RTX_FRAME_RELATED_P (tmp) = 1;
10926 XVECEXP (dwarf, 0, 1) = tmp;
10927
10928 for (i = 1; i < count; i++)
10929 {
10930 reg = gen_rtx_REG (XFmode, base_reg++);
10931 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10932
10933 tmp = gen_rtx_SET (VOIDmode,
10934 gen_frame_mem (XFmode,
10935 plus_constant (stack_pointer_rtx,
10936 i * 12)),
10937 reg);
10938 RTX_FRAME_RELATED_P (tmp) = 1;
10939 XVECEXP (dwarf, 0, i + 1) = tmp;
10940 }
10941
10942 tmp = gen_rtx_SET (VOIDmode,
10943 stack_pointer_rtx,
10944 plus_constant (stack_pointer_rtx, -12 * count));
10945
10946 RTX_FRAME_RELATED_P (tmp) = 1;
10947 XVECEXP (dwarf, 0, 0) = tmp;
10948
10949 par = emit_insn (par);
10950 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10951 REG_NOTES (par));
10952 return par;
10953 }
10954
10955
10956 /* Return true if the current function needs to save/restore LR. */
10957
10958 static bool
10959 thumb_force_lr_save (void)
10960 {
10961 return !cfun->machine->lr_save_eliminated
10962 && (!leaf_function_p ()
10963 || thumb_far_jump_used_p ()
10964 || df_regs_ever_live_p (LR_REGNUM));
10965 }
10966
10967
10968 /* Compute the distance from register FROM to register TO.
10969 These can be the arg pointer (26), the soft frame pointer (25),
10970 the stack pointer (13) or the hard frame pointer (11).
10971 In thumb mode r7 is used as the soft frame pointer, if needed.
10972 Typical stack layout looks like this:
10973
10974 old stack pointer -> | |
10975 ----
10976 | | \
10977 | | saved arguments for
10978 | | vararg functions
10979 | | /
10980 --
10981 hard FP & arg pointer -> | | \
10982 | | stack
10983 | | frame
10984 | | /
10985 --
10986 | | \
10987 | | call saved
10988 | | registers
10989 soft frame pointer -> | | /
10990 --
10991 | | \
10992 | | local
10993 | | variables
10994 locals base pointer -> | | /
10995 --
10996 | | \
10997 | | outgoing
10998 | | arguments
10999 current stack pointer -> | | /
11000 --
11001
11002 For a given function some or all of these stack components
11003 may not be needed, giving rise to the possibility of
11004 eliminating some of the registers.
11005
11006 The values returned by this function must reflect the behavior
11007 of arm_expand_prologue() and arm_compute_save_reg_mask().
11008
11009 The sign of the number returned reflects the direction of stack
11010 growth, so the values are positive for all eliminations except
11011 from the soft frame pointer to the hard frame pointer.
11012
11013 SFP may point just inside the local variables block to ensure correct
11014 alignment. */
11015
11016
11017 /* Calculate stack offsets. These are used to calculate register elimination
11018 offsets and in prologue/epilogue code. */
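/* A worked example of the layout computed below (all numbers are
   hypothetical): a 32-bit, non-leaf ARM function with no pretend args,
   no frame pointer, no coprocessor registers to save, a save mask of
   { r4, r5, lr } (12 bytes), 20 bytes of locals and 8 bytes of outgoing
   arguments, with doubleword alignment in force and no interworking
   slot, gets

       saved_args    = 0
       frame         = 0
       saved_regs    = 12
       soft_frame    = 16     (12 rounded up to a multiple of 8)
       locals_base   = 36
       outgoing_args = 48     (36 + 8, rounded up to a multiple of 8)  */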
11019
11020 static arm_stack_offsets *
11021 arm_get_frame_offsets (void)
11022 {
11023 struct arm_stack_offsets *offsets;
11024 unsigned long func_type;
11025 int leaf;
11026 int saved;
11027 HOST_WIDE_INT frame_size;
11028
11029 offsets = &cfun->machine->stack_offsets;
11030
11031 /* We need to know if we are a leaf function. Unfortunately, it
11032 is possible to be called after start_sequence has been called,
11033 which causes get_insns to return the insns for the sequence,
11034 not the function, which will cause leaf_function_p to return
11035 the incorrect result.
11036
11037      However, we only need to know about leaf functions once reload has
11038      completed, and the frame size cannot be changed after that time, so
11039      we can safely use the cached value.  */
11040
11041 if (reload_completed)
11042 return offsets;
11043
11044   /* Initially this is the size of the local variables.  It will be translated
11045 into an offset once we have determined the size of preceding data. */
11046 frame_size = ROUND_UP_WORD (get_frame_size ());
11047
11048 leaf = leaf_function_p ();
11049
11050 /* Space for variadic functions. */
11051 offsets->saved_args = current_function_pretend_args_size;
11052
11053 /* In Thumb mode this is incorrect, but never used. */
11054 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
11055
11056 if (TARGET_32BIT)
11057 {
11058 unsigned int regno;
11059
11060 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
11061
11062 /* We know that SP will be doubleword aligned on entry, and we must
11063 preserve that condition at any subroutine call. We also require the
11064 soft frame pointer to be doubleword aligned. */
11065
11066 if (TARGET_REALLY_IWMMXT)
11067 {
11068 /* Check for the call-saved iWMMXt registers. */
11069 for (regno = FIRST_IWMMXT_REGNUM;
11070 regno <= LAST_IWMMXT_REGNUM;
11071 regno++)
11072 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
11073 saved += 8;
11074 }
11075
11076 func_type = arm_current_func_type ();
11077 if (! IS_VOLATILE (func_type))
11078 {
11079 /* Space for saved FPA registers. */
11080 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
11081 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
11082 saved += 12;
11083
11084 /* Space for saved VFP registers. */
11085 if (TARGET_HARD_FLOAT && TARGET_VFP)
11086 saved += arm_get_vfp_saved_size ();
11087 }
11088 }
11089 else /* TARGET_THUMB1 */
11090 {
11091 saved = bit_count (thumb1_compute_save_reg_mask ()) * 4;
11092 if (TARGET_BACKTRACE)
11093 saved += 16;
11094 }
11095
11096 /* Saved registers include the stack frame. */
11097 offsets->saved_regs = offsets->saved_args + saved;
11098 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
11099 /* A leaf function does not need any stack alignment if it has nothing
11100 on the stack. */
11101 if (leaf && frame_size == 0)
11102 {
11103 offsets->outgoing_args = offsets->soft_frame;
11104 return offsets;
11105 }
11106
11107 /* Ensure SFP has the correct alignment. */
11108 if (ARM_DOUBLEWORD_ALIGN
11109 && (offsets->soft_frame & 7))
11110 offsets->soft_frame += 4;
11111
11112 offsets->locals_base = offsets->soft_frame + frame_size;
11113 offsets->outgoing_args = (offsets->locals_base
11114 + current_function_outgoing_args_size);
11115
11116 if (ARM_DOUBLEWORD_ALIGN)
11117 {
11118 /* Ensure SP remains doubleword aligned. */
11119 if (offsets->outgoing_args & 7)
11120 offsets->outgoing_args += 4;
11121 gcc_assert (!(offsets->outgoing_args & 7));
11122 }
11123
11124 return offsets;
11125 }
11126
11127
11128 /* Calculate the relative offsets for the different stack pointers. Positive
11129 offsets are in the direction of stack growth. */
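/* Continuing the hypothetical layout sketched above arm_get_frame_offsets:
   eliminating ARG_POINTER_REGNUM to STACK_POINTER_REGNUM would yield
   48 - (0 + 4) = 44, and eliminating FRAME_POINTER_REGNUM (the soft
   frame pointer) to STACK_POINTER_REGNUM would yield 48 - 16 = 32.  */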
11130
11131 HOST_WIDE_INT
11132 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
11133 {
11134 arm_stack_offsets *offsets;
11135
11136 offsets = arm_get_frame_offsets ();
11137
11138 /* OK, now we have enough information to compute the distances.
11139 There must be an entry in these switch tables for each pair
11140 of registers in ELIMINABLE_REGS, even if some of the entries
11141 seem to be redundant or useless. */
11142 switch (from)
11143 {
11144 case ARG_POINTER_REGNUM:
11145 switch (to)
11146 {
11147 case THUMB_HARD_FRAME_POINTER_REGNUM:
11148 return 0;
11149
11150 case FRAME_POINTER_REGNUM:
11151 /* This is the reverse of the soft frame pointer
11152 to hard frame pointer elimination below. */
11153 return offsets->soft_frame - offsets->saved_args;
11154
11155 case ARM_HARD_FRAME_POINTER_REGNUM:
11156 /* If there is no stack frame then the hard
11157 frame pointer and the arg pointer coincide. */
11158 if (offsets->frame == offsets->saved_regs)
11159 return 0;
11160 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
11161 return (frame_pointer_needed
11162 && cfun->static_chain_decl != NULL
11163 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
11164
11165 case STACK_POINTER_REGNUM:
11166 /* If nothing has been pushed on the stack at all
11167 then this will return -4. This *is* correct! */
11168 return offsets->outgoing_args - (offsets->saved_args + 4);
11169
11170 default:
11171 gcc_unreachable ();
11172 }
11173 gcc_unreachable ();
11174
11175 case FRAME_POINTER_REGNUM:
11176 switch (to)
11177 {
11178 case THUMB_HARD_FRAME_POINTER_REGNUM:
11179 return 0;
11180
11181 case ARM_HARD_FRAME_POINTER_REGNUM:
11182 /* The hard frame pointer points to the top entry in the
11183 stack frame. The soft frame pointer to the bottom entry
11184 in the stack frame. If there is no stack frame at all,
11185 then they are identical. */
11186
11187 return offsets->frame - offsets->soft_frame;
11188
11189 case STACK_POINTER_REGNUM:
11190 return offsets->outgoing_args - offsets->soft_frame;
11191
11192 default:
11193 gcc_unreachable ();
11194 }
11195 gcc_unreachable ();
11196
11197 default:
11198 /* You cannot eliminate from the stack pointer.
11199 In theory you could eliminate from the hard frame
11200 pointer to the stack pointer, but this will never
11201 happen, since if a stack frame is not needed the
11202 hard frame pointer will never be used. */
11203 gcc_unreachable ();
11204 }
11205 }
11206
11207
11208 /* Emit RTL to save coprocessor registers on function entry. Returns the
11209 number of bytes pushed. */
11210
11211 static int
11212 arm_save_coproc_regs(void)
11213 {
11214 int saved_size = 0;
11215 unsigned reg;
11216 unsigned start_reg;
11217 rtx insn;
11218
11219 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
11220 if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
11221 {
11222 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
11223 insn = gen_rtx_MEM (V2SImode, insn);
11224 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
11225 RTX_FRAME_RELATED_P (insn) = 1;
11226 saved_size += 8;
11227 }
11228
11229 /* Save any floating point call-saved registers used by this
11230 function. */
11231 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
11232 {
11233 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
11234 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
11235 {
11236 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
11237 insn = gen_rtx_MEM (XFmode, insn);
11238 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
11239 RTX_FRAME_RELATED_P (insn) = 1;
11240 saved_size += 12;
11241 }
11242 }
11243 else
11244 {
11245 start_reg = LAST_FPA_REGNUM;
11246
11247 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
11248 {
11249 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
11250 {
11251 if (start_reg - reg == 3)
11252 {
11253 insn = emit_sfm (reg, 4);
11254 RTX_FRAME_RELATED_P (insn) = 1;
11255 saved_size += 48;
11256 start_reg = reg - 1;
11257 }
11258 }
11259 else
11260 {
11261 if (start_reg != reg)
11262 {
11263 insn = emit_sfm (reg + 1, start_reg - reg);
11264 RTX_FRAME_RELATED_P (insn) = 1;
11265 saved_size += (start_reg - reg) * 12;
11266 }
11267 start_reg = reg - 1;
11268 }
11269 }
11270
11271 if (start_reg != reg)
11272 {
11273 insn = emit_sfm (reg + 1, start_reg - reg);
11274 saved_size += (start_reg - reg) * 12;
11275 RTX_FRAME_RELATED_P (insn) = 1;
11276 }
11277 }
11278 if (TARGET_HARD_FLOAT && TARGET_VFP)
11279 {
11280 start_reg = FIRST_VFP_REGNUM;
11281
11282 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
11283 {
11284 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
11285 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
11286 {
11287 if (start_reg != reg)
11288 saved_size += vfp_emit_fstmd (start_reg,
11289 (reg - start_reg) / 2);
11290 start_reg = reg + 2;
11291 }
11292 }
11293 if (start_reg != reg)
11294 saved_size += vfp_emit_fstmd (start_reg,
11295 (reg - start_reg) / 2);
11296 }
11297 return saved_size;
11298 }
11299
11300
11301 /* Set the Thumb frame pointer from the stack pointer. */
11302
11303 static void
11304 thumb_set_frame_pointer (arm_stack_offsets *offsets)
11305 {
11306 HOST_WIDE_INT amount;
11307 rtx insn, dwarf;
11308
11309 amount = offsets->outgoing_args - offsets->locals_base;
11310 if (amount < 1024)
11311 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
11312 stack_pointer_rtx, GEN_INT (amount)));
11313 else
11314 {
11315 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
11316 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
11317 hard_frame_pointer_rtx,
11318 stack_pointer_rtx));
11319 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
11320 plus_constant (stack_pointer_rtx, amount));
11321 RTX_FRAME_RELATED_P (dwarf) = 1;
11322 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
11323 REG_NOTES (insn));
11324 }
11325
11326 RTX_FRAME_RELATED_P (insn) = 1;
11327 }
11328
11329 /* Generate the prologue instructions for entry into an ARM or Thumb-2
11330 function. */
11331 void
11332 arm_expand_prologue (void)
11333 {
11334 rtx amount;
11335 rtx insn;
11336 rtx ip_rtx;
11337 unsigned long live_regs_mask;
11338 unsigned long func_type;
11339 int fp_offset = 0;
11340 int saved_pretend_args = 0;
11341 int saved_regs = 0;
11342 unsigned HOST_WIDE_INT args_to_push;
11343 arm_stack_offsets *offsets;
11344
11345 func_type = arm_current_func_type ();
11346
11347 /* Naked functions don't have prologues. */
11348 if (IS_NAKED (func_type))
11349 return;
11350
11351 /* Make a copy of current_function_pretend_args_size as we may need to modify it locally. */
11352 args_to_push = current_function_pretend_args_size;
11353
11354 /* Compute which registers we will have to save onto the stack. */
11355 live_regs_mask = arm_compute_save_reg_mask ();
11356
11357 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
11358
11359 if (IS_STACKALIGN (func_type))
11360 {
11361 rtx dwarf;
11362 rtx r0;
11363 rtx r1;
11364 /* Handle a word-aligned stack pointer. We generate the following:
11365
11366 mov r0, sp
11367 bic r1, r0, #7
11368 mov sp, r1
11369 <save and restore r0 in normal prologue/epilogue>
11370 mov sp, r0
11371 bx lr
11372
11373 The unwinder doesn't need to know about the stack realignment.
11374 Just tell it we saved SP in r0. */
11375 gcc_assert (TARGET_THUMB2 && !arm_arch_notm && args_to_push == 0);
11376
11377 r0 = gen_rtx_REG (SImode, 0);
11378 r1 = gen_rtx_REG (SImode, 1);
11379 dwarf = gen_rtx_UNSPEC (SImode, NULL_RTVEC, UNSPEC_STACK_ALIGN);
11380 dwarf = gen_rtx_SET (VOIDmode, r0, dwarf);
11381 insn = gen_movsi (r0, stack_pointer_rtx);
11382 RTX_FRAME_RELATED_P (insn) = 1;
11383 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
11384 dwarf, REG_NOTES (insn));
11385 emit_insn (insn);
11386 emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7)));
11387 emit_insn (gen_movsi (stack_pointer_rtx, r1));
11388 }
11389
11390 if (frame_pointer_needed && TARGET_ARM)
11391 {
11392 if (IS_INTERRUPT (func_type))
11393 {
11394 /* Interrupt functions must not corrupt any registers.
11395 Creating a frame pointer, however, corrupts the IP
11396 register, so we must push it first. */
11397 insn = emit_multi_reg_push (1 << IP_REGNUM);
11398
11399 /* Do not set RTX_FRAME_RELATED_P on this insn.
11400 The dwarf stack unwinding code only wants to see one
11401 stack decrement per function, and this is not it. If
11402 this instruction is labeled as being part of the frame
11403 creation sequence then dwarf2out_frame_debug_expr will
11404 die when it encounters the assignment of IP to FP
11405 later on, since the use of SP here establishes SP as
11406 the CFA register and not IP.
11407
11408 Anyway this instruction is not really part of the stack
11409 frame creation although it is part of the prologue. */
11410 }
11411 else if (IS_NESTED (func_type))
11412 {
11413 /* The static chain register is the same as the IP register,
11414 which is used as a scratch register during stack frame creation.
11415 To get around this we need to find somewhere to store IP
11416 whilst the frame is being created. We try the following
11417 places in order:
11418
11419 1. The last argument register.
11420 2. A slot on the stack above the frame. (This only
11421 works if the function is not a varargs function).
11422 3. Register r3, after pushing the argument registers
11423 onto the stack.
11424
11425 Note - we only need to tell the dwarf2 backend about the SP
11426 adjustment in the second variant; the static chain register
11427 doesn't need to be unwound, as it doesn't contain a value
11428 inherited from the caller. */
11429
11430 if (df_regs_ever_live_p (3) == false)
11431 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
11432 else if (args_to_push == 0)
11433 {
11434 rtx dwarf;
11435
11436 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
11437 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
11438 fp_offset = 4;
11439
11440 /* Just tell the dwarf backend that we adjusted SP. */
11441 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
11442 plus_constant (stack_pointer_rtx,
11443 -fp_offset));
11444 RTX_FRAME_RELATED_P (insn) = 1;
11445 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
11446 dwarf, REG_NOTES (insn));
11447 }
11448 else
11449 {
11450 /* Store the args on the stack. */
11451 if (cfun->machine->uses_anonymous_args)
11452 insn = emit_multi_reg_push
11453 ((0xf0 >> (args_to_push / 4)) & 0xf);
11454 else
11455 insn = emit_insn
11456 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11457 GEN_INT (- args_to_push)));
11458
11459 RTX_FRAME_RELATED_P (insn) = 1;
11460
11461 saved_pretend_args = 1;
11462 fp_offset = args_to_push;
11463 args_to_push = 0;
11464
11465 /* Now reuse r3 to preserve IP. */
11466 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
11467 }
11468 }
11469
11470 insn = emit_set_insn (ip_rtx,
11471 plus_constant (stack_pointer_rtx, fp_offset));
11472 RTX_FRAME_RELATED_P (insn) = 1;
11473 }
11474
11475 if (args_to_push)
11476 {
11477 /* Push the argument registers, or reserve space for them. */
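/* The mask expression (0xf0 >> (args_to_push / 4)) & 0xf selects the
   highest argument registers: for example, args_to_push == 8 gives
   0xc, pushing {r2, r3}, while args_to_push == 16 gives 0xf, pushing
   {r0-r3}.  */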
11478 if (cfun->machine->uses_anonymous_args)
11479 insn = emit_multi_reg_push
11480 ((0xf0 >> (args_to_push / 4)) & 0xf);
11481 else
11482 insn = emit_insn
11483 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11484 GEN_INT (- args_to_push)));
11485 RTX_FRAME_RELATED_P (insn) = 1;
11486 }
11487
11488 /* If this is an interrupt service routine, and the link register
11489 is going to be pushed, and we are not creating a stack frame
11490 (which would involve an extra push of IP and a pop in the epilogue),
11491 then subtracting four from LR now will mean that the function return
11492 can be done with a single instruction. */
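/* (The return can then be, e.g., a single ldmfd sp!, {..., pc}^ rather
   than popping LR and executing a separate subs pc, lr, #4 to undo the
   IRQ return-address offset.)  */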
11493 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
11494 && (live_regs_mask & (1 << LR_REGNUM)) != 0
11495 && ! frame_pointer_needed
11496 && TARGET_ARM)
11497 {
11498 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
11499
11500 emit_set_insn (lr, plus_constant (lr, -4));
11501 }
11502
11503 if (live_regs_mask)
11504 {
11505 insn = emit_multi_reg_push (live_regs_mask);
11506 saved_regs += bit_count (live_regs_mask) * 4;
11507 RTX_FRAME_RELATED_P (insn) = 1;
11508 }
11509
11510 if (! IS_VOLATILE (func_type))
11511 saved_regs += arm_save_coproc_regs ();
11512
11513 if (frame_pointer_needed && TARGET_ARM)
11514 {
11515 /* Create the new frame pointer. */
11516 {
11517 insn = GEN_INT (-(4 + args_to_push + fp_offset));
11518 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
11519 RTX_FRAME_RELATED_P (insn) = 1;
11520
11521 if (IS_NESTED (func_type))
11522 {
11523 /* Recover the static chain register. */
11524 if (!df_regs_ever_live_p (3)
11525 || saved_pretend_args)
11526 insn = gen_rtx_REG (SImode, 3);
11527 else /* if (current_function_pretend_args_size == 0) */
11528 {
11529 insn = plus_constant (hard_frame_pointer_rtx, 4);
11530 insn = gen_frame_mem (SImode, insn);
11531 }
11532 emit_set_insn (ip_rtx, insn);
11533 /* Add a USE to stop propagate_one_insn() from barfing. */
11534 emit_insn (gen_prologue_use (ip_rtx));
11535 }
11536 }
11537 }
11538
11539 offsets = arm_get_frame_offsets ();
11540 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
11541 {
11542 /* This add can produce multiple insns for a large constant, so we
11543 need to get tricky. */
11544 rtx last = get_last_insn ();
11545
11546 amount = GEN_INT (offsets->saved_args + saved_regs
11547 - offsets->outgoing_args);
11548
11549 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11550 amount));
11551 do
11552 {
11553 last = last ? NEXT_INSN (last) : get_insns ();
11554 RTX_FRAME_RELATED_P (last) = 1;
11555 }
11556 while (last != insn);
11557
11558 /* If the frame pointer is needed, emit a special barrier that
11559 will prevent the scheduler from moving stores to the frame
11560 before the stack adjustment. */
11561 if (frame_pointer_needed)
11562 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
11563 hard_frame_pointer_rtx));
11564 }
11565
11566
11567 if (frame_pointer_needed && TARGET_THUMB2)
11568 thumb_set_frame_pointer (offsets);
11569
11570 if (flag_pic && arm_pic_register != INVALID_REGNUM)
11571 {
11572 unsigned long mask;
11573
11574 mask = live_regs_mask;
11575 mask &= THUMB2_WORK_REGS;
11576 if (!IS_NESTED (func_type))
11577 mask |= (1 << IP_REGNUM);
11578 arm_load_pic_register (mask);
11579 }
11580
11581 /* If we are profiling, make sure no instructions are scheduled before
11582 the call to mcount. Similarly if the user has requested no
11583 scheduling in the prolog. Similarly if we want non-call exceptions
11584 using the EABI unwinder, to prevent faulting instructions from being
11585 swapped with a stack adjustment. */
11586 if (current_function_profile || !TARGET_SCHED_PROLOG
11587 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
11588 emit_insn (gen_blockage ());
11589
11590 /* If the link register is being kept alive, with the return address in it,
11591 then make sure that it does not get reused by the ce2 pass. */
11592 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
11593 cfun->machine->lr_save_eliminated = 1;
11594 }
11595 \f
11596 /* Print condition code to STREAM. Helper function for arm_print_operand. */
11597 static void
11598 arm_print_condition (FILE *stream)
11599 {
11600 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
11601 {
11602 /* Branch conversion is not implemented for Thumb-2. */
11603 if (TARGET_THUMB)
11604 {
11605 output_operand_lossage ("predicated Thumb instruction");
11606 return;
11607 }
11608 if (current_insn_predicate != NULL)
11609 {
11610 output_operand_lossage
11611 ("predicated instruction in conditional sequence");
11612 return;
11613 }
11614
11615 fputs (arm_condition_codes[arm_current_cc], stream);
11616 }
11617 else if (current_insn_predicate)
11618 {
11619 enum arm_cond_code code;
11620
11621 if (TARGET_THUMB1)
11622 {
11623 output_operand_lossage ("predicated Thumb instruction");
11624 return;
11625 }
11626
11627 code = get_arm_condition_code (current_insn_predicate);
11628 fputs (arm_condition_codes[code], stream);
11629 }
11630 }
11631
11632
11633 /* If CODE is 'd', then the X is a condition operand and the instruction
11634 should only be executed if the condition is true.
11635 if CODE is 'D', then the X is a condition operand and the instruction
11636 should only be executed if the condition is false: however, if the mode
11637 of the comparison is CCFPEmode, then always execute the instruction -- we
11638 do this because in these circumstances !GE does not necessarily imply LT;
11639 in these cases the instruction pattern will take care to make sure that
11640 an instruction containing %d will follow, thereby undoing the effects of
11641 doing this instruction unconditionally.
11642 If CODE is 'N' then X is a floating point operand that must be negated
11643 before output.
11644 If CODE is 'B' then output a bitwise inverted value of X (a const int).
11645 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
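/* For example, "%M" applied to a DImode value held in r4 prints
   "{r4-r5}", the register list form used by ldm/stm.  */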
11646 void
11647 arm_print_operand (FILE *stream, rtx x, int code)
11648 {
11649 switch (code)
11650 {
11651 case '@':
11652 fputs (ASM_COMMENT_START, stream);
11653 return;
11654
11655 case '_':
11656 fputs (user_label_prefix, stream);
11657 return;
11658
11659 case '|':
11660 fputs (REGISTER_PREFIX, stream);
11661 return;
11662
11663 case '?':
11664 arm_print_condition (stream);
11665 return;
11666
11667 case '(':
11668 /* Nothing in unified syntax, otherwise the current condition code. */
11669 if (!TARGET_UNIFIED_ASM)
11670 arm_print_condition (stream);
11671 break;
11672
11673 case ')':
11674 /* The current condition code in unified syntax, otherwise nothing. */
11675 if (TARGET_UNIFIED_ASM)
11676 arm_print_condition (stream);
11677 break;
11678
11679 case '.':
11680 /* The current condition code for a condition code setting instruction.
11681 Preceded by 's' in unified syntax, otherwise followed by 's'. */
11682 if (TARGET_UNIFIED_ASM)
11683 {
11684 fputc ('s', stream);
11685 arm_print_condition (stream);
11686 }
11687 else
11688 {
11689 arm_print_condition (stream);
11690 fputc ('s', stream);
11691 }
11692 return;
11693
11694 case '!':
11695 /* If the instruction is conditionally executed then print
11696 the current condition code, otherwise print 's'. */
11697 gcc_assert (TARGET_THUMB2 && TARGET_UNIFIED_ASM);
11698 if (current_insn_predicate)
11699 arm_print_condition (stream);
11700 else
11701 fputc ('s', stream);
11702 break;
11703
11704 case 'N':
11705 {
11706 REAL_VALUE_TYPE r;
11707 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11708 r = REAL_VALUE_NEGATE (r);
11709 fprintf (stream, "%s", fp_const_from_val (&r));
11710 }
11711 return;
11712
11713 case 'B':
11714 if (GET_CODE (x) == CONST_INT)
11715 {
11716 HOST_WIDE_INT val;
11717 val = ARM_SIGN_EXTEND (~INTVAL (x));
11718 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
11719 }
11720 else
11721 {
11722 putc ('~', stream);
11723 output_addr_const (stream, x);
11724 }
11725 return;
11726
11727 case 'L':
11728 /* The low 16 bits of an immediate constant. */
11729 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL(x) & 0xffff);
11730 return;
11731
11732 case 'i':
11733 fprintf (stream, "%s", arithmetic_instr (x, 1));
11734 return;
11735
11736 /* Truncate Cirrus shift counts. */
11737 case 's':
11738 if (GET_CODE (x) == CONST_INT)
11739 {
11740 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11741 return;
11742 }
11743 arm_print_operand (stream, x, 0);
11744 return;
11745
11746 case 'I':
11747 fprintf (stream, "%s", arithmetic_instr (x, 0));
11748 return;
11749
11750 case 'S':
11751 {
11752 HOST_WIDE_INT val;
11753 const char *shift;
11754
11755 if (!shift_operator (x, SImode))
11756 {
11757 output_operand_lossage ("invalid shift operand");
11758 break;
11759 }
11760
11761 shift = shift_op (x, &val);
11762
11763 if (shift)
11764 {
11765 fprintf (stream, ", %s ", shift);
11766 if (val == -1)
11767 arm_print_operand (stream, XEXP (x, 1), 0);
11768 else
11769 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11770 }
11771 }
11772 return;
11773
11774 /* An explanation of the 'Q', 'R' and 'H' register operands:
11775
11776 In a pair of registers containing a DI or DF value the 'Q'
11777 operand returns the register number of the register containing
11778 the least significant part of the value. The 'R' operand returns
11779 the register number of the register containing the most
11780 significant part of the value.
11781
11782 The 'H' operand returns the higher of the two register numbers.
11783 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11784 same as the 'Q' operand, since the most significant part of the
11785 value is held in the lower number register. The reverse is true
11786 on systems where WORDS_BIG_ENDIAN is false.
11787
11788 The purpose of these operands is to distinguish between cases
11789 where the endian-ness of the values is important (for example
11790 when they are added together), and cases where the endian-ness
11791 is irrelevant, but the order of register operations is important.
11792 For example when loading a value from memory into a register
11793 pair, the endian-ness does not matter. Provided that the value
11794 from the lower memory address is put into the lower numbered
11795 register, and the value from the higher address is put into the
11796 higher numbered register, the load will work regardless of whether
11797 the value being loaded is big-wordian or little-wordian. The
11798 order of the two register loads can matter however, if the address
11799 of the memory location is actually held in one of the registers
11800 being overwritten by the load. */
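/* For example, with a DImode value held in the pair {r0, r1} on a
   little-endian target, 'Q' prints r0 (the least significant word),
   'R' prints r1 and 'H' prints r1; when WORDS_BIG_ENDIAN is true 'Q'
   and 'R' swap, while 'H' still prints r1.  */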
11801 case 'Q':
11802 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11803 {
11804 output_operand_lossage ("invalid operand for code '%c'", code);
11805 return;
11806 }
11807
11808 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11809 return;
11810
11811 case 'R':
11812 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11813 {
11814 output_operand_lossage ("invalid operand for code '%c'", code);
11815 return;
11816 }
11817
11818 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11819 return;
11820
11821 case 'H':
11822 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11823 {
11824 output_operand_lossage ("invalid operand for code '%c'", code);
11825 return;
11826 }
11827
11828 asm_fprintf (stream, "%r", REGNO (x) + 1);
11829 return;
11830
11831 case 'm':
11832 asm_fprintf (stream, "%r",
11833 GET_CODE (XEXP (x, 0)) == REG
11834 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11835 return;
11836
11837 case 'M':
11838 asm_fprintf (stream, "{%r-%r}",
11839 REGNO (x),
11840 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11841 return;
11842
11843 case 'd':
11844 /* CONST_TRUE_RTX means always -- that's the default. */
11845 if (x == const_true_rtx)
11846 return;
11847
11848 if (!COMPARISON_P (x))
11849 {
11850 output_operand_lossage ("invalid operand for code '%c'", code);
11851 return;
11852 }
11853
11854 fputs (arm_condition_codes[get_arm_condition_code (x)],
11855 stream);
11856 return;
11857
11858 case 'D':
11859 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11860 want to do that. */
11861 if (x == const_true_rtx)
11862 {
11863 output_operand_lossage ("instruction never executed");
11864 return;
11865 }
11866 if (!COMPARISON_P (x))
11867 {
11868 output_operand_lossage ("invalid operand for code '%c'", code);
11869 return;
11870 }
11871
11872 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11873 (get_arm_condition_code (x))],
11874 stream);
11875 return;
11876
11877 /* Cirrus registers can be accessed in a variety of ways:
11878 single floating point (f)
11879 double floating point (d)
11880 32-bit integer (fx)
11881 64-bit integer (dx). */
11882 case 'W': /* Cirrus register in F mode. */
11883 case 'X': /* Cirrus register in D mode. */
11884 case 'Y': /* Cirrus register in FX mode. */
11885 case 'Z': /* Cirrus register in DX mode. */
11886 gcc_assert (GET_CODE (x) == REG
11887 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11888
11889 fprintf (stream, "mv%s%s",
11890 code == 'W' ? "f"
11891 : code == 'X' ? "d"
11892 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11893
11894 return;
11895
11896 /* Print a Cirrus register using the access mode implied by its machine mode. */
11897 case 'V':
11898 {
11899 int mode = GET_MODE (x);
11900
11901 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11902 {
11903 output_operand_lossage ("invalid operand for code '%c'", code);
11904 return;
11905 }
11906
11907 fprintf (stream, "mv%s%s",
11908 mode == DFmode ? "d"
11909 : mode == SImode ? "fx"
11910 : mode == DImode ? "dx"
11911 : "f", reg_names[REGNO (x)] + 2);
11912
11913 return;
11914 }
11915
11916 case 'U':
11917 if (GET_CODE (x) != REG
11918 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11919 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11920 /* Bad value for wCG register number. */
11921 {
11922 output_operand_lossage ("invalid operand for code '%c'", code);
11923 return;
11924 }
11925
11926 else
11927 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11928 return;
11929
11930 /* Print an iWMMXt control register name. */
11931 case 'w':
11932 if (GET_CODE (x) != CONST_INT
11933 || INTVAL (x) < 0
11934 || INTVAL (x) >= 16)
11935 /* Bad value for wC register number. */
11936 {
11937 output_operand_lossage ("invalid operand for code '%c'", code);
11938 return;
11939 }
11940
11941 else
11942 {
11943 static const char * wc_reg_names [16] =
11944 {
11945 "wCID", "wCon", "wCSSF", "wCASF",
11946 "wC4", "wC5", "wC6", "wC7",
11947 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11948 "wC12", "wC13", "wC14", "wC15"
11949 };
11950
11951 fprintf (stream, wc_reg_names [INTVAL (x)]);
11952 }
11953 return;
11954
11955 /* Print a VFP double precision register name. */
11956 case 'P':
11957 {
11958 int mode = GET_MODE (x);
11959 int num;
11960
11961 if (mode != DImode && mode != DFmode)
11962 {
11963 output_operand_lossage ("invalid operand for code '%c'", code);
11964 return;
11965 }
11966
11967 if (GET_CODE (x) != REG
11968 || !IS_VFP_REGNUM (REGNO (x)))
11969 {
11970 output_operand_lossage ("invalid operand for code '%c'", code);
11971 return;
11972 }
11973
11974 num = REGNO(x) - FIRST_VFP_REGNUM;
11975 if (num & 1)
11976 {
11977 output_operand_lossage ("invalid operand for code '%c'", code);
11978 return;
11979 }
11980
11981 fprintf (stream, "d%d", num >> 1);
11982 }
11983 return;
11984
11985 default:
11986 if (x == 0)
11987 {
11988 output_operand_lossage ("missing operand");
11989 return;
11990 }
11991
11992 switch (GET_CODE (x))
11993 {
11994 case REG:
11995 asm_fprintf (stream, "%r", REGNO (x));
11996 break;
11997
11998 case MEM:
11999 output_memory_reference_mode = GET_MODE (x);
12000 output_address (XEXP (x, 0));
12001 break;
12002
12003 case CONST_DOUBLE:
12004 fprintf (stream, "#%s", fp_immediate_constant (x));
12005 break;
12006
12007 default:
12008 gcc_assert (GET_CODE (x) != NEG);
12009 fputc ('#', stream);
12010 output_addr_const (stream, x);
12011 break;
12012 }
12013 }
12014 }
12015 \f
12016 #ifndef AOF_ASSEMBLER
12017 /* Target hook for assembling integer objects. The ARM version needs to
12018 handle word-sized values specially. */
12019 static bool
12020 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
12021 {
12022 if (size == UNITS_PER_WORD && aligned_p)
12023 {
12024 fputs ("\t.word\t", asm_out_file);
12025 output_addr_const (asm_out_file, x);
12026
12027 /* Mark symbols as position independent. We only do this in the
12028 .text segment, not in the .data segment. */
12029 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
12030 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
12031 {
12032 /* See legitimize_pic_address for an explanation of the
12033 TARGET_VXWORKS_RTP check. */
12034 if (TARGET_VXWORKS_RTP
12035 || (GET_CODE (x) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (x)))
12036 fputs ("(GOT)", asm_out_file);
12037 else
12038 fputs ("(GOTOFF)", asm_out_file);
12039 }
12040 fputc ('\n', asm_out_file);
12041 return true;
12042 }
12043
12044 if (arm_vector_mode_supported_p (GET_MODE (x)))
12045 {
12046 int i, units;
12047
12048 gcc_assert (GET_CODE (x) == CONST_VECTOR);
12049
12050 units = CONST_VECTOR_NUNITS (x);
12051
12052 switch (GET_MODE (x))
12053 {
12054 case V2SImode: size = 4; break;
12055 case V4HImode: size = 2; break;
12056 case V8QImode: size = 1; break;
12057 default:
12058 gcc_unreachable ();
12059 }
12060
12061 for (i = 0; i < units; i++)
12062 {
12063 rtx elt;
12064
12065 elt = CONST_VECTOR_ELT (x, i);
12066 assemble_integer
12067 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
12068 }
12069
12070 return true;
12071 }
12072
12073 return default_assemble_integer (x, size, aligned_p);
12074 }
12075
12076 static void
12077 arm_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
12078 {
12079 section *s;
12080
12081 if (!TARGET_AAPCS_BASED)
12082 {
12083 (is_ctor ?
12084 default_named_section_asm_out_constructor
12085 : default_named_section_asm_out_destructor) (symbol, priority);
12086 return;
12087 }
12088
12089 /* Put these in the .init_array section, using a special relocation. */
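/* For example, priority 42 produces the section name
   ".init_array.00042"; the "%.5u" format below zero-pads to five
   digits so that priorities sort correctly by name.  */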
12090 if (priority != DEFAULT_INIT_PRIORITY)
12091 {
12092 char buf[18];
12093 sprintf (buf, "%s.%.5u",
12094 is_ctor ? ".init_array" : ".fini_array",
12095 priority);
12096 s = get_section (buf, SECTION_WRITE, NULL_TREE);
12097 }
12098 else if (is_ctor)
12099 s = ctors_section;
12100 else
12101 s = dtors_section;
12102
12103 switch_to_section (s);
12104 assemble_align (POINTER_SIZE);
12105 fputs ("\t.word\t", asm_out_file);
12106 output_addr_const (asm_out_file, symbol);
12107 fputs ("(target1)\n", asm_out_file);
12108 }
12109
12110 /* Add a function to the list of static constructors. */
12111
12112 static void
12113 arm_elf_asm_constructor (rtx symbol, int priority)
12114 {
12115 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/true);
12116 }
12117
12118 /* Add a function to the list of static destructors. */
12119
12120 static void
12121 arm_elf_asm_destructor (rtx symbol, int priority)
12122 {
12123 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/false);
12124 }
12125 #endif
12126 \f
12127 /* A finite state machine takes care of noticing whether or not instructions
12128 can be conditionally executed, and thus decrease execution time and code
12129 size by deleting branch instructions. The fsm is controlled by
12130 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
12131
12132 /* The states of the fsm controlling condition codes are:
12133 0: normal, do nothing special
12134 1: make ASM_OUTPUT_OPCODE not output this instruction
12135 2: make ASM_OUTPUT_OPCODE not output this instruction
12136 3: make instructions conditional
12137 4: make instructions conditional
12138
12139 State transitions (state->state by whom under condition):
12140 0 -> 1 final_prescan_insn if the `target' is a label
12141 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
12142 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
12143 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
12144 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
12145 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
12146 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
12147 (the target insn is arm_target_insn).
12148
12149 If the jump clobbers the conditions then we use states 2 and 4.
12150
12151 A similar thing can be done with conditional return insns.
12152
12153 XXX In case the `target' is an unconditional branch, this conditionalising
12154 of the instructions always reduces code size, but not always execution
12155 time. But then, I want to reduce the code size to somewhere near what
12156 /bin/cc produces. */
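/* As a concrete example, a conditional branch over a single instruction:

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   is output instead as

	cmp	r0, #0
	addne	r1, r1, #1

   eliminating the branch entirely.  */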
12157
12158 /* In addition to this, state is maintained for Thumb-2 COND_EXEC
12159 instructions. When a COND_EXEC instruction is seen the subsequent
12160 instructions are scanned so that multiple conditional instructions can be
12161 combined into a single IT block. arm_condexec_count and arm_condexec_mask
12162 specify the length and true/false mask for the IT block. These will be
12163 decremented/zeroed by arm_asm_output_opcode as the insns are output. */
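/* For example, two instructions conditional on EQ followed by one
   conditional on NE are combined into a single block headed by
   "itte eq" (arm_condexec_mask == 0x3, arm_condexec_masklen == 3).  */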
12164
12165 /* Returns the index of the ARM condition code string in
12166 `arm_condition_codes'. COMPARISON should be an rtx like
12167 `(eq (...) (...))'. */
12168 static enum arm_cond_code
12169 get_arm_condition_code (rtx comparison)
12170 {
12171 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
12172 int code;
12173 enum rtx_code comp_code = GET_CODE (comparison);
12174
12175 if (GET_MODE_CLASS (mode) != MODE_CC)
12176 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
12177 XEXP (comparison, 1));
12178
12179 switch (mode)
12180 {
12181 case CC_DNEmode: code = ARM_NE; goto dominance;
12182 case CC_DEQmode: code = ARM_EQ; goto dominance;
12183 case CC_DGEmode: code = ARM_GE; goto dominance;
12184 case CC_DGTmode: code = ARM_GT; goto dominance;
12185 case CC_DLEmode: code = ARM_LE; goto dominance;
12186 case CC_DLTmode: code = ARM_LT; goto dominance;
12187 case CC_DGEUmode: code = ARM_CS; goto dominance;
12188 case CC_DGTUmode: code = ARM_HI; goto dominance;
12189 case CC_DLEUmode: code = ARM_LS; goto dominance;
12190 case CC_DLTUmode: code = ARM_CC;
12191
12192 dominance:
12193 gcc_assert (comp_code == EQ || comp_code == NE);
12194
12195 if (comp_code == EQ)
12196 return ARM_INVERSE_CONDITION_CODE (code);
12197 return code;
12198
12199 case CC_NOOVmode:
12200 switch (comp_code)
12201 {
12202 case NE: return ARM_NE;
12203 case EQ: return ARM_EQ;
12204 case GE: return ARM_PL;
12205 case LT: return ARM_MI;
12206 default: gcc_unreachable ();
12207 }
12208
12209 case CC_Zmode:
12210 switch (comp_code)
12211 {
12212 case NE: return ARM_NE;
12213 case EQ: return ARM_EQ;
12214 default: gcc_unreachable ();
12215 }
12216
12217 case CC_Nmode:
12218 switch (comp_code)
12219 {
12220 case NE: return ARM_MI;
12221 case EQ: return ARM_PL;
12222 default: gcc_unreachable ();
12223 }
12224
12225 case CCFPEmode:
12226 case CCFPmode:
12227 /* These encodings assume that AC=1 in the FPA system control
12228 byte. This allows us to handle all cases except UNEQ and
12229 LTGT. */
12230 switch (comp_code)
12231 {
12232 case GE: return ARM_GE;
12233 case GT: return ARM_GT;
12234 case LE: return ARM_LS;
12235 case LT: return ARM_MI;
12236 case NE: return ARM_NE;
12237 case EQ: return ARM_EQ;
12238 case ORDERED: return ARM_VC;
12239 case UNORDERED: return ARM_VS;
12240 case UNLT: return ARM_LT;
12241 case UNLE: return ARM_LE;
12242 case UNGT: return ARM_HI;
12243 case UNGE: return ARM_PL;
12244 /* UNEQ and LTGT do not have a representation. */
12245 case UNEQ: /* Fall through. */
12246 case LTGT: /* Fall through. */
12247 default: gcc_unreachable ();
12248 }
12249
12250 case CC_SWPmode:
12251 switch (comp_code)
12252 {
12253 case NE: return ARM_NE;
12254 case EQ: return ARM_EQ;
12255 case GE: return ARM_LE;
12256 case GT: return ARM_LT;
12257 case LE: return ARM_GE;
12258 case LT: return ARM_GT;
12259 case GEU: return ARM_LS;
12260 case GTU: return ARM_CC;
12261 case LEU: return ARM_CS;
12262 case LTU: return ARM_HI;
12263 default: gcc_unreachable ();
12264 }
12265
12266 case CC_Cmode:
12267 switch (comp_code)
12268 {
12269 case LTU: return ARM_CS;
12270 case GEU: return ARM_CC;
12271 default: gcc_unreachable ();
12272 }
12273
12274 case CCmode:
12275 switch (comp_code)
12276 {
12277 case NE: return ARM_NE;
12278 case EQ: return ARM_EQ;
12279 case GE: return ARM_GE;
12280 case GT: return ARM_GT;
12281 case LE: return ARM_LE;
12282 case LT: return ARM_LT;
12283 case GEU: return ARM_CS;
12284 case GTU: return ARM_HI;
12285 case LEU: return ARM_LS;
12286 case LTU: return ARM_CC;
12287 default: gcc_unreachable ();
12288 }
12289
12290 default: gcc_unreachable ();
12291 }
12292 }
12293
12294 /* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
12295 instructions. */
12296 void
12297 thumb2_final_prescan_insn (rtx insn)
12298 {
12299 rtx first_insn = insn;
12300 rtx body = PATTERN (insn);
12301 rtx predicate;
12302 enum arm_cond_code code;
12303 int n;
12304 int mask;
12305
12306 /* Remove the previous insn from the count of insns to be output. */
12307 if (arm_condexec_count)
12308 arm_condexec_count--;
12309
12310 /* Nothing to do if we are already inside a conditional block. */
12311 if (arm_condexec_count)
12312 return;
12313
12314 if (GET_CODE (body) != COND_EXEC)
12315 return;
12316
12317 /* Conditional jumps are implemented directly. */
12318 if (GET_CODE (insn) == JUMP_INSN)
12319 return;
12320
12321 predicate = COND_EXEC_TEST (body);
12322 arm_current_cc = get_arm_condition_code (predicate);
12323
12324 n = get_attr_ce_count (insn);
12325 arm_condexec_count = 1;
12326 arm_condexec_mask = (1 << n) - 1;
12327 arm_condexec_masklen = n;
12328 /* See if subsequent instructions can be combined into the same block. */
12329 for (;;)
12330 {
12331 insn = next_nonnote_insn (insn);
12332
12333 /* Jumping into the middle of an IT block is illegal, so a label or
12334 barrier terminates the block. */
12335 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
12336 break;
12337
12338 body = PATTERN (insn);
12339 /* USE and CLOBBER aren't really insns, so just skip them. */
12340 if (GET_CODE (body) == USE
12341 || GET_CODE (body) == CLOBBER)
12342 continue;
12343
12344 /* ??? Recognize conditional jumps, and combine them with IT blocks. */
12345 if (GET_CODE (body) != COND_EXEC)
12346 break;
12347 /* Allow up to 4 conditionally executed instructions in a block. */
12348 n = get_attr_ce_count (insn);
12349 if (arm_condexec_masklen + n > 4)
12350 break;
12351
12352 predicate = COND_EXEC_TEST (body);
12353 code = get_arm_condition_code (predicate);
12354 mask = (1 << n) - 1;
12355 if (arm_current_cc == code)
12356 arm_condexec_mask |= (mask << arm_condexec_masklen);
12357 else if (arm_current_cc != ARM_INVERSE_CONDITION_CODE(code))
12358 break;
12359
12360 arm_condexec_count++;
12361 arm_condexec_masklen += n;
12362
12363 /* A jump must be the last instruction in a conditional block. */
12364 if (GET_CODE (insn) == JUMP_INSN)
12365 break;
12366 }
12367 /* Restore recog_data (getting the attributes of other insns can
12368 destroy this array, but final.c assumes that it remains intact
12369 across this call). */
12370 extract_constrain_insn_cached (first_insn);
12371 }
12372
12373 void
12374 arm_final_prescan_insn (rtx insn)
12375 {
12376 /* BODY will hold the body of INSN. */
12377 rtx body = PATTERN (insn);
12378
12379 /* This will be 1 if trying to repeat the trick, and things need to be
12380 reversed if it appears to fail. */
12381 int reverse = 0;
12382
12383 /* If JUMP_CLOBBERS is one, the condition codes are clobbered when the
12384 branch is taken, even if the rtl suggests otherwise. It also
12385 means that we have to grub around within the jump expression to find
12386 out what the conditions are when the jump isn't taken. */
12387 int jump_clobbers = 0;
12388
12389 /* If we start with a return insn, we only succeed if we find another one. */
12390 int seeking_return = 0;
12391
12392 /* START_INSN will hold the insn from where we start looking. This is the
12393 first insn after the following code_label if REVERSE is true. */
12394 rtx start_insn = insn;
12395
12396 /* If in state 4, check if the target branch is reached, in order to
12397 change back to state 0. */
12398 if (arm_ccfsm_state == 4)
12399 {
12400 if (insn == arm_target_insn)
12401 {
12402 arm_target_insn = NULL;
12403 arm_ccfsm_state = 0;
12404 }
12405 return;
12406 }
12407
12408 /* If in state 3, it is possible to repeat the trick, if this insn is an
12409 unconditional branch to a label, and immediately following this branch
12410 is the previous target label which is only used once, and the label this
12411 branch jumps to is not too far off. */
12412 if (arm_ccfsm_state == 3)
12413 {
12414 if (simplejump_p (insn))
12415 {
12416 start_insn = next_nonnote_insn (start_insn);
12417 if (GET_CODE (start_insn) == BARRIER)
12418 {
12419 /* XXX Isn't this always a barrier? */
12420 start_insn = next_nonnote_insn (start_insn);
12421 }
12422 if (GET_CODE (start_insn) == CODE_LABEL
12423 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
12424 && LABEL_NUSES (start_insn) == 1)
12425 reverse = TRUE;
12426 else
12427 return;
12428 }
12429 else if (GET_CODE (body) == RETURN)
12430 {
12431 start_insn = next_nonnote_insn (start_insn);
12432 if (GET_CODE (start_insn) == BARRIER)
12433 start_insn = next_nonnote_insn (start_insn);
12434 if (GET_CODE (start_insn) == CODE_LABEL
12435 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
12436 && LABEL_NUSES (start_insn) == 1)
12437 {
12438 reverse = TRUE;
12439 seeking_return = 1;
12440 }
12441 else
12442 return;
12443 }
12444 else
12445 return;
12446 }
12447
12448 gcc_assert (!arm_ccfsm_state || reverse);
12449 if (GET_CODE (insn) != JUMP_INSN)
12450 return;
12451
12452 /* This jump might be paralleled with a clobber of the condition codes;
12453 the jump should always come first. */
12454 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
12455 body = XVECEXP (body, 0, 0);
12456
12457 if (reverse
12458 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
12459 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
12460 {
12461 int insns_skipped;
12462 int fail = FALSE, succeed = FALSE;
12463 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
12464 int then_not_else = TRUE;
12465 rtx this_insn = start_insn, label = 0;
12466
12467 /* If the jump cannot be done with one instruction, we cannot
12468 conditionally execute the instruction in the inverse case. */
12469 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
12470 {
12471 jump_clobbers = 1;
12472 return;
12473 }
12474
12475 /* Register the insn jumped to. */
12476 if (reverse)
12477 {
12478 if (!seeking_return)
12479 label = XEXP (SET_SRC (body), 0);
12480 }
12481 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
12482 label = XEXP (XEXP (SET_SRC (body), 1), 0);
12483 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
12484 {
12485 label = XEXP (XEXP (SET_SRC (body), 2), 0);
12486 then_not_else = FALSE;
12487 }
12488 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
12489 seeking_return = 1;
12490 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
12491 {
12492 seeking_return = 1;
12493 then_not_else = FALSE;
12494 }
12495 else
12496 gcc_unreachable ();
12497
12498 /* See how many insns this branch skips, and what kind of insns. If all
12499 insns are okay, and the label or unconditional branch to the same
12500 label is not too far away, succeed. */
12501 for (insns_skipped = 0;
12502 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
12503 {
12504 rtx scanbody;
12505
12506 this_insn = next_nonnote_insn (this_insn);
12507 if (!this_insn)
12508 break;
12509
12510 switch (GET_CODE (this_insn))
12511 {
12512 case CODE_LABEL:
12513 /* Succeed if it is the target label, otherwise fail since
12514 control falls in from somewhere else. */
12515 if (this_insn == label)
12516 {
12517 if (jump_clobbers)
12518 {
12519 arm_ccfsm_state = 2;
12520 this_insn = next_nonnote_insn (this_insn);
12521 }
12522 else
12523 arm_ccfsm_state = 1;
12524 succeed = TRUE;
12525 }
12526 else
12527 fail = TRUE;
12528 break;
12529
12530 case BARRIER:
12531 /* Succeed if the following insn is the target label.
12532 Otherwise fail.
12533 If return insns are used then the last insn in a function
12534 will be a barrier. */
12535 this_insn = next_nonnote_insn (this_insn);
12536 if (this_insn && this_insn == label)
12537 {
12538 if (jump_clobbers)
12539 {
12540 arm_ccfsm_state = 2;
12541 this_insn = next_nonnote_insn (this_insn);
12542 }
12543 else
12544 arm_ccfsm_state = 1;
12545 succeed = TRUE;
12546 }
12547 else
12548 fail = TRUE;
12549 break;
12550
12551 case CALL_INSN:
12552 /* The AAPCS says that conditional calls should not be
12553 used since they make interworking inefficient (the
12554 linker can't transform BL<cond> into BLX). That's
12555 only a problem if the machine has BLX. */
12556 if (arm_arch5)
12557 {
12558 fail = TRUE;
12559 break;
12560 }
12561
12562 /* Succeed if the following insn is the target label, or
12563 if the following two insns are a barrier and the
12564 target label. */
12565 this_insn = next_nonnote_insn (this_insn);
12566 if (this_insn && GET_CODE (this_insn) == BARRIER)
12567 this_insn = next_nonnote_insn (this_insn);
12568
12569 if (this_insn && this_insn == label
12570 && insns_skipped < max_insns_skipped)
12571 {
12572 if (jump_clobbers)
12573 {
12574 arm_ccfsm_state = 2;
12575 this_insn = next_nonnote_insn (this_insn);
12576 }
12577 else
12578 arm_ccfsm_state = 1;
12579 succeed = TRUE;
12580 }
12581 else
12582 fail = TRUE;
12583 break;
12584
12585 case JUMP_INSN:
12586 /* If this is an unconditional branch to the same label, succeed.
12587 If it is to another label, do nothing. If it is conditional,
12588 fail. */
12589 /* XXX Probably, the tests for SET and the PC are
12590 unnecessary. */
12591
12592 scanbody = PATTERN (this_insn);
12593 if (GET_CODE (scanbody) == SET
12594 && GET_CODE (SET_DEST (scanbody)) == PC)
12595 {
12596 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
12597 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
12598 {
12599 arm_ccfsm_state = 2;
12600 succeed = TRUE;
12601 }
12602 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
12603 fail = TRUE;
12604 }
12605 /* Fail if a conditional return is undesirable (e.g. on a
12606 StrongARM), but still allow this if optimizing for size. */
12607 else if (GET_CODE (scanbody) == RETURN
12608 && !use_return_insn (TRUE, NULL)
12609 && !optimize_size)
12610 fail = TRUE;
12611 else if (GET_CODE (scanbody) == RETURN
12612 && seeking_return)
12613 {
12614 arm_ccfsm_state = 2;
12615 succeed = TRUE;
12616 }
12617 else if (GET_CODE (scanbody) == PARALLEL)
12618 {
12619 switch (get_attr_conds (this_insn))
12620 {
12621 case CONDS_NOCOND:
12622 break;
12623 default:
12624 fail = TRUE;
12625 break;
12626 }
12627 }
12628 else
12629 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
12630
12631 break;
12632
12633 case INSN:
12634 /* Instructions using or affecting the condition codes make it
12635 fail. */
12636 scanbody = PATTERN (this_insn);
12637 if (!(GET_CODE (scanbody) == SET
12638 || GET_CODE (scanbody) == PARALLEL)
12639 || get_attr_conds (this_insn) != CONDS_NOCOND)
12640 fail = TRUE;
12641
12642 /* A conditional Cirrus instruction must be followed by
12643 a non-Cirrus instruction. However, since we
12644 conditionalize instructions in this function, and since by
12645 the time we get here we can no longer add instructions
12646 (nops), because shorten_branches() has already been
12647 called, we will disable conditionalizing Cirrus
12648 instructions to be safe. */
12649 if (GET_CODE (scanbody) != USE
12650 && GET_CODE (scanbody) != CLOBBER
12651 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
12652 fail = TRUE;
12653 break;
12654
12655 default:
12656 break;
12657 }
12658 }
12659 if (succeed)
12660 {
12661 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
12662 arm_target_label = CODE_LABEL_NUMBER (label);
12663 else
12664 {
12665 gcc_assert (seeking_return || arm_ccfsm_state == 2);
12666
12667 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
12668 {
12669 this_insn = next_nonnote_insn (this_insn);
12670 gcc_assert (!this_insn
12671 || (GET_CODE (this_insn) != BARRIER
12672 && GET_CODE (this_insn) != CODE_LABEL));
12673 }
12674 if (!this_insn)
12675 {
12676 /* Oh, dear! We ran off the end; give up. */
12677 extract_constrain_insn_cached (insn);
12678 arm_ccfsm_state = 0;
12679 arm_target_insn = NULL;
12680 return;
12681 }
12682 arm_target_insn = this_insn;
12683 }
12684 if (jump_clobbers)
12685 {
12686 gcc_assert (!reverse);
12687 arm_current_cc =
12688 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
12689 0), 0), 1));
12690 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
12691 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12692 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
12693 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12694 }
12695 else
12696 {
12697 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
12698 what it was. */
12699 if (!reverse)
12700 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
12701 0));
12702 }
12703
12704 if (reverse || then_not_else)
12705 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12706 }
12707
12708 /* Restore recog_data (getting the attributes of other insns can
12709 destroy this array, but final.c assumes that it remains intact
12710 across this call). */
12711 extract_constrain_insn_cached (insn);
12712 }
12713 }
12714
12715 /* Output IT instructions. */
12716 void
12717 thumb2_asm_output_opcode (FILE * stream)
12718 {
12719 char buff[5];
12720 int n;
12721
12722 if (arm_condexec_mask)
12723 {
12724 for (n = 0; n < arm_condexec_masklen; n++)
12725 buff[n] = (arm_condexec_mask & (1 << n)) ? 't' : 'e';
12726 buff[n] = 0;
12727 asm_fprintf (stream, "i%s\t%s\n\t", buff,
12728 arm_condition_codes[arm_current_cc]);
12729 arm_condexec_mask = 0;
12730 }
12731 }
12732
12733 /* Returns true if REGNO is a valid register
12734 for holding a quantity of type MODE. */
12735 int
12736 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
12737 {
12738 if (GET_MODE_CLASS (mode) == MODE_CC)
12739 return (regno == CC_REGNUM
12740 || (TARGET_HARD_FLOAT && TARGET_VFP
12741 && regno == VFPCC_REGNUM));
12742
12743 if (TARGET_THUMB1)
12744 /* For the Thumb we only allow values bigger than SImode in
12745 registers 0 - 6, so that there is always a second low
12746 register available to hold the upper part of the value.
12747 We probably ought to ensure that the register is the
12748 start of an even numbered register pair. */
12749 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
12750
12751 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
12752 && IS_CIRRUS_REGNUM (regno))
12753 /* We have outlawed SI values in Cirrus registers because they
12754 reside in the lower 32 bits, but SF values reside in the
12755 upper 32 bits. This causes gcc all sorts of grief. We can't
12756 even split the registers into pairs because Cirrus SI values
12757 get sign extended to 64bits-- aldyh. */
12758 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
12759
12760 if (TARGET_HARD_FLOAT && TARGET_VFP
12761 && IS_VFP_REGNUM (regno))
12762 {
12763 if (mode == SFmode || mode == SImode)
12764 return TRUE;
12765
12766 /* DFmode values are only valid in even register pairs. */
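/* (For example, a DFmode value starting at s0 occupies {s0, s1},
   i.e. d0; a value cannot start at the odd register s1.)  */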
12767 if (mode == DFmode)
12768 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
12769 return FALSE;
12770 }
12771
12772 if (TARGET_REALLY_IWMMXT)
12773 {
12774 if (IS_IWMMXT_GR_REGNUM (regno))
12775 return mode == SImode;
12776
12777 if (IS_IWMMXT_REGNUM (regno))
12778 return VALID_IWMMXT_REG_MODE (mode);
12779 }
12780
12781 /* We allow any value to be stored in the general registers.
12782 Restrict doubleword quantities to even register pairs so that we can
12783 use ldrd. */
12784 if (regno <= LAST_ARM_REGNUM)
12785 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
12786
12787 if (regno == FRAME_POINTER_REGNUM
12788 || regno == ARG_POINTER_REGNUM)
12789 /* We only allow integers in the fake hard registers. */
12790 return GET_MODE_CLASS (mode) == MODE_INT;
12791
12792 /* The only registers left are the FPA registers
12793 which we only allow to hold FP values. */
12794 return (TARGET_HARD_FLOAT && TARGET_FPA
12795 && GET_MODE_CLASS (mode) == MODE_FLOAT
12796 && regno >= FIRST_FPA_REGNUM
12797 && regno <= LAST_FPA_REGNUM);
12798 }
12799
12800 /* For efficiency and historical reasons LO_REGS, HI_REGS and CC_REGS are
12801 not used in arm mode. */
12802 int
12803 arm_regno_class (int regno)
12804 {
12805 if (TARGET_THUMB1)
12806 {
12807 if (regno == STACK_POINTER_REGNUM)
12808 return STACK_REG;
12809 if (regno == CC_REGNUM)
12810 return CC_REG;
12811 if (regno < 8)
12812 return LO_REGS;
12813 return HI_REGS;
12814 }
12815
12816 if (TARGET_THUMB2 && regno < 8)
12817 return LO_REGS;
12818
12819 if ( regno <= LAST_ARM_REGNUM
12820 || regno == FRAME_POINTER_REGNUM
12821 || regno == ARG_POINTER_REGNUM)
12822 return TARGET_THUMB2 ? HI_REGS : GENERAL_REGS;
12823
12824 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
12825 return TARGET_THUMB2 ? CC_REG : NO_REGS;
12826
12827 if (IS_CIRRUS_REGNUM (regno))
12828 return CIRRUS_REGS;
12829
12830 if (IS_VFP_REGNUM (regno))
12831 return VFP_REGS;
12832
12833 if (IS_IWMMXT_REGNUM (regno))
12834 return IWMMXT_REGS;
12835
12836 if (IS_IWMMXT_GR_REGNUM (regno))
12837 return IWMMXT_GR_REGS;
12838
12839 return FPA_REGS;
12840 }
12841
12842 /* Handle a special case when computing the offset
12843 of an argument from the frame pointer. */
12844 int
12845 arm_debugger_arg_offset (int value, rtx addr)
12846 {
12847 rtx insn;
12848
12849 /* We are only interested if dbxout_parms() failed to compute the offset. */
12850 if (value != 0)
12851 return 0;
12852
12853 /* We can only cope with the case where the address is held in a register. */
12854 if (GET_CODE (addr) != REG)
12855 return 0;
12856
12857 /* If we are using the frame pointer to point at the argument, then
12858 an offset of 0 is correct. */
12859 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
12860 return 0;
12861
12862 /* If we are using the stack pointer to point at the
12863 argument, then an offset of 0 is correct. */
12864 /* ??? Check this is consistent with thumb2 frame layout. */
12865 if ((TARGET_THUMB || !frame_pointer_needed)
12866 && REGNO (addr) == SP_REGNUM)
12867 return 0;
12868
12869 /* Oh dear. The argument is pointed to by a register rather
12870 than being held in a register, or being stored at a known
12871 offset from the frame pointer. Since GDB only understands
12872 those two kinds of argument we must translate the address
12873 held in the register into an offset from the frame pointer.
12874 We do this by searching through the insns for the function
12875 looking to see where this register gets its value. If the
12876 register is initialized from the frame pointer plus an offset
12877 then we are in luck and we can continue, otherwise we give up.
12878
12879 This code is exercised by producing debugging information
12880 for a function with arguments like this:
12881
12882 double func (double a, double b, int c, double d) {return d;}
12883
12884 Without this code the stab for parameter 'd' will be set to
12885 an offset of 0 from the frame pointer, rather than 8. */
12886
12887 /* The if() statement says:
12888
12889 If the insn is a normal instruction
12890 and if the insn is setting the value in a register
12891 and if the register being set is the register holding the address of the argument
12892 and if the address is computed by an addition
12893 that involves adding to a register
12894 which is the frame pointer
12895 a constant integer
12896
12897 then... */
12898
12899 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12900 {
12901 if ( GET_CODE (insn) == INSN
12902 && GET_CODE (PATTERN (insn)) == SET
12903 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12904 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12905 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12906 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12907 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12908 )
12909 {
12910 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12911
12912 break;
12913 }
12914 }
12915
12916 if (value == 0)
12917 {
12918 debug_rtx (addr);
12919 warning (0, "unable to compute real location of stacked parameter");
12920 value = 8; /* XXX magic hack */
12921 }
12922
12923 return value;
12924 }
12925 \f
12926 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12927 do \
12928 { \
12929 if ((MASK) & insn_flags) \
12930 add_builtin_function ((NAME), (TYPE), (CODE), \
12931 BUILT_IN_MD, NULL, NULL_TREE); \
12932 } \
12933 while (0)
12934
12935 struct builtin_description
12936 {
12937 const unsigned int mask;
12938 const enum insn_code icode;
12939 const char * const name;
12940 const enum arm_builtins code;
12941 const enum rtx_code comparison;
12942 const unsigned int flag;
12943 };
12944
12945 static const struct builtin_description bdesc_2arg[] =
12946 {
12947 #define IWMMXT_BUILTIN(code, string, builtin) \
12948 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12949 ARM_BUILTIN_##builtin, 0, 0 },
12950
12951 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12952 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12953 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12954 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12955 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12956 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12957 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12958 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12959 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12960 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12961 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12962 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12963 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12964 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12965 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12966 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12967 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12968 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12969 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12970 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12971 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12972 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12973 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12974 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12975 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12976 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12977 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12978 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12979 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12980 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12981 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12982 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12983 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12984 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12985 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12986 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12987 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12988 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12989 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12990 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12991 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12992 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12993 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12994 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12995 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12996 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12997 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12998 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12999 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
13000 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
13001 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
13002 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
13003 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
13004 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
13005 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
13006 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
13007 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
13008 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
13009
13010 #define IWMMXT_BUILTIN2(code, builtin) \
13011 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
13012
13013 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
13014 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
13015 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
13016 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
13017 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
13018 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
13019 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
13020 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
13021 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
13022 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
13023 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
13024 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
13025 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
13026 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
13027 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
13028 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
13029 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
13030 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
13031 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
13032 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
13033 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
13034 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
13035 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
13036 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
13037 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
13038 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
13039 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
13040 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
13041 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
13042 IWMMXT_BUILTIN2 (rordi3, WRORDI)
13043 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
13044 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
13045 };
13046
13047 static const struct builtin_description bdesc_1arg[] =
13048 {
13049 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
13050 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
13051 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
13052 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
13053 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
13054 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
13055 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
13056 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
13057 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
13058 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
13059 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
13060 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
13061 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
13062 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
13063 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
13064 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
13065 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
13066 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
13067 };
13068
13069 /* Set up all the iWMMXt builtins. This is
13070 not called if TARGET_IWMMXT is zero. */
13071
13072 static void
13073 arm_init_iwmmxt_builtins (void)
13074 {
13075 const struct builtin_description * d;
13076 size_t i;
13077 tree endlink = void_list_node;
13078
13079 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13080 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13081 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13082
13083 tree int_ftype_int
13084 = build_function_type (integer_type_node,
13085 tree_cons (NULL_TREE, integer_type_node, endlink));
13086 tree v8qi_ftype_v8qi_v8qi_int
13087 = build_function_type (V8QI_type_node,
13088 tree_cons (NULL_TREE, V8QI_type_node,
13089 tree_cons (NULL_TREE, V8QI_type_node,
13090 tree_cons (NULL_TREE,
13091 integer_type_node,
13092 endlink))));
13093 tree v4hi_ftype_v4hi_int
13094 = build_function_type (V4HI_type_node,
13095 tree_cons (NULL_TREE, V4HI_type_node,
13096 tree_cons (NULL_TREE, integer_type_node,
13097 endlink)));
13098 tree v2si_ftype_v2si_int
13099 = build_function_type (V2SI_type_node,
13100 tree_cons (NULL_TREE, V2SI_type_node,
13101 tree_cons (NULL_TREE, integer_type_node,
13102 endlink)));
13103 tree v2si_ftype_di_di
13104 = build_function_type (V2SI_type_node,
13105 tree_cons (NULL_TREE, long_long_integer_type_node,
13106 tree_cons (NULL_TREE, long_long_integer_type_node,
13107 endlink)));
13108 tree di_ftype_di_int
13109 = build_function_type (long_long_integer_type_node,
13110 tree_cons (NULL_TREE, long_long_integer_type_node,
13111 tree_cons (NULL_TREE, integer_type_node,
13112 endlink)));
13113 tree di_ftype_di_int_int
13114 = build_function_type (long_long_integer_type_node,
13115 tree_cons (NULL_TREE, long_long_integer_type_node,
13116 tree_cons (NULL_TREE, integer_type_node,
13117 tree_cons (NULL_TREE,
13118 integer_type_node,
13119 endlink))));
13120 tree int_ftype_v8qi
13121 = build_function_type (integer_type_node,
13122 tree_cons (NULL_TREE, V8QI_type_node,
13123 endlink));
13124 tree int_ftype_v4hi
13125 = build_function_type (integer_type_node,
13126 tree_cons (NULL_TREE, V4HI_type_node,
13127 endlink));
13128 tree int_ftype_v2si
13129 = build_function_type (integer_type_node,
13130 tree_cons (NULL_TREE, V2SI_type_node,
13131 endlink));
13132 tree int_ftype_v8qi_int
13133 = build_function_type (integer_type_node,
13134 tree_cons (NULL_TREE, V8QI_type_node,
13135 tree_cons (NULL_TREE, integer_type_node,
13136 endlink)));
13137 tree int_ftype_v4hi_int
13138 = build_function_type (integer_type_node,
13139 tree_cons (NULL_TREE, V4HI_type_node,
13140 tree_cons (NULL_TREE, integer_type_node,
13141 endlink)));
13142 tree int_ftype_v2si_int
13143 = build_function_type (integer_type_node,
13144 tree_cons (NULL_TREE, V2SI_type_node,
13145 tree_cons (NULL_TREE, integer_type_node,
13146 endlink)));
13147 tree v8qi_ftype_v8qi_int_int
13148 = build_function_type (V8QI_type_node,
13149 tree_cons (NULL_TREE, V8QI_type_node,
13150 tree_cons (NULL_TREE, integer_type_node,
13151 tree_cons (NULL_TREE,
13152 integer_type_node,
13153 endlink))));
13154 tree v4hi_ftype_v4hi_int_int
13155 = build_function_type (V4HI_type_node,
13156 tree_cons (NULL_TREE, V4HI_type_node,
13157 tree_cons (NULL_TREE, integer_type_node,
13158 tree_cons (NULL_TREE,
13159 integer_type_node,
13160 endlink))));
13161 tree v2si_ftype_v2si_int_int
13162 = build_function_type (V2SI_type_node,
13163 tree_cons (NULL_TREE, V2SI_type_node,
13164 tree_cons (NULL_TREE, integer_type_node,
13165 tree_cons (NULL_TREE,
13166 integer_type_node,
13167 endlink))));
13168 /* Miscellaneous. */
13169 tree v8qi_ftype_v4hi_v4hi
13170 = build_function_type (V8QI_type_node,
13171 tree_cons (NULL_TREE, V4HI_type_node,
13172 tree_cons (NULL_TREE, V4HI_type_node,
13173 endlink)));
13174 tree v4hi_ftype_v2si_v2si
13175 = build_function_type (V4HI_type_node,
13176 tree_cons (NULL_TREE, V2SI_type_node,
13177 tree_cons (NULL_TREE, V2SI_type_node,
13178 endlink)));
13179 tree v2si_ftype_v4hi_v4hi
13180 = build_function_type (V2SI_type_node,
13181 tree_cons (NULL_TREE, V4HI_type_node,
13182 tree_cons (NULL_TREE, V4HI_type_node,
13183 endlink)));
13184 tree v2si_ftype_v8qi_v8qi
13185 = build_function_type (V2SI_type_node,
13186 tree_cons (NULL_TREE, V8QI_type_node,
13187 tree_cons (NULL_TREE, V8QI_type_node,
13188 endlink)));
13189 tree v4hi_ftype_v4hi_di
13190 = build_function_type (V4HI_type_node,
13191 tree_cons (NULL_TREE, V4HI_type_node,
13192 tree_cons (NULL_TREE,
13193 long_long_integer_type_node,
13194 endlink)));
13195 tree v2si_ftype_v2si_di
13196 = build_function_type (V2SI_type_node,
13197 tree_cons (NULL_TREE, V2SI_type_node,
13198 tree_cons (NULL_TREE,
13199 long_long_integer_type_node,
13200 endlink)));
13201 tree void_ftype_int_int
13202 = build_function_type (void_type_node,
13203 tree_cons (NULL_TREE, integer_type_node,
13204 tree_cons (NULL_TREE, integer_type_node,
13205 endlink)));
13206 tree di_ftype_void
13207 = build_function_type (long_long_unsigned_type_node, endlink);
13208 tree di_ftype_v8qi
13209 = build_function_type (long_long_integer_type_node,
13210 tree_cons (NULL_TREE, V8QI_type_node,
13211 endlink));
13212 tree di_ftype_v4hi
13213 = build_function_type (long_long_integer_type_node,
13214 tree_cons (NULL_TREE, V4HI_type_node,
13215 endlink));
13216 tree di_ftype_v2si
13217 = build_function_type (long_long_integer_type_node,
13218 tree_cons (NULL_TREE, V2SI_type_node,
13219 endlink));
13220 tree v2si_ftype_v4hi
13221 = build_function_type (V2SI_type_node,
13222 tree_cons (NULL_TREE, V4HI_type_node,
13223 endlink));
13224 tree v4hi_ftype_v8qi
13225 = build_function_type (V4HI_type_node,
13226 tree_cons (NULL_TREE, V8QI_type_node,
13227 endlink));
13228
13229 tree di_ftype_di_v4hi_v4hi
13230 = build_function_type (long_long_unsigned_type_node,
13231 tree_cons (NULL_TREE,
13232 long_long_unsigned_type_node,
13233 tree_cons (NULL_TREE, V4HI_type_node,
13234 tree_cons (NULL_TREE,
13235 V4HI_type_node,
13236 endlink))));
13237
13238 tree di_ftype_v4hi_v4hi
13239 = build_function_type (long_long_unsigned_type_node,
13240 tree_cons (NULL_TREE, V4HI_type_node,
13241 tree_cons (NULL_TREE, V4HI_type_node,
13242 endlink)));
13243
13244 /* Normal vector binops. */
13245 tree v8qi_ftype_v8qi_v8qi
13246 = build_function_type (V8QI_type_node,
13247 tree_cons (NULL_TREE, V8QI_type_node,
13248 tree_cons (NULL_TREE, V8QI_type_node,
13249 endlink)));
13250 tree v4hi_ftype_v4hi_v4hi
13251 = build_function_type (V4HI_type_node,
13252 tree_cons (NULL_TREE, V4HI_type_node,
13253 tree_cons (NULL_TREE, V4HI_type_node,
13254 endlink)));
13255 tree v2si_ftype_v2si_v2si
13256 = build_function_type (V2SI_type_node,
13257 tree_cons (NULL_TREE, V2SI_type_node,
13258 tree_cons (NULL_TREE, V2SI_type_node,
13259 endlink)));
13260 tree di_ftype_di_di
13261 = build_function_type (long_long_unsigned_type_node,
13262 tree_cons (NULL_TREE, long_long_unsigned_type_node,
13263 tree_cons (NULL_TREE,
13264 long_long_unsigned_type_node,
13265 endlink)));
13266
13267 /* Add all builtins that are more or less simple operations on two
13268 operands. */
13269 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13270 {
13271 /* Use one of the operands; the target can have a different mode for
13272 mask-generating compares. */
13273 enum machine_mode mode;
13274 tree type;
13275
13276 if (d->name == 0)
13277 continue;
13278
13279 mode = insn_data[d->icode].operand[1].mode;
13280
13281 switch (mode)
13282 {
13283 case V8QImode:
13284 type = v8qi_ftype_v8qi_v8qi;
13285 break;
13286 case V4HImode:
13287 type = v4hi_ftype_v4hi_v4hi;
13288 break;
13289 case V2SImode:
13290 type = v2si_ftype_v2si_v2si;
13291 break;
13292 case DImode:
13293 type = di_ftype_di_di;
13294 break;
13295
13296 default:
13297 gcc_unreachable ();
13298 }
13299
13300 def_mbuiltin (d->mask, d->name, type, d->code);
13301 }
13302
13303 /* Add the remaining iWMMXt insns with somewhat more complicated types. */
13304 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
13305 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
13306 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
13307
13308 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
13309 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
13310 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
13311 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
13312 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
13313 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
13314
13315 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
13316 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
13317 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
13318 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
13319 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
13320 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
13321
13322 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
13323 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
13324 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
13325 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
13326 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
13327 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
13328
13329 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
13330 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
13331 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
13332 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
13333 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
13334 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
13335
13336 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
13337
13338 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
13339 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
13340 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
13341 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
13342
13343 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
13344 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
13345 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
13346 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
13347 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
13348 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
13349 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
13350 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
13351 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
13352
13353 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
13354 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
13355 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
13356
13357 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
13358 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
13359 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
13360
13361 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
13362 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
13363 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
13364 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
13365 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
13366 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
13367
13368 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
13369 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
13370 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
13371 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
13372 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
13373 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
13374 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
13375 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
13376 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
13377 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
13378 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
13379 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
13380
13381 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
13382 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
13383 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
13384 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
13385
13386 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
13387 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
13388 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
13389 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
13390 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
13391 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
13392 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
13393 }
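/* Illustrative sketch only (not part of the compiler): with the builtins
   above registered, user code compiled for an iWMMXt target could exercise
   them roughly as follows.  The vector typedef is an assumption for the
   example; real code would normally get it from <mmintrin.h>.

       typedef short __v4hi __attribute__ ((vector_size (8)));

       unsigned long long
       example (__v4hi x)
       {
         __v4hi shifted = __builtin_arm_wsllhi (x, 3);
         return __builtin_arm_wzero () | __builtin_arm_wacch (shifted);
       }

   wsllhi takes (V4HI, int) and returns V4HI, wzero takes no arguments and
   returns a 64-bit integer, and wacch maps V4HI to a 64-bit integer,
   matching the function types constructed above.  */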
13394
13395 static void
13396 arm_init_tls_builtins (void)
13397 {
13398 tree ftype;
13399 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
13400 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
13401
13402 ftype = build_function_type (ptr_type_node, void_list_node);
13403 add_builtin_function ("__builtin_thread_pointer", ftype,
13404 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
13405 NULL, const_nothrow);
13406 }
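/* Illustrative note: the only builtin registered here is
   __builtin_thread_pointer, which user code may call directly to read the
   TLS thread pointer, e.g.

       void *tp = __builtin_thread_pointer ();

   It is expanded through arm_load_tp via the ARM_BUILTIN_THREAD_POINTER
   case in arm_expand_builtin below.  */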
13407
13408 static void
13409 arm_init_builtins (void)
13410 {
13411 arm_init_tls_builtins ();
13412
13413 if (TARGET_REALLY_IWMMXT)
13414 arm_init_iwmmxt_builtins ();
13415 }
13416
13417 /* Errors in the source file can cause expand_expr to return const0_rtx
13418 where we expect a vector. To avoid crashing, use one of the vector
13419 clear instructions. */
13420
13421 static rtx
13422 safe_vector_operand (rtx x, enum machine_mode mode)
13423 {
13424 if (x != const0_rtx)
13425 return x;
13426 x = gen_reg_rtx (mode);
13427
13428 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
13429 : gen_rtx_SUBREG (DImode, x, 0)));
13430 return x;
13431 }
13432
13433 /* Subroutine of arm_expand_builtin to take care of binop insns. */
13434
13435 static rtx
13436 arm_expand_binop_builtin (enum insn_code icode,
13437 tree exp, rtx target)
13438 {
13439 rtx pat;
13440 tree arg0 = CALL_EXPR_ARG (exp, 0);
13441 tree arg1 = CALL_EXPR_ARG (exp, 1);
13442 rtx op0 = expand_normal (arg0);
13443 rtx op1 = expand_normal (arg1);
13444 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13445 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13446 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13447
13448 if (VECTOR_MODE_P (mode0))
13449 op0 = safe_vector_operand (op0, mode0);
13450 if (VECTOR_MODE_P (mode1))
13451 op1 = safe_vector_operand (op1, mode1);
13452
13453 if (! target
13454 || GET_MODE (target) != tmode
13455 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13456 target = gen_reg_rtx (tmode);
13457
13458 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
13459
13460 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13461 op0 = copy_to_mode_reg (mode0, op0);
13462 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13463 op1 = copy_to_mode_reg (mode1, op1);
13464
13465 pat = GEN_FCN (icode) (target, op0, op1);
13466 if (! pat)
13467 return 0;
13468 emit_insn (pat);
13469 return target;
13470 }
13471
13472 /* Subroutine of arm_expand_builtin to take care of unop insns. */
13473
13474 static rtx
13475 arm_expand_unop_builtin (enum insn_code icode,
13476 tree exp, rtx target, int do_load)
13477 {
13478 rtx pat;
13479 tree arg0 = CALL_EXPR_ARG (exp, 0);
13480 rtx op0 = expand_normal (arg0);
13481 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13482 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13483
13484 if (! target
13485 || GET_MODE (target) != tmode
13486 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13487 target = gen_reg_rtx (tmode);
13488 if (do_load)
13489 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13490 else
13491 {
13492 if (VECTOR_MODE_P (mode0))
13493 op0 = safe_vector_operand (op0, mode0);
13494
13495 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13496 op0 = copy_to_mode_reg (mode0, op0);
13497 }
13498
13499 pat = GEN_FCN (icode) (target, op0);
13500 if (! pat)
13501 return 0;
13502 emit_insn (pat);
13503 return target;
13504 }
13505
13506 /* Expand an expression EXP that calls a built-in function,
13507 with result going to TARGET if that's convenient
13508 (and in mode MODE if that's convenient).
13509 SUBTARGET may be used as the target for computing one of EXP's operands.
13510 IGNORE is nonzero if the value is to be ignored. */
13511
13512 static rtx
13513 arm_expand_builtin (tree exp,
13514 rtx target,
13515 rtx subtarget ATTRIBUTE_UNUSED,
13516 enum machine_mode mode ATTRIBUTE_UNUSED,
13517 int ignore ATTRIBUTE_UNUSED)
13518 {
13519 const struct builtin_description * d;
13520 enum insn_code icode;
13521 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13522 tree arg0;
13523 tree arg1;
13524 tree arg2;
13525 rtx op0;
13526 rtx op1;
13527 rtx op2;
13528 rtx pat;
13529 int fcode = DECL_FUNCTION_CODE (fndecl);
13530 size_t i;
13531 enum machine_mode tmode;
13532 enum machine_mode mode0;
13533 enum machine_mode mode1;
13534 enum machine_mode mode2;
13535
13536 switch (fcode)
13537 {
13538 case ARM_BUILTIN_TEXTRMSB:
13539 case ARM_BUILTIN_TEXTRMUB:
13540 case ARM_BUILTIN_TEXTRMSH:
13541 case ARM_BUILTIN_TEXTRMUH:
13542 case ARM_BUILTIN_TEXTRMSW:
13543 case ARM_BUILTIN_TEXTRMUW:
13544 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
13545 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
13546 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
13547 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
13548 : CODE_FOR_iwmmxt_textrmw);
13549
13550 arg0 = CALL_EXPR_ARG (exp, 0);
13551 arg1 = CALL_EXPR_ARG (exp, 1);
13552 op0 = expand_normal (arg0);
13553 op1 = expand_normal (arg1);
13554 tmode = insn_data[icode].operand[0].mode;
13555 mode0 = insn_data[icode].operand[1].mode;
13556 mode1 = insn_data[icode].operand[2].mode;
13557
13558 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13559 op0 = copy_to_mode_reg (mode0, op0);
13560 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13561 {
13562 /* @@@ better error message */
13563 error ("selector must be an immediate");
13564 return gen_reg_rtx (tmode);
13565 }
13566 if (target == 0
13567 || GET_MODE (target) != tmode
13568 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13569 target = gen_reg_rtx (tmode);
13570 pat = GEN_FCN (icode) (target, op0, op1);
13571 if (! pat)
13572 return 0;
13573 emit_insn (pat);
13574 return target;
13575
13576 case ARM_BUILTIN_TINSRB:
13577 case ARM_BUILTIN_TINSRH:
13578 case ARM_BUILTIN_TINSRW:
13579 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
13580 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
13581 : CODE_FOR_iwmmxt_tinsrw);
13582 arg0 = CALL_EXPR_ARG (exp, 0);
13583 arg1 = CALL_EXPR_ARG (exp, 1);
13584 arg2 = CALL_EXPR_ARG (exp, 2);
13585 op0 = expand_normal (arg0);
13586 op1 = expand_normal (arg1);
13587 op2 = expand_normal (arg2);
13588 tmode = insn_data[icode].operand[0].mode;
13589 mode0 = insn_data[icode].operand[1].mode;
13590 mode1 = insn_data[icode].operand[2].mode;
13591 mode2 = insn_data[icode].operand[3].mode;
13592
13593 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13594 op0 = copy_to_mode_reg (mode0, op0);
13595 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13596 op1 = copy_to_mode_reg (mode1, op1);
13597 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13598 {
13599 /* @@@ better error message */
13600 error ("selector must be an immediate");
13601 return const0_rtx;
13602 }
13603 if (target == 0
13604 || GET_MODE (target) != tmode
13605 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13606 target = gen_reg_rtx (tmode);
13607 pat = GEN_FCN (icode) (target, op0, op1, op2);
13608 if (! pat)
13609 return 0;
13610 emit_insn (pat);
13611 return target;
13612
13613 case ARM_BUILTIN_SETWCX:
13614 arg0 = CALL_EXPR_ARG (exp, 0);
13615 arg1 = CALL_EXPR_ARG (exp, 1);
13616 op0 = force_reg (SImode, expand_normal (arg0));
13617 op1 = expand_normal (arg1);
13618 emit_insn (gen_iwmmxt_tmcr (op1, op0));
13619 return 0;
13620
13621 case ARM_BUILTIN_GETWCX:
13622 arg0 = CALL_EXPR_ARG (exp, 0);
13623 op0 = expand_normal (arg0);
13624 target = gen_reg_rtx (SImode);
13625 emit_insn (gen_iwmmxt_tmrc (target, op0));
13626 return target;
13627
13628 case ARM_BUILTIN_WSHUFH:
13629 icode = CODE_FOR_iwmmxt_wshufh;
13630 arg0 = CALL_EXPR_ARG (exp, 0);
13631 arg1 = CALL_EXPR_ARG (exp, 1);
13632 op0 = expand_normal (arg0);
13633 op1 = expand_normal (arg1);
13634 tmode = insn_data[icode].operand[0].mode;
13635 mode1 = insn_data[icode].operand[1].mode;
13636 mode2 = insn_data[icode].operand[2].mode;
13637
13638 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13639 op0 = copy_to_mode_reg (mode1, op0);
13640 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13641 {
13642 /* @@@ better error message */
13643 error ("mask must be an immediate");
13644 return const0_rtx;
13645 }
13646 if (target == 0
13647 || GET_MODE (target) != tmode
13648 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13649 target = gen_reg_rtx (tmode);
13650 pat = GEN_FCN (icode) (target, op0, op1);
13651 if (! pat)
13652 return 0;
13653 emit_insn (pat);
13654 return target;
13655
13656 case ARM_BUILTIN_WSADB:
13657 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, exp, target);
13658 case ARM_BUILTIN_WSADH:
13659 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, exp, target);
13660 case ARM_BUILTIN_WSADBZ:
13661 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, exp, target);
13662 case ARM_BUILTIN_WSADHZ:
13663 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, exp, target);
13664
13665 /* Several three-argument builtins. */
13666 case ARM_BUILTIN_WMACS:
13667 case ARM_BUILTIN_WMACU:
13668 case ARM_BUILTIN_WALIGN:
13669 case ARM_BUILTIN_TMIA:
13670 case ARM_BUILTIN_TMIAPH:
13671 case ARM_BUILTIN_TMIATT:
13672 case ARM_BUILTIN_TMIATB:
13673 case ARM_BUILTIN_TMIABT:
13674 case ARM_BUILTIN_TMIABB:
13675 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
13676 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
13677 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
13678 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
13679 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
13680 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
13681 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
13682 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
13683 : CODE_FOR_iwmmxt_walign);
13684 arg0 = CALL_EXPR_ARG (exp, 0);
13685 arg1 = CALL_EXPR_ARG (exp, 1);
13686 arg2 = CALL_EXPR_ARG (exp, 2);
13687 op0 = expand_normal (arg0);
13688 op1 = expand_normal (arg1);
13689 op2 = expand_normal (arg2);
13690 tmode = insn_data[icode].operand[0].mode;
13691 mode0 = insn_data[icode].operand[1].mode;
13692 mode1 = insn_data[icode].operand[2].mode;
13693 mode2 = insn_data[icode].operand[3].mode;
13694
13695 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13696 op0 = copy_to_mode_reg (mode0, op0);
13697 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13698 op1 = copy_to_mode_reg (mode1, op1);
13699 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13700 op2 = copy_to_mode_reg (mode2, op2);
13701 if (target == 0
13702 || GET_MODE (target) != tmode
13703 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13704 target = gen_reg_rtx (tmode);
13705 pat = GEN_FCN (icode) (target, op0, op1, op2);
13706 if (! pat)
13707 return 0;
13708 emit_insn (pat);
13709 return target;
13710
13711 case ARM_BUILTIN_WZERO:
13712 target = gen_reg_rtx (DImode);
13713 emit_insn (gen_iwmmxt_clrdi (target));
13714 return target;
13715
13716 case ARM_BUILTIN_THREAD_POINTER:
13717 return arm_load_tp (target);
13718
13719 default:
13720 break;
13721 }
13722
13723 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13724 if (d->code == (const enum arm_builtins) fcode)
13725 return arm_expand_binop_builtin (d->icode, exp, target);
13726
13727 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13728 if (d->code == (const enum arm_builtins) fcode)
13729 return arm_expand_unop_builtin (d->icode, exp, target, 0);
13730
13731 /* @@@ Should really do something sensible here. */
13732 return NULL_RTX;
13733 }
13734 \f
13735 /* Return the number (counting from 0) of
13736 the least significant set bit in MASK. */
13737
13738 inline static int
13739 number_of_first_bit_set (unsigned mask)
13740 {
13741 int bit;
13742
13743 for (bit = 0;
13744 (mask & (1 << bit)) == 0;
13745 ++bit)
13746 continue;
13747
13748 return bit;
13749 }
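/* Worked example (illustration only): number_of_first_bit_set (0x28) is 3,
   since 0x28 is binary 101000 and bit 3 is its lowest set bit.  The loop
   is a count-trailing-zeros operation and assumes MASK is nonzero; it
   would loop forever on zero.  */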
13750
13751 /* Emit code to push or pop registers to or from the stack. F is the
13752 assembly file. MASK is the registers to push or pop. PUSH is
13753 nonzero if we should push, and zero if we should pop. For debugging
13754 output, if pushing, adjust CFA_OFFSET by the amount of space added
13755 to the stack. REAL_REGS should have the same number of bits set as
13756 MASK, and will be used instead (in the same order) to describe which
13757 registers were saved - this is used to mark the save slots when we
13758 push high registers after moving them to low registers. */
13759 static void
13760 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
13761 unsigned long real_regs)
13762 {
13763 int regno;
13764 int lo_mask = mask & 0xFF;
13765 int pushed_words = 0;
13766
13767 gcc_assert (mask);
13768
13769 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
13770 {
13771 /* Special case. Do not generate a POP PC statement here; do it in
13772 thumb_exit (). */
13773 thumb_exit (f, -1);
13774 return;
13775 }
13776
13777 if (ARM_EABI_UNWIND_TABLES && push)
13778 {
13779 fprintf (f, "\t.save\t{");
13780 for (regno = 0; regno < 15; regno++)
13781 {
13782 if (real_regs & (1 << regno))
13783 {
13784 if (real_regs & ((1 << regno) -1))
13785 fprintf (f, ", ");
13786 asm_fprintf (f, "%r", regno);
13787 }
13788 }
13789 fprintf (f, "}\n");
13790 }
13791
13792 fprintf (f, "\t%s\t{", push ? "push" : "pop");
13793
13794 /* Look at the low registers first. */
13795 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
13796 {
13797 if (lo_mask & 1)
13798 {
13799 asm_fprintf (f, "%r", regno);
13800
13801 if ((lo_mask & ~1) != 0)
13802 fprintf (f, ", ");
13803
13804 pushed_words++;
13805 }
13806 }
13807
13808 if (push && (mask & (1 << LR_REGNUM)))
13809 {
13810 /* Catch pushing the LR. */
13811 if (mask & 0xFF)
13812 fprintf (f, ", ");
13813
13814 asm_fprintf (f, "%r", LR_REGNUM);
13815
13816 pushed_words++;
13817 }
13818 else if (!push && (mask & (1 << PC_REGNUM)))
13819 {
13820 /* Catch popping the PC. */
13821 if (TARGET_INTERWORK || TARGET_BACKTRACE
13822 || current_function_calls_eh_return)
13823 {
13824 /* The PC is never popped directly; instead
13825 it is popped into r3 and then BX is used. */
13826 fprintf (f, "}\n");
13827
13828 thumb_exit (f, -1);
13829
13830 return;
13831 }
13832 else
13833 {
13834 if (mask & 0xFF)
13835 fprintf (f, ", ");
13836
13837 asm_fprintf (f, "%r", PC_REGNUM);
13838 }
13839 }
13840
13841 fprintf (f, "}\n");
13842
13843 if (push && pushed_words && dwarf2out_do_frame ())
13844 {
13845 char *l = dwarf2out_cfi_label ();
13846 int pushed_mask = real_regs;
13847
13848 *cfa_offset += pushed_words * 4;
13849 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
13850
13851 pushed_words = 0;
13852 pushed_mask = real_regs;
13853 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
13854 {
13855 if (pushed_mask & 1)
13856 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
13857 }
13858 }
13859 }
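/* Illustrative example (assumed typical output, not taken from a
   testcase): a call such as

       thumb_pushpop (f, (1 << 4) | (1 << 5) | (1 << LR_REGNUM), 1,
                      &cfa_offset, (1 << 4) | (1 << 5) | (1 << LR_REGNUM));

   would emit roughly

       .save {r4, r5, lr}        (only when ARM_EABI_UNWIND_TABLES)
       push  {r4, r5, lr}

   and, when dwarf2out_do_frame () is true, advance *cfa_offset by 12 and
   record the three save slots for the DWARF frame information.  */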
13860
13861 /* Generate code to return from a thumb function.
13862 If 'reg_containing_return_addr' is -1, then the return address is
13863 actually on the stack, at the stack pointer. */
13864 static void
13865 thumb_exit (FILE *f, int reg_containing_return_addr)
13866 {
13867 unsigned regs_available_for_popping;
13868 unsigned regs_to_pop;
13869 int pops_needed;
13870 unsigned available;
13871 unsigned required;
13872 int mode;
13873 int size;
13874 int restore_a4 = FALSE;
13875
13876 /* Compute the registers we need to pop. */
13877 regs_to_pop = 0;
13878 pops_needed = 0;
13879
13880 if (reg_containing_return_addr == -1)
13881 {
13882 regs_to_pop |= 1 << LR_REGNUM;
13883 ++pops_needed;
13884 }
13885
13886 if (TARGET_BACKTRACE)
13887 {
13888 /* Restore the (ARM) frame pointer and stack pointer. */
13889 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13890 pops_needed += 2;
13891 }
13892
13893 /* If there is nothing to pop then just emit the BX instruction and
13894 return. */
13895 if (pops_needed == 0)
13896 {
13897 if (current_function_calls_eh_return)
13898 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13899
13900 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13901 return;
13902 }
13903 /* Otherwise if we are not supporting interworking and we have not created
13904 a backtrace structure and the function was not entered in ARM mode then
13905 just pop the return address straight into the PC. */
13906 else if (!TARGET_INTERWORK
13907 && !TARGET_BACKTRACE
13908 && !is_called_in_ARM_mode (current_function_decl)
13909 && !current_function_calls_eh_return)
13910 {
13911 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13912 return;
13913 }
13914
13915 /* Find out how many of the (return) argument registers we can corrupt. */
13916 regs_available_for_popping = 0;
13917
13918 /* If returning via __builtin_eh_return, the bottom three registers
13919 all contain information needed for the return. */
13920 if (current_function_calls_eh_return)
13921 size = 12;
13922 else
13923 {
13924 /* Deduce the registers used from the function's
13925 return value. This is more reliable than examining
13926 df_regs_ever_live_p () because that will be set if the register is
13927 ever used in the function, not just if the register is used
13928 to hold a return value. */
13929
13930 if (current_function_return_rtx != 0)
13931 mode = GET_MODE (current_function_return_rtx);
13932 else
13933 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13934
13935 size = GET_MODE_SIZE (mode);
13936
13937 if (size == 0)
13938 {
13939 /* In a void function we can use any argument register.
13940 In a function that returns a structure on the stack
13941 we can use the second and third argument registers. */
13942 if (mode == VOIDmode)
13943 regs_available_for_popping =
13944 (1 << ARG_REGISTER (1))
13945 | (1 << ARG_REGISTER (2))
13946 | (1 << ARG_REGISTER (3));
13947 else
13948 regs_available_for_popping =
13949 (1 << ARG_REGISTER (2))
13950 | (1 << ARG_REGISTER (3));
13951 }
13952 else if (size <= 4)
13953 regs_available_for_popping =
13954 (1 << ARG_REGISTER (2))
13955 | (1 << ARG_REGISTER (3));
13956 else if (size <= 8)
13957 regs_available_for_popping =
13958 (1 << ARG_REGISTER (3));
13959 }
13960
13961 /* Match registers to be popped with registers into which we pop them. */
13962 for (available = regs_available_for_popping,
13963 required = regs_to_pop;
13964 required != 0 && available != 0;
13965 available &= ~(available & - available),
13966 required &= ~(required & - required))
13967 -- pops_needed;
13968
13969 /* If we have any popping registers left over, remove them. */
13970 if (available > 0)
13971 regs_available_for_popping &= ~available;
13972
13973 /* Otherwise if we need another popping register we can use
13974 the fourth argument register. */
13975 else if (pops_needed)
13976 {
13977 /* If we have not found any free argument registers and
13978 reg a4 contains the return address, we must move it. */
13979 if (regs_available_for_popping == 0
13980 && reg_containing_return_addr == LAST_ARG_REGNUM)
13981 {
13982 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13983 reg_containing_return_addr = LR_REGNUM;
13984 }
13985 else if (size > 12)
13986 {
13987 /* Register a4 is being used to hold part of the return value,
13988 but we have dire need of a free, low register. */
13989 restore_a4 = TRUE;
13990
13991 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
13992 }
13993
13994 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13995 {
13996 /* The fourth argument register is available. */
13997 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13998
13999 --pops_needed;
14000 }
14001 }
14002
14003 /* Pop as many registers as we can. */
14004 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14005 regs_available_for_popping);
14006
14007 /* Process the registers we popped. */
14008 if (reg_containing_return_addr == -1)
14009 {
14010 /* The return address was popped into the lowest numbered register. */
14011 regs_to_pop &= ~(1 << LR_REGNUM);
14012
14013 reg_containing_return_addr =
14014 number_of_first_bit_set (regs_available_for_popping);
14015
14016 /* Remove this register from the mask of available registers, so that
14017 the return address will not be corrupted by further pops. */
14018 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
14019 }
14020
14021 /* If we popped other registers then handle them here. */
14022 if (regs_available_for_popping)
14023 {
14024 int frame_pointer;
14025
14026 /* Work out which register currently contains the frame pointer. */
14027 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
14028
14029 /* Move it into the correct place. */
14030 asm_fprintf (f, "\tmov\t%r, %r\n",
14031 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
14032
14033 /* (Temporarily) remove it from the mask of popped registers. */
14034 regs_available_for_popping &= ~(1 << frame_pointer);
14035 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
14036
14037 if (regs_available_for_popping)
14038 {
14039 int stack_pointer;
14040
14041 /* We popped the stack pointer as well;
14042 find the register that contains it. */
14043 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
14044
14045 /* Move it into the stack register. */
14046 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
14047
14048 /* At this point we have popped all necessary registers, so
14049 do not worry about restoring regs_available_for_popping
14050 to its correct value:
14051
14052 assert (pops_needed == 0)
14053 assert (regs_available_for_popping == (1 << frame_pointer))
14054 assert (regs_to_pop == (1 << STACK_POINTER)) */
14055 }
14056 else
14057 {
14058 /* Since we have just moved the popped value into the frame
14059 pointer, the popping register is available for reuse, and
14060 we know that we still have the stack pointer left to pop. */
14061 regs_available_for_popping |= (1 << frame_pointer);
14062 }
14063 }
14064
14065 /* If we still have registers left on the stack, but we no longer have
14066 any registers into which we can pop them, then we must move the return
14067 address into the link register and make available the register that
14068 contained it. */
14069 if (regs_available_for_popping == 0 && pops_needed > 0)
14070 {
14071 regs_available_for_popping |= 1 << reg_containing_return_addr;
14072
14073 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
14074 reg_containing_return_addr);
14075
14076 reg_containing_return_addr = LR_REGNUM;
14077 }
14078
14079 /* If we have registers left on the stack then pop some more.
14080 We know that at most we will want to pop FP and SP. */
14081 if (pops_needed > 0)
14082 {
14083 int popped_into;
14084 int move_to;
14085
14086 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14087 regs_available_for_popping);
14088
14089 /* We have popped either FP or SP.
14090 Move whichever one it is into the correct register. */
14091 popped_into = number_of_first_bit_set (regs_available_for_popping);
14092 move_to = number_of_first_bit_set (regs_to_pop);
14093
14094 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
14095
14096 regs_to_pop &= ~(1 << move_to);
14097
14098 --pops_needed;
14099 }
14100
14101 /* If we still have not popped everything then we must have only
14102 had one register available to us and we are now popping the SP. */
14103 if (pops_needed > 0)
14104 {
14105 int popped_into;
14106
14107 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14108 regs_available_for_popping);
14109
14110 popped_into = number_of_first_bit_set (regs_available_for_popping);
14111
14112 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
14113 /*
14114 assert (regs_to_pop == (1 << STACK_POINTER))
14115 assert (pops_needed == 1)
14116 */
14117 }
14118
14119 /* If necessary restore the a4 register. */
14120 if (restore_a4)
14121 {
14122 if (reg_containing_return_addr != LR_REGNUM)
14123 {
14124 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
14125 reg_containing_return_addr = LR_REGNUM;
14126 }
14127
14128 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
14129 }
14130
14131 if (current_function_calls_eh_return)
14132 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
14133
14134 /* Return to caller. */
14135 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
14136 }
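/* Illustrative summary of the common case: for an ordinary function whose
   return address is still on the stack (reg_containing_return_addr == -1)
   and which needs no interworking, backtrace structure or eh_return
   handling, the code above emits just

       pop {pc}

   The register-juggling paths are only needed when the return address must
   end up in a register for a BX, or when FP and SP also have to be popped
   for TARGET_BACKTRACE.  */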
14137
14138 \f
14139 void
14140 thumb1_final_prescan_insn (rtx insn)
14141 {
14142 if (flag_print_asm_name)
14143 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
14144 INSN_ADDRESSES (INSN_UID (insn)));
14145 }
14146
14147 int
14148 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
14149 {
14150 unsigned HOST_WIDE_INT mask = 0xff;
14151 int i;
14152
14153 if (val == 0) /* XXX */
14154 return 0;
14155
14156 for (i = 0; i < 25; i++)
14157 if ((val & (mask << i)) == val)
14158 return 1;
14159
14160 return 0;
14161 }
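/* Worked examples (illustration only): 0x00ff0000 passes the test with
   i == 16 and 0x00001fe0 with i == 5, since each fits entirely within an
   8-bit field shifted left by a constant; 0x101 fails, because its set
   bits span nine positions and no single shifted 0xff mask covers them.  */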
14162
14163 /* Returns nonzero if the current function contains,
14164 or might contain, a far jump. */
14165 static int
14166 thumb_far_jump_used_p (void)
14167 {
14168 rtx insn;
14169
14170 /* This test is only important for leaf functions. */
14171 /* assert (!leaf_function_p ()); */
14172
14173 /* If we have already decided that far jumps may be used,
14174 do not bother checking again, and always return true even if
14175 it turns out that they are not being used. Once we have made
14176 the decision that far jumps are present (and that hence the link
14177 register will be pushed onto the stack) we cannot go back on it. */
14178 if (cfun->machine->far_jump_used)
14179 return 1;
14180
14181 /* If this function is not being called from the prologue/epilogue
14182 generation code then it must be being called from the
14183 INITIAL_ELIMINATION_OFFSET macro. */
14184 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
14185 {
14186 /* In this case we know that we are being asked about the elimination
14187 of the arg pointer register. If that register is not being used,
14188 then there are no arguments on the stack, and we do not have to
14189 worry that a far jump might force the prologue to push the link
14190 register, changing the stack offsets. In this case we can just
14191 return false, since the presence of far jumps in the function will
14192 not affect stack offsets.
14193
14194 If the arg pointer is live (or if it was live, but has now been
14195 eliminated and so set to dead) then we do have to test to see if
14196 the function might contain a far jump. This test can lead to some
14197 false positives, since before reload is completed, the length of
14198 branch instructions is not known, so gcc defaults to returning their
14199 longest length, which in turn sets the far jump attribute to true.
14200
14201 A false positive will not result in bad code being generated, but it
14202 will result in a needless push and pop of the link register. We
14203 hope that this does not occur too often.
14204
14205 If we need doubleword stack alignment this could affect the other
14206 elimination offsets so we can't risk getting it wrong. */
14207 if (df_regs_ever_live_p (ARG_POINTER_REGNUM))
14208 cfun->machine->arg_pointer_live = 1;
14209 else if (!cfun->machine->arg_pointer_live)
14210 return 0;
14211 }
14212
14213 /* Check to see if the function contains a branch
14214 insn with the far jump attribute set. */
14215 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14216 {
14217 if (GET_CODE (insn) == JUMP_INSN
14218 /* Ignore tablejump patterns. */
14219 && GET_CODE (PATTERN (insn)) != ADDR_VEC
14220 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
14221 && get_attr_far_jump (insn) == FAR_JUMP_YES
14222 )
14223 {
14224 /* Record the fact that we have decided that
14225 the function does use far jumps. */
14226 cfun->machine->far_jump_used = 1;
14227 return 1;
14228 }
14229 }
14230
14231 return 0;
14232 }
14233
14234 /* Return nonzero if FUNC must be entered in ARM mode. */
14235 int
14236 is_called_in_ARM_mode (tree func)
14237 {
14238 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
14239
14240 /* Ignore the problem about functions whose address is taken. */
14241 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
14242 return TRUE;
14243
14244 #ifdef ARM_PE
14245 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
14246 #else
14247 return FALSE;
14248 #endif
14249 }
14250
14251 /* The bits which aren't usefully expanded as rtl. */
14252 const char *
14253 thumb_unexpanded_epilogue (void)
14254 {
14255 int regno;
14256 unsigned long live_regs_mask = 0;
14257 int high_regs_pushed = 0;
14258 int had_to_push_lr;
14259 int size;
14260
14261 if (return_used_this_function)
14262 return "";
14263
14264 if (IS_NAKED (arm_current_func_type ()))
14265 return "";
14266
14267 live_regs_mask = thumb1_compute_save_reg_mask ();
14268 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14269
14270 /* Deduce the registers used from the function's return value.
14271 This is more reliable than examining df_regs_ever_live_p () because that
14272 will be set if the register is ever used in the function, not just if
14273 the register is used to hold a return value. */
14274 size = arm_size_return_regs ();
14275
14276 /* The prolog may have pushed some high registers to use as
14277 work registers. e.g. the testsuite file:
14278 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
14279 compiles to produce:
14280 push {r4, r5, r6, r7, lr}
14281 mov r7, r9
14282 mov r6, r8
14283 push {r6, r7}
14284 as part of the prolog. We have to undo that pushing here. */
14285
14286 if (high_regs_pushed)
14287 {
14288 unsigned long mask = live_regs_mask & 0xff;
14289 int next_hi_reg;
14290
14291 /* The available low registers depend on the size of the value we are
14292 returning. */
14293 if (size <= 12)
14294 mask |= 1 << 3;
14295 if (size <= 8)
14296 mask |= 1 << 2;
14297
14298 if (mask == 0)
14299 /* Oh dear! We have no low registers into which we can pop
14300 high registers! */
14301 internal_error
14302 ("no low registers available for popping high registers");
14303
14304 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
14305 if (live_regs_mask & (1 << next_hi_reg))
14306 break;
14307
14308 while (high_regs_pushed)
14309 {
14310 /* Find lo register(s) into which the high register(s) can
14311 be popped. */
14312 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
14313 {
14314 if (mask & (1 << regno))
14315 high_regs_pushed--;
14316 if (high_regs_pushed == 0)
14317 break;
14318 }
14319
14320 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
14321
14322 /* Pop the values into the low register(s). */
14323 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
14324
14325 /* Move the value(s) into the high registers. */
14326 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
14327 {
14328 if (mask & (1 << regno))
14329 {
14330 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
14331 regno);
14332
14333 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
14334 if (live_regs_mask & (1 << next_hi_reg))
14335 break;
14336 }
14337 }
14338 }
14339 live_regs_mask &= ~0x0f00;
14340 }
14341
14342 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
14343 live_regs_mask &= 0xff;
14344
14345 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
14346 {
14347 /* Pop the return address into the PC. */
14348 if (had_to_push_lr)
14349 live_regs_mask |= 1 << PC_REGNUM;
14350
14351 /* Either no argument registers were pushed or a backtrace
14352 structure was created which includes an adjusted stack
14353 pointer, so just pop everything. */
14354 if (live_regs_mask)
14355 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
14356 live_regs_mask);
14357
14358 /* We have either just popped the return address into the
14359 PC or it was kept in LR for the entire function. */
14360 if (!had_to_push_lr)
14361 thumb_exit (asm_out_file, LR_REGNUM);
14362 }
14363 else
14364 {
14365 /* Pop everything but the return address. */
14366 if (live_regs_mask)
14367 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
14368 live_regs_mask);
14369
14370 if (had_to_push_lr)
14371 {
14372 if (size > 12)
14373 {
14374 /* We have no free low regs, so save one. */
14375 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
14376 LAST_ARG_REGNUM);
14377 }
14378
14379 /* Get the return address into a temporary register. */
14380 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
14381 1 << LAST_ARG_REGNUM);
14382
14383 if (size > 12)
14384 {
14385 /* Move the return address to lr. */
14386 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
14387 LAST_ARG_REGNUM);
14388 /* Restore the low register. */
14389 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
14390 IP_REGNUM);
14391 regno = LR_REGNUM;
14392 }
14393 else
14394 regno = LAST_ARG_REGNUM;
14395 }
14396 else
14397 regno = LR_REGNUM;
14398
14399 /* Remove the argument registers that were pushed onto the stack. */
14400 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
14401 SP_REGNUM, SP_REGNUM,
14402 current_function_pretend_args_size);
14403
14404 thumb_exit (asm_out_file, regno);
14405 }
14406
14407 return "";
14408 }
14409
14410 /* Functions to save and restore machine-specific function data. */
14411 static struct machine_function *
14412 arm_init_machine_status (void)
14413 {
14414 struct machine_function *machine;
14415 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
14416
14417 #if ARM_FT_UNKNOWN != 0
14418 machine->func_type = ARM_FT_UNKNOWN;
14419 #endif
14420 return machine;
14421 }
14422
14423 /* Return an RTX indicating where the return address to the
14424 calling function can be found. */
14425 rtx
14426 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
14427 {
14428 if (count != 0)
14429 return NULL_RTX;
14430
14431 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
14432 }
14433
14434 /* Do anything needed before RTL is emitted for each function. */
14435 void
14436 arm_init_expanders (void)
14437 {
14438 /* Arrange to initialize and mark the machine per-function status. */
14439 init_machine_status = arm_init_machine_status;
14440
14441 /* This is to stop the combine pass optimizing away the alignment
14442 adjustment of va_arg. */
14443 /* ??? It is claimed that this should not be necessary. */
14444 if (cfun)
14445 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
14446 }
14447
14448
14449 /* Like arm_compute_initial_elimination_offset. Simpler because there
14450 isn't an ABI specified frame pointer for Thumb. Instead, we set it
14451 to point at the base of the local variables after static stack
14452 space for a function has been allocated. */
14453
14454 HOST_WIDE_INT
14455 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
14456 {
14457 arm_stack_offsets *offsets;
14458
14459 offsets = arm_get_frame_offsets ();
14460
14461 switch (from)
14462 {
14463 case ARG_POINTER_REGNUM:
14464 switch (to)
14465 {
14466 case STACK_POINTER_REGNUM:
14467 return offsets->outgoing_args - offsets->saved_args;
14468
14469 case FRAME_POINTER_REGNUM:
14470 return offsets->soft_frame - offsets->saved_args;
14471
14472 case ARM_HARD_FRAME_POINTER_REGNUM:
14473 return offsets->saved_regs - offsets->saved_args;
14474
14475 case THUMB_HARD_FRAME_POINTER_REGNUM:
14476 return offsets->locals_base - offsets->saved_args;
14477
14478 default:
14479 gcc_unreachable ();
14480 }
14481 break;
14482
14483 case FRAME_POINTER_REGNUM:
14484 switch (to)
14485 {
14486 case STACK_POINTER_REGNUM:
14487 return offsets->outgoing_args - offsets->soft_frame;
14488
14489 case ARM_HARD_FRAME_POINTER_REGNUM:
14490 return offsets->saved_regs - offsets->soft_frame;
14491
14492 case THUMB_HARD_FRAME_POINTER_REGNUM:
14493 return offsets->locals_base - offsets->soft_frame;
14494
14495 default:
14496 gcc_unreachable ();
14497 }
14498 break;
14499
14500 default:
14501 gcc_unreachable ();
14502 }
14503 }
14504
14505 /* Generate the rest of a function's prologue. */
14506 void
14507 thumb1_expand_prologue (void)
14508 {
14509 rtx insn, dwarf;
14510
14511 HOST_WIDE_INT amount;
14512 arm_stack_offsets *offsets;
14513 unsigned long func_type;
14514 int regno;
14515 unsigned long live_regs_mask;
14516
14517 func_type = arm_current_func_type ();
14518
14519 /* Naked functions don't have prologues. */
14520 if (IS_NAKED (func_type))
14521 return;
14522
14523 if (IS_INTERRUPT (func_type))
14524 {
14525 error ("interrupt Service Routines cannot be coded in Thumb mode");
14526 return;
14527 }
14528
14529 live_regs_mask = thumb1_compute_save_reg_mask ();
14530 /* Load the pic register before setting the frame pointer,
14531 so we can use r7 as a temporary work register. */
14532 if (flag_pic && arm_pic_register != INVALID_REGNUM)
14533 arm_load_pic_register (live_regs_mask);
14534
14535 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
14536 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
14537 stack_pointer_rtx);
14538
14539 offsets = arm_get_frame_offsets ();
14540 amount = offsets->outgoing_args - offsets->saved_regs;
14541 if (amount)
14542 {
14543 if (amount < 512)
14544 {
14545 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
14546 GEN_INT (- amount)));
14547 RTX_FRAME_RELATED_P (insn) = 1;
14548 }
14549 else
14550 {
14551 rtx reg;
14552
14553 /* The stack decrement is too big for an immediate value in a single
14554 insn. In theory we could issue multiple subtracts, but after
14555 three of them it becomes more space efficient to place the full
14556 value in the constant pool and load into a register. (Also the
14557 ARM debugger really likes to see only one stack decrement per
14558 function). So instead we look for a scratch register into which
14559 we can load the decrement, and then we subtract this from the
14560 stack pointer. Unfortunately on the thumb the only available
14561 scratch registers are the argument registers, and we cannot use
14562 these as they may hold arguments to the function. Instead we
14563 attempt to locate a call preserved register which is used by this
14564 function. If we can find one, then we know that it will have
14565 been pushed at the start of the prologue and so we can corrupt
14566 it now. */
14567 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
14568 if (live_regs_mask & (1 << regno)
14569 && !(frame_pointer_needed
14570 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
14571 break;
14572
14573 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
14574 {
14575 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
14576
14577 /* Choose an arbitrary, non-argument low register. */
14578 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
14579
14580 /* Save it by copying it into a high, scratch register. */
14581 emit_insn (gen_movsi (spare, reg));
14582 /* Add a USE to stop propagate_one_insn() from barfing. */
14583 emit_insn (gen_prologue_use (spare));
14584
14585 /* Decrement the stack. */
14586 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
14587 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
14588 stack_pointer_rtx, reg));
14589 RTX_FRAME_RELATED_P (insn) = 1;
14590 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14591 plus_constant (stack_pointer_rtx,
14592 -amount));
14593 RTX_FRAME_RELATED_P (dwarf) = 1;
14594 REG_NOTES (insn)
14595 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
14596 REG_NOTES (insn));
14597
14598 /* Restore the low register's original value. */
14599 emit_insn (gen_movsi (reg, spare));
14600
14601 /* Emit a USE of the restored scratch register, so that flow
14602 analysis will not consider the restore redundant. The
14603 register won't be used again in this function and isn't
14604 restored by the epilogue. */
14605 emit_insn (gen_prologue_use (reg));
14606 }
14607 else
14608 {
14609 reg = gen_rtx_REG (SImode, regno);
14610
14611 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
14612
14613 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
14614 stack_pointer_rtx, reg));
14615 RTX_FRAME_RELATED_P (insn) = 1;
14616 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14617 plus_constant (stack_pointer_rtx,
14618 -amount));
14619 RTX_FRAME_RELATED_P (dwarf) = 1;
14620 REG_NOTES (insn)
14621 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
14622 REG_NOTES (insn));
14623 }
14624 }
14625 }
14626
14627 if (frame_pointer_needed)
14628 thumb_set_frame_pointer (offsets);
14629
14630 /* If we are profiling, make sure no instructions are scheduled before
14631 the call to mcount. Similarly if the user has requested no
14632 scheduling in the prolog. Similarly if we want non-call exceptions
14633 using the EABI unwinder, to prevent faulting instructions from being
14634 swapped with a stack adjustment. */
14635 if (current_function_profile || !TARGET_SCHED_PROLOG
14636 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
14637 emit_insn (gen_blockage ());
14638
14639 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
14640 if (live_regs_mask & 0xff)
14641 cfun->machine->lr_save_eliminated = 0;
14642 }
14643
14644
14645 void
14646 thumb1_expand_epilogue (void)
14647 {
14648 HOST_WIDE_INT amount;
14649 arm_stack_offsets *offsets;
14650 int regno;
14651
14652 /* Naked functions don't have prologues. */
14653 if (IS_NAKED (arm_current_func_type ()))
14654 return;
14655
14656 offsets = arm_get_frame_offsets ();
14657 amount = offsets->outgoing_args - offsets->saved_regs;
14658
14659 if (frame_pointer_needed)
14660 {
14661 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
14662 amount = offsets->locals_base - offsets->saved_regs;
14663 }
14664
14665 if (amount)
14666 {
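      /* A small adjustment fits in an immediate add to SP; anything larger
	 has to be loaded into a register first.  */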
14667 if (amount < 512)
14668 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
14669 GEN_INT (amount)));
14670 else
14671 {
14672 /* r3 is always free in the epilogue. */
14673 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
14674
14675 emit_insn (gen_movsi (reg, GEN_INT (amount)));
14676 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
14677 }
14678 }
14679
14680 /* Emit a USE (stack_pointer_rtx), so that
14681 the stack adjustment will not be deleted. */
14682 emit_insn (gen_prologue_use (stack_pointer_rtx));
14683
14684 if (current_function_profile || !TARGET_SCHED_PROLOG)
14685 emit_insn (gen_blockage ());
14686
14687 /* Emit a clobber for each insn that will be restored in the epilogue,
14688 so that flow2 will get register lifetimes correct. */
14689 for (regno = 0; regno < 13; regno++)
14690 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
14691 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
14692
14693 if (! df_regs_ever_live_p (LR_REGNUM))
14694 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
14695 }
14696
14697 static void
14698 thumb1_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
14699 {
14700 unsigned long live_regs_mask = 0;
14701 unsigned long l_mask;
14702 unsigned high_regs_pushed = 0;
14703 int cfa_offset = 0;
14704 int regno;
14705
14706 if (IS_NAKED (arm_current_func_type ()))
14707 return;
14708
14709 if (is_called_in_ARM_mode (current_function_decl))
14710 {
14711 const char * name;
14712
14713 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
14714 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
14715 == SYMBOL_REF);
14716 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
14717
14718 /* Generate code sequence to switch us into Thumb mode. */
14719 /* The .code 32 directive has already been emitted by
14720 ASM_DECLARE_FUNCTION_NAME. */
14721 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
14722 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
14723
14724 /* Generate a label, so that the debugger will notice the
14725 change in instruction sets. This label is also used by
14726 the assembler to bypass the ARM code when this function
14727 is called from a Thumb encoded function elsewhere in the
14728 same file. Hence the definition of STUB_NAME here must
14729 agree with the definition in gas/config/tc-arm.c. */
14730
14731 #define STUB_NAME ".real_start_of"
14732
14733 fprintf (f, "\t.code\t16\n");
14734 #ifdef ARM_PE
14735 if (arm_dllexport_name_p (name))
14736 name = arm_strip_name_encoding (name);
14737 #endif
14738 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
14739 fprintf (f, "\t.thumb_func\n");
14740 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
14741 }
14742
14743 if (current_function_pretend_args_size)
14744 {
14745 /* Output unwind directive for the stack adjustment. */
14746 if (ARM_EABI_UNWIND_TABLES)
14747 fprintf (f, "\t.pad #%d\n",
14748 current_function_pretend_args_size);
14749
14750 if (cfun->machine->uses_anonymous_args)
14751 {
14752 int num_pushes;
14753
14754 fprintf (f, "\tpush\t{");
14755
14756 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
14757
14758 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
14759 regno <= LAST_ARG_REGNUM;
14760 regno++)
14761 asm_fprintf (f, "%r%s", regno,
14762 regno == LAST_ARG_REGNUM ? "" : ", ");
14763
14764 fprintf (f, "}\n");
14765 }
14766 else
14767 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
14768 SP_REGNUM, SP_REGNUM,
14769 current_function_pretend_args_size);
14770
14771 /* We don't need to record the stores for unwinding (would it
14772 help the debugger any if we did?), but record the change in
14773 the stack pointer. */
14774 if (dwarf2out_do_frame ())
14775 {
14776 char *l = dwarf2out_cfi_label ();
14777
14778 cfa_offset = cfa_offset + current_function_pretend_args_size;
14779 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14780 }
14781 }
14782
14783 /* Get the registers we are going to push. */
14784 live_regs_mask = thumb1_compute_save_reg_mask ();
14785 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
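  /* 0x40ff covers the low registers r0-r7 (bits 0-7) plus LR (bit 14),
     the only registers the 16-bit Thumb push instruction can store.  */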
14786 l_mask = live_regs_mask & 0x40ff;
14787 /* Then count how many other high registers will need to be pushed. */
14788 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14789
14790 if (TARGET_BACKTRACE)
14791 {
14792 unsigned offset;
14793 unsigned work_register;
14794
14795 /* We have been asked to create a stack backtrace structure.
14796 The code looks like this:
14797
14798 0 .align 2
14799 0 func:
14800 0 sub SP, #16 Reserve space for 4 registers.
14801 2 push {R7} Push low registers.
14802 4 add R7, SP, #20 Get the stack pointer before the push.
14803 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
14804 8 mov R7, PC Get hold of the start of this code plus 12.
14805 10 str R7, [SP, #16] Store it.
14806 12 mov R7, FP Get hold of the current frame pointer.
14807 14 str R7, [SP, #4] Store it.
14808 16 mov R7, LR Get hold of the current return address.
14809 18 str R7, [SP, #12] Store it.
14810 20 add R7, SP, #16 Point at the start of the backtrace structure.
14811 22 mov FP, R7 Put this value into the frame pointer. */
14812
14813 work_register = thumb_find_work_register (live_regs_mask);
14814
14815 if (ARM_EABI_UNWIND_TABLES)
14816 asm_fprintf (f, "\t.pad #16\n");
14817
14818 asm_fprintf
14819 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
14820 SP_REGNUM, SP_REGNUM);
14821
14822 if (dwarf2out_do_frame ())
14823 {
14824 char *l = dwarf2out_cfi_label ();
14825
14826 cfa_offset = cfa_offset + 16;
14827 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14828 }
14829
14830 if (l_mask)
14831 {
14832 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14833 offset = bit_count (l_mask) * UNITS_PER_WORD;
14834 }
14835 else
14836 offset = 0;
14837
14838 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14839 offset + 16 + current_function_pretend_args_size);
14840
14841 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14842 offset + 4);
14843
14844 /* Make sure that the instruction fetching the PC is in the right place
14845 to calculate "start of backtrace creation code + 12". */
14846 if (l_mask)
14847 {
14848 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14849 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14850 offset + 12);
14851 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14852 ARM_HARD_FRAME_POINTER_REGNUM);
14853 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14854 offset);
14855 }
14856 else
14857 {
14858 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14859 ARM_HARD_FRAME_POINTER_REGNUM);
14860 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14861 offset);
14862 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14863 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14864 offset + 12);
14865 }
14866
14867 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14868 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14869 offset + 8);
14870 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14871 offset + 12);
14872 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14873 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14874 }
14875 /* Optimization: If we are not pushing any low registers but we are going
14876 to push some high registers then delay our first push. This will just
14877 be a push of LR and we can combine it with the push of the first high
14878 register. */
14879 else if ((l_mask & 0xff) != 0
14880 || (high_regs_pushed == 0 && l_mask))
14881 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14882
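  /* High registers cannot be pushed directly by the Thumb-1 push
     instruction, so copy each one into a spare low register and push
     it from there.  */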
14883 if (high_regs_pushed)
14884 {
14885 unsigned pushable_regs;
14886 unsigned next_hi_reg;
14887
14888 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14889 if (live_regs_mask & (1 << next_hi_reg))
14890 break;
14891
14892 pushable_regs = l_mask & 0xff;
14893
14894 if (pushable_regs == 0)
14895 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14896
14897 while (high_regs_pushed > 0)
14898 {
14899 unsigned long real_regs_mask = 0;
14900
14901 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14902 {
14903 if (pushable_regs & (1 << regno))
14904 {
14905 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14906
14907 high_regs_pushed --;
14908 real_regs_mask |= (1 << next_hi_reg);
14909
14910 if (high_regs_pushed)
14911 {
14912 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14913 next_hi_reg --)
14914 if (live_regs_mask & (1 << next_hi_reg))
14915 break;
14916 }
14917 else
14918 {
14919 pushable_regs &= ~((1 << regno) - 1);
14920 break;
14921 }
14922 }
14923 }
14924
14925 /* If we had to find a work register and we have not yet
14926 saved the LR then add it to the list of regs to push. */
14927 if (l_mask == (1 << LR_REGNUM))
14928 {
14929 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14930 1, &cfa_offset,
14931 real_regs_mask | (1 << LR_REGNUM));
14932 l_mask = 0;
14933 }
14934 else
14935 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
14936 }
14937 }
14938 }
14939
14940 /* Handle the case of a double word load into a low register from
14941 a computed memory address. The computed address may involve a
14942 register which is overwritten by the load. */
14943 const char *
14944 thumb_load_double_from_address (rtx *operands)
14945 {
14946 rtx addr;
14947 rtx base;
14948 rtx offset;
14949 rtx arg1;
14950 rtx arg2;
14951
14952 gcc_assert (GET_CODE (operands[0]) == REG);
14953 gcc_assert (GET_CODE (operands[1]) == MEM);
14954
14955 /* Get the memory address. */
14956 addr = XEXP (operands[1], 0);
14957
14958 /* Work out how the memory address is computed. */
14959 switch (GET_CODE (addr))
14960 {
14961 case REG:
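      /* The address is a plain register.  If it is also the low half of
	 the destination, load the high word first so that the address is
	 not clobbered by the first load.  */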
14962 operands[2] = adjust_address (operands[1], SImode, 4);
14963
14964 if (REGNO (operands[0]) == REGNO (addr))
14965 {
14966 output_asm_insn ("ldr\t%H0, %2", operands);
14967 output_asm_insn ("ldr\t%0, %1", operands);
14968 }
14969 else
14970 {
14971 output_asm_insn ("ldr\t%0, %1", operands);
14972 output_asm_insn ("ldr\t%H0, %2", operands);
14973 }
14974 break;
14975
14976 case CONST:
14977 /* Compute <address> + 4 for the high order load. */
14978 operands[2] = adjust_address (operands[1], SImode, 4);
14979
14980 output_asm_insn ("ldr\t%0, %1", operands);
14981 output_asm_insn ("ldr\t%H0, %2", operands);
14982 break;
14983
14984 case PLUS:
14985 arg1 = XEXP (addr, 0);
14986 arg2 = XEXP (addr, 1);
14987
14988 if (CONSTANT_P (arg1))
14989 base = arg2, offset = arg1;
14990 else
14991 base = arg1, offset = arg2;
14992
14993 gcc_assert (GET_CODE (base) == REG);
14994
14995 /* Catch the case of <address> = <reg> + <reg> */
14996 if (GET_CODE (offset) == REG)
14997 {
14998 int reg_offset = REGNO (offset);
14999 int reg_base = REGNO (base);
15000 int reg_dest = REGNO (operands[0]);
15001
15002 /* Add the base and offset registers together into the
15003 higher destination register. */
15004 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
15005 reg_dest + 1, reg_base, reg_offset);
15006
15007 /* Load the lower destination register from the address in
15008 the higher destination register. */
15009 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
15010 reg_dest, reg_dest + 1);
15011
15012 /* Load the higher destination register from its own address
15013 plus 4. */
15014 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
15015 reg_dest + 1, reg_dest + 1);
15016 }
15017 else
15018 {
15019 /* Compute <address> + 4 for the high order load. */
15020 operands[2] = adjust_address (operands[1], SImode, 4);
15021
15022 /* If the computed address is held in the low order register
15023 then load the high order register first, otherwise always
15024 load the low order register first. */
15025 if (REGNO (operands[0]) == REGNO (base))
15026 {
15027 output_asm_insn ("ldr\t%H0, %2", operands);
15028 output_asm_insn ("ldr\t%0, %1", operands);
15029 }
15030 else
15031 {
15032 output_asm_insn ("ldr\t%0, %1", operands);
15033 output_asm_insn ("ldr\t%H0, %2", operands);
15034 }
15035 }
15036 break;
15037
15038 case LABEL_REF:
15039 /* With no registers to worry about we can just load the value
15040 directly. */
15041 operands[2] = adjust_address (operands[1], SImode, 4);
15042
15043 output_asm_insn ("ldr\t%H0, %2", operands);
15044 output_asm_insn ("ldr\t%0, %1", operands);
15045 break;
15046
15047 default:
15048 gcc_unreachable ();
15049 }
15050
15051 return "";
15052 }
15053
15054 const char *
15055 thumb_output_move_mem_multiple (int n, rtx *operands)
15056 {
15057 rtx tmp;
15058
15059 switch (n)
15060 {
15061 case 2:
15062 if (REGNO (operands[4]) > REGNO (operands[5]))
15063 {
15064 tmp = operands[4];
15065 operands[4] = operands[5];
15066 operands[5] = tmp;
15067 }
15068 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
15069 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
15070 break;
15071
15072 case 3:
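      /* Sort the three scratch registers into ascending order so that the
	 ldmia/stmia register lists are emitted in the conventional
	 ascending form.  */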
15073 if (REGNO (operands[4]) > REGNO (operands[5]))
15074 {
15075 tmp = operands[4];
15076 operands[4] = operands[5];
15077 operands[5] = tmp;
15078 }
15079 if (REGNO (operands[5]) > REGNO (operands[6]))
15080 {
15081 tmp = operands[5];
15082 operands[5] = operands[6];
15083 operands[6] = tmp;
15084 }
15085 if (REGNO (operands[4]) > REGNO (operands[5]))
15086 {
15087 tmp = operands[4];
15088 operands[4] = operands[5];
15089 operands[5] = tmp;
15090 }
15091
15092 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
15093 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
15094 break;
15095
15096 default:
15097 gcc_unreachable ();
15098 }
15099
15100 return "";
15101 }
15102
15103 /* Output a call-via instruction for thumb state. */
15104 const char *
15105 thumb_call_via_reg (rtx reg)
15106 {
15107 int regno = REGNO (reg);
15108 rtx *labelp;
15109
15110 gcc_assert (regno < LR_REGNUM);
15111
15112 /* If we are in the normal text section we can use a single instance
15113 per compilation unit. If we are doing function sections, then we need
15114 an entry per section, since we can't rely on reachability. */
15115 if (in_section == text_section)
15116 {
15117 thumb_call_reg_needed = 1;
15118
15119 if (thumb_call_via_label[regno] == NULL)
15120 thumb_call_via_label[regno] = gen_label_rtx ();
15121 labelp = thumb_call_via_label + regno;
15122 }
15123 else
15124 {
15125 if (cfun->machine->call_via[regno] == NULL)
15126 cfun->machine->call_via[regno] = gen_label_rtx ();
15127 labelp = cfun->machine->call_via + regno;
15128 }
15129
15130 output_asm_insn ("bl\t%a0", labelp);
15131 return "";
15132 }
15133
15134 /* Routines for generating rtl. */
15135 void
15136 thumb_expand_movmemqi (rtx *operands)
15137 {
15138 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
15139 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
15140 HOST_WIDE_INT len = INTVAL (operands[2]);
15141 HOST_WIDE_INT offset = 0;
15142
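  /* Copy as many 12-byte and 8-byte blocks as possible with the dedicated
     movmem patterns, which also advance the source and destination
     pointers; mop up the remainder with word, halfword and byte moves.  */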
15143 while (len >= 12)
15144 {
15145 emit_insn (gen_movmem12b (out, in, out, in));
15146 len -= 12;
15147 }
15148
15149 if (len >= 8)
15150 {
15151 emit_insn (gen_movmem8b (out, in, out, in));
15152 len -= 8;
15153 }
15154
15155 if (len >= 4)
15156 {
15157 rtx reg = gen_reg_rtx (SImode);
15158 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
15159 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
15160 len -= 4;
15161 offset += 4;
15162 }
15163
15164 if (len >= 2)
15165 {
15166 rtx reg = gen_reg_rtx (HImode);
15167 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
15168 plus_constant (in, offset))));
15169 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
15170 reg));
15171 len -= 2;
15172 offset += 2;
15173 }
15174
15175 if (len)
15176 {
15177 rtx reg = gen_reg_rtx (QImode);
15178 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
15179 plus_constant (in, offset))));
15180 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
15181 reg));
15182 }
15183 }
15184
15185 void
15186 thumb_reload_out_hi (rtx *operands)
15187 {
15188 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
15189 }
15190
15191 /* Handle reading a half-word from memory during reload. */
15192 void
15193 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
15194 {
15195 gcc_unreachable ();
15196 }
15197
15198 /* Return the length of a function name prefix
15199 that starts with the character 'c'. */
15200 static int
15201 arm_get_strip_length (int c)
15202 {
15203 switch (c)
15204 {
15205 ARM_NAME_ENCODING_LENGTHS
15206 default: return 0;
15207 }
15208 }
15209
15210 /* Return a pointer to a function's name with any
15211 and all prefix encodings stripped from it. */
15212 const char *
15213 arm_strip_name_encoding (const char *name)
15214 {
15215 int skip;
15216
15217 while ((skip = arm_get_strip_length (* name)))
15218 name += skip;
15219
15220 return name;
15221 }
15222
15223 /* If there is a '*' anywhere in the name's prefix, then
15224 emit the stripped name verbatim, otherwise prepend an
15225 underscore if leading underscores are being used. */
15226 void
15227 arm_asm_output_labelref (FILE *stream, const char *name)
15228 {
15229 int skip;
15230 int verbatim = 0;
15231
15232 while ((skip = arm_get_strip_length (* name)))
15233 {
15234 verbatim |= (*name == '*');
15235 name += skip;
15236 }
15237
15238 if (verbatim)
15239 fputs (name, stream);
15240 else
15241 asm_fprintf (stream, "%U%s", name);
15242 }
15243
15244 static void
15245 arm_file_start (void)
15246 {
15247 int val;
15248
15249 if (TARGET_UNIFIED_ASM)
15250 asm_fprintf (asm_out_file, "\t.syntax unified\n");
15251
15252 if (TARGET_BPABI)
15253 {
15254 const char *fpu_name;
15255 if (arm_select[0].string)
15256 asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
15257 else if (arm_select[1].string)
15258 asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
15259 else
15260 asm_fprintf (asm_out_file, "\t.cpu %s\n",
15261 all_cores[arm_default_cpu].name);
15262
15263 if (TARGET_SOFT_FLOAT)
15264 {
15265 if (TARGET_VFP)
15266 fpu_name = "softvfp";
15267 else
15268 fpu_name = "softfpa";
15269 }
15270 else
15271 {
15272 switch (arm_fpu_arch)
15273 {
15274 case FPUTYPE_FPA:
15275 fpu_name = "fpa";
15276 break;
15277 case FPUTYPE_FPA_EMU2:
15278 fpu_name = "fpe2";
15279 break;
15280 case FPUTYPE_FPA_EMU3:
15281 fpu_name = "fpe3";
15282 break;
15283 case FPUTYPE_MAVERICK:
15284 fpu_name = "maverick";
15285 break;
15286 case FPUTYPE_VFP:
15287 if (TARGET_HARD_FLOAT)
15288 asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
15289 if (TARGET_HARD_FLOAT_ABI)
15290 asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
15291 fpu_name = "vfp";
15292 break;
15293 default:
15294 abort();
15295 }
15296 }
15297 asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
15298
15299 /* Some of these attributes only apply when the corresponding features
15300 are used. However we don't have any easy way of figuring this out.
15301 Conservatively record the setting that would have been used. */
15302
15303 /* Tag_ABI_PCS_wchar_t. */
15304 asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
15305 (int)WCHAR_TYPE_SIZE / BITS_PER_UNIT);
15306
15307 /* Tag_ABI_FP_rounding. */
15308 if (flag_rounding_math)
15309 asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
15310 if (!flag_unsafe_math_optimizations)
15311 {
15312 	  /* Tag_ABI_FP_denormal.  */
15313 asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
15314 /* Tag_ABI_FP_exceptions. */
15315 asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
15316 }
15317 /* Tag_ABI_FP_user_exceptions. */
15318 if (flag_signaling_nans)
15319 asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
15320 /* Tag_ABI_FP_number_model. */
15321 asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
15322 flag_finite_math_only ? 1 : 3);
15323
15324 /* Tag_ABI_align8_needed. */
15325 asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
15326 /* Tag_ABI_align8_preserved. */
15327 asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
15328 /* Tag_ABI_enum_size. */
15329 asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
15330 flag_short_enums ? 1 : 2);
15331
15332 /* Tag_ABI_optimization_goals. */
15333 if (optimize_size)
15334 val = 4;
15335 else if (optimize >= 2)
15336 val = 2;
15337 else if (optimize)
15338 val = 1;
15339 else
15340 val = 6;
15341 asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
15342 }
15343 default_file_start();
15344 }
15345
15346 static void
15347 arm_file_end (void)
15348 {
15349 int regno;
15350
15351 if (NEED_INDICATE_EXEC_STACK)
15352 /* Add .note.GNU-stack. */
15353 file_end_indicate_exec_stack ();
15354
15355 if (! thumb_call_reg_needed)
15356 return;
15357
15358 switch_to_section (text_section);
15359 asm_fprintf (asm_out_file, "\t.code 16\n");
15360 ASM_OUTPUT_ALIGN (asm_out_file, 1);
15361
15362 for (regno = 0; regno < LR_REGNUM; regno++)
15363 {
15364 rtx label = thumb_call_via_label[regno];
15365
15366 if (label != 0)
15367 {
15368 targetm.asm_out.internal_label (asm_out_file, "L",
15369 CODE_LABEL_NUMBER (label));
15370 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
15371 }
15372 }
15373 }
15374
15375 rtx aof_pic_label;
15376
15377 #ifdef AOF_ASSEMBLER
15378 /* Special functions only needed when producing AOF syntax assembler. */
15379
15380 struct pic_chain
15381 {
15382 struct pic_chain * next;
15383 const char * symname;
15384 };
15385
15386 static struct pic_chain * aof_pic_chain = NULL;
15387
15388 rtx
15389 aof_pic_entry (rtx x)
15390 {
15391 struct pic_chain ** chainp;
15392 int offset;
15393
15394 if (aof_pic_label == NULL_RTX)
15395 {
15396 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
15397 }
15398
15399 for (offset = 0, chainp = &aof_pic_chain; *chainp;
15400 offset += 4, chainp = &(*chainp)->next)
15401 if ((*chainp)->symname == XSTR (x, 0))
15402 return plus_constant (aof_pic_label, offset);
15403
15404 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
15405 (*chainp)->next = NULL;
15406 (*chainp)->symname = XSTR (x, 0);
15407 return plus_constant (aof_pic_label, offset);
15408 }
15409
15410 void
15411 aof_dump_pic_table (FILE *f)
15412 {
15413 struct pic_chain * chain;
15414
15415 if (aof_pic_chain == NULL)
15416 return;
15417
15418 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
15419 PIC_OFFSET_TABLE_REGNUM,
15420 PIC_OFFSET_TABLE_REGNUM);
15421 fputs ("|x$adcons|\n", f);
15422
15423 for (chain = aof_pic_chain; chain; chain = chain->next)
15424 {
15425 fputs ("\tDCD\t", f);
15426 assemble_name (f, chain->symname);
15427 fputs ("\n", f);
15428 }
15429 }
15430
15431 int arm_text_section_count = 1;
15432
15433 /* A get_unnamed_section callback for switching to the text section. */
15434
15435 static void
15436 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
15437 {
15438 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
15439 arm_text_section_count++);
15440 if (flag_pic)
15441 fprintf (asm_out_file, ", PIC, REENTRANT");
15442 fprintf (asm_out_file, "\n");
15443 }
15444
15445 static int arm_data_section_count = 1;
15446
15447 /* A get_unnamed_section callback for switching to the data section. */
15448
15449 static void
15450 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
15451 {
15452 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
15453 arm_data_section_count++);
15454 }
15455
15456 /* Implement TARGET_ASM_INIT_SECTIONS.
15457
15458 AOF Assembler syntax is a nightmare when it comes to areas, since once
15459 we change from one area to another, we can't go back again. Instead,
15460 we must create a new area with the same attributes and add the new output
15461 to that. Unfortunately, there is nothing we can do here to guarantee that
15462 two areas with the same attributes will be linked adjacently in the
15463 resulting executable, so we have to be careful not to do pc-relative
15464 addressing across such boundaries. */
15465
15466 static void
15467 aof_asm_init_sections (void)
15468 {
15469 text_section = get_unnamed_section (SECTION_CODE,
15470 aof_output_text_section_asm_op, NULL);
15471 data_section = get_unnamed_section (SECTION_WRITE,
15472 aof_output_data_section_asm_op, NULL);
15473 readonly_data_section = text_section;
15474 }
15475
15476 void
15477 zero_init_section (void)
15478 {
15479 static int zero_init_count = 1;
15480
15481 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
15482 in_section = NULL;
15483 }
15484
15485 /* The AOF assembler is religiously strict about declarations of
15486 imported and exported symbols, so that it is impossible to declare
15487 a function as imported near the beginning of the file, and then to
15488 export it later on. It is, however, possible to delay the decision
15489 until all the functions in the file have been compiled. To get
15490 around this, we maintain a list of the imports and exports, and
15491 delete from it any that are subsequently defined. At the end of
15492 compilation we spit the remainder of the list out before the END
15493 directive. */
15494
15495 struct import
15496 {
15497 struct import * next;
15498 const char * name;
15499 };
15500
15501 static struct import * imports_list = NULL;
15502
15503 void
15504 aof_add_import (const char *name)
15505 {
15506 struct import * new;
15507
15508 for (new = imports_list; new; new = new->next)
15509 if (new->name == name)
15510 return;
15511
15512 new = (struct import *) xmalloc (sizeof (struct import));
15513 new->next = imports_list;
15514 imports_list = new;
15515 new->name = name;
15516 }
15517
15518 void
15519 aof_delete_import (const char *name)
15520 {
15521 struct import ** old;
15522
15523 for (old = &imports_list; *old; old = & (*old)->next)
15524 {
15525 if ((*old)->name == name)
15526 {
15527 *old = (*old)->next;
15528 return;
15529 }
15530 }
15531 }
15532
15533 int arm_main_function = 0;
15534
15535 static void
15536 aof_dump_imports (FILE *f)
15537 {
15538 /* The AOF assembler needs this to cause the startup code to be extracted
15539      from the library.  Bringing in __main causes the whole thing to work
15540 automagically. */
15541 if (arm_main_function)
15542 {
15543 switch_to_section (text_section);
15544 fputs ("\tIMPORT __main\n", f);
15545 fputs ("\tDCD __main\n", f);
15546 }
15547
15548 /* Now dump the remaining imports. */
15549 while (imports_list)
15550 {
15551 fprintf (f, "\tIMPORT\t");
15552 assemble_name (f, imports_list->name);
15553 fputc ('\n', f);
15554 imports_list = imports_list->next;
15555 }
15556 }
15557
15558 static void
15559 aof_globalize_label (FILE *stream, const char *name)
15560 {
15561 default_globalize_label (stream, name);
15562 if (! strcmp (name, "main"))
15563 arm_main_function = 1;
15564 }
15565
15566 static void
15567 aof_file_start (void)
15568 {
15569 fputs ("__r0\tRN\t0\n", asm_out_file);
15570 fputs ("__a1\tRN\t0\n", asm_out_file);
15571 fputs ("__a2\tRN\t1\n", asm_out_file);
15572 fputs ("__a3\tRN\t2\n", asm_out_file);
15573 fputs ("__a4\tRN\t3\n", asm_out_file);
15574 fputs ("__v1\tRN\t4\n", asm_out_file);
15575 fputs ("__v2\tRN\t5\n", asm_out_file);
15576 fputs ("__v3\tRN\t6\n", asm_out_file);
15577 fputs ("__v4\tRN\t7\n", asm_out_file);
15578 fputs ("__v5\tRN\t8\n", asm_out_file);
15579 fputs ("__v6\tRN\t9\n", asm_out_file);
15580 fputs ("__sl\tRN\t10\n", asm_out_file);
15581 fputs ("__fp\tRN\t11\n", asm_out_file);
15582 fputs ("__ip\tRN\t12\n", asm_out_file);
15583 fputs ("__sp\tRN\t13\n", asm_out_file);
15584 fputs ("__lr\tRN\t14\n", asm_out_file);
15585 fputs ("__pc\tRN\t15\n", asm_out_file);
15586 fputs ("__f0\tFN\t0\n", asm_out_file);
15587 fputs ("__f1\tFN\t1\n", asm_out_file);
15588 fputs ("__f2\tFN\t2\n", asm_out_file);
15589 fputs ("__f3\tFN\t3\n", asm_out_file);
15590 fputs ("__f4\tFN\t4\n", asm_out_file);
15591 fputs ("__f5\tFN\t5\n", asm_out_file);
15592 fputs ("__f6\tFN\t6\n", asm_out_file);
15593 fputs ("__f7\tFN\t7\n", asm_out_file);
15594 switch_to_section (text_section);
15595 }
15596
15597 static void
15598 aof_file_end (void)
15599 {
15600 if (flag_pic)
15601 aof_dump_pic_table (asm_out_file);
15602 arm_file_end ();
15603 aof_dump_imports (asm_out_file);
15604 fputs ("\tEND\n", asm_out_file);
15605 }
15606 #endif /* AOF_ASSEMBLER */
15607
15608 #ifndef ARM_PE
15609 /* Symbols in the text segment can be accessed without indirecting via the
15610 constant pool; it may take an extra binary operation, but this is still
15611 faster than indirecting via memory. Don't do this when not optimizing,
15612    since we won't be calculating all of the offsets necessary to do this
15613 simplification. */
15614
15615 static void
15616 arm_encode_section_info (tree decl, rtx rtl, int first)
15617 {
15618 /* This doesn't work with AOF syntax, since the string table may be in
15619 a different AREA. */
15620 #ifndef AOF_ASSEMBLER
15621 if (optimize > 0 && TREE_CONSTANT (decl))
15622 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
15623 #endif
15624
15625 default_encode_section_info (decl, rtl, first);
15626 }
15627 #endif /* !ARM_PE */
15628
15629 static void
15630 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
15631 {
15632 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
15633 && !strcmp (prefix, "L"))
15634 {
15635 arm_ccfsm_state = 0;
15636 arm_target_insn = NULL;
15637 }
15638 default_internal_label (stream, prefix, labelno);
15639 }
15640
15641 /* Output code to add DELTA to the first argument, and then jump
15642 to FUNCTION. Used for C++ multiple inheritance. */
15643 static void
15644 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
15645 HOST_WIDE_INT delta,
15646 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
15647 tree function)
15648 {
15649 static int thunk_label = 0;
15650 char label[256];
15651 char labelpc[256];
15652 int mi_delta = delta;
15653 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
15654 int shift = 0;
15655 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
15656 ? 1 : 0);
15657 if (mi_delta < 0)
15658 mi_delta = - mi_delta;
15659 /* When generating 16-bit thumb code, thunks are entered in arm mode. */
15660 if (TARGET_THUMB1)
15661 {
15662 int labelno = thunk_label++;
15663 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
15664 fputs ("\tldr\tr12, ", file);
15665 assemble_name (file, label);
15666 fputc ('\n', file);
15667 if (flag_pic)
15668 {
15669 /* If we are generating PIC, the ldr instruction below loads
15670 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
15671 the address of the add + 8, so we have:
15672
15673 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
15674 = target + 1.
15675
15676 Note that we have "+ 1" because some versions of GNU ld
15677 don't set the low bit of the result for R_ARM_REL32
15678 relocations against thumb function symbols. */
15679 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
15680 assemble_name (file, labelpc);
15681 fputs (":\n", file);
15682 fputs ("\tadd\tr12, pc, r12\n", file);
15683 }
15684 }
15685 /* TODO: Use movw/movt for large constants when available. */
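  /* Apply the delta eight bits at a time, each chunk being an ARM-style
     immediate (an 8-bit value at an even rotation).  */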
15686 while (mi_delta != 0)
15687 {
15688 if ((mi_delta & (3 << shift)) == 0)
15689 shift += 2;
15690 else
15691 {
15692 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
15693 mi_op, this_regno, this_regno,
15694 mi_delta & (0xff << shift));
15695 mi_delta &= ~(0xff << shift);
15696 shift += 8;
15697 }
15698 }
15699 if (TARGET_THUMB1)
15700 {
15701 fprintf (file, "\tbx\tr12\n");
15702 ASM_OUTPUT_ALIGN (file, 2);
15703 assemble_name (file, label);
15704 fputs (":\n", file);
15705 if (flag_pic)
15706 {
15707 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
15708 rtx tem = XEXP (DECL_RTL (function), 0);
15709 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
15710 tem = gen_rtx_MINUS (GET_MODE (tem),
15711 tem,
15712 gen_rtx_SYMBOL_REF (Pmode,
15713 ggc_strdup (labelpc)));
15714 assemble_integer (tem, 4, BITS_PER_WORD, 1);
15715 }
15716 else
15717 /* Output ".word .LTHUNKn". */
15718 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
15719 }
15720 else
15721 {
15722 fputs ("\tb\t", file);
15723 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
15724 if (NEED_PLT_RELOC)
15725 fputs ("(PLT)", file);
15726 fputc ('\n', file);
15727 }
15728 }
15729
15730 int
15731 arm_emit_vector_const (FILE *file, rtx x)
15732 {
15733 int i;
15734 const char * pattern;
15735
15736 gcc_assert (GET_CODE (x) == CONST_VECTOR);
15737
15738 switch (GET_MODE (x))
15739 {
15740 case V2SImode: pattern = "%08x"; break;
15741 case V4HImode: pattern = "%04x"; break;
15742 case V8QImode: pattern = "%02x"; break;
15743 default: gcc_unreachable ();
15744 }
15745
15746 fprintf (file, "0x");
15747 for (i = CONST_VECTOR_NUNITS (x); i--;)
15748 {
15749 rtx element;
15750
15751 element = CONST_VECTOR_ELT (x, i);
15752 fprintf (file, pattern, INTVAL (element));
15753 }
15754
15755 return 1;
15756 }
15757
15758 const char *
15759 arm_output_load_gr (rtx *operands)
15760 {
15761 rtx reg;
15762 rtx offset;
15763 rtx wcgr;
15764 rtx sum;
15765
15766 if (GET_CODE (operands [1]) != MEM
15767 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
15768 || GET_CODE (reg = XEXP (sum, 0)) != REG
15769 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
15770 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
15771 return "wldrw%?\t%0, %1";
15772
15773 /* Fix up an out-of-range load of a GR register. */
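  /* The base register is spilled to the stack and reused to load the value,
     the value is moved into the wCGR register with tmcr, and the base
     register is then restored.  */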
15774 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
15775 wcgr = operands[0];
15776 operands[0] = reg;
15777 output_asm_insn ("ldr%?\t%0, %1", operands);
15778
15779 operands[0] = wcgr;
15780 operands[1] = reg;
15781 output_asm_insn ("tmcr%?\t%0, %1", operands);
15782 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
15783
15784 return "";
15785 }
15786
15787 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
15788
15789 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
15790 named arg and all anonymous args onto the stack.
15791 XXX I know the prologue shouldn't be pushing registers, but it is faster
15792 that way. */
15793
15794 static void
15795 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
15796 enum machine_mode mode ATTRIBUTE_UNUSED,
15797 tree type ATTRIBUTE_UNUSED,
15798 int *pretend_size,
15799 int second_time ATTRIBUTE_UNUSED)
15800 {
15801 cfun->machine->uses_anonymous_args = 1;
15802 if (cum->nregs < NUM_ARG_REGS)
15803 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
15804 }
15805
15806 /* Return nonzero if the CONSUMER instruction (a store) does not need
15807 PRODUCER's value to calculate the address. */
15808
15809 int
15810 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
15811 {
15812 rtx value = PATTERN (producer);
15813 rtx addr = PATTERN (consumer);
15814
15815 if (GET_CODE (value) == COND_EXEC)
15816 value = COND_EXEC_CODE (value);
15817 if (GET_CODE (value) == PARALLEL)
15818 value = XVECEXP (value, 0, 0);
15819 value = XEXP (value, 0);
15820 if (GET_CODE (addr) == COND_EXEC)
15821 addr = COND_EXEC_CODE (addr);
15822 if (GET_CODE (addr) == PARALLEL)
15823 addr = XVECEXP (addr, 0, 0);
15824 addr = XEXP (addr, 0);
15825
15826 return !reg_overlap_mentioned_p (value, addr);
15827 }
15828
15829 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15830 have an early register shift value or amount dependency on the
15831 result of PRODUCER. */
15832
15833 int
15834 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
15835 {
15836 rtx value = PATTERN (producer);
15837 rtx op = PATTERN (consumer);
15838 rtx early_op;
15839
15840 if (GET_CODE (value) == COND_EXEC)
15841 value = COND_EXEC_CODE (value);
15842 if (GET_CODE (value) == PARALLEL)
15843 value = XVECEXP (value, 0, 0);
15844 value = XEXP (value, 0);
15845 if (GET_CODE (op) == COND_EXEC)
15846 op = COND_EXEC_CODE (op);
15847 if (GET_CODE (op) == PARALLEL)
15848 op = XVECEXP (op, 0, 0);
15849 op = XEXP (op, 1);
15850
15851 early_op = XEXP (op, 0);
15852 /* This is either an actual independent shift, or a shift applied to
15853 the first operand of another operation. We want the whole shift
15854 operation. */
15855 if (GET_CODE (early_op) == REG)
15856 early_op = op;
15857
15858 return !reg_overlap_mentioned_p (value, early_op);
15859 }
15860
15861 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15862 have an early register shift value dependency on the result of
15863 PRODUCER. */
15864
15865 int
15866 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
15867 {
15868 rtx value = PATTERN (producer);
15869 rtx op = PATTERN (consumer);
15870 rtx early_op;
15871
15872 if (GET_CODE (value) == COND_EXEC)
15873 value = COND_EXEC_CODE (value);
15874 if (GET_CODE (value) == PARALLEL)
15875 value = XVECEXP (value, 0, 0);
15876 value = XEXP (value, 0);
15877 if (GET_CODE (op) == COND_EXEC)
15878 op = COND_EXEC_CODE (op);
15879 if (GET_CODE (op) == PARALLEL)
15880 op = XVECEXP (op, 0, 0);
15881 op = XEXP (op, 1);
15882
15883 early_op = XEXP (op, 0);
15884
15885 /* This is either an actual independent shift, or a shift applied to
15886 the first operand of another operation. We want the value being
15887 shifted, in either case. */
15888 if (GET_CODE (early_op) != REG)
15889 early_op = XEXP (early_op, 0);
15890
15891 return !reg_overlap_mentioned_p (value, early_op);
15892 }
15893
15894 /* Return nonzero if the CONSUMER (a mul or mac op) does not
15895 have an early register mult dependency on the result of
15896 PRODUCER. */
15897
15898 int
15899 arm_no_early_mul_dep (rtx producer, rtx consumer)
15900 {
15901 rtx value = PATTERN (producer);
15902 rtx op = PATTERN (consumer);
15903
15904 if (GET_CODE (value) == COND_EXEC)
15905 value = COND_EXEC_CODE (value);
15906 if (GET_CODE (value) == PARALLEL)
15907 value = XVECEXP (value, 0, 0);
15908 value = XEXP (value, 0);
15909 if (GET_CODE (op) == COND_EXEC)
15910 op = COND_EXEC_CODE (op);
15911 if (GET_CODE (op) == PARALLEL)
15912 op = XVECEXP (op, 0, 0);
15913 op = XEXP (op, 1);
15914
15915 return (GET_CODE (op) == PLUS
15916 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
15917 }
15918
15919
15920 /* We can't rely on the caller doing the proper promotion when
15921 using APCS or ATPCS. */
15922
15923 static bool
15924 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
15925 {
15926 return !TARGET_AAPCS_BASED;
15927 }
15928
15929
15930 /* AAPCS based ABIs use short enums by default. */
15931
15932 static bool
15933 arm_default_short_enums (void)
15934 {
15935 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
15936 }
15937
15938
15939 /* AAPCS requires that anonymous bitfields affect structure alignment. */
15940
15941 static bool
15942 arm_align_anon_bitfield (void)
15943 {
15944 return TARGET_AAPCS_BASED;
15945 }
15946
15947
15948 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
15949
15950 static tree
15951 arm_cxx_guard_type (void)
15952 {
15953 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
15954 }
15955
15956
15957 /* The EABI says test the least significant bit of a guard variable. */
15958
15959 static bool
15960 arm_cxx_guard_mask_bit (void)
15961 {
15962 return TARGET_AAPCS_BASED;
15963 }
15964
15965
15966 /* The EABI specifies that all array cookies are 8 bytes long. */
15967
15968 static tree
15969 arm_get_cookie_size (tree type)
15970 {
15971 tree size;
15972
15973 if (!TARGET_AAPCS_BASED)
15974 return default_cxx_get_cookie_size (type);
15975
15976 size = build_int_cst (sizetype, 8);
15977 return size;
15978 }
15979
15980
15981 /* The EABI says that array cookies should also contain the element size. */
15982
15983 static bool
15984 arm_cookie_has_size (void)
15985 {
15986 return TARGET_AAPCS_BASED;
15987 }
15988
15989
15990 /* The EABI says constructors and destructors should return a pointer to
15991 the object constructed/destroyed. */
15992
15993 static bool
15994 arm_cxx_cdtor_returns_this (void)
15995 {
15996 return TARGET_AAPCS_BASED;
15997 }
15998
15999 /* The EABI says that an inline function may never be the key
16000 method. */
16001
16002 static bool
16003 arm_cxx_key_method_may_be_inline (void)
16004 {
16005 return !TARGET_AAPCS_BASED;
16006 }
16007
16008 static void
16009 arm_cxx_determine_class_data_visibility (tree decl)
16010 {
16011 if (!TARGET_AAPCS_BASED)
16012 return;
16013
16014 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
16015 is exported. However, on systems without dynamic vague linkage,
16016 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
16017 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
16018 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
16019 else
16020 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
16021 DECL_VISIBILITY_SPECIFIED (decl) = 1;
16022 }
16023
16024 static bool
16025 arm_cxx_class_data_always_comdat (void)
16026 {
16027 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
16028 vague linkage if the class has no key function. */
16029 return !TARGET_AAPCS_BASED;
16030 }
16031
16032
16033 /* The EABI says __aeabi_atexit should be used to register static
16034 destructors. */
16035
16036 static bool
16037 arm_cxx_use_aeabi_atexit (void)
16038 {
16039 return TARGET_AAPCS_BASED;
16040 }
16041
16042
16043 void
16044 arm_set_return_address (rtx source, rtx scratch)
16045 {
16046 arm_stack_offsets *offsets;
16047 HOST_WIDE_INT delta;
16048 rtx addr;
16049 unsigned long saved_regs;
16050
16051 saved_regs = arm_compute_save_reg_mask ();
16052
16053 if ((saved_regs & (1 << LR_REGNUM)) == 0)
16054 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
16055 else
16056 {
16057 if (frame_pointer_needed)
16058 addr = plus_constant(hard_frame_pointer_rtx, -4);
16059 else
16060 {
16061 /* LR will be the first saved register. */
16062 offsets = arm_get_frame_offsets ();
16063 delta = offsets->outgoing_args - (offsets->frame + 4);
16064
16065
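	  /* If the offset is too large for a load/store address offset,
	     add the bits above 4095 into the scratch register first.  */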
16066 if (delta >= 4096)
16067 {
16068 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
16069 GEN_INT (delta & ~4095)));
16070 addr = scratch;
16071 delta &= 4095;
16072 }
16073 else
16074 addr = stack_pointer_rtx;
16075
16076 addr = plus_constant (addr, delta);
16077 }
16078 emit_move_insn (gen_frame_mem (Pmode, addr), source);
16079 }
16080 }
16081
16082
16083 void
16084 thumb_set_return_address (rtx source, rtx scratch)
16085 {
16086 arm_stack_offsets *offsets;
16087 HOST_WIDE_INT delta;
16088 HOST_WIDE_INT limit;
16089 int reg;
16090 rtx addr;
16091 unsigned long mask;
16092
16093 emit_insn (gen_rtx_USE (VOIDmode, source));
16094
16095 mask = thumb1_compute_save_reg_mask ();
16096 if (mask & (1 << LR_REGNUM))
16097 {
16098 offsets = arm_get_frame_offsets ();
16099
16100 limit = 1024;
16101 /* Find the saved regs. */
16102 if (frame_pointer_needed)
16103 {
16104 delta = offsets->soft_frame - offsets->saved_args;
16105 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
16106 if (TARGET_THUMB1)
16107 limit = 128;
16108 }
16109 else
16110 {
16111 delta = offsets->outgoing_args - offsets->saved_args;
16112 reg = SP_REGNUM;
16113 }
16114 /* Allow for the stack frame. */
16115 if (TARGET_THUMB1 && TARGET_BACKTRACE)
16116 delta -= 16;
16117 /* The link register is always the first saved register. */
16118 delta -= 4;
16119
16120 /* Construct the address. */
16121 addr = gen_rtx_REG (SImode, reg);
16122 if (delta > limit)
16123 {
16124 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
16125 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
16126 addr = scratch;
16127 }
16128 else
16129 addr = plus_constant (addr, delta);
16130
16131 emit_move_insn (gen_frame_mem (Pmode, addr), source);
16132 }
16133 else
16134 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
16135 }
16136
16137 /* Implements target hook vector_mode_supported_p. */
16138 bool
16139 arm_vector_mode_supported_p (enum machine_mode mode)
16140 {
16141 if ((mode == V2SImode)
16142 || (mode == V4HImode)
16143 || (mode == V8QImode))
16144 return true;
16145
16146 return false;
16147 }
16148
16149 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
16150 ARM insns and therefore guarantee that the shift count is modulo 256.
16151 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
16152 guarantee no particular behavior for out-of-range counts. */
16153
16154 static unsigned HOST_WIDE_INT
16155 arm_shift_truncation_mask (enum machine_mode mode)
16156 {
16157 return mode == SImode ? 255 : 0;
16158 }
16159
16160
16161 /* Map internal gcc register numbers to DWARF2 register numbers. */
16162
16163 unsigned int
16164 arm_dbx_register_number (unsigned int regno)
16165 {
16166 if (regno < 16)
16167 return regno;
16168
16169 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
16170 compatibility. The EABI defines them as registers 96-103. */
16171 if (IS_FPA_REGNUM (regno))
16172 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
16173
16174 if (IS_VFP_REGNUM (regno))
16175 return 64 + regno - FIRST_VFP_REGNUM;
16176
16177 if (IS_IWMMXT_GR_REGNUM (regno))
16178 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
16179
16180 if (IS_IWMMXT_REGNUM (regno))
16181 return 112 + regno - FIRST_IWMMXT_REGNUM;
16182
16183 gcc_unreachable ();
16184 }
16185
16186
16187 #ifdef TARGET_UNWIND_INFO
16188 /* Emit unwind directives for a store-multiple instruction or stack pointer
16189 push during alignment.
16190 These should only ever be generated by the function prologue code, so
16191 expect them to have a particular form. */
16192
16193 static void
16194 arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
16195 {
16196 int i;
16197 HOST_WIDE_INT offset;
16198 HOST_WIDE_INT nregs;
16199 int reg_size;
16200 unsigned reg;
16201 unsigned lastreg;
16202 rtx e;
16203
16204 e = XVECEXP (p, 0, 0);
16205 if (GET_CODE (e) != SET)
16206 abort ();
16207
16208 /* First insn will adjust the stack pointer. */
16209 if (GET_CODE (e) != SET
16210 || GET_CODE (XEXP (e, 0)) != REG
16211 || REGNO (XEXP (e, 0)) != SP_REGNUM
16212 || GET_CODE (XEXP (e, 1)) != PLUS)
16213 abort ();
16214
16215 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
16216 nregs = XVECLEN (p, 0) - 1;
16217
16218 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
16219 if (reg < 16)
16220 {
16221       /* The function prologue may also push pc, but does not annotate it,
16222 	 as it is never restored.  We turn this into a stack pointer adjustment.  */
16223 if (nregs * 4 == offset - 4)
16224 {
16225 fprintf (asm_out_file, "\t.pad #4\n");
16226 offset -= 4;
16227 }
16228 reg_size = 4;
16229 fprintf (asm_out_file, "\t.save {");
16230 }
16231 else if (IS_VFP_REGNUM (reg))
16232 {
16233 reg_size = 8;
16234 fprintf (asm_out_file, "\t.vsave {");
16235 }
16236 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
16237 {
16238 /* FPA registers are done differently. */
16239 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
16240 return;
16241 }
16242 else
16243 /* Unknown register type. */
16244 abort ();
16245
16246 /* If the stack increment doesn't match the size of the saved registers,
16247 something has gone horribly wrong. */
16248 if (offset != nregs * reg_size)
16249 abort ();
16250
16251 offset = 0;
16252 lastreg = 0;
16253 /* The remaining insns will describe the stores. */
16254 for (i = 1; i <= nregs; i++)
16255 {
16256 /* Expect (set (mem <addr>) (reg)).
16257 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
16258 e = XVECEXP (p, 0, i);
16259 if (GET_CODE (e) != SET
16260 || GET_CODE (XEXP (e, 0)) != MEM
16261 || GET_CODE (XEXP (e, 1)) != REG)
16262 abort ();
16263
16264 reg = REGNO (XEXP (e, 1));
16265 if (reg < lastreg)
16266 abort ();
16267
16268 if (i != 1)
16269 fprintf (asm_out_file, ", ");
16270 /* We can't use %r for vfp because we need to use the
16271 double precision register names. */
16272 if (IS_VFP_REGNUM (reg))
16273 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
16274 else
16275 asm_fprintf (asm_out_file, "%r", reg);
16276
16277 #ifdef ENABLE_CHECKING
16278 /* Check that the addresses are consecutive. */
16279 e = XEXP (XEXP (e, 0), 0);
16280 if (GET_CODE (e) == PLUS)
16281 {
16282 offset += reg_size;
16283 if (GET_CODE (XEXP (e, 0)) != REG
16284 || REGNO (XEXP (e, 0)) != SP_REGNUM
16285 || GET_CODE (XEXP (e, 1)) != CONST_INT
16286 || offset != INTVAL (XEXP (e, 1)))
16287 abort ();
16288 }
16289 else if (i != 1
16290 || GET_CODE (e) != REG
16291 || REGNO (e) != SP_REGNUM)
16292 abort ();
16293 #endif
16294 }
16295 fprintf (asm_out_file, "}\n");
16296 }
16297
16298 /* Emit unwind directives for a SET. */
16299
16300 static void
16301 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
16302 {
16303 rtx e0;
16304 rtx e1;
16305 unsigned reg;
16306
16307 e0 = XEXP (p, 0);
16308 e1 = XEXP (p, 1);
16309 switch (GET_CODE (e0))
16310 {
16311 case MEM:
16312 /* Pushing a single register. */
16313 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
16314 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
16315 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
16316 abort ();
16317
16318 asm_fprintf (asm_out_file, "\t.save ");
16319 if (IS_VFP_REGNUM (REGNO (e1)))
16320 	asm_fprintf (asm_out_file, "{d%d}\n",
16321 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
16322 else
16323 	asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
16324 break;
16325
16326 case REG:
16327 if (REGNO (e0) == SP_REGNUM)
16328 {
16329 /* A stack increment. */
16330 if (GET_CODE (e1) != PLUS
16331 || GET_CODE (XEXP (e1, 0)) != REG
16332 || REGNO (XEXP (e1, 0)) != SP_REGNUM
16333 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
16334 abort ();
16335
16336 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
16337 -INTVAL (XEXP (e1, 1)));
16338 }
16339 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
16340 {
16341 HOST_WIDE_INT offset;
16342
16343 if (GET_CODE (e1) == PLUS)
16344 {
16345 if (GET_CODE (XEXP (e1, 0)) != REG
16346 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
16347 abort ();
16348 reg = REGNO (XEXP (e1, 0));
16349 offset = INTVAL (XEXP (e1, 1));
16350 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
16351 HARD_FRAME_POINTER_REGNUM, reg,
16352 		       offset);
16353 }
16354 else if (GET_CODE (e1) == REG)
16355 {
16356 reg = REGNO (e1);
16357 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
16358 HARD_FRAME_POINTER_REGNUM, reg);
16359 }
16360 else
16361 abort ();
16362 }
16363 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
16364 {
16365 /* Move from sp to reg. */
16366 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
16367 }
16368 else if (GET_CODE (e1) == PLUS
16369 && GET_CODE (XEXP (e1, 0)) == REG
16370 && REGNO (XEXP (e1, 0)) == SP_REGNUM
16371 && GET_CODE (XEXP (e1, 1)) == CONST_INT)
16372 {
16373 /* Set reg to offset from sp. */
16374 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
16375 REGNO (e0), (int)INTVAL(XEXP (e1, 1)));
16376 }
16377 else if (GET_CODE (e1) == UNSPEC && XINT (e1, 1) == UNSPEC_STACK_ALIGN)
16378 {
16379 /* Stack pointer save before alignment. */
16380 reg = REGNO (e0);
16381 asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n",
16382 reg + 0x90, reg);
16383 }
16384 else
16385 abort ();
16386 break;
16387
16388 default:
16389 abort ();
16390 }
16391 }
16392
16393
16394 /* Emit unwind directives for the given insn. */
16395
16396 static void
16397 arm_unwind_emit (FILE * asm_out_file, rtx insn)
16398 {
16399 rtx pat;
16400
16401 if (!ARM_EABI_UNWIND_TABLES)
16402 return;
16403
16404 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
16405 return;
16406
16407 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
16408 if (pat)
16409 pat = XEXP (pat, 0);
16410 else
16411 pat = PATTERN (insn);
16412
16413 switch (GET_CODE (pat))
16414 {
16415 case SET:
16416 arm_unwind_emit_set (asm_out_file, pat);
16417 break;
16418
16419 case SEQUENCE:
16420 /* Store multiple. */
16421 arm_unwind_emit_sequence (asm_out_file, pat);
16422 break;
16423
16424 default:
16425 abort();
16426 }
16427 }
16428
16429
16430 /* Output a reference from a function exception table to the type_info
16431 object X. The EABI specifies that the symbol should be relocated by
16432 an R_ARM_TARGET2 relocation. */
16433
16434 static bool
16435 arm_output_ttype (rtx x)
16436 {
16437 fputs ("\t.word\t", asm_out_file);
16438 output_addr_const (asm_out_file, x);
16439 /* Use special relocations for symbol references. */
16440 if (GET_CODE (x) != CONST_INT)
16441 fputs ("(TARGET2)", asm_out_file);
16442 fputc ('\n', asm_out_file);
16443
16444 return TRUE;
16445 }
16446 #endif /* TARGET_UNWIND_INFO */
16447
16448
16449 /* Handle UNSPEC DWARF call frame instructions. These are needed for dynamic
16450 stack alignment. */
16451
16452 static void
16453 arm_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
16454 {
16455 rtx unspec = SET_SRC (pattern);
16456 gcc_assert (GET_CODE (unspec) == UNSPEC);
16457
16458 switch (index)
16459 {
16460 case UNSPEC_STACK_ALIGN:
16461 /* ??? We should set the CFA = (SP & ~7). At this point we haven't
16462 put anything on the stack, so hopefully it won't matter.
16463 CFA = SP will be correct after alignment. */
16464 dwarf2out_reg_save_reg (label, stack_pointer_rtx,
16465 SET_DEST (pattern));
16466 break;
16467 default:
16468 gcc_unreachable ();
16469 }
16470 }
16471
16472
16473 /* Output unwind directives for the start/end of a function. */
16474
16475 void
16476 arm_output_fn_unwind (FILE * f, bool prologue)
16477 {
16478 if (!ARM_EABI_UNWIND_TABLES)
16479 return;
16480
16481 if (prologue)
16482 fputs ("\t.fnstart\n", f);
16483 else
16484 fputs ("\t.fnend\n", f);
16485 }
16486
16487 static bool
16488 arm_emit_tls_decoration (FILE *fp, rtx x)
16489 {
16490 enum tls_reloc reloc;
16491 rtx val;
16492
16493 val = XVECEXP (x, 0, 0);
16494 reloc = INTVAL (XVECEXP (x, 0, 1));
16495
16496 output_addr_const (fp, val);
16497
16498 switch (reloc)
16499 {
16500 case TLS_GD32:
16501 fputs ("(tlsgd)", fp);
16502 break;
16503 case TLS_LDM32:
16504 fputs ("(tlsldm)", fp);
16505 break;
16506 case TLS_LDO32:
16507 fputs ("(tlsldo)", fp);
16508 break;
16509 case TLS_IE32:
16510 fputs ("(gottpoff)", fp);
16511 break;
16512 case TLS_LE32:
16513 fputs ("(tpoff)", fp);
16514 break;
16515 default:
16516 gcc_unreachable ();
16517 }
16518
16519 switch (reloc)
16520 {
16521 case TLS_GD32:
16522 case TLS_LDM32:
16523 case TLS_IE32:
16524 fputs (" + (. - ", fp);
16525 output_addr_const (fp, XVECEXP (x, 0, 2));
16526 fputs (" - ", fp);
16527 output_addr_const (fp, XVECEXP (x, 0, 3));
16528 fputc (')', fp);
16529 break;
16530 default:
16531 break;
16532 }
16533
16534 return TRUE;
16535 }
16536
16537 /* ARM implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
16538
16539 static void
16540 arm_output_dwarf_dtprel (FILE *file, int size, rtx x)
16541 {
16542 gcc_assert (size == 4);
16543 fputs ("\t.word\t", file);
16544 output_addr_const (file, x);
16545 fputs ("(tlsldo)", file);
16546 }
16547
16548 bool
16549 arm_output_addr_const_extra (FILE *fp, rtx x)
16550 {
16551 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
16552 return arm_emit_tls_decoration (fp, x);
16553 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
16554 {
16555 char label[256];
16556 int labelno = INTVAL (XVECEXP (x, 0, 0));
16557
16558 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
16559 assemble_name_raw (fp, label);
16560
16561 return TRUE;
16562 }
16563 else if (GET_CODE (x) == CONST_VECTOR)
16564 return arm_emit_vector_const (fp, x);
16565
16566 return FALSE;
16567 }
16568
16569 /* Output assembly for a shift instruction.
16570 SET_FLAGS determines how the instruction modifies the condition codes.
16571 0 - Do not set condition codes.
16572 1 - Set condition codes.
16573 2 - Use smallest instruction. */
16574 const char *
16575 arm_output_shift (rtx *operands, int set_flags)
16576 {
16577 char pattern[100];
16578 static const char flag_chars[3] = {'?', '.', '!'};
16579 const char *shift;
16580 HOST_WIDE_INT val;
16581 char c;
16582
16583 c = flag_chars[set_flags];
16584 if (TARGET_UNIFIED_ASM)
16585 {
16586       shift = shift_op (operands[3], &val);
16587 if (shift)
16588 {
16589 if (val != -1)
16590 	    operands[2] = GEN_INT (val);
16591 sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
16592 }
16593 else
16594 sprintf (pattern, "mov%%%c\t%%0, %%1", c);
16595 }
16596 else
16597 sprintf (pattern, "mov%%%c\t%%0, %%1%%S3", c);
16598 output_asm_insn (pattern, operands);
16599 return "";
16600 }
16601
16602 /* Output a Thumb-2 casesi instruction. */
16603 const char *
16604 thumb2_output_casesi (rtx *operands)
16605 {
16606 rtx diff_vec = PATTERN (next_real_insn (operands[2]));
16607
16608 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
16609
16610 output_asm_insn ("cmp\t%0, %1", operands);
16611 output_asm_insn ("bhi\t%l3", operands);
16612 switch (GET_MODE(diff_vec))
16613 {
16614 case QImode:
16615 return "tbb\t[%|pc, %0]";
16616 case HImode:
16617 return "tbh\t[%|pc, %0, lsl #1]";
16618 case SImode:
16619 if (flag_pic)
16620 {
16621 output_asm_insn ("adr\t%4, %l2", operands);
16622 output_asm_insn ("ldr\t%5, [%4, %0, lsl #2]", operands);
16623 output_asm_insn ("add\t%4, %4, %5", operands);
16624 return "bx\t%4";
16625 }
16626 else
16627 {
16628 output_asm_insn ("adr\t%4, %l2", operands);
16629 return "ldr\t%|pc, [%4, %0, lsl #2]";
16630 }
16631 default:
16632 gcc_unreachable ();
16633 }
16634 }
16635
16636 #include "gt-arm.h"