/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53
/* Forward definitions of types.  */
typedef struct minipool_node Mnode;
typedef struct minipool_fixup Mfix;

/* In order to improve the layout of the prototypes below
   some short type abbreviations are defined here.  */
#define Hint HOST_WIDE_INT
#define Mmode enum machine_mode
#define Ulong unsigned long
#define Ccstar const char *

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static void arm_add_gc_roots PARAMS ((void));
static int arm_gen_constant PARAMS ((enum rtx_code, Mmode, Hint, rtx, rtx, int, int));
static unsigned bit_count PARAMS ((Ulong));
static int const_ok_for_op PARAMS ((Hint, enum rtx_code));
static int eliminate_lr2ip PARAMS ((rtx *));
static rtx emit_multi_reg_push PARAMS ((int));
static rtx emit_sfm PARAMS ((int, int));
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer PARAMS ((rtx, unsigned int, int));
#endif
static Ccstar fp_const_from_val PARAMS ((REAL_VALUE_TYPE *));
static arm_cc get_arm_condition_code PARAMS ((rtx));
static void init_fpa_table PARAMS ((void));
static Hint int_log2 PARAMS ((Hint));
static rtx is_jump_table PARAMS ((rtx));
static Ccstar output_multi_immediate PARAMS ((rtx *, Ccstar, Ccstar, int, Hint));
static void print_multi_reg PARAMS ((FILE *, Ccstar, int, int));
static Mmode select_dominance_cc_mode PARAMS ((rtx, rtx, Hint));
static Ccstar shift_op PARAMS ((rtx, Hint *));
static struct machine_function * arm_init_machine_status PARAMS ((void));
static int number_of_first_bit_set PARAMS ((int));
static void replace_symbols_in_block PARAMS ((tree, rtx, rtx));
static void thumb_exit PARAMS ((FILE *, int, rtx));
static void thumb_pushpop PARAMS ((FILE *, int, int));
static Ccstar thumb_condition_code PARAMS ((rtx, int));
static Hint get_jump_table_size PARAMS ((rtx));
static Mnode * move_minipool_fix_forward_ref PARAMS ((Mnode *, Mnode *, Hint));
static Mnode * add_minipool_forward_ref PARAMS ((Mfix *));
static Mnode * move_minipool_fix_backward_ref PARAMS ((Mnode *, Mnode *, Hint));
static Mnode * add_minipool_backward_ref PARAMS ((Mfix *));
static void assign_minipool_offsets PARAMS ((Mfix *));
static void arm_print_value PARAMS ((FILE *, rtx));
static void dump_minipool PARAMS ((rtx));
static int arm_barrier_cost PARAMS ((rtx));
static Mfix * create_fix_barrier PARAMS ((Mfix *, Hint));
static void push_minipool_barrier PARAMS ((rtx, Hint));
static void push_minipool_fix PARAMS ((rtx, Hint, rtx *, Mmode, rtx));
static void note_invalid_constants PARAMS ((rtx, Hint));
static int current_file_function_operand PARAMS ((rtx));
static Ulong arm_compute_save_reg0_reg12_mask PARAMS ((void));
static Ulong arm_compute_save_reg_mask PARAMS ((void));
static Ulong arm_isr_value PARAMS ((tree));
static Ulong arm_compute_func_type PARAMS ((void));
static tree arm_handle_fndecl_attribute PARAMS ((tree *, tree, tree, int, bool *));
static tree arm_handle_isr_attribute PARAMS ((tree *, tree, tree, int, bool *));
static void arm_output_function_epilogue PARAMS ((FILE *, Hint));
static void arm_output_function_prologue PARAMS ((FILE *, Hint));
static void thumb_output_function_prologue PARAMS ((FILE *, Hint));
static int arm_comp_type_attributes PARAMS ((tree, tree));
static void arm_set_default_type_attributes PARAMS ((tree));
static int arm_adjust_cost PARAMS ((rtx, rtx, rtx, int));
static int count_insns_for_constant PARAMS ((HOST_WIDE_INT, int));
static int arm_get_strip_length PARAMS ((int));
static bool arm_function_ok_for_sibcall PARAMS ((tree, tree));
#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section PARAMS ((const char *, unsigned int));
#endif
#ifndef ARM_PE
static void arm_encode_section_info PARAMS ((tree, int));
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label PARAMS ((FILE *, const char *));
#endif
static void arm_internal_label PARAMS ((FILE *, const char *, unsigned long));
static void arm_output_mi_thunk PARAMS ((FILE *, tree, HOST_WIDE_INT,
                                         HOST_WIDE_INT, tree));

#undef Hint
#undef Mmode
#undef Ulong
#undef Ccstar
\f
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#else
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char * minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* What type of floating point are we tuning for?  */
enum floating_point_type arm_fpu;

/* What type of floating point instructions are available?  */
enum floating_point_type arm_fpu_arch;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfp=... option.  */
const char * target_fp_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC   (1 << 0)   /* Has external co-processor bus.  */
#define FL_FAST_MULT (1 << 1)   /* Fast multiply.  */
#define FL_MODE26    (1 << 2)   /* 26-bit mode support.  */
#define FL_MODE32    (1 << 3)   /* 32-bit mode support.  */
#define FL_ARCH4     (1 << 4)   /* Architecture rel 4.  */
#define FL_ARCH5     (1 << 5)   /* Architecture rel 5.  */
#define FL_THUMB     (1 << 6)   /* Thumb aware.  */
#define FL_LDSCHED   (1 << 7)   /* Load scheduling necessary.  */
#define FL_STRONG    (1 << 8)   /* StrongARM.  */
#define FL_ARCH5E    (1 << 9)   /* DSP extensions to v5.  */
#define FL_XSCALE    (1 << 10)  /* XScale.  */

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  Note - there is an overlap with FL_FAST_MULT.  For some
   hardware we want to be able to generate the multiply instructions, but to
   tune as if they were not present in the architecture.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this is an "M" variant of the processor.  */
int arm_fast_multiply = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is an XScale.  */
int arm_is_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output; this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
\f
/* Initialization code.  */

struct processors
{
  const char *const name;
  const unsigned long flags;
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */

  {"arm2",      FL_CO_PROC | FL_MODE26 },
  {"arm250",    FL_CO_PROC | FL_MODE26 },
  {"arm3",      FL_CO_PROC | FL_MODE26 },
  {"arm6",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm60",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm600",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm610",    FL_MODE26 | FL_MODE32 },
  {"arm620",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  /* arm7m doesn't exist on its own, only with D (and I); those
     don't alter the code, so arm7m is sometimes used.  */
  {"arm7m",     FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7d",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dm",    FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7di",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dmi",   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm70",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700i",   FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm710",    FL_MODE26 | FL_MODE32 },
  {"arm710t",   FL_MODE26 | FL_MODE32 | FL_THUMB },
  {"arm720",    FL_MODE26 | FL_MODE32 },
  {"arm720t",   FL_MODE26 | FL_MODE32 | FL_THUMB },
  {"arm740t",   FL_MODE26 | FL_MODE32 | FL_THUMB },
  {"arm710c",   FL_MODE26 | FL_MODE32 },
  {"arm7100",   FL_MODE26 | FL_MODE32 },
  {"arm7500",   FL_MODE26 | FL_MODE32 },
  /* Doesn't have an external co-proc, but does have embedded fpu.  */
  {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7tdmi",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm8",      FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm810",    FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm9",      FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm920",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm920t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm940t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9tdmi",  FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9e",     FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"strongarm",     FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm110",  FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"arm10tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
  {"arm1020t",  FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
  {"xscale",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE },

  {NULL, 0}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */

  { "armv2",   FL_CO_PROC | FL_MODE26 },
  { "armv2a",  FL_CO_PROC | FL_MODE26 },
  { "armv3",   FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  { "armv3m",  FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  { "armv4",   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  { "armv5",   FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { "armv5t",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
  { NULL, 0 }
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line,
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string     name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (value)
     unsigned long value;
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options ()
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                if (i == 2)
                  tune_flags = sel->flags;
                else
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      static const struct cpu_default
      {
        const int cpu;
        const char *const name;
      }
      cpu_defaults[] =
      {
        { TARGET_CPU_arm2,      "arm2" },
        { TARGET_CPU_arm6,      "arm6" },
        { TARGET_CPU_arm610,    "arm610" },
        { TARGET_CPU_arm710,    "arm710" },
        { TARGET_CPU_arm7m,     "arm7m" },
        { TARGET_CPU_arm7500fe, "arm7500fe" },
        { TARGET_CPU_arm7tdmi,  "arm7tdmi" },
        { TARGET_CPU_arm8,      "arm8" },
        { TARGET_CPU_arm810,    "arm810" },
        { TARGET_CPU_arm9,      "arm9" },
        { TARGET_CPU_strongarm, "strongarm" },
        { TARGET_CPU_xscale,    "xscale" },
        { TARGET_CPU_generic,   "arm" },
        { 0, 0 }
      };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
        if (def->cpu == TARGET_CPU_DEFAULT)
          break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
        abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
        if (streq (def->name, sel->name))
          break;

      if (sel->name == NULL)
        abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* Force apcs-32 to be used for interworking.  */
          target_flags |= ARM_FLAG_APCS_32;

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }
      else if (!TARGET_APCS_32)
        sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 Unfortunately this does not work with multilibbing.  We
                 need to be able to support multilibs for -mapcs-26 and for
                 -mthumb-interwork and there is no CPU that can support both
                 options.  Instead if we cannot find a cpu that has both the
                 characteristics of the default cpu and the given command line
                 options we scan the array again looking for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
    }

  /* If tuning has not been specified, tune for whichever processor or
     architecture has been selected.  */
  if (tune_flags == 0)
    tune_flags = insn_flags;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
         user, so issue a warning message.  If the user has specified
         "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
        warning ("target CPU does not support APCS-32");
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
        warning ("interworking forces APCS-32 to be used");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_fast_multiply = (insn_flags & FL_FAST_MULT) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_is_xscale = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;

  /* Default value for floating point code... if no co-processor
     bus, then schedule for emulated floating point.  Otherwise,
     assume the user has an FPA.
     Note: this does not prevent use of floating point instructions,
     -msoft-float does that.  */
  arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;

  if (target_fp_name)
    {
      if (streq (target_fp_name, "2"))
        arm_fpu_arch = FP_SOFT2;
      else if (streq (target_fp_name, "3"))
        arm_fpu_arch = FP_SOFT3;
      else
        error ("invalid floating point emulation option: -mfpe-%s",
               target_fp_name);
    }
  else
    arm_fpu_arch = FP_DEFAULT;

  if (TARGET_FPE && arm_fpu != FP_HARD)
    arm_fpu = FP_SOFT2;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32)
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to 8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  /* If optimizing for space, don't synthesize constants.
     For processors with load scheduling, it never costs more than 2 cycles
     to load a constant, and the load scheduler may well reduce that to 1.  */
  if (optimize_size || (tune_flags & FL_LDSCHED))
    arm_constant_limit = 1;

  if (arm_is_xscale)
    arm_constant_limit = 2;

  /* If optimizing for size, bump the number of instructions that we
     are prepared to conditionally execute (even on a StrongARM).
     Otherwise for the StrongARM, which has early execution of branches,
     a sequence that is worth skipping is shorter.  */
  if (optimize_size)
    max_insns_skipped = 6;
  else if (arm_is_strong)
    max_insns_skipped = 3;

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots ()
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
\f
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (argument)
     tree argument;
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type ()
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
        a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
        type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
        type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type ()
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
\f
/* Return 1 if it is possible to return using a single instruction.  */

int
use_return_insn (iscond)
     int iscond;
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ().  */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if there is no frame pointer and there is a stack adjustment.  */
      || ((arm_get_frame_size () + current_function_outgoing_args_size != 0)
          && !frame_pointer_needed))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPU regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT)
    for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (i)
     HOST_WIDE_INT i;
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

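  /* Otherwise try each of the 16 even rotations: an ARM data-processing
     immediate is an 8-bit value rotated right by an even amount, so we
     slide an 8-bit window around the 32-bit word and check whether all
     of the set bits of I fall inside it.  */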
  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
        (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (i, code)
     HOST_WIDE_INT i;
     enum rtx_code code;
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
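      /* An add of a negated immediate can be emitted as a subtract.  */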
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
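      /* An AND with an inverted immediate can be emitted as a BIC.  */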
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (code, mode, val, target, source, subtargets)
     enum rtx_code code;
     enum machine_mode mode;
     HOST_WIDE_INT val;
     rtx target;
     rtx source;
     int subtargets;
{
  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && (arm_gen_constant (code, mode, val, target, source, 1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
}

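/* Count how many 8-bit-chunk insns are needed to materialize REMAINDER,
   starting at bit position I and working downwards (wrapping round to the
   top of the word); this mirrors the chunking done by the emission loop
   at the end of arm_gen_constant below.  */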
static int
count_insns_for_constant (remainder, i)
     HOST_WIDE_INT remainder;
     int i;
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;

  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    }
  while (remainder);

  return num_insns;
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (code, mode, val, target, source, subtargets, generate)
     enum rtx_code code;
     enum machine_mode mode;
     HOST_WIDE_INT val;
     rtx target;
     rtx source;
     int subtargets;
     int generate;
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't yet know how to handle this case below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_MINUS (mode, GEN_INT (val),
                                                   source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_insn (gen_rtx_SET (VOIDmode, target,
                                (source ? gen_rtx (code, mode, source,
                                                   GEN_INT (val))
                                 : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }

  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
         to be negative.  This is a good way of doing it, since the shift
         may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
        {
          if (const_ok_for_arm
              (temp1 = ARM_SIGN_EXTEND (remainder
                                        << (set_sign_bit_copies - 1))))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_insn (gen_rtx_SET (VOIDmode, new_src,
                                          GEN_INT (temp1)));
                  emit_insn (gen_ashrsi3 (target, new_src,
                                          GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
          /* For an inverted constant, we will need to set the low bits,
             these will be shifted out of harm's way.  */
          temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
          if (const_ok_for_arm (~temp1))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_insn (gen_rtx_SET (VOIDmode, new_src,
                                          GEN_INT (temp1)));
                  emit_insn (gen_ashrsi3 (target, new_src,
                                          GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
        }

      /* See if we can generate this by setting the bottom (or the top)
         16 bits, and then shifting these into the other half of the
         word.  We only look for the simplest cases, to do more would cost
         too much.  Be careful, however, not to generate this when the
         alternative would take fewer insns.  */
      if (val & 0xffff0000)
        {
          temp1 = remainder & 0xffff0000;
          temp2 = remainder & 0x0000ffff;

          /* Overlaps outside this range are best done using other methods.  */
          for (i = 9; i < 24; i++)
            {
              if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
                  && !const_ok_for_arm (temp2))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, temp2, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_insn (gen_rtx_SET
                               (VOIDmode, target,
                                gen_rtx_IOR (mode,
                                             gen_rtx_ASHIFT (mode, source,
                                                             GEN_INT (i)),
                                             source)));
                  return insns + 1;
                }
            }

          /* Don't duplicate cases already considered.  */
          for (i = 17; i < 24; i++)
            {
              if (((temp1 | (temp1 >> i)) == remainder)
                  && !const_ok_for_arm (temp1))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, temp1, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_insn
                      (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_IOR
                                    (mode,
                                     gen_rtx_LSHIFTRT (mode, source,
                                                       GEN_INT (i)),
                                     source)));
                  return insns + 1;
                }
            }
        }
      break;

    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
         single instruction, and we can find a temporary to put it in,
         then this can be done in two instructions instead of 3-4.  */
      if (subtargets
          /* TARGET can't be NULL if SUBTARGETS is 0.  */
          || (reload_completed && !reg_mentioned_p (target, source)))
        {
          if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
            {
              if (generate)
                {
                  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

                  emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
                  emit_insn (gen_rtx_SET (VOIDmode, target,
                                          gen_rtx (code, mode, source, sub)));
                }
              return 2;
            }
        }

      if (code == XOR)
        break;

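      /* Set the top set_sign_bit_copies bits by double inversion:
         mvn sub, source, asl #N then mvn target, sub, lsr #N leaves
         the top N bits set and the remaining bits equal to SOURCE.  */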
      if (set_sign_bit_copies > 8
          && (val & (-1 << (32 - set_sign_bit_copies))) == val)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_sign_bit_copies);

              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_ASHIFT (mode,
                                                                   source,
                                                                   shift))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_LSHIFTRT (mode, sub,
                                                                     shift))));
            }
          return 2;
        }

      if (set_zero_bit_copies > 8
          && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_zero_bit_copies);

              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_LSHIFTRT (mode,
                                                                     source,
                                                                     shift))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_ASHIFT (mode, sub,
                                                                   shift))));
            }
          return 2;
        }

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode, source)));
              source = sub;
              if (subtargets)
                sub = gen_reg_rtx (mode);
              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_AND (mode, source,
                                                   GEN_INT (temp1))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode, sub)));
            }
          return 3;
        }
      break;

    case AND:
      /* See if two shifts will do 2 or more insns' worth of work.  */
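      /* For example, AND with 0x0000ffff (clear_sign_bit_copies == 16)
         becomes a left shift by 16 followed by a logical right shift by
         16, clearing the top half without having to load a mask that is
         not a valid immediate.  */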
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = ((0xffffffff
                                       << (32 - clear_sign_bit_copies))
                                      & 0xffffffff);

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_sign_bit_copies);

              emit_insn (gen_ashlsi3 (new_src, source, shift));
              emit_insn (gen_lshrsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;

                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_zero_bit_copies);

              emit_insn (gen_lshrsi3 (new_src, source, shift));
              emit_insn (gen_ashlsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      break;

    default:
      break;
    }

  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

  if (code == AND || (can_invert && num_bits_set > 16))
    remainder = (~remainder) & 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try to find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary, we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.  */
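  /* For example, 0x00f000f0 is emitted as two insns: mov rN, #0xf0
     followed by add rN, rN, #0x00f00000 (the chunks are disjoint, so
     PLUS behaves like IOR here).  */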
  {
    int best_start = 0;
    int best_consecutive_zeros = 0;

    for (i = 0; i < 32; i += 2)
      {
        int consecutive_zeros = 0;

        if (!(remainder & (3 << i)))
          {
            while ((i < 32) && !(remainder & (3 << i)))
              {
                consecutive_zeros += 2;
                i += 2;
              }
            if (consecutive_zeros > best_consecutive_zeros)
              {
                best_consecutive_zeros = consecutive_zeros;
                best_start = i - consecutive_zeros;
              }
            i -= 2;
          }
      }

    /* So long as it won't require any more insns to do so, it's
       desirable to emit a small constant (in bits 0...9) in the last
       insn.  This way there is more chance that it can be combined with
       a later addressing insn to form a pre-indexed load or store
       operation.  Consider:

               *((volatile int *)0xe0000100) = 1;
               *((volatile int *)0xe0000110) = 2;

       We want this to wind up as:

               mov rA, #0xe0000000
               mov rB, #1
               str rB, [rA, #0x100]
               mov rB, #2
               str rB, [rA, #0x110]

       rather than having to synthesize both large constants from scratch.

       Therefore, we calculate how many insns would be required to emit
       the constant starting from `best_start', and also starting from
       zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
       yield a shorter sequence, we may as well use zero.  */
    if (best_start != 0
        && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
        && (count_insns_for_constant (remainder, 0) <=
            count_insns_for_constant (remainder, best_start)))
      best_start = 0;

    /* Now start emitting the insns.  */
    i = best_start;
    do
      {
        int end;

        if (i <= 0)
          i += 32;
        if (remainder & (3 << (i - 2)))
          {
            end = i - 8;
            if (end < 0)
              end += 32;
            temp1 = remainder & ((0x0ff << end)
                                 | ((i < end) ? (0xff >> (32 - end)) : 0));
            remainder &= ~temp1;

            if (generate)
              {
                rtx new_src, temp1_rtx;

                if (code == SET || code == MINUS)
                  {
                    new_src = (subtargets ? gen_reg_rtx (mode) : target);
                    if (can_invert && code != MINUS)
                      temp1 = ~temp1;
                  }
                else
                  {
                    if (remainder && subtargets)
                      new_src = gen_reg_rtx (mode);
                    else
                      new_src = target;
                    if (can_invert)
                      temp1 = ~temp1;
                    else if (can_negate)
                      temp1 = -temp1;
                  }

                temp1 = trunc_int_for_mode (temp1, mode);
                temp1_rtx = GEN_INT (temp1);

                if (code == SET)
                  ;
                else if (code == MINUS)
                  temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
                else
                  temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);

                emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
                source = new_src;
              }

            if (code == SET)
              {
                can_invert = 0;
                code = PLUS;
              }
            else if (code == MINUS)
              code = PLUS;

            insns++;
            i -= 6;
          }
        i -= 2;
      }
    while (remainder);
  }

  return insns;
}

/* Canonicalize a comparison so that we are more likely to recognize it.
   This can be done for a few constant compares, where we can make the
   immediate value easier to load.  */
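/* For example, (GT x #0xffffff) becomes (GE x #0x1000000): the original
   constant would have to be synthesized in several insns, while the new
   one is a valid immediate (1 rotated into place).  */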

enum rtx_code
arm_canonicalize_comparison (code, op1)
     enum rtx_code code;
     rtx * op1;
{
  unsigned HOST_WIDE_INT i = INTVAL (*op1);

  switch (code)
    {
    case EQ:
    case NE:
      return code;

    case GT:
    case LE:
      if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
                - 1)
          && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
        {
          *op1 = GEN_INT (i + 1);
          return code == GT ? GE : LT;
        }
      break;

    case GE:
    case LT:
      if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
          && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
        {
          *op1 = GEN_INT (i - 1);
          return code == GE ? GT : LE;
        }
      break;

    case GTU:
    case LEU:
      if (i != ~((unsigned HOST_WIDE_INT) 0)
          && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
        {
          *op1 = GEN_INT (i + 1);
          return code == GTU ? GEU : LTU;
        }
      break;

    case GEU:
    case LTU:
      if (i != 0
          && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
        {
          *op1 = GEN_INT (i - 1);
          return code == GEU ? GTU : LEU;
        }
      break;

    default:
      abort ();
    }

  return code;
}
1778 /* Decide whether a type should be returned in memory (true)
1779 or in a register (false). This is called by the macro
1780 RETURN_IN_MEMORY. */
1781
1782 int
1783 arm_return_in_memory (type)
1784 tree type;
1785 {
1786 HOST_WIDE_INT size;
1787
1788 if (!AGGREGATE_TYPE_P (type))
1789 /* All simple types are returned in registers. */
1790 return 0;
1791
1792 size = int_size_in_bytes (type);
1793
1794 if (TARGET_ATPCS)
1795 {
1796 /* ATPCS returns aggregate types in memory only if they are
1797 larger than a word (or are variable size). */
1798 return (size < 0 || size > UNITS_PER_WORD);
1799 }
1800
1801 /* For the arm-wince targets we choose to be compitable with Microsoft's
1802 ARM and Thumb compilers, which always return aggregates in memory. */
1803 #ifndef ARM_WINCE
1804 /* All structures/unions bigger than one word are returned in memory.
1805 Also catch the case where int_size_in_bytes returns -1. In this case
1806 the aggregate is either huge or of varaible size, and in either case
1807 we will want to return it via memory and not in a register. */
1808 if (size < 0 || size > UNITS_PER_WORD)
1809 return 1;
1810
1811 if (TREE_CODE (type) == RECORD_TYPE)
1812 {
1813 tree field;
1814
1815 /* For a struct the APCS says that we only return in a register
1816 if the type is 'integer like' and every addressable element
1817 has an offset of zero. For practical purposes this means
1818 that the structure can have at most one non bit-field element
1819 and that this element must be the first one in the structure. */
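      /* For example (illustrative, assuming 4-byte words):
           struct { int i; }       -- returned in a register
           struct { float f; }     -- returned in memory (float field)
           struct { short a, b; }  -- returned in memory (the second
                                      field is addressable)  */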
1820
1821 /* Find the first field, ignoring non FIELD_DECL things which will
1822 have been created by C++. */
1823 for (field = TYPE_FIELDS (type);
1824 field && TREE_CODE (field) != FIELD_DECL;
1825 field = TREE_CHAIN (field))
1826 continue;
1827
1828 if (field == NULL)
1829 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1830
1831 /* Check that the first field is valid for returning in a register. */
1832
1833 /* ... Floats are not allowed */
1834 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1835 return 1;
1836
1837 /* ... Aggregates that are not themselves valid for returning in
1838 a register are not allowed. */
1839 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1840 return 1;
1841
1842 /* Now check the remaining fields, if any. Only bitfields are allowed,
1843 since they are not addressable. */
1844 for (field = TREE_CHAIN (field);
1845 field;
1846 field = TREE_CHAIN (field))
1847 {
1848 if (TREE_CODE (field) != FIELD_DECL)
1849 continue;
1850
1851 if (!DECL_BIT_FIELD_TYPE (field))
1852 return 1;
1853 }
1854
1855 return 0;
1856 }
1857
1858 if (TREE_CODE (type) == UNION_TYPE)
1859 {
1860 tree field;
1861
1862 /* Unions can be returned in registers if every element is
1863 integral, or can be returned in an integer register. */
1864 for (field = TYPE_FIELDS (type);
1865 field;
1866 field = TREE_CHAIN (field))
1867 {
1868 if (TREE_CODE (field) != FIELD_DECL)
1869 continue;
1870
1871 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1872 return 1;
1873
1874 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1875 return 1;
1876 }
1877
1878 return 0;
1879 }
1880 #endif /* not ARM_WINCE */
1881
1882 /* Return all other types in memory. */
1883 return 1;
1884 }
1885
1886 /* Indicate whether or not words of a double are in big-endian order. */
1887
1888 int
1889 arm_float_words_big_endian ()
1890 {
1891
1892 /* For FPA, float words are always big-endian. For VFP, float words
1893 follow the memory system mode. */
1894
1895 if (TARGET_HARD_FLOAT)
1896 {
1897 /* FIXME: TARGET_HARD_FLOAT currently implies FPA. */
1898 return 1;
1899 }
1900
1901 if (TARGET_VFP)
1902 return (TARGET_BIG_END ? 1 : 0);
1903
1904 return 1;
1905 }
1906
1907 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1908 for a call to a function whose data type is FNTYPE.
1909 For a library call, FNTYPE is NULL. */
1910 void
1911 arm_init_cumulative_args (pcum, fntype, libname, indirect)
1912 CUMULATIVE_ARGS * pcum;
1913 tree fntype;
1914 rtx libname ATTRIBUTE_UNUSED;
1915 int indirect ATTRIBUTE_UNUSED;
1916 {
1917 /* On the ARM, the offset starts at 0. */
1918 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype))) ? 1 : 0);
1919
1920 pcum->call_cookie = CALL_NORMAL;
1921
1922 if (TARGET_LONG_CALLS)
1923 pcum->call_cookie = CALL_LONG;
1924
1925 /* Check for long call/short call attributes. The attributes
1926 override any command line option. */
1927 if (fntype)
1928 {
1929 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
1930 pcum->call_cookie = CALL_SHORT;
1931 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
1932 pcum->call_cookie = CALL_LONG;
1933 }
1934 }
1935
1936 /* Determine where to put an argument to a function.
1937 Value is zero to push the argument on the stack,
1938 or a hard register in which to store the argument.
1939
1940 MODE is the argument's machine mode.
1941 TYPE is the data type of the argument (as a tree).
1942 This is null for libcalls where that information may
1943 not be available.
1944 CUM is a variable of type CUMULATIVE_ARGS which gives info about
1945 the preceding args and about the function being called.
1946 NAMED is nonzero if this argument is a named parameter
1947 (otherwise it is an extra parameter matching an ellipsis). */
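/* For example (illustrative): with f (int a, int b, int c, int d, int e),
   A..D occupy r0-r3 (the NUM_ARG_REGS argument registers) and E goes on
   the stack, so for E this function returns NULL_RTX.  */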
1948
1949 rtx
1950 arm_function_arg (pcum, mode, type, named)
1951 CUMULATIVE_ARGS * pcum;
1952 enum machine_mode mode;
1953 tree type ATTRIBUTE_UNUSED;
1954 int named;
1955 {
1956 if (mode == VOIDmode)
1957 /* Compute operand 2 of the call insn. */
1958 return GEN_INT (pcum->call_cookie);
1959
1960 if (!named || pcum->nregs >= NUM_ARG_REGS)
1961 return NULL_RTX;
1962
1963 return gen_rtx_REG (mode, pcum->nregs);
1964 }
1965
1966 /* Variable sized types are passed by reference. This is a GCC
1967 extension to the ARM ABI. */
1968
1969 int
1970 arm_function_arg_pass_by_reference (cum, mode, type, named)
1971 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
1972 enum machine_mode mode ATTRIBUTE_UNUSED;
1973 tree type;
1974 int named ATTRIBUTE_UNUSED;
1975 {
1976 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
1977 }
1978
1979 /* Implement va_arg. */
1980
1981 rtx
1982 arm_va_arg (valist, type)
1983 tree valist, type;
1984 {
1985 /* Variable sized types are passed by reference. */
1986 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1987 {
1988 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
1989 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
1990 }
1991
1992 return std_expand_builtin_va_arg (valist, type);
1993 }
1994 \f
1995 /* Encode the current state of the #pragma [no_]long_calls. */
1996 typedef enum
1997 {
1998 OFF, /* No #pragma [no_]long_calls is in effect. */
1999 LONG, /* #pragma long_calls is in effect. */
2000 SHORT /* #pragma no_long_calls is in effect. */
2001 } arm_pragma_enum;
2002
2003 static arm_pragma_enum arm_pragma_long_calls = OFF;
2004
2005 void
2006 arm_pr_long_calls (pfile)
2007 cpp_reader * pfile ATTRIBUTE_UNUSED;
2008 {
2009 arm_pragma_long_calls = LONG;
2010 }
2011
2012 void
2013 arm_pr_no_long_calls (pfile)
2014 cpp_reader * pfile ATTRIBUTE_UNUSED;
2015 {
2016 arm_pragma_long_calls = SHORT;
2017 }
2018
2019 void
2020 arm_pr_long_calls_off (pfile)
2021 cpp_reader * pfile ATTRIBUTE_UNUSED;
2022 {
2023 arm_pragma_long_calls = OFF;
2024 }
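/* Example usage of the pragmas (illustrative):

     #pragma long_calls
     void far_away (void);    -- calls use the long-call sequence
     #pragma long_calls_off
     void nearby (void);      -- back to the command-line default  */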
2025 \f
2026 /* Table of machine attributes. */
2027 const struct attribute_spec arm_attribute_table[] =
2028 {
2029 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2030 /* Function calls made to this symbol must be done indirectly, because
2031 it may lie outside of the 26 bit addressing range of a normal function
2032 call. */
2033 { "long_call", 0, 0, false, true, true, NULL },
2034 /* Whereas these functions are always known to reside within the 26 bit
2035 addressing range. */
2036 { "short_call", 0, 0, false, true, true, NULL },
2037 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2038 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2039 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2040 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2041 #ifdef ARM_PE
2042 /* ARM/PE has three new attributes:
2043 interfacearm - ?
2044 dllexport - for exporting a function/variable that will live in a dll
2045 dllimport - for importing a function/variable from a dll
2046
2047 Microsoft allows multiple declspecs in one __declspec, separating
2048 them with spaces. We do NOT support this. Instead, use __declspec
2049 multiple times.
2050 */
2051 { "dllimport", 0, 0, true, false, false, NULL },
2052 { "dllexport", 0, 0, true, false, false, NULL },
2053 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2054 #endif
2055 { NULL, 0, 0, false, false, false, NULL }
2056 };
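/* Example use of the call attributes (illustrative):

     extern void dsp_entry (void) __attribute__ ((long_call));

   forces every call to dsp_entry to be made indirectly through a
   register, even without -mlong-calls, so the function may live
   anywhere in the address space.  */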
2057
2058 /* Handle an attribute requiring a FUNCTION_DECL;
2059 arguments as in struct attribute_spec.handler. */
2060
2061 static tree
2062 arm_handle_fndecl_attribute (node, name, args, flags, no_add_attrs)
2063 tree * node;
2064 tree name;
2065 tree args ATTRIBUTE_UNUSED;
2066 int flags ATTRIBUTE_UNUSED;
2067 bool * no_add_attrs;
2068 {
2069 if (TREE_CODE (*node) != FUNCTION_DECL)
2070 {
2071 warning ("`%s' attribute only applies to functions",
2072 IDENTIFIER_POINTER (name));
2073 *no_add_attrs = true;
2074 }
2075
2076 return NULL_TREE;
2077 }
2078
2079 /* Handle an "interrupt" or "isr" attribute;
2080 arguments as in struct attribute_spec.handler. */
2081
2082 static tree
2083 arm_handle_isr_attribute (node, name, args, flags, no_add_attrs)
2084 tree * node;
2085 tree name;
2086 tree args;
2087 int flags;
2088 bool * no_add_attrs;
2089 {
2090 if (DECL_P (*node))
2091 {
2092 if (TREE_CODE (*node) != FUNCTION_DECL)
2093 {
2094 warning ("`%s' attribute only applies to functions",
2095 IDENTIFIER_POINTER (name));
2096 *no_add_attrs = true;
2097 }
2098 /* FIXME: the argument if any is checked for type attributes;
2099 should it be checked for decl ones? */
2100 }
2101 else
2102 {
2103 if (TREE_CODE (*node) == FUNCTION_TYPE
2104 || TREE_CODE (*node) == METHOD_TYPE)
2105 {
2106 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2107 {
2108 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2109 *no_add_attrs = true;
2110 }
2111 }
2112 else if (TREE_CODE (*node) == POINTER_TYPE
2113 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2114 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2115 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2116 {
2117 *node = build_type_copy (*node);
2118 TREE_TYPE (*node) = build_type_attribute_variant
2119 (TREE_TYPE (*node),
2120 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2121 *no_add_attrs = true;
2122 }
2123 else
2124 {
2125 /* Possibly pass this attribute on from the type to a decl. */
2126 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2127 | (int) ATTR_FLAG_FUNCTION_NEXT
2128 | (int) ATTR_FLAG_ARRAY_NEXT))
2129 {
2130 *no_add_attrs = true;
2131 return tree_cons (name, args, NULL_TREE);
2132 }
2133 else
2134 {
2135 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2136 }
2137 }
2138 }
2139
2140 return NULL_TREE;
2141 }
2142
2143 /* Return 0 if the attributes for two types are incompatible, 1 if they
2144 are compatible, and 2 if they are nearly compatible (which causes a
2145 warning to be generated). */
2146
2147 static int
2148 arm_comp_type_attributes (type1, type2)
2149 tree type1;
2150 tree type2;
2151 {
2152 int l1, l2, s1, s2;
2153
2154 /* Check for mismatch of non-default calling convention. */
2155 if (TREE_CODE (type1) != FUNCTION_TYPE)
2156 return 1;
2157
2158 /* Check for mismatched call attributes. */
2159 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2160 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2161 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2162 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2163
2164 /* Only bother to check if an attribute is defined. */
2165 if (l1 | l2 | s1 | s2)
2166 {
2167 /* If one type has an attribute, the other must have the same attribute. */
2168 if ((l1 != l2) || (s1 != s2))
2169 return 0;
2170
2171 /* Disallow mixed attributes. */
2172 if ((l1 & s2) || (l2 & s1))
2173 return 0;
2174 }
2175
2176 /* Check for mismatched ISR attribute. */
2177 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2178 if (! l1)
2179 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2180 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2181 if (! l2)
2182 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2183 if (l1 != l2)
2184 return 0;
2185
2186 return 1;
2187 }
2188
2189 /* Encode long_call or short_call attribute by prefixing
2190 symbol name in DECL with a special character FLAG. */
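/* E.g. (illustrative) a symbol "foo" becomes "<FLAG>foo"; the prefix is
   later recognized by the ENCODED_LONG_CALL_ATTR_P and
   ENCODED_SHORT_CALL_ATTR_P tests and stripped before output.  */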
2191
2192 void
2193 arm_encode_call_attribute (decl, flag)
2194 tree decl;
2195 int flag;
2196 {
2197 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2198 int len = strlen (str);
2199 char * newstr;
2200
2201 /* Do not allow weak functions to be treated as short call. */
2202 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2203 return;
2204
2205 newstr = alloca (len + 2);
2206 newstr[0] = flag;
2207 strcpy (newstr + 1, str);
2208
2209 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2210 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2211 }
2212
2213 /* Assigns default attributes to a newly defined type. This is used to
2214 set short_call/long_call attributes for function types of
2215 functions defined inside corresponding #pragma scopes. */
2216
2217 static void
2218 arm_set_default_type_attributes (type)
2219 tree type;
2220 {
2221 /* Add __attribute__ ((long_call)) to all functions when inside
2222 #pragma long_calls, or __attribute__ ((short_call)) when inside
2223 #pragma no_long_calls. */
2224 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2225 {
2226 tree type_attr_list, attr_name;
2227 type_attr_list = TYPE_ATTRIBUTES (type);
2228
2229 if (arm_pragma_long_calls == LONG)
2230 attr_name = get_identifier ("long_call");
2231 else if (arm_pragma_long_calls == SHORT)
2232 attr_name = get_identifier ("short_call");
2233 else
2234 return;
2235
2236 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2237 TYPE_ATTRIBUTES (type) = type_attr_list;
2238 }
2239 }
2240 \f
2241 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2242 defined within the current compilation unit. If this cannot be
2243 determined, then 0 is returned. */
2244
2245 static int
2246 current_file_function_operand (sym_ref)
2247 rtx sym_ref;
2248 {
2249 /* This is a bit of a fib. A function will have a short call flag
2250 applied to its name if it has the short call attribute, or it has
2251 already been defined within the current compilation unit. */
2252 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2253 return 1;
2254
2255 /* The current function is always defined within the current compilation
2256 unit. If it is a weak definition, however, then this may not be the real
2257 definition of the function, and so we have to say no. */
2258 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2259 && !DECL_WEAK (current_function_decl))
2260 return 1;
2261
2262 /* We cannot make the determination - default to returning 0. */
2263 return 0;
2264 }
2265
2266 /* Return nonzero if a 32 bit "long_call" should be generated for
2267 this call. We generate a long_call if the function:
2268
2269 a. has an __attribute__ ((long_call))
2270 or b. is within the scope of a #pragma long_calls
2271 or c. the -mlong-calls command line switch has been specified
2272
2273 However we do not generate a long call if the function:
2274
2275 d. has an __attribute__ ((short_call))
2276 or e. is inside the scope of a #pragma no_long_calls
2277 or f. has an __attribute__ ((section))
2278 or g. is defined within the current compilation unit.
2279
2280 This function will be called by C fragments contained in the machine
2281 description file. CALL_REF and CALL_COOKIE correspond to the matched
2282 rtl operands. CALL_SYMBOL is used to distinguish between
2283 two different callers of the function. It is set to 1 in the
2284 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2285 and "call_value" patterns. This is because of the difference in the
2286 SYM_REFs passed by these patterns. */
2287
2288 int
2289 arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
2290 rtx sym_ref;
2291 int call_cookie;
2292 int call_symbol;
2293 {
2294 if (!call_symbol)
2295 {
2296 if (GET_CODE (sym_ref) != MEM)
2297 return 0;
2298
2299 sym_ref = XEXP (sym_ref, 0);
2300 }
2301
2302 if (GET_CODE (sym_ref) != SYMBOL_REF)
2303 return 0;
2304
2305 if (call_cookie & CALL_SHORT)
2306 return 0;
2307
2308 if (TARGET_LONG_CALLS && flag_function_sections)
2309 return 1;
2310
2311 if (current_file_function_operand (sym_ref))
2312 return 0;
2313
2314 return (call_cookie & CALL_LONG)
2315 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2316 || TARGET_LONG_CALLS;
2317 }
2318
2319 /* Return nonzero if it is ok to make a tail-call to DECL. */
2320
2321 static bool
2322 arm_function_ok_for_sibcall (decl, exp)
2323 tree decl;
2324 tree exp ATTRIBUTE_UNUSED;
2325 {
2326 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2327
2328 /* Never tailcall something for which we have no decl, or if we
2329 are in Thumb mode. */
2330 if (decl == NULL || TARGET_THUMB)
2331 return false;
2332
2333 /* Get the calling method. */
2334 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2335 call_type = CALL_SHORT;
2336 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2337 call_type = CALL_LONG;
2338
2339 /* Cannot tail-call to long calls, since these are out of range of
2340 a branch instruction. However, if not compiling PIC, we know
2341 we can reach the symbol if it is in this compilation unit. */
2342 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2343 return false;
2344
2345 /* If we are interworking and the function is not declared static
2346 then we can't tail-call it unless we know that it exists in this
2347 compilation unit (since it might be a Thumb routine). */
2348 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2349 return false;
2350
2351 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2352 if (IS_INTERRUPT (arm_current_func_type ()))
2353 return false;
2354
2355 /* Everything else is ok. */
2356 return true;
2357 }
2358
2359 \f
2360 int
2361 legitimate_pic_operand_p (x)
2362 rtx x;
2363 {
2364 if (CONSTANT_P (x)
2365 && flag_pic
2366 && (GET_CODE (x) == SYMBOL_REF
2367 || (GET_CODE (x) == CONST
2368 && GET_CODE (XEXP (x, 0)) == PLUS
2369 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2370 return 0;
2371
2372 return 1;
2373 }
2374
2375 rtx
2376 legitimize_pic_address (orig, mode, reg)
2377 rtx orig;
2378 enum machine_mode mode;
2379 rtx reg;
2380 {
2381 if (GET_CODE (orig) == SYMBOL_REF
2382 || GET_CODE (orig) == LABEL_REF)
2383 {
2384 #ifndef AOF_ASSEMBLER
2385 rtx pic_ref, address;
2386 #endif
2387 rtx insn;
2388 int subregs = 0;
2389
2390 if (reg == 0)
2391 {
2392 if (no_new_pseudos)
2393 abort ();
2394 else
2395 reg = gen_reg_rtx (Pmode);
2396
2397 subregs = 1;
2398 }
2399
2400 #ifdef AOF_ASSEMBLER
2401 /* The AOF assembler can generate relocations for these directly, and
2402 understands that the PIC register has to be added into the offset. */
2403 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2404 #else
2405 if (subregs)
2406 address = gen_reg_rtx (Pmode);
2407 else
2408 address = reg;
2409
2410 if (TARGET_ARM)
2411 emit_insn (gen_pic_load_addr_arm (address, orig));
2412 else
2413 emit_insn (gen_pic_load_addr_thumb (address, orig));
2414
2415 if ((GET_CODE (orig) == LABEL_REF
2416 || (GET_CODE (orig) == SYMBOL_REF
2417 && ENCODED_SHORT_CALL_ATTR_P (XSTR (orig, 0))))
2418 && NEED_GOT_RELOC)
2419 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2420 else
2421 {
2422 pic_ref = gen_rtx_MEM (Pmode,
2423 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2424 address));
2425 RTX_UNCHANGING_P (pic_ref) = 1;
2426 }
2427
2428 insn = emit_move_insn (reg, pic_ref);
2429 #endif
2430 current_function_uses_pic_offset_table = 1;
2431 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2432 by loop. */
2433 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2434 REG_NOTES (insn));
2435 return reg;
2436 }
2437 else if (GET_CODE (orig) == CONST)
2438 {
2439 rtx base, offset;
2440
2441 if (GET_CODE (XEXP (orig, 0)) == PLUS
2442 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2443 return orig;
2444
2445 if (reg == 0)
2446 {
2447 if (no_new_pseudos)
2448 abort ();
2449 else
2450 reg = gen_reg_rtx (Pmode);
2451 }
2452
2453 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2454 {
2455 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2456 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2457 base == reg ? 0 : reg);
2458 }
2459 else
2460 abort ();
2461
2462 if (GET_CODE (offset) == CONST_INT)
2463 {
2464 /* The base register doesn't really matter, we only want to
2465 test the index for the appropriate mode. */
2466 ARM_GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
2467
2468 if (!no_new_pseudos)
2469 offset = force_reg (Pmode, offset);
2470 else
2471 abort ();
2472
2473 win:
2474 if (GET_CODE (offset) == CONST_INT)
2475 return plus_constant (base, INTVAL (offset));
2476 }
2477
2478 if (GET_MODE_SIZE (mode) > 4
2479 && (GET_MODE_CLASS (mode) == MODE_INT
2480 || TARGET_SOFT_FLOAT))
2481 {
2482 emit_insn (gen_addsi3 (reg, base, offset));
2483 return reg;
2484 }
2485
2486 return gen_rtx_PLUS (Pmode, base, offset);
2487 }
2488
2489 return orig;
2490 }
2491
2492 /* Generate code to load the PIC register. PROLOGUE is true if
2493 called from arm_expand_prologue (in which case we want the
2494 generated insns at the start of the function); false if called
2495 by an exception receiver that needs the PIC register reloaded
2496 (in which case the insns are just dumped at the current location). */
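/* The ARM sequence emitted is, schematically (illustrative):

       ldr     rPIC, L2         @ L2: .word _GLOBAL_OFFSET_TABLE_-(L1+8)
   L1: add     rPIC, pc, rPIC

   The 8 (4 on Thumb) compensates for the PC read-ahead noted below.  */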
2497
2498 void
2499 arm_finalize_pic (prologue)
2500 int prologue ATTRIBUTE_UNUSED;
2501 {
2502 #ifndef AOF_ASSEMBLER
2503 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2504 rtx global_offset_table;
2505
2506 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2507 return;
2508
2509 if (!flag_pic)
2510 abort ();
2511
2512 start_sequence ();
2513 l1 = gen_label_rtx ();
2514
2515 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2516 /* On the ARM the PC register contains 'dot + 8' at the time of the
2517 addition; on the Thumb it is 'dot + 4'. */
2518 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2519 if (GOT_PCREL)
2520 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2521 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2522 else
2523 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2524
2525 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2526
2527 if (TARGET_ARM)
2528 {
2529 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2530 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2531 }
2532 else
2533 {
2534 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2535 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2536 }
2537
2538 seq = get_insns ();
2539 end_sequence ();
2540 if (prologue)
2541 emit_insn_after (seq, get_insns ());
2542 else
2543 emit_insn (seq);
2544
2545 /* Need to emit this whether or not we obey regdecls,
2546 since setjmp/longjmp can cause life info to screw up. */
2547 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2548 #endif /* AOF_ASSEMBLER */
2549 }
2550
2551 #define REG_OR_SUBREG_REG(X) \
2552 (GET_CODE (X) == REG \
2553 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
2554
2555 #define REG_OR_SUBREG_RTX(X) \
2556 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
2557
2558 #ifndef COSTS_N_INSNS
2559 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
2560 #endif
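/* With this fallback definition COSTS_N_INSNS (1) == 2 and
   COSTS_N_INSNS (2) == 6, i.e. each extra insn costs 4 units.  */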
2561
2562 int
2563 arm_rtx_costs (x, code, outer)
2564 rtx x;
2565 enum rtx_code code;
2566 enum rtx_code outer;
2567 {
2568 enum machine_mode mode = GET_MODE (x);
2569 enum rtx_code subcode;
2570 int extra_cost;
2571
2572 if (TARGET_THUMB)
2573 {
2574 switch (code)
2575 {
2576 case ASHIFT:
2577 case ASHIFTRT:
2578 case LSHIFTRT:
2579 case ROTATERT:
2580 case PLUS:
2581 case MINUS:
2582 case COMPARE:
2583 case NEG:
2584 case NOT:
2585 return COSTS_N_INSNS (1);
2586
2587 case MULT:
2588 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2589 {
2590 int cycles = 0;
2591 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
2592
2593 while (i)
2594 {
2595 i >>= 2;
2596 cycles++;
2597 }
2598 return COSTS_N_INSNS (2) + cycles;
2599 }
2600 return COSTS_N_INSNS (1) + 16;
2601
2602 case SET:
2603 return (COSTS_N_INSNS (1)
2604 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
2605 + (GET_CODE (SET_DEST (x)) == MEM)));
2606
2607 case CONST_INT:
2608 if (outer == SET)
2609 {
2610 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
2611 return 0;
2612 if (thumb_shiftable_const (INTVAL (x)))
2613 return COSTS_N_INSNS (2);
2614 return COSTS_N_INSNS (3);
2615 }
2616 else if (outer == PLUS
2617 && INTVAL (x) < 256 && INTVAL (x) > -256)
2618 return 0;
2619 else if (outer == COMPARE
2620 && (unsigned HOST_WIDE_INT) INTVAL (x) < 256)
2621 return 0;
2622 else if (outer == ASHIFT || outer == ASHIFTRT
2623 || outer == LSHIFTRT)
2624 return 0;
2625 return COSTS_N_INSNS (2);
2626
2627 case CONST:
2628 case CONST_DOUBLE:
2629 case LABEL_REF:
2630 case SYMBOL_REF:
2631 return COSTS_N_INSNS (3);
2632
2633 case UDIV:
2634 case UMOD:
2635 case DIV:
2636 case MOD:
2637 return 100;
2638
2639 case TRUNCATE:
2640 return 99;
2641
2642 case AND:
2643 case XOR:
2644 case IOR:
2645 /* XXX guess. */
2646 return 8;
2647
2648 case ADDRESSOF:
2649 case MEM:
2650 /* XXX another guess. */
2651 /* Memory costs quite a lot for the first word, but subsequent words
2652 load at the equivalent of a single insn each. */
2653 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
2654 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
2655 ? 4 : 0));
2656
2657 case IF_THEN_ELSE:
2658 /* XXX a guess. */
2659 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
2660 return 14;
2661 return 2;
2662
2663 case ZERO_EXTEND:
2664 /* XXX still guessing. */
2665 switch (GET_MODE (XEXP (x, 0)))
2666 {
2667 case QImode:
2668 return (1 + (mode == DImode ? 4 : 0)
2669 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2670
2671 case HImode:
2672 return (4 + (mode == DImode ? 4 : 0)
2673 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2674
2675 case SImode:
2676 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2677
2678 default:
2679 return 99;
2680 }
2681
2682 default:
2683 return 99;
2684 #if 0
2685 case FFS:
2686 case FLOAT:
2687 case FIX:
2688 case UNSIGNED_FIX:
2689 /* XXX guess */
2690 fprintf (stderr, "unexpected code for thumb in rtx_costs: %s\n",
2691 rtx_name[code]);
2692 abort ();
2693 #endif
2694 }
2695 }
2696
2697 switch (code)
2698 {
2699 case MEM:
2700 /* Memory costs quite a lot for the first word, but subsequent words
2701 load at the equivalent of a single insn each. */
2702 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
2703 + (GET_CODE (x) == SYMBOL_REF
2704 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
2705
2706 case DIV:
2707 case MOD:
2708 return 100;
2709
2710 case ROTATE:
2711 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
2712 return 4;
2713 /* Fall through */
2714 case ROTATERT:
2715 if (mode != SImode)
2716 return 8;
2717 /* Fall through */
2718 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
2719 if (mode == DImode)
2720 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
2721 + ((GET_CODE (XEXP (x, 0)) == REG
2722 || (GET_CODE (XEXP (x, 0)) == SUBREG
2723 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
2724 ? 0 : 8));
2725 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
2726 || (GET_CODE (XEXP (x, 0)) == SUBREG
2727 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
2728 ? 0 : 4)
2729 + ((GET_CODE (XEXP (x, 1)) == REG
2730 || (GET_CODE (XEXP (x, 1)) == SUBREG
2731 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
2732 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
2733 ? 0 : 4));
2734
2735 case MINUS:
2736 if (mode == DImode)
2737 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
2738 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
2739 || (GET_CODE (XEXP (x, 0)) == CONST_INT
2740 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
2741 ? 0 : 8));
2742
2743 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2744 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2745 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2746 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
2747 ? 0 : 8)
2748 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
2749 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
2750 && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
2751 ? 0 : 8));
2752
2753 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
2754 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
2755 && REG_OR_SUBREG_REG (XEXP (x, 1))))
2756 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
2757 || subcode == ASHIFTRT || subcode == LSHIFTRT
2758 || subcode == ROTATE || subcode == ROTATERT
2759 || (subcode == MULT
2760 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
2761 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
2762 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
2763 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
2764 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
2765 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
2766 && REG_OR_SUBREG_REG (XEXP (x, 0))))
2767 return 1;
2768 /* Fall through */
2769
2770 case PLUS:
2771 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2772 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
2773 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2774 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2775 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
2776 ? 0 : 8));
2777
2778 /* Fall through */
2779 case AND: case XOR: case IOR:
2780 extra_cost = 0;
2781
2782 /* Normally the frame registers will be split into reg+const during
2783 reload, so it is a bad idea to combine them with other instructions,
2784 since then they might not be moved outside of loops. As a compromise
2785 we allow integration with ops that have a constant as their second
2786 operand. */
2787 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
2788 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
2789 && GET_CODE (XEXP (x, 1)) != CONST_INT)
2790 || (REG_OR_SUBREG_REG (XEXP (x, 1))
2791 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
2792 extra_cost = 4;
2793
2794 if (mode == DImode)
2795 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
2796 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2797 || (GET_CODE (XEXP (x, 1)) == CONST_INT
2798 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
2799 ? 0 : 8));
2800
2801 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
2802 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
2803 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2804 || (GET_CODE (XEXP (x, 1)) == CONST_INT
2805 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
2806 ? 0 : 4));
2807
2808 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
2809 return (1 + extra_cost
2810 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
2811 || subcode == LSHIFTRT || subcode == ASHIFTRT
2812 || subcode == ROTATE || subcode == ROTATERT
2813 || (subcode == MULT
2814 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2815 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
2816 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
2817 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
2818 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
2819 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
2820 ? 0 : 4));
2821
2822 return 8;
2823
2824 case MULT:
2825 /* There is no point basing this on the tuning, since it is always the
2826 fast variant if it exists at all. */
2827 if (arm_fast_multiply && mode == DImode
2828 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
2829 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2830 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
2831 return 8;
2832
2833 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2834 || mode == DImode)
2835 return 30;
2836
2837 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2838 {
2839 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
2840 & (unsigned HOST_WIDE_INT) 0xffffffff);
2841 int add_cost = const_ok_for_arm (i) ? 4 : 8;
2842 int j;
2843
2844 /* Tune as appropriate. */
2845 int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
2846
2847 for (j = 0; i && j < 32; j += booth_unit_size)
2848 {
2849 i >>= booth_unit_size;
2850 add_cost += 2;
2851 }
2852
2853 return add_cost;
2854 }
2855
2856 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
2857 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
2858 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
2859
2860 case TRUNCATE:
2861 if (arm_fast_multiply && mode == SImode
2862 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
2863 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
2864 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
2865 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
2866 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
2867 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
2868 return 8;
2869 return 99;
2870
2871 case NEG:
2872 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2873 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
2874 /* Fall through */
2875 case NOT:
2876 if (mode == DImode)
2877 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
2878
2879 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
2880
2881 case IF_THEN_ELSE:
2882 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
2883 return 14;
2884 return 2;
2885
2886 case COMPARE:
2887 return 1;
2888
2889 case ABS:
2890 return 4 + (mode == DImode ? 4 : 0);
2891
2892 case SIGN_EXTEND:
2893 if (GET_MODE (XEXP (x, 0)) == QImode)
2894 return (4 + (mode == DImode ? 4 : 0)
2895 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2896 /* Fall through */
2897 case ZERO_EXTEND:
2898 switch (GET_MODE (XEXP (x, 0)))
2899 {
2900 case QImode:
2901 return (1 + (mode == DImode ? 4 : 0)
2902 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2903
2904 case HImode:
2905 return (4 + (mode == DImode ? 4 : 0)
2906 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2907
2908 case SImode:
2909 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2910
2911 default:
2912 break;
2913 }
2914 abort ();
2915
2916 case CONST_INT:
2917 if (const_ok_for_arm (INTVAL (x)))
2918 return outer == SET ? 2 : -1;
2919 else if (outer == AND
2920 && const_ok_for_arm (~INTVAL (x)))
2921 return -1;
2922 else if ((outer == COMPARE
2923 || outer == PLUS || outer == MINUS)
2924 && const_ok_for_arm (-INTVAL (x)))
2925 return -1;
2926 else
2927 return 5;
2928
2929 case CONST:
2930 case LABEL_REF:
2931 case SYMBOL_REF:
2932 return 6;
2933
2934 case CONST_DOUBLE:
2935 if (const_double_rtx_ok_for_fpu (x))
2936 return outer == SET ? 2 : -1;
2937 else if ((outer == COMPARE || outer == PLUS)
2938 && neg_const_double_rtx_ok_for_fpu (x))
2939 return -1;
2940 return 7;
2941
2942 default:
2943 return 99;
2944 }
2945 }
2946
2947 static int
2948 arm_adjust_cost (insn, link, dep, cost)
2949 rtx insn;
2950 rtx link;
2951 rtx dep;
2952 int cost;
2953 {
2954 rtx i_pat, d_pat;
2955
2956 /* Some true dependencies can have a higher cost depending
2957 on precisely how certain input operands are used. */
2958 if (arm_is_xscale
2959 && REG_NOTE_KIND (link) == 0
2960 && recog_memoized (insn) >= 0
2961 && recog_memoized (dep) >= 0)
2962 {
2963 int shift_opnum = get_attr_shift (insn);
2964 enum attr_type attr_type = get_attr_type (dep);
2965
2966 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
2967 operand for INSN. If we have a shifted input operand and the
2968 instruction we depend on is another ALU instruction, then we may
2969 have to account for an additional stall. */
2970 if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
2971 {
2972 rtx shifted_operand;
2973 int opno;
2974
2975 /* Get the shifted operand. */
2976 extract_insn (insn);
2977 shifted_operand = recog_data.operand[shift_opnum];
2978
2979 /* Iterate over all the operands in DEP. If we write an operand
2980 that overlaps with SHIFTED_OPERAND, then we have to increase the
2981 cost of this dependency. */
2982 extract_insn (dep);
2983 preprocess_constraints ();
2984 for (opno = 0; opno < recog_data.n_operands; opno++)
2985 {
2986 /* We can ignore strict inputs. */
2987 if (recog_data.operand_type[opno] == OP_IN)
2988 continue;
2989
2990 if (reg_overlap_mentioned_p (recog_data.operand[opno],
2991 shifted_operand))
2992 return 2;
2993 }
2994 }
2995 }
2996
2997 /* XXX This is not strictly true for the FPA. */
2998 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
2999 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
3000 return 0;
3001
3002 /* Call insns don't incur a stall, even if they follow a load. */
3003 if (REG_NOTE_KIND (link) == 0
3004 && GET_CODE (insn) == CALL_INSN)
3005 return 1;
3006
3007 if ((i_pat = single_set (insn)) != NULL
3008 && GET_CODE (SET_SRC (i_pat)) == MEM
3009 && (d_pat = single_set (dep)) != NULL
3010 && GET_CODE (SET_DEST (d_pat)) == MEM)
3011 {
3012 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
3013 /* This is a load after a store; there is no conflict if the load reads
3014 from a cached area. Assume that loads from the stack, and from the
3015 constant pool are cached, and that others will miss. This is a
3016 hack. */
3017
3018 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
3019 || reg_mentioned_p (stack_pointer_rtx, src_mem)
3020 || reg_mentioned_p (frame_pointer_rtx, src_mem)
3021 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
3022 return 1;
3023 }
3024
3025 return cost;
3026 }
3027
3028 /* This code has been fixed for cross compilation. */
3029
3030 static int fpa_consts_inited = 0;
3031
3032 static const char * const strings_fpa[8] =
3033 {
3034 "0", "1", "2", "3",
3035 "4", "5", "0.5", "10"
3036 };
3037
3038 static REAL_VALUE_TYPE values_fpa[8];
3039
3040 static void
3041 init_fpa_table ()
3042 {
3043 int i;
3044 REAL_VALUE_TYPE r;
3045
3046 for (i = 0; i < 8; i++)
3047 {
3048 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
3049 values_fpa[i] = r;
3050 }
3051
3052 fpa_consts_inited = 1;
3053 }
3054
3055 /* Return TRUE if rtx X is a valid immediate FPU constant. */
3056
3057 int
3058 const_double_rtx_ok_for_fpu (x)
3059 rtx x;
3060 {
3061 REAL_VALUE_TYPE r;
3062 int i;
3063
3064 if (!fpa_consts_inited)
3065 init_fpa_table ();
3066
3067 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3068 if (REAL_VALUE_MINUS_ZERO (r))
3069 return 0;
3070
3071 for (i = 0; i < 8; i++)
3072 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3073 return 1;
3074
3075 return 0;
3076 }
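/* So, for instance (illustrative), 2.0 and 0.5 can be used directly as
   FPA immediates, while 7.0 matches no table entry and must be loaded
   from memory.  */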
3077
3078 /* Return TRUE if rtx X is a valid immediate FPU constant when negated. */
3079
3080 int
3081 neg_const_double_rtx_ok_for_fpu (x)
3082 rtx x;
3083 {
3084 REAL_VALUE_TYPE r;
3085 int i;
3086
3087 if (!fpa_consts_inited)
3088 init_fpa_table ();
3089
3090 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3091 r = REAL_VALUE_NEGATE (r);
3092 if (REAL_VALUE_MINUS_ZERO (r))
3093 return 0;
3094
3095 for (i = 0; i < 8; i++)
3096 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3097 return 1;
3098
3099 return 0;
3100 }
3101 \f
3102 /* Predicates for `match_operand' and `match_operator'. */
3103
3104 /* s_register_operand is the same as register_operand, but it doesn't accept
3105 (SUBREG (MEM)...).
3106
3107 This function exists because at the time it was put in it led to better
3108 code. SUBREG(MEM) always needs a reload in the places where
3109 s_register_operand is used, and this seemed to lead to excessive
3110 reloading. */
3111
3112 int
3113 s_register_operand (op, mode)
3114 rtx op;
3115 enum machine_mode mode;
3116 {
3117 if (GET_MODE (op) != mode && mode != VOIDmode)
3118 return 0;
3119
3120 if (GET_CODE (op) == SUBREG)
3121 op = SUBREG_REG (op);
3122
3123 /* We don't consider registers whose class is NO_REGS
3124 to be a register operand. */
3125 /* XXX might have to check for lo regs only for thumb ??? */
3126 return (GET_CODE (op) == REG
3127 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3128 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3129 }
3130
3131 /* A hard register operand (even before reload). */
3132
3133 int
3134 arm_hard_register_operand (op, mode)
3135 rtx op;
3136 enum machine_mode mode;
3137 {
3138 if (GET_MODE (op) != mode && mode != VOIDmode)
3139 return 0;
3140
3141 return (GET_CODE (op) == REG
3142 && REGNO (op) < FIRST_PSEUDO_REGISTER);
3143 }
3144
3145 /* Only accept reg, subreg(reg), const_int. */
3146
3147 int
3148 reg_or_int_operand (op, mode)
3149 rtx op;
3150 enum machine_mode mode;
3151 {
3152 if (GET_CODE (op) == CONST_INT)
3153 return 1;
3154
3155 if (GET_MODE (op) != mode && mode != VOIDmode)
3156 return 0;
3157
3158 if (GET_CODE (op) == SUBREG)
3159 op = SUBREG_REG (op);
3160
3161 /* We don't consider registers whose class is NO_REGS
3162 to be a register operand. */
3163 return (GET_CODE (op) == REG
3164 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3165 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3166 }
3167
3168 /* Return 1 if OP is an item in memory, given that we are in reload. */
3169
3170 int
3171 arm_reload_memory_operand (op, mode)
3172 rtx op;
3173 enum machine_mode mode ATTRIBUTE_UNUSED;
3174 {
3175 int regno = true_regnum (op);
3176
3177 return (!CONSTANT_P (op)
3178 && (regno == -1
3179 || (GET_CODE (op) == REG
3180 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
3181 }
3182
3183 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
3184 memory access (architecture V4).
3185 MODE is QImode if called when computing constraints, or VOIDmode when
3186 emitting patterns. In this latter case we cannot use memory_operand()
3187 because it will fail on badly formed MEMs, which is precisely what we are
3188 trying to catch. */
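/* E.g. (illustrative): on ARMv4, ldrsb only has an 8-bit immediate
   offset, so (mem (plus (reg) (const_int 4))) is fine, while
   (mem (plus (reg) (const_int 256))) must have its address computed
   into a register first.  */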
3189
3190 int
3191 bad_signed_byte_operand (op, mode)
3192 rtx op;
3193 enum machine_mode mode ATTRIBUTE_UNUSED;
3194 {
3195 #if 0
3196 if ((mode == QImode && !memory_operand (op, mode)) || GET_CODE (op) != MEM)
3197 return 0;
3198 #endif
3199 if (GET_CODE (op) != MEM)
3200 return 0;
3201
3202 op = XEXP (op, 0);
3203
3204 /* A sum of anything more complex than reg + reg or reg + const is bad. */
3205 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3206 && (!s_register_operand (XEXP (op, 0), VOIDmode)
3207 || (!s_register_operand (XEXP (op, 1), VOIDmode)
3208 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
3209 return 1;
3210
3211 /* Big constants are also bad. */
3212 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
3213 && (INTVAL (XEXP (op, 1)) > 0xff
3214 || -INTVAL (XEXP (op, 1)) > 0xff))
3215 return 1;
3216
3217 /* Everything else is good, or will automatically be made so. */
3218 return 0;
3219 }
3220
3221 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
3222
3223 int
3224 arm_rhs_operand (op, mode)
3225 rtx op;
3226 enum machine_mode mode;
3227 {
3228 return (s_register_operand (op, mode)
3229 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
3230 }
3231
3232 /* Return TRUE for valid operands for the
3233 rhs of an ARM instruction, or a load. */
3234
3235 int
3236 arm_rhsm_operand (op, mode)
3237 rtx op;
3238 enum machine_mode mode;
3239 {
3240 return (s_register_operand (op, mode)
3241 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
3242 || memory_operand (op, mode));
3243 }
3244
3245 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
3246 constant that is valid when negated. */
3247
3248 int
3249 arm_add_operand (op, mode)
3250 rtx op;
3251 enum machine_mode mode;
3252 {
3253 if (TARGET_THUMB)
3254 return thumb_cmp_operand (op, mode);
3255
3256 return (s_register_operand (op, mode)
3257 || (GET_CODE (op) == CONST_INT
3258 && (const_ok_for_arm (INTVAL (op))
3259 || const_ok_for_arm (-INTVAL (op)))));
3260 }
3261
3262 int
3263 arm_not_operand (op, mode)
3264 rtx op;
3265 enum machine_mode mode;
3266 {
3267 return (s_register_operand (op, mode)
3268 || (GET_CODE (op) == CONST_INT
3269 && (const_ok_for_arm (INTVAL (op))
3270 || const_ok_for_arm (~INTVAL (op)))));
3271 }
3272
3273 /* Return TRUE if the operand is a memory reference which contains an
3274 offsettable address. */
3275
3276 int
3277 offsettable_memory_operand (op, mode)
3278 rtx op;
3279 enum machine_mode mode;
3280 {
3281 if (mode == VOIDmode)
3282 mode = GET_MODE (op);
3283
3284 return (mode == GET_MODE (op)
3285 && GET_CODE (op) == MEM
3286 && offsettable_address_p (reload_completed | reload_in_progress,
3287 mode, XEXP (op, 0)));
3288 }
3289
3290 /* Return TRUE if the operand is a memory reference which is, or can be
3291 made word aligned by adjusting the offset. */
3292
3293 int
3294 alignable_memory_operand (op, mode)
3295 rtx op;
3296 enum machine_mode mode;
3297 {
3298 rtx reg;
3299
3300 if (mode == VOIDmode)
3301 mode = GET_MODE (op);
3302
3303 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
3304 return 0;
3305
3306 op = XEXP (op, 0);
3307
3308 return ((GET_CODE (reg = op) == REG
3309 || (GET_CODE (op) == SUBREG
3310 && GET_CODE (reg = SUBREG_REG (op)) == REG)
3311 || (GET_CODE (op) == PLUS
3312 && GET_CODE (XEXP (op, 1)) == CONST_INT
3313 && (GET_CODE (reg = XEXP (op, 0)) == REG
3314 || (GET_CODE (XEXP (op, 0)) == SUBREG
3315 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
3316 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
3317 }
3318
3319 /* Similar to s_register_operand, but does not allow hard integer
3320 registers. */
3321
3322 int
3323 f_register_operand (op, mode)
3324 rtx op;
3325 enum machine_mode mode;
3326 {
3327 if (GET_MODE (op) != mode && mode != VOIDmode)
3328 return 0;
3329
3330 if (GET_CODE (op) == SUBREG)
3331 op = SUBREG_REG (op);
3332
3333 /* We don't consider registers whose class is NO_REGS
3334 to be a register operand. */
3335 return (GET_CODE (op) == REG
3336 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3337 || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
3338 }
3339
3340 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
3341
3342 int
3343 fpu_rhs_operand (op, mode)
3344 rtx op;
3345 enum machine_mode mode;
3346 {
3347 if (s_register_operand (op, mode))
3348 return TRUE;
3349
3350 if (GET_MODE (op) != mode && mode != VOIDmode)
3351 return FALSE;
3352
3353 if (GET_CODE (op) == CONST_DOUBLE)
3354 return const_double_rtx_ok_for_fpu (op);
3355
3356 return FALSE;
3357 }
3358
3359 int
3360 fpu_add_operand (op, mode)
3361 rtx op;
3362 enum machine_mode mode;
3363 {
3364 if (s_register_operand (op, mode))
3365 return TRUE;
3366
3367 if (GET_MODE (op) != mode && mode != VOIDmode)
3368 return FALSE;
3369
3370 if (GET_CODE (op) == CONST_DOUBLE)
3371 return (const_double_rtx_ok_for_fpu (op)
3372 || neg_const_double_rtx_ok_for_fpu (op));
3373
3374 return FALSE;
3375 }
3376
3377 /* Return nonzero if OP is a constant power of two. */
3378
3379 int
3380 power_of_two_operand (op, mode)
3381 rtx op;
3382 enum machine_mode mode ATTRIBUTE_UNUSED;
3383 {
3384 if (GET_CODE (op) == CONST_INT)
3385 {
3386 HOST_WIDE_INT value = INTVAL (op);
3387
3388 return value != 0 && (value & (value - 1)) == 0;
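      /* A power of two has a single bit set, so clearing the lowest
         set bit (value & (value - 1)) leaves zero: 8 & 7 == 0, but
         12 & 11 == 8.  */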
3389 }
3390
3391 return FALSE;
3392 }
3393
3394 /* Return TRUE for a valid operand of a DImode operation.
3395 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
3396 Note that this disallows MEM(REG+REG), but allows
3397 MEM(PRE/POST_INC/DEC(REG)). */
3398
3399 int
3400 di_operand (op, mode)
3401 rtx op;
3402 enum machine_mode mode;
3403 {
3404 if (s_register_operand (op, mode))
3405 return TRUE;
3406
3407 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
3408 return FALSE;
3409
3410 if (GET_CODE (op) == SUBREG)
3411 op = SUBREG_REG (op);
3412
3413 switch (GET_CODE (op))
3414 {
3415 case CONST_DOUBLE:
3416 case CONST_INT:
3417 return TRUE;
3418
3419 case MEM:
3420 return memory_address_p (DImode, XEXP (op, 0));
3421
3422 default:
3423 return FALSE;
3424 }
3425 }
3426
3427 /* Like di_operand, but don't accept constants. */
3428
3429 int
3430 nonimmediate_di_operand (op, mode)
3431 rtx op;
3432 enum machine_mode mode;
3433 {
3434 if (s_register_operand (op, mode))
3435 return TRUE;
3436
3437 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
3438 return FALSE;
3439
3440 if (GET_CODE (op) == SUBREG)
3441 op = SUBREG_REG (op);
3442
3443 if (GET_CODE (op) == MEM)
3444 return memory_address_p (DImode, XEXP (op, 0));
3445
3446 return FALSE;
3447 }
3448
3449 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
3450 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
3451 Note that this disallows MEM(REG+REG), but allows
3452 MEM(PRE/POST_INC/DEC(REG)). */
3453
3454 int
3455 soft_df_operand (op, mode)
3456 rtx op;
3457 enum machine_mode mode;
3458 {
3459 if (s_register_operand (op, mode))
3460 return TRUE;
3461
3462 if (mode != VOIDmode && GET_MODE (op) != mode)
3463 return FALSE;
3464
3465 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
3466 return FALSE;
3467
3468 if (GET_CODE (op) == SUBREG)
3469 op = SUBREG_REG (op);
3470
3471 switch (GET_CODE (op))
3472 {
3473 case CONST_DOUBLE:
3474 return TRUE;
3475
3476 case MEM:
3477 return memory_address_p (DFmode, XEXP (op, 0));
3478
3479 default:
3480 return FALSE;
3481 }
3482 }
3483
3484 /* Like soft_df_operand, but don't accept constants. */
3485
3486 int
3487 nonimmediate_soft_df_operand (op, mode)
3488 rtx op;
3489 enum machine_mode mode;
3490 {
3491 if (s_register_operand (op, mode))
3492 return TRUE;
3493
3494 if (mode != VOIDmode && GET_MODE (op) != mode)
3495 return FALSE;
3496
3497 if (GET_CODE (op) == SUBREG)
3498 op = SUBREG_REG (op);
3499
3500 if (GET_CODE (op) == MEM)
3501 return memory_address_p (DFmode, XEXP (op, 0));
3502 return FALSE;
3503 }
3504
3505 /* Return TRUE for valid index operands. */
3506
3507 int
3508 index_operand (op, mode)
3509 rtx op;
3510 enum machine_mode mode;
3511 {
3512 return (s_register_operand (op, mode)
3513 || (immediate_operand (op, mode)
3514 && (GET_CODE (op) != CONST_INT
3515 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
3516 }
3517
3518 /* Return TRUE for valid shifts by a constant. This also accepts any
3519 power of two on the (somewhat overly relaxed) assumption that the
3520 shift operator in this case was a mult. */
3521
3522 int
3523 const_shift_operand (op, mode)
3524 rtx op;
3525 enum machine_mode mode;
3526 {
3527 return (power_of_two_operand (op, mode)
3528 || (immediate_operand (op, mode)
3529 && (GET_CODE (op) != CONST_INT
3530 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
3531 }
3532
3533 /* Return TRUE for arithmetic operators which can be combined with a multiply
3534 (shift). */
3535
3536 int
3537 shiftable_operator (x, mode)
3538 rtx x;
3539 enum machine_mode mode;
3540 {
3541 enum rtx_code code;
3542
3543 if (GET_MODE (x) != mode)
3544 return FALSE;
3545
3546 code = GET_CODE (x);
3547
3548 return (code == PLUS || code == MINUS
3549 || code == IOR || code == XOR || code == AND);
3550 }
3551
3552 /* Return TRUE for binary logical operators. */
3553
3554 int
3555 logical_binary_operator (x, mode)
3556 rtx x;
3557 enum machine_mode mode;
3558 {
3559 enum rtx_code code;
3560
3561 if (GET_MODE (x) != mode)
3562 return FALSE;
3563
3564 code = GET_CODE (x);
3565
3566 return (code == IOR || code == XOR || code == AND);
3567 }
3568
3569 /* Return TRUE for shift operators. */
3570
3571 int
3572 shift_operator (x, mode)
3573 rtx x;
3574 enum machine_mode mode;
3575 {
3576 enum rtx_code code;
3577
3578 if (GET_MODE (x) != mode)
3579 return FALSE;
3580
3581 code = GET_CODE (x);
3582
3583 if (code == MULT)
3584 return power_of_two_operand (XEXP (x, 1), mode);
3585
3586 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
3587 || code == ROTATERT);
3588 }
3589
3590 /* Return TRUE if x is EQ or NE. */
3591
3592 int
3593 equality_operator (x, mode)
3594 rtx x;
3595 enum machine_mode mode ATTRIBUTE_UNUSED;
3596 {
3597 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
3598 }
3599
3600 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
3601
3602 int
3603 arm_comparison_operator (x, mode)
3604 rtx x;
3605 enum machine_mode mode;
3606 {
3607 return (comparison_operator (x, mode)
3608 && GET_CODE (x) != LTGT
3609 && GET_CODE (x) != UNEQ);
3610 }
3611
3612 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
3613
3614 int
3615 minmax_operator (x, mode)
3616 rtx x;
3617 enum machine_mode mode;
3618 {
3619 enum rtx_code code = GET_CODE (x);
3620
3621 if (GET_MODE (x) != mode)
3622 return FALSE;
3623
3624 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
3625 }
3626
3627 /* Return TRUE if this is the condition code register; if we aren't given
3628 a mode, accept any class CCmode register. */
3629
3630 int
3631 cc_register (x, mode)
3632 rtx x;
3633 enum machine_mode mode;
3634 {
3635 if (mode == VOIDmode)
3636 {
3637 mode = GET_MODE (x);
3638
3639 if (GET_MODE_CLASS (mode) != MODE_CC)
3640 return FALSE;
3641 }
3642
3643 if ( GET_MODE (x) == mode
3644 && GET_CODE (x) == REG
3645 && REGNO (x) == CC_REGNUM)
3646 return TRUE;
3647
3648 return FALSE;
3649 }
3650
3651 /* Return TRUE if this is the condition code register; if we aren't given
3652 a mode, accept any class CCmode register which indicates a dominance
3653 expression. */
3654
3655 int
3656 dominant_cc_register (x, mode)
3657 rtx x;
3658 enum machine_mode mode;
3659 {
3660 if (mode == VOIDmode)
3661 {
3662 mode = GET_MODE (x);
3663
3664 if (GET_MODE_CLASS (mode) != MODE_CC)
3665 return FALSE;
3666 }
3667
3668 if ( mode != CC_DNEmode && mode != CC_DEQmode
3669 && mode != CC_DLEmode && mode != CC_DLTmode
3670 && mode != CC_DGEmode && mode != CC_DGTmode
3671 && mode != CC_DLEUmode && mode != CC_DLTUmode
3672 && mode != CC_DGEUmode && mode != CC_DGTUmode)
3673 return FALSE;
3674
3675 return cc_register (x, mode);
3676 }
3677
3678 /* Return TRUE if X references a SYMBOL_REF. */
3679
3680 int
3681 symbol_mentioned_p (x)
3682 rtx x;
3683 {
3684 const char * fmt;
3685 int i;
3686
3687 if (GET_CODE (x) == SYMBOL_REF)
3688 return 1;
3689
3690 fmt = GET_RTX_FORMAT (GET_CODE (x));
3691
3692 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3693 {
3694 if (fmt[i] == 'E')
3695 {
3696 int j;
3697
3698 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3699 if (symbol_mentioned_p (XVECEXP (x, i, j)))
3700 return 1;
3701 }
3702 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
3703 return 1;
3704 }
3705
3706 return 0;
3707 }
3708
3709 /* Return TRUE if X references a LABEL_REF. */
3710
3711 int
3712 label_mentioned_p (x)
3713 rtx x;
3714 {
3715 const char * fmt;
3716 int i;
3717
3718 if (GET_CODE (x) == LABEL_REF)
3719 return 1;
3720
3721 fmt = GET_RTX_FORMAT (GET_CODE (x));
3722 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3723 {
3724 if (fmt[i] == 'E')
3725 {
3726 int j;
3727
3728 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3729 if (label_mentioned_p (XVECEXP (x, i, j)))
3730 return 1;
3731 }
3732 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
3733 return 1;
3734 }
3735
3736 return 0;
3737 }
3738
3739 enum rtx_code
3740 minmax_code (x)
3741 rtx x;
3742 {
3743 enum rtx_code code = GET_CODE (x);
3744
3745 if (code == SMAX)
3746 return GE;
3747 else if (code == SMIN)
3748 return LE;
3749 else if (code == UMIN)
3750 return LEU;
3751 else if (code == UMAX)
3752 return GEU;
3753
3754 abort ();
3755 }
3756
3757 /* Return 1 if memory locations are adjacent. */
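/* E.g. (illustrative) (mem (reg 4)) and (mem (plus (reg 4) (const_int 4)))
   are adjacent words, candidates for a single ldm/stm.  */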
3758
3759 int
3760 adjacent_mem_locations (a, b)
3761 rtx a, b;
3762 {
3763 if ((GET_CODE (XEXP (a, 0)) == REG
3764 || (GET_CODE (XEXP (a, 0)) == PLUS
3765 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
3766 && (GET_CODE (XEXP (b, 0)) == REG
3767 || (GET_CODE (XEXP (b, 0)) == PLUS
3768 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
3769 {
3770 int val0 = 0, val1 = 0;
3771 int reg0, reg1;
3772
3773 if (GET_CODE (XEXP (a, 0)) == PLUS)
3774 {
3775 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
3776 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
3777 }
3778 else
3779 reg0 = REGNO (XEXP (a, 0));
3780
3781 if (GET_CODE (XEXP (b, 0)) == PLUS)
3782 {
3783 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
3784 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
3785 }
3786 else
3787 reg1 = REGNO (XEXP (b, 0));
3788
3789 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
3790 }
3791 return 0;
3792 }
3793
3794 /* Return 1 if OP is a load multiple operation. It is known to be
3795 parallel and the first section will be tested. */
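/* Schematically (illustrative), "ldmia r0, {r4, r5}" is the PARALLEL

     (parallel [(set (reg:SI 4) (mem:SI (reg:SI 0)))
                (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0)
                                                 (const_int 4))))])

   with an optional leading base-register update for write-back forms.  */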
3796
3797 int
3798 load_multiple_operation (op, mode)
3799 rtx op;
3800 enum machine_mode mode ATTRIBUTE_UNUSED;
3801 {
3802 HOST_WIDE_INT count = XVECLEN (op, 0);
3803 int dest_regno;
3804 rtx src_addr;
3805 HOST_WIDE_INT i = 1, base = 0;
3806 rtx elt;
3807
3808 if (count <= 1
3809 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
3810 return 0;
3811
3812 /* Check to see if this might be a write-back. */
3813 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
3814 {
3815 i++;
3816 base = 1;
3817
3818 /* Now check it more carefully. */
3819 if (GET_CODE (SET_DEST (elt)) != REG
3820 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
3821 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
3822 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
3823 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
3824 return 0;
3825 }
3826
3827 /* Perform a quick check so we don't blow up below. */
3828 if (count <= i
3829 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
3830 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
3831 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
3832 return 0;
3833
3834 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
3835 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
3836
3837 for (; i < count; i++)
3838 {
3839 elt = XVECEXP (op, 0, i);
3840
3841 if (GET_CODE (elt) != SET
3842 || GET_CODE (SET_DEST (elt)) != REG
3843 || GET_MODE (SET_DEST (elt)) != SImode
3844 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
3845 || GET_CODE (SET_SRC (elt)) != MEM
3846 || GET_MODE (SET_SRC (elt)) != SImode
3847 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
3848 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
3849 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
3850 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
3851 return 0;
3852 }
3853
3854 return 1;
3855 }
3856
3857 /* Return 1 if OP is a store multiple operation. OP is known to be a
3858 PARALLEL, and its first element will be tested further here. */
3859
3860 int
3861 store_multiple_operation (op, mode)
3862 rtx op;
3863 enum machine_mode mode ATTRIBUTE_UNUSED;
3864 {
3865 HOST_WIDE_INT count = XVECLEN (op, 0);
3866 int src_regno;
3867 rtx dest_addr;
3868 HOST_WIDE_INT i = 1, base = 0;
3869 rtx elt;
3870
3871 if (count <= 1
3872 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
3873 return 0;
3874
3875 /* Check to see if this might be a write-back. */
3876 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
3877 {
3878 i++;
3879 base = 1;
3880
3881 /* Now check it more carefully. */
3882 if (GET_CODE (SET_DEST (elt)) != REG
3883 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
3884 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
3885 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
3886 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
3887 return 0;
3888 }
3889
3890 /* Perform a quick check so we don't blow up below. */
3891 if (count <= i
3892 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
3893 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
3894 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
3895 return 0;
3896
3897 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
3898 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
3899
3900 for (; i < count; i++)
3901 {
3902 elt = XVECEXP (op, 0, i);
3903
3904 if (GET_CODE (elt) != SET
3905 || GET_CODE (SET_SRC (elt)) != REG
3906 || GET_MODE (SET_SRC (elt)) != SImode
3907 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
3908 || GET_CODE (SET_DEST (elt)) != MEM
3909 || GET_MODE (SET_DEST (elt)) != SImode
3910 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
3911 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
3912 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
3913 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
3914 return 0;
3915 }
3916
3917 return 1;
3918 }
3919
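/* Analyse the NOPS register loads described by OPERANDS: the destination
   registers are in OPERANDS[0..NOPS-1] and the memory references in
   OPERANDS[NOPS..2*NOPS-1].  Return 0 if they cannot be combined into a
   single load-multiple, otherwise a code for the addressing mode to use:
   1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb, 5 = add scratch, then ldmia.
   On success REGS[] receives the sorted register list, *BASE the base
   register number and *LOAD_OFFSET the lowest memory offset. */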
3920 int
3921 load_multiple_sequence (operands, nops, regs, base, load_offset)
3922 rtx * operands;
3923 int nops;
3924 int * regs;
3925 int * base;
3926 HOST_WIDE_INT * load_offset;
3927 {
3928 int unsorted_regs[4];
3929 HOST_WIDE_INT unsorted_offsets[4];
3930 int order[4];
3931 int base_reg = -1;
3932 int i;
3933
3934 /* Can only handle 2, 3, or 4 insns at present,
3935 though could be easily extended if required. */
3936 if (nops < 2 || nops > 4)
3937 abort ();
3938
3939 /* Loop over the operands and check that the memory references are
3940 suitable (i.e. immediate offsets from the same base register). At
3941 the same time, extract the target register, and the memory
3942 offsets. */
3943 for (i = 0; i < nops; i++)
3944 {
3945 rtx reg;
3946 rtx offset;
3947
3948 /* Convert a subreg of a mem into the mem itself. */
3949 if (GET_CODE (operands[nops + i]) == SUBREG)
3950 operands[nops + i] = alter_subreg (operands + (nops + i));
3951
3952 if (GET_CODE (operands[nops + i]) != MEM)
3953 abort ();
3954
3955 /* Don't reorder volatile memory references; it doesn't seem worth
3956 looking for the case where the order is ok anyway. */
3957 if (MEM_VOLATILE_P (operands[nops + i]))
3958 return 0;
3959
3960 offset = const0_rtx;
3961
3962 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
3963 || (GET_CODE (reg) == SUBREG
3964 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
3965 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
3966 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
3967 == REG)
3968 || (GET_CODE (reg) == SUBREG
3969 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
3970 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
3971 == CONST_INT)))
3972 {
3973 if (i == 0)
3974 {
3975 base_reg = REGNO (reg);
3976 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
3977 ? REGNO (operands[i])
3978 : REGNO (SUBREG_REG (operands[i])));
3979 order[0] = 0;
3980 }
3981 else
3982 {
3983 if (base_reg != (int) REGNO (reg))
3984 /* Not addressed from the same base register. */
3985 return 0;
3986
3987 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
3988 ? REGNO (operands[i])
3989 : REGNO (SUBREG_REG (operands[i])));
3990 if (unsorted_regs[i] < unsorted_regs[order[0]])
3991 order[0] = i;
3992 }
3993
3994 /* If it isn't an integer register, or if it overwrites the
3995 base register but isn't the last insn in the list, then
3996 we can't do this. */
3997 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
3998 || (i != nops - 1 && unsorted_regs[i] == base_reg))
3999 return 0;
4000
4001 unsorted_offsets[i] = INTVAL (offset);
4002 }
4003 else
4004 /* Not a suitable memory address. */
4005 return 0;
4006 }
4007
4008 /* All the useful information has now been extracted from the
4009 operands into unsorted_regs and unsorted_offsets; additionally,
4010 order[0] has been set to the lowest numbered register in the
4011 list. Sort the registers into order, and check that the memory
4012 offsets are ascending and adjacent. */
4013
4014 for (i = 1; i < nops; i++)
4015 {
4016 int j;
4017
4018 order[i] = order[i - 1];
4019 for (j = 0; j < nops; j++)
4020 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4021 && (order[i] == order[i - 1]
4022 || unsorted_regs[j] < unsorted_regs[order[i]]))
4023 order[i] = j;
4024
4025 /* Have we found a suitable register? If not, one must be used more
4026 than once. */
4027 if (order[i] == order[i - 1])
4028 return 0;
4029
4030 /* Is the memory address adjacent and ascending? */
4031 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4032 return 0;
4033 }
4034
4035 if (base)
4036 {
4037 *base = base_reg;
4038
4039 for (i = 0; i < nops; i++)
4040 regs[i] = unsorted_regs[order[i]];
4041
4042 *load_offset = unsorted_offsets[order[0]];
4043 }
4044
4045 if (unsorted_offsets[order[0]] == 0)
4046 return 1; /* ldmia */
4047
4048 if (unsorted_offsets[order[0]] == 4)
4049 return 2; /* ldmib */
4050
4051 if (unsorted_offsets[order[nops - 1]] == 0)
4052 return 3; /* ldmda */
4053
4054 if (unsorted_offsets[order[nops - 1]] == -4)
4055 return 4; /* ldmdb */
4056
4057 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
4058 if the offset isn't small enough. The reason 2 ldrs are faster
4059 is because these ARMs are able to do more than one cache access
4060 in a single cycle. The ARM9 and StrongARM have Harvard caches,
4061 whilst the ARM8 has a double bandwidth cache. This means that
4062 these cores can do both an instruction fetch and a data fetch in
4063 a single cycle, so the trick of calculating the address into a
4064 scratch register (one of the result regs) and then doing a load
4065 multiple actually becomes slower (and no smaller in code size).
4066 That is the transformation
4067
4068 ldr rd1, [rbase + offset]
4069 ldr rd2, [rbase + offset + 4]
4070
4071 to
4072
4073 add rd1, rbase, offset
4074 ldmia rd1, {rd1, rd2}
4075
4076 produces worse code -- '3 cycles + any stalls on rd2' instead of
4077 '2 cycles + any stalls on rd2'. On ARMs with only one cache
4078 access per cycle, the first sequence could never complete in less
4079 than 6 cycles, whereas the ldm sequence would only take 5 and
4080 would make better use of sequential accesses if not hitting the
4081 cache.
4082
4083 We cheat here and test 'arm_ld_sched' which we currently know to
4084 only be true for the ARM8, ARM9 and StrongARM. If this ever
4085 changes, then the test below needs to be reworked. */
4086 if (nops == 2 && arm_ld_sched)
4087 return 0;
4088
4089 /* Can't do it without setting up the offset, only do this if it takes
4090 no more than one insn. */
4091 return (const_ok_for_arm (unsorted_offsets[order[0]])
4092 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
4093 }
4094
4095 const char *
4096 emit_ldm_seq (operands, nops)
4097 rtx * operands;
4098 int nops;
4099 {
4100 int regs[4];
4101 int base_reg;
4102 HOST_WIDE_INT offset;
4103 char buf[100];
4104 int i;
4105
4106 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4107 {
4108 case 1:
4109 strcpy (buf, "ldm%?ia\t");
4110 break;
4111
4112 case 2:
4113 strcpy (buf, "ldm%?ib\t");
4114 break;
4115
4116 case 3:
4117 strcpy (buf, "ldm%?da\t");
4118 break;
4119
4120 case 4:
4121 strcpy (buf, "ldm%?db\t");
4122 break;
4123
4124 case 5:
4125 if (offset >= 0)
4126 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4127 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4128 (long) offset);
4129 else
4130 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4131 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4132 (long) -offset);
4133 output_asm_insn (buf, operands);
4134 base_reg = regs[0];
4135 strcpy (buf, "ldm%?ia\t");
4136 break;
4137
4138 default:
4139 abort ();
4140 }
4141
4142 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4143 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4144
4145 for (i = 1; i < nops; i++)
4146 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4147 reg_names[regs[i]]);
4148
4149 strcat (buf, "}\t%@ phole ldm");
4150
4151 output_asm_insn (buf, operands);
4152 return "";
4153 }
4154
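/* The analogue of load_multiple_sequence for stores.  Return 0 if the NOPS
   stores in OPERANDS cannot be combined, otherwise 1 = stmia, 2 = stmib,
   3 = stmda or 4 = stmdb.  There is no add-then-stm variant here, since
   the load version's trick of reusing the first destination register as a
   scratch base has no counterpart for stores. */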
4155 int
4156 store_multiple_sequence (operands, nops, regs, base, load_offset)
4157 rtx * operands;
4158 int nops;
4159 int * regs;
4160 int * base;
4161 HOST_WIDE_INT * load_offset;
4162 {
4163 int unsorted_regs[4];
4164 HOST_WIDE_INT unsorted_offsets[4];
4165 int order[4];
4166 int base_reg = -1;
4167 int i;
4168
4169 /* Can only handle 2, 3, or 4 insns at present, though could be easily
4170 extended if required. */
4171 if (nops < 2 || nops > 4)
4172 abort ();
4173
4174 /* Loop over the operands and check that the memory references are
4175 suitable (i.e. immediate offsets from the same base register). At
4176 the same time, extract the target register, and the memory
4177 offsets. */
4178 for (i = 0; i < nops; i++)
4179 {
4180 rtx reg;
4181 rtx offset;
4182
4183 /* Convert a subreg of a mem into the mem itself. */
4184 if (GET_CODE (operands[nops + i]) == SUBREG)
4185 operands[nops + i] = alter_subreg (operands + (nops + i));
4186
4187 if (GET_CODE (operands[nops + i]) != MEM)
4188 abort ();
4189
4190 /* Don't reorder volatile memory references; it doesn't seem worth
4191 looking for the case where the order is ok anyway. */
4192 if (MEM_VOLATILE_P (operands[nops + i]))
4193 return 0;
4194
4195 offset = const0_rtx;
4196
4197 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4198 || (GET_CODE (reg) == SUBREG
4199 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4200 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4201 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4202 == REG)
4203 || (GET_CODE (reg) == SUBREG
4204 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4205 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4206 == CONST_INT)))
4207 {
4208 if (i == 0)
4209 {
4210 base_reg = REGNO (reg);
4211 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4212 ? REGNO (operands[i])
4213 : REGNO (SUBREG_REG (operands[i])));
4214 order[0] = 0;
4215 }
4216 else
4217 {
4218 if (base_reg != (int) REGNO (reg))
4219 /* Not addressed from the same base register. */
4220 return 0;
4221
4222 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
4223 ? REGNO (operands[i])
4224 : REGNO (SUBREG_REG (operands[i])));
4225 if (unsorted_regs[i] < unsorted_regs[order[0]])
4226 order[0] = i;
4227 }
4228
4229 /* If it isn't an integer register, then we can't do this. */
4230 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
4231 return 0;
4232
4233 unsorted_offsets[i] = INTVAL (offset);
4234 }
4235 else
4236 /* Not a suitable memory address. */
4237 return 0;
4238 }
4239
4240 /* All the useful information has now been extracted from the
4241 operands into unsorted_regs and unsorted_offsets; additionally,
4242 order[0] has been set to the lowest numbered register in the
4243 list. Sort the registers into order, and check that the memory
4244 offsets are ascending and adjacent. */
4245
4246 for (i = 1; i < nops; i++)
4247 {
4248 int j;
4249
4250 order[i] = order[i - 1];
4251 for (j = 0; j < nops; j++)
4252 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4253 && (order[i] == order[i - 1]
4254 || unsorted_regs[j] < unsorted_regs[order[i]]))
4255 order[i] = j;
4256
4257 /* Have we found a suitable register? If not, one must be used more
4258 than once. */
4259 if (order[i] == order[i - 1])
4260 return 0;
4261
4262 /* Is the memory address adjacent and ascending? */
4263 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4264 return 0;
4265 }
4266
4267 if (base)
4268 {
4269 *base = base_reg;
4270
4271 for (i = 0; i < nops; i++)
4272 regs[i] = unsorted_regs[order[i]];
4273
4274 *load_offset = unsorted_offsets[order[0]];
4275 }
4276
4277 if (unsorted_offsets[order[0]] == 0)
4278 return 1; /* stmia */
4279
4280 if (unsorted_offsets[order[0]] == 4)
4281 return 2; /* stmib */
4282
4283 if (unsorted_offsets[order[nops - 1]] == 0)
4284 return 3; /* stmda */
4285
4286 if (unsorted_offsets[order[nops - 1]] == -4)
4287 return 4; /* stmdb */
4288
4289 return 0;
4290 }
4291
4292 const char *
4293 emit_stm_seq (operands, nops)
4294 rtx * operands;
4295 int nops;
4296 {
4297 int regs[4];
4298 int base_reg;
4299 HOST_WIDE_INT offset;
4300 char buf[100];
4301 int i;
4302
4303 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4304 {
4305 case 1:
4306 strcpy (buf, "stm%?ia\t");
4307 break;
4308
4309 case 2:
4310 strcpy (buf, "stm%?ib\t");
4311 break;
4312
4313 case 3:
4314 strcpy (buf, "stm%?da\t");
4315 break;
4316
4317 case 4:
4318 strcpy (buf, "stm%?db\t");
4319 break;
4320
4321 default:
4322 abort ();
4323 }
4324
4325 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4326 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4327
4328 for (i = 1; i < nops; i++)
4329 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4330 reg_names[regs[i]]);
4331
4332 strcat (buf, "}\t%@ phole stm");
4333
4334 output_asm_insn (buf, operands);
4335 return "";
4336 }
4337
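/* Return nonzero if OP is a PARALLEL whose first element is a SET whose
   source is an UNSPEC_PUSH_MULT, i.e. the pattern used to push multiple
   registers onto the stack. */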
4338 int
4339 multi_register_push (op, mode)
4340 rtx op;
4341 enum machine_mode mode ATTRIBUTE_UNUSED;
4342 {
4343 if (GET_CODE (op) != PARALLEL
4344 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
4345 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
4346 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
4347 return 0;
4348
4349 return 1;
4350 }
4351 \f
4352 /* Routines for use in generating RTL. */
4353
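/* Build RTL to load COUNT consecutive words, starting at the address in
   FROM, into registers BASE_REGNO upwards.  UP selects ascending rather
   than descending addresses; if WRITE_BACK, FROM is advanced past the
   transfer.  The UNCHANGING_P, IN_STRUCT_P and SCALAR_P flags are copied
   onto each MEM generated.  The result is either a sequence of insns (the
   small XScale case below) or a PARALLEL for a load-multiple pattern. */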
4354 rtx
4355 arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
4356 in_struct_p, scalar_p)
4357 int base_regno;
4358 int count;
4359 rtx from;
4360 int up;
4361 int write_back;
4362 int unchanging_p;
4363 int in_struct_p;
4364 int scalar_p;
4365 {
4366 int i = 0, j;
4367 rtx result;
4368 int sign = up ? 1 : -1;
4369 rtx mem;
4370
4371 /* XScale has load-store double instructions, but they have stricter
4372 alignment requirements than load-store multiple, so we cannot
4373 use them.
4374
4375 For XScale ldm requires 2 + NREGS cycles to complete and blocks
4376 the pipeline until completion.
4377
4378 NREGS CYCLES
4379 1 3
4380 2 4
4381 3 5
4382 4 6
4383
4384 An ldr instruction takes 1-3 cycles, but does not block the
4385 pipeline.
4386
4387 NREGS CYCLES
4388 1 1-3
4389 2 2-6
4390 3 3-9
4391 4 4-12
4392
4393 Best case ldr will always win. However, the more ldr instructions
4394 we issue, the less likely we are to be able to schedule them well.
4395 Using ldr instructions also increases code size.
4396
4397 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
4398 for counts of 3 or 4 regs. */
4399 if (arm_is_xscale && count <= 2 && ! optimize_size)
4400 {
4401 rtx seq;
4402
4403 start_sequence ();
4404
4405 for (i = 0; i < count; i++)
4406 {
4407 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
4408 RTX_UNCHANGING_P (mem) = unchanging_p;
4409 MEM_IN_STRUCT_P (mem) = in_struct_p;
4410 MEM_SCALAR_P (mem) = scalar_p;
4411 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
4412 }
4413
4414 if (write_back)
4415 emit_move_insn (from, plus_constant (from, count * 4 * sign));
4416
4417 seq = get_insns ();
4418 end_sequence ();
4419
4420 return seq;
4421 }
4422
4423 result = gen_rtx_PARALLEL (VOIDmode,
4424 rtvec_alloc (count + (write_back ? 1 : 0)));
4425 if (write_back)
4426 {
4427 XVECEXP (result, 0, 0)
4428 = gen_rtx_SET (GET_MODE (from), from,
4429 plus_constant (from, count * 4 * sign));
4430 i = 1;
4431 count++;
4432 }
4433
4434 for (j = 0; i < count; i++, j++)
4435 {
4436 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
4437 RTX_UNCHANGING_P (mem) = unchanging_p;
4438 MEM_IN_STRUCT_P (mem) = in_struct_p;
4439 MEM_SCALAR_P (mem) = scalar_p;
4440 XVECEXP (result, 0, i)
4441 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
4442 }
4443
4444 return result;
4445 }
4446
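/* The store counterpart of arm_gen_load_multiple: build RTL to store
   registers BASE_REGNO upwards into COUNT consecutive words at the
   address in TO. */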
4447 rtx
4448 arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
4449 in_struct_p, scalar_p)
4450 int base_regno;
4451 int count;
4452 rtx to;
4453 int up;
4454 int write_back;
4455 int unchanging_p;
4456 int in_struct_p;
4457 int scalar_p;
4458 {
4459 int i = 0, j;
4460 rtx result;
4461 int sign = up ? 1 : -1;
4462 rtx mem;
4463
4464 /* See arm_gen_load_multiple for discussion of
4465 the pros/cons of ldm/stm usage for XScale. */
4466 if (arm_is_xscale && count <= 2 && ! optimize_size)
4467 {
4468 rtx seq;
4469
4470 start_sequence ();
4471
4472 for (i = 0; i < count; i++)
4473 {
4474 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
4475 RTX_UNCHANGING_P (mem) = unchanging_p;
4476 MEM_IN_STRUCT_P (mem) = in_struct_p;
4477 MEM_SCALAR_P (mem) = scalar_p;
4478 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
4479 }
4480
4481 if (write_back)
4482 emit_move_insn (to, plus_constant (to, count * 4 * sign));
4483
4484 seq = get_insns ();
4485 end_sequence ();
4486
4487 return seq;
4488 }
4489
4490 result = gen_rtx_PARALLEL (VOIDmode,
4491 rtvec_alloc (count + (write_back ? 1 : 0)));
4492 if (write_back)
4493 {
4494 XVECEXP (result, 0, 0)
4495 = gen_rtx_SET (GET_MODE (to), to,
4496 plus_constant (to, count * 4 * sign));
4497 i = 1;
4498 count++;
4499 }
4500
4501 for (j = 0; i < count; i++, j++)
4502 {
4503 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
4504 RTX_UNCHANGING_P (mem) = unchanging_p;
4505 MEM_IN_STRUCT_P (mem) = in_struct_p;
4506 MEM_SCALAR_P (mem) = scalar_p;
4507
4508 XVECEXP (result, 0, i)
4509 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
4510 }
4511
4512 return result;
4513 }
4514
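/* Expand a constant-sized block copy (the movstrqi pattern).  OPERANDS
   are the destination MEM, the source MEM, the byte count and the
   alignment.  Only copies of at most 64 bytes with word alignment are
   handled; return 1 on success, or 0 so that the caller falls back to a
   library call. */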
4515 int
4516 arm_gen_movstrqi (operands)
4517 rtx * operands;
4518 {
4519 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
4520 int i;
4521 rtx src, dst;
4522 rtx st_src, st_dst, fin_src, fin_dst;
4523 rtx part_bytes_reg = NULL;
4524 rtx mem;
4525 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
4526 int dst_scalar_p, src_scalar_p;
4527
4528 if (GET_CODE (operands[2]) != CONST_INT
4529 || GET_CODE (operands[3]) != CONST_INT
4530 || INTVAL (operands[2]) > 64
4531 || INTVAL (operands[3]) & 3)
4532 return 0;
4533
4534 st_dst = XEXP (operands[0], 0);
4535 st_src = XEXP (operands[1], 0);
4536
4537 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
4538 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
4539 dst_scalar_p = MEM_SCALAR_P (operands[0]);
4540 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
4541 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
4542 src_scalar_p = MEM_SCALAR_P (operands[1]);
4543
4544 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
4545 fin_src = src = copy_to_mode_reg (SImode, st_src);
4546
4547 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
4548 out_words_to_go = INTVAL (operands[2]) / 4;
4549 last_bytes = INTVAL (operands[2]) & 3;
4550
4551 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
4552 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
4553
4554 for (i = 0; in_words_to_go >= 2; i+=4)
4555 {
4556 if (in_words_to_go > 4)
4557 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
4558 src_unchanging_p,
4559 src_in_struct_p,
4560 src_scalar_p));
4561 else
4562 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
4563 FALSE, src_unchanging_p,
4564 src_in_struct_p, src_scalar_p));
4565
4566 if (out_words_to_go)
4567 {
4568 if (out_words_to_go > 4)
4569 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
4570 dst_unchanging_p,
4571 dst_in_struct_p,
4572 dst_scalar_p));
4573 else if (out_words_to_go != 1)
4574 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
4575 dst, TRUE,
4576 (last_bytes == 0
4577 ? FALSE : TRUE),
4578 dst_unchanging_p,
4579 dst_in_struct_p,
4580 dst_scalar_p));
4581 else
4582 {
4583 mem = gen_rtx_MEM (SImode, dst);
4584 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4585 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4586 MEM_SCALAR_P (mem) = dst_scalar_p;
4587 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
4588 if (last_bytes != 0)
4589 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
4590 }
4591 }
4592
4593 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
4594 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
4595 }
4596
4597 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
4598 if (out_words_to_go)
4599 {
4600 rtx sreg;
4601
4602 mem = gen_rtx_MEM (SImode, src);
4603 RTX_UNCHANGING_P (mem) = src_unchanging_p;
4604 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
4605 MEM_SCALAR_P (mem) = src_scalar_p;
4606 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
4607 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
4608
4609 mem = gen_rtx_MEM (SImode, dst);
4610 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4611 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4612 MEM_SCALAR_P (mem) = dst_scalar_p;
4613 emit_move_insn (mem, sreg);
4614 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
4615 in_words_to_go--;
4616
4617 if (in_words_to_go) /* Sanity check */
4618 abort ();
4619 }
4620
4621 if (in_words_to_go)
4622 {
4623 if (in_words_to_go < 0)
4624 abort ();
4625
4626 mem = gen_rtx_MEM (SImode, src);
4627 RTX_UNCHANGING_P (mem) = src_unchanging_p;
4628 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
4629 MEM_SCALAR_P (mem) = src_scalar_p;
4630 part_bytes_reg = copy_to_mode_reg (SImode, mem);
4631 }
4632
4633 if (last_bytes && part_bytes_reg == NULL)
4634 abort ();
4635
4636 if (BYTES_BIG_ENDIAN && last_bytes)
4637 {
4638 rtx tmp = gen_reg_rtx (SImode);
4639
4640 /* The bytes we want are in the top end of the word. */
4641 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
4642 GEN_INT (8 * (4 - last_bytes))));
4643 part_bytes_reg = tmp;
4644
4645 while (last_bytes)
4646 {
4647 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
4648 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4649 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4650 MEM_SCALAR_P (mem) = dst_scalar_p;
4651 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
4652
4653 if (--last_bytes)
4654 {
4655 tmp = gen_reg_rtx (SImode);
4656 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
4657 part_bytes_reg = tmp;
4658 }
4659 }
4660
4661 }
4662 else
4663 {
4664 if (last_bytes > 1)
4665 {
4666 mem = gen_rtx_MEM (HImode, dst);
4667 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4668 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4669 MEM_SCALAR_P (mem) = dst_scalar_p;
4670 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
4671 last_bytes -= 2;
4672 if (last_bytes)
4673 {
4674 rtx tmp = gen_reg_rtx (SImode);
4675
4676 emit_insn (gen_addsi3 (dst, dst, GEN_INT (2)));
4677 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
4678 part_bytes_reg = tmp;
4679 }
4680 }
4681
4682 if (last_bytes)
4683 {
4684 mem = gen_rtx_MEM (QImode, dst);
4685 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4686 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4687 MEM_SCALAR_P (mem) = dst_scalar_p;
4688 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
4689 }
4690 }
4691
4692 return 1;
4693 }
4694
4695 /* Generate a memory reference for a half word, such that it will be loaded
4696 into the top 16 bits of the word. We can assume that the address is
4697 known to be alignable and of the form reg, or plus (reg, const). */
4698
4699 rtx
4700 arm_gen_rotated_half_load (memref)
4701 rtx memref;
4702 {
4703 HOST_WIDE_INT offset = 0;
4704 rtx base = XEXP (memref, 0);
4705
4706 if (GET_CODE (base) == PLUS)
4707 {
4708 offset = INTVAL (XEXP (base, 1));
4709 base = XEXP (base, 0);
4710 }
4711
4712 /* If we aren't allowed to generate unaligned addresses, then fail. */
4713 if (TARGET_MMU_TRAPS
4714 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
4715 return NULL;
4716
4717 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
4718
4719 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
4720 return base;
4721
4722 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
4723 }
4724
4725 /* Select a dominance comparison mode if possible. We support three forms.
4726 COND_OR == 0 => (X && Y)
4727 COND_OR == 1 => ((!X) || Y)
4728 COND_OR == 2 => (X || Y)
4729 If we are unable to support a dominance comparison we return CCmode.
4730 This will then fail to match for the RTL expressions that generate this
4731 call. */
4732
4733 static enum machine_mode
4734 select_dominance_cc_mode (x, y, cond_or)
4735 rtx x;
4736 rtx y;
4737 HOST_WIDE_INT cond_or;
4738 {
4739 enum rtx_code cond1, cond2;
4740 int swapped = 0;
4741
4742 /* Currently we will probably get the wrong result if the individual
4743 comparisons are not simple. This also ensures that it is safe to
4744 reverse a comparison if necessary. */
4745 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
4746 != CCmode)
4747 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
4748 != CCmode))
4749 return CCmode;
4750
4751 /* The if_then_else variant of this tests the second condition if the
4752 first passes, but is true if the first fails. Reverse the first
4753 condition to get a true "inclusive-or" expression. */
4754 if (cond_or == 1)
4755 cond1 = reverse_condition (cond1);
4756
4757 /* If the comparisons are not equal, and one doesn't dominate the other,
4758 then we can't do this. */
4759 if (cond1 != cond2
4760 && !comparison_dominates_p (cond1, cond2)
4761 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
4762 return CCmode;
4763
4764 if (swapped)
4765 {
4766 enum rtx_code temp = cond1;
4767 cond1 = cond2;
4768 cond2 = temp;
4769 }
4770
4771 switch (cond1)
4772 {
4773 case EQ:
4774 if (cond2 == EQ || !cond_or)
4775 return CC_DEQmode;
4776
4777 switch (cond2)
4778 {
4779 case LE: return CC_DLEmode;
4780 case LEU: return CC_DLEUmode;
4781 case GE: return CC_DGEmode;
4782 case GEU: return CC_DGEUmode;
4783 default: break;
4784 }
4785
4786 break;
4787
4788 case LT:
4789 if (cond2 == LT || !cond_or)
4790 return CC_DLTmode;
4791 if (cond2 == LE)
4792 return CC_DLEmode;
4793 if (cond2 == NE)
4794 return CC_DNEmode;
4795 break;
4796
4797 case GT:
4798 if (cond2 == GT || !cond_or)
4799 return CC_DGTmode;
4800 if (cond2 == GE)
4801 return CC_DGEmode;
4802 if (cond2 == NE)
4803 return CC_DNEmode;
4804 break;
4805
4806 case LTU:
4807 if (cond2 == LTU || !cond_or)
4808 return CC_DLTUmode;
4809 if (cond2 == LEU)
4810 return CC_DLEUmode;
4811 if (cond2 == NE)
4812 return CC_DNEmode;
4813 break;
4814
4815 case GTU:
4816 if (cond2 == GTU || !cond_or)
4817 return CC_DGTUmode;
4818 if (cond2 == GEU)
4819 return CC_DGEUmode;
4820 if (cond2 == NE)
4821 return CC_DNEmode;
4822 break;
4823
4824 /* The remaining cases only occur when both comparisons are the
4825 same. */
4826 case NE:
4827 return CC_DNEmode;
4828
4829 case LE:
4830 return CC_DLEmode;
4831
4832 case GE:
4833 return CC_DGEmode;
4834
4835 case LEU:
4836 return CC_DLEUmode;
4837
4838 case GEU:
4839 return CC_DGEUmode;
4840
4841 default:
4842 break;
4843 }
4844
4845 abort ();
4846 }
4847
4848 enum machine_mode
4849 arm_select_cc_mode (op, x, y)
4850 enum rtx_code op;
4851 rtx x;
4852 rtx y;
4853 {
4854 /* All floating point compares return CCFP if it is an equality
4855 comparison, and CCFPE otherwise. */
4856 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
4857 {
4858 switch (op)
4859 {
4860 case EQ:
4861 case NE:
4862 case UNORDERED:
4863 case ORDERED:
4864 case UNLT:
4865 case UNLE:
4866 case UNGT:
4867 case UNGE:
4868 case UNEQ:
4869 case LTGT:
4870 return CCFPmode;
4871
4872 case LT:
4873 case LE:
4874 case GT:
4875 case GE:
4876 return CCFPEmode;
4877
4878 default:
4879 abort ();
4880 }
4881 }
4882
4883 /* A compare with a shifted operand. Because of canonicalization, the
4884 comparison will have to be swapped when we emit the assembler. */
4885 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
4886 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4887 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
4888 || GET_CODE (x) == ROTATERT))
4889 return CC_SWPmode;
4890
4891 /* This is a special case that is used by combine to allow a
4892 comparison of a shifted byte load to be split into a zero-extend
4893 followed by a comparison of the shifted integer (only valid for
4894 equalities and unsigned inequalities). */
4895 if (GET_MODE (x) == SImode
4896 && GET_CODE (x) == ASHIFT
4897 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
4898 && GET_CODE (XEXP (x, 0)) == SUBREG
4899 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
4900 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
4901 && (op == EQ || op == NE
4902 || op == GEU || op == GTU || op == LTU || op == LEU)
4903 && GET_CODE (y) == CONST_INT)
4904 return CC_Zmode;
4905
4906 /* A construct for a conditional compare: if the false arm contains
4907 0, then both conditions must be true; otherwise either condition
4908 must be true. Not all conditions are possible, so CCmode is
4909 returned if it can't be done. */
4910 if (GET_CODE (x) == IF_THEN_ELSE
4911 && (XEXP (x, 2) == const0_rtx
4912 || XEXP (x, 2) == const1_rtx)
4913 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4914 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4915 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
4916 INTVAL (XEXP (x, 2)));
4917
4918 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
4919 if (GET_CODE (x) == AND
4920 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4921 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4922 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1), 0);
4923
4924 if (GET_CODE (x) == IOR
4925 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4926 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4927 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1), 2);
4928
4929 /* For an operation that sets the condition codes as a side-effect, the
4930 V flag is not set correctly, so we can only use comparisons where
4931 this doesn't matter. (For LT and GE we can use "mi" and "pl"
4932 instead.) */
4933 if (GET_MODE (x) == SImode
4934 && y == const0_rtx
4935 && (op == EQ || op == NE || op == LT || op == GE)
4936 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
4937 || GET_CODE (x) == AND || GET_CODE (x) == IOR
4938 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
4939 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
4940 || GET_CODE (x) == LSHIFTRT
4941 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4942 || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
4943 return CC_NOOVmode;
4944
4945 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
4946 return CC_Zmode;
4947
4948 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
4949 && GET_CODE (x) == PLUS
4950 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
4951 return CC_Cmode;
4952
4953 return CCmode;
4954 }
4955
4956 /* X and Y are two things to compare using CODE. Emit the compare insn and
4957 return the rtx for the condition code register in the proper mode;
4958 SELECT_CC_MODE picks a mode that also covers floating point compares. */
4959
4960 rtx
4961 arm_gen_compare_reg (code, x, y)
4962 enum rtx_code code;
4963 rtx x, y;
4964 {
4965 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
4966 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
4967
4968 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
4969 gen_rtx_COMPARE (mode, x, y)));
4970
4971 return cc_reg;
4972 }
4973
4974 /* Generate a sequence of insns that will generate the correct return
4975 address mask depending on the physical architecture that the program
4976 is running on. */
4977
4978 rtx
4979 arm_gen_return_addr_mask ()
4980 {
4981 rtx reg = gen_reg_rtx (Pmode);
4982
4983 emit_insn (gen_return_addr_mask (reg));
4984 return reg;
4985 }
4986
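/* Handle loading a half-word from memory during reload when the address
   cannot be used directly: synthesise the load as two zero-extended byte
   loads, one into the scratch register operands[2] and one into the
   destination, then merge them with a shift and an IOR, taking
   BYTES_BIG_ENDIAN into account. */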
4987 void
4988 arm_reload_in_hi (operands)
4989 rtx * operands;
4990 {
4991 rtx ref = operands[1];
4992 rtx base, scratch;
4993 HOST_WIDE_INT offset = 0;
4994
4995 if (GET_CODE (ref) == SUBREG)
4996 {
4997 offset = SUBREG_BYTE (ref);
4998 ref = SUBREG_REG (ref);
4999 }
5000
5001 if (GET_CODE (ref) == REG)
5002 {
5003 /* We have a pseudo which has been spilt onto the stack; there
5004 are two cases here: the first where there is a simple
5005 stack-slot replacement and a second where the stack-slot is
5006 out of range, or is used as a subreg. */
5007 if (reg_equiv_mem[REGNO (ref)])
5008 {
5009 ref = reg_equiv_mem[REGNO (ref)];
5010 base = find_replacement (&XEXP (ref, 0));
5011 }
5012 else
5013 /* The slot is out of range, or was dressed up in a SUBREG. */
5014 base = reg_equiv_address[REGNO (ref)];
5015 }
5016 else
5017 base = find_replacement (&XEXP (ref, 0));
5018
5019 /* Handle the case where the address is too complex to be offset by 1. */
5020 if (GET_CODE (base) == MINUS
5021 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5022 {
5023 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5024
5025 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5026 base = base_plus;
5027 }
5028 else if (GET_CODE (base) == PLUS)
5029 {
5030 /* The addend must be CONST_INT, or we would have dealt with it above. */
5031 HOST_WIDE_INT hi, lo;
5032
5033 offset += INTVAL (XEXP (base, 1));
5034 base = XEXP (base, 0);
5035
5036 /* Rework the address into a legal sequence of insns. */
5037 /* Valid range for lo is -4095 -> 4095 */
5038 lo = (offset >= 0
5039 ? (offset & 0xfff)
5040 : -((-offset) & 0xfff));
5041
5042 /* Corner case: if lo is the max offset then we would be out of range
5043 once we have added the additional 1 below, so bump the msb into the
5044 pre-loading insn(s). */
5045 if (lo == 4095)
5046 lo &= 0x7ff;
5047
5048 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5049 ^ (HOST_WIDE_INT) 0x80000000)
5050 - (HOST_WIDE_INT) 0x80000000);
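      /* The masking, XOR and subtraction above sign-extend (offset - lo)
         from 32 bits, so the split is correct even when HOST_WIDE_INT is
         wider than 32 bits.  For example, offset 0x1068 gives lo = 0x068
         and hi = 0x1000. */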
5051
5052 if (hi + lo != offset)
5053 abort ();
5054
5055 if (hi != 0)
5056 {
5057 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5058
5059 /* Get the base address; addsi3 knows how to handle constants
5060 that require more than one insn. */
5061 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5062 base = base_plus;
5063 offset = lo;
5064 }
5065 }
5066
5067 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5068 emit_insn (gen_zero_extendqisi2 (scratch,
5069 gen_rtx_MEM (QImode,
5070 plus_constant (base,
5071 offset))));
5072 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
5073 gen_rtx_MEM (QImode,
5074 plus_constant (base,
5075 offset + 1))));
5076 if (!BYTES_BIG_ENDIAN)
5077 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5078 gen_rtx_IOR (SImode,
5079 gen_rtx_ASHIFT
5080 (SImode,
5081 gen_rtx_SUBREG (SImode, operands[0], 0),
5082 GEN_INT (8)),
5083 scratch)));
5084 else
5085 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5086 gen_rtx_IOR (SImode,
5087 gen_rtx_ASHIFT (SImode, scratch,
5088 GEN_INT (8)),
5089 gen_rtx_SUBREG (SImode, operands[0],
5090 0))));
5091 }
5092
5093 /* Handle storing a half-word to memory during reload by synthesising as two
5094 byte stores. Take care not to clobber the input values until after we
5095 have moved them somewhere safe. This code assumes that if the DImode
5096 scratch in operands[2] overlaps either the input value or output address
5097 in some way, then that value must die in this insn (we absolutely need
5098 two scratch registers for some corner cases). */
5099
5100 void
5101 arm_reload_out_hi (operands)
5102 rtx * operands;
5103 {
5104 rtx ref = operands[0];
5105 rtx outval = operands[1];
5106 rtx base, scratch;
5107 HOST_WIDE_INT offset = 0;
5108
5109 if (GET_CODE (ref) == SUBREG)
5110 {
5111 offset = SUBREG_BYTE (ref);
5112 ref = SUBREG_REG (ref);
5113 }
5114
5115 if (GET_CODE (ref) == REG)
5116 {
5117 /* We have a pseudo which has been spilt onto the stack; there
5118 are two cases here: the first where there is a simple
5119 stack-slot replacement and a second where the stack-slot is
5120 out of range, or is used as a subreg. */
5121 if (reg_equiv_mem[REGNO (ref)])
5122 {
5123 ref = reg_equiv_mem[REGNO (ref)];
5124 base = find_replacement (&XEXP (ref, 0));
5125 }
5126 else
5127 /* The slot is out of range, or was dressed up in a SUBREG. */
5128 base = reg_equiv_address[REGNO (ref)];
5129 }
5130 else
5131 base = find_replacement (&XEXP (ref, 0));
5132
5133 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5134
5135 /* Handle the case where the address is too complex to be offset by 1. */
5136 if (GET_CODE (base) == MINUS
5137 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5138 {
5139 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5140
5141 /* Be careful not to destroy OUTVAL. */
5142 if (reg_overlap_mentioned_p (base_plus, outval))
5143 {
5144 /* Updating base_plus might destroy outval; see if we can
5145 swap the scratch and base_plus. */
5146 if (!reg_overlap_mentioned_p (scratch, outval))
5147 {
5148 rtx tmp = scratch;
5149 scratch = base_plus;
5150 base_plus = tmp;
5151 }
5152 else
5153 {
5154 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5155
5156 /* Be conservative and copy OUTVAL into the scratch now;
5157 this should only be necessary if outval is a subreg
5158 of something larger than a word. */
5159 /* XXX Might this clobber base? I can't see how it can,
5160 since scratch is known to overlap with OUTVAL, and
5161 must be wider than a word. */
5162 emit_insn (gen_movhi (scratch_hi, outval));
5163 outval = scratch_hi;
5164 }
5165 }
5166
5167 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5168 base = base_plus;
5169 }
5170 else if (GET_CODE (base) == PLUS)
5171 {
5172 /* The addend must be CONST_INT, or we would have dealt with it above. */
5173 HOST_WIDE_INT hi, lo;
5174
5175 offset += INTVAL (XEXP (base, 1));
5176 base = XEXP (base, 0);
5177
5178 /* Rework the address into a legal sequence of insns. */
5179 /* Valid range for lo is -4095 -> 4095 */
5180 lo = (offset >= 0
5181 ? (offset & 0xfff)
5182 : -((-offset) & 0xfff));
5183
5184 /* Corner case: if lo is the max offset then we would be out of range
5185 once we have added the additional 1 below, so bump the msb into the
5186 pre-loading insn(s). */
5187 if (lo == 4095)
5188 lo &= 0x7ff;
5189
5190 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5191 ^ (HOST_WIDE_INT) 0x80000000)
5192 - (HOST_WIDE_INT) 0x80000000);
5193
5194 if (hi + lo != offset)
5195 abort ();
5196
5197 if (hi != 0)
5198 {
5199 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5200
5201 /* Be careful not to destroy OUTVAL. */
5202 if (reg_overlap_mentioned_p (base_plus, outval))
5203 {
5204 /* Updating base_plus might destroy outval; see if we
5205 can swap the scratch and base_plus. */
5206 if (!reg_overlap_mentioned_p (scratch, outval))
5207 {
5208 rtx tmp = scratch;
5209 scratch = base_plus;
5210 base_plus = tmp;
5211 }
5212 else
5213 {
5214 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5215
5216 /* Be conservative and copy outval into scratch now;
5217 this should only be necessary if outval is a
5218 subreg of something larger than a word. */
5219 /* XXX Might this clobber base? I can't see how it
5220 can, since scratch is known to overlap with
5221 outval. */
5222 emit_insn (gen_movhi (scratch_hi, outval));
5223 outval = scratch_hi;
5224 }
5225 }
5226
5227 /* Get the base address; addsi3 knows how to handle constants
5228 that require more than one insn. */
5229 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5230 base = base_plus;
5231 offset = lo;
5232 }
5233 }
5234
5235 if (BYTES_BIG_ENDIAN)
5236 {
5237 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
5238 plus_constant (base, offset + 1)),
5239 gen_lowpart (QImode, outval)));
5240 emit_insn (gen_lshrsi3 (scratch,
5241 gen_rtx_SUBREG (SImode, outval, 0),
5242 GEN_INT (8)));
5243 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
5244 gen_lowpart (QImode, scratch)));
5245 }
5246 else
5247 {
5248 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
5249 gen_lowpart (QImode, outval)));
5250 emit_insn (gen_lshrsi3 (scratch,
5251 gen_rtx_SUBREG (SImode, outval, 0),
5252 GEN_INT (8)));
5253 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
5254 plus_constant (base, offset + 1)),
5255 gen_lowpart (QImode, scratch)));
5256 }
5257 }
5258 \f
5259 /* Print a symbolic form of X to the debug file, F. */
5260
5261 static void
5262 arm_print_value (f, x)
5263 FILE * f;
5264 rtx x;
5265 {
5266 switch (GET_CODE (x))
5267 {
5268 case CONST_INT:
5269 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
5270 return;
5271
5272 case CONST_DOUBLE:
5273 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
5274 return;
5275
5276 case CONST_STRING:
5277 fprintf (f, "\"%s\"", XSTR (x, 0));
5278 return;
5279
5280 case SYMBOL_REF:
5281 fprintf (f, "`%s'", XSTR (x, 0));
5282 return;
5283
5284 case LABEL_REF:
5285 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
5286 return;
5287
5288 case CONST:
5289 arm_print_value (f, XEXP (x, 0));
5290 return;
5291
5292 case PLUS:
5293 arm_print_value (f, XEXP (x, 0));
5294 fprintf (f, "+");
5295 arm_print_value (f, XEXP (x, 1));
5296 return;
5297
5298 case PC:
5299 fprintf (f, "pc");
5300 return;
5301
5302 default:
5303 fprintf (f, "????");
5304 return;
5305 }
5306 }
5307 \f
5308 /* Routines for manipulation of the constant pool. */
5309
5310 /* Arm instructions cannot load a large constant directly into a
5311 register; they have to come from a pc relative load. The constant
5312 must therefore be placed in the addressable range of the pc
5313 relative load. Depending on the precise pc relative load
5314 instruction the range is somewhere between 256 bytes and 4k. This
5315 means that we often have to dump a constant inside a function, and
5316 generate code to branch around it.
5317
5318 It is important to minimize this, since the branches will slow
5319 things down and make the code larger.
5320
5321 Normally we can hide the table after an existing unconditional
5322 branch so that there is no interruption of the flow, but in the
5323 worst case the code looks like this:
5324
5325 ldr rn, L1
5326 ...
5327 b L2
5328 align
5329 L1: .long value
5330 L2:
5331 ...
5332
5333 ldr rn, L3
5334 ...
5335 b L4
5336 align
5337 L3: .long value
5338 L4:
5339 ...
5340
5341 We fix this by performing a scan after scheduling, which notices
5342 which instructions need to have their operands fetched from the
5343 constant table and builds the table.
5344
5345 The algorithm starts by building a table of all the constants that
5346 need fixing up and all the natural barriers in the function (places
5347 where a constant table can be dropped without breaking the flow).
5348 For each fixup we note how far the pc-relative replacement will be
5349 able to reach and the offset of the instruction into the function.
5350
5351 Having built the table we then group the fixes together to form
5352 tables that are as large as possible (subject to addressing
5353 constraints) and emit each table of constants after the last
5354 barrier that is within range of all the instructions in the group.
5355 If a group does not contain a barrier, then we forcibly create one
5356 by inserting a jump instruction into the flow. Once the table has
5357 been inserted, the insns are then modified to reference the
5358 relevant entry in the pool.
5359
5360 Possible enhancements to the algorithm (not implemented) are:
5361
5362 1) For some processors and object formats, there may be benefit in
5363 aligning the pools to the start of cache lines; this alignment
5364 would need to be taken into account when calculating addressability
5365 of a pool. */
5366
5367 /* These typedefs are located at the start of this file, so that
5368 they can be used in the prototypes there. This comment is to
5369 remind readers of that fact so that the following structures
5370 can be understood more easily.
5371
5372 typedef struct minipool_node Mnode;
5373 typedef struct minipool_fixup Mfix; */
5374
5375 struct minipool_node
5376 {
5377 /* Doubly linked chain of entries. */
5378 Mnode * next;
5379 Mnode * prev;
5380 /* The maximum offset into the code at which this entry can be placed. While
5381 pushing fixes for forward references, all entries are sorted in order
5382 of increasing max_address. */
5383 HOST_WIDE_INT max_address;
5384 /* Similarly for an entry inserted for a backwards ref. */
5385 HOST_WIDE_INT min_address;
5386 /* The number of fixes referencing this entry. This can become zero
5387 if we "unpush" an entry. In this case we ignore the entry when we
5388 come to emit the code. */
5389 int refcount;
5390 /* The offset from the start of the minipool. */
5391 HOST_WIDE_INT offset;
5392 /* The value in the table. */
5393 rtx value;
5394 /* The mode of value. */
5395 enum machine_mode mode;
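  /* The number of bytes this entry occupies in the pool (a minimum of a
     word; see MINIPOOL_FIX_SIZE). */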
5396 int fix_size;
5397 };
5398
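/* One pending fixup: the insn INSN at ADDRESS references, through the
   operand at *LOC, the constant VALUE of mode MODE, which needs FIX_SIZE
   bytes in the pool.  FORWARDS and BACKWARDS give the reach of the
   pc-relative addressing mode from ADDRESS, and MINIPOOL is the pool
   entry eventually chosen to hold the value. */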
5399 struct minipool_fixup
5400 {
5401 Mfix * next;
5402 rtx insn;
5403 HOST_WIDE_INT address;
5404 rtx * loc;
5405 enum machine_mode mode;
5406 int fix_size;
5407 rtx value;
5408 Mnode * minipool;
5409 HOST_WIDE_INT forwards;
5410 HOST_WIDE_INT backwards;
5411 };
5412
5413 /* Fixes less than a word need padding out to a word boundary. */
5414 #define MINIPOOL_FIX_SIZE(mode) \
5415 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
5416
5417 static Mnode * minipool_vector_head;
5418 static Mnode * minipool_vector_tail;
5419 static rtx minipool_vector_label;
5420
5421 /* The linked list of all minipool fixes required for this function. */
5422 Mfix * minipool_fix_head;
5423 Mfix * minipool_fix_tail;
5424 /* The fix entry for the current minipool, once it has been placed. */
5425 Mfix * minipool_barrier;
5426
5427 /* Determines if INSN is the start of a jump table. Returns the end
5428 of the TABLE or NULL_RTX. */
5429
5430 static rtx
5431 is_jump_table (insn)
5432 rtx insn;
5433 {
5434 rtx table;
5435
5436 if (GET_CODE (insn) == JUMP_INSN
5437 && JUMP_LABEL (insn) != NULL
5438 && ((table = next_real_insn (JUMP_LABEL (insn)))
5439 == next_real_insn (insn))
5440 && table != NULL
5441 && GET_CODE (table) == JUMP_INSN
5442 && (GET_CODE (PATTERN (table)) == ADDR_VEC
5443 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
5444 return table;
5445
5446 return NULL_RTX;
5447 }
5448
5449 #ifndef JUMP_TABLES_IN_TEXT_SECTION
5450 #define JUMP_TABLES_IN_TEXT_SECTION 0
5451 #endif
5452
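/* Return the number of bytes that the jump table following INSN will
   occupy in the instruction stream, or zero if jump tables are placed
   in a separate read-only data section. */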
5453 static HOST_WIDE_INT
5454 get_jump_table_size (insn)
5455 rtx insn;
5456 {
5457 /* ADDR_VECs only take up room if read-only data goes into the text
5458 section. */
5459 if (JUMP_TABLES_IN_TEXT_SECTION
5460 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
5461 || 1
5462 #endif
5463 )
5464 {
5465 rtx body = PATTERN (insn);
5466 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
5467
5468 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
5469 }
5470
5471 return 0;
5472 }
5473
5474 /* Move a minipool fix MP from its current location to before MAX_MP.
5475 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
5476 constraints may need updating. */
5477
5478 static Mnode *
5479 move_minipool_fix_forward_ref (mp, max_mp, max_address)
5480 Mnode * mp;
5481 Mnode * max_mp;
5482 HOST_WIDE_INT max_address;
5483 {
5484 /* This should never be true and the code below assumes these are
5485 different. */
5486 if (mp == max_mp)
5487 abort ();
5488
5489 if (max_mp == NULL)
5490 {
5491 if (max_address < mp->max_address)
5492 mp->max_address = max_address;
5493 }
5494 else
5495 {
5496 if (max_address > max_mp->max_address - mp->fix_size)
5497 mp->max_address = max_mp->max_address - mp->fix_size;
5498 else
5499 mp->max_address = max_address;
5500
5501 /* Unlink MP from its current position. Since max_mp is non-null,
5502 mp->prev must be non-null. */
5503 mp->prev->next = mp->next;
5504 if (mp->next != NULL)
5505 mp->next->prev = mp->prev;
5506 else
5507 minipool_vector_tail = mp->prev;
5508
5509 /* Re-insert it before MAX_MP. */
5510 mp->next = max_mp;
5511 mp->prev = max_mp->prev;
5512 max_mp->prev = mp;
5513
5514 if (mp->prev != NULL)
5515 mp->prev->next = mp;
5516 else
5517 minipool_vector_head = mp;
5518 }
5519
5520 /* Save the new entry. */
5521 max_mp = mp;
5522
5523 /* Scan over the preceding entries and adjust their addresses as
5524 required. */
5525 while (mp->prev != NULL
5526 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5527 {
5528 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5529 mp = mp->prev;
5530 }
5531
5532 return max_mp;
5533 }
5534
5535 /* Add a constant to the minipool for a forward reference. Returns the
5536 node added or NULL if the constant will not fit in this pool. */
5537
5538 static Mnode *
5539 add_minipool_forward_ref (fix)
5540 Mfix * fix;
5541 {
5542 /* If set, max_mp is the first pool_entry that has a lower
5543 constraint than the one we are trying to add. */
5544 Mnode * max_mp = NULL;
5545 HOST_WIDE_INT max_address = fix->address + fix->forwards;
5546 Mnode * mp;
5547
5548 /* If this fix's address is greater than the address of the first
5549 entry, then we can't put the fix in this pool. We subtract the
5550 size of the current fix to ensure that if the table is fully
5551 packed we still have enough room to insert this value by shuffling
5552 the other fixes forwards. */
5553 if (minipool_vector_head &&
5554 fix->address >= minipool_vector_head->max_address - fix->fix_size)
5555 return NULL;
5556
5557 /* Scan the pool to see if a constant with the same value has
5558 already been added. While we are doing this, also note the
5559 location where we must insert the constant if it doesn't already
5560 exist. */
5561 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5562 {
5563 if (GET_CODE (fix->value) == GET_CODE (mp->value)
5564 && fix->mode == mp->mode
5565 && (GET_CODE (fix->value) != CODE_LABEL
5566 || (CODE_LABEL_NUMBER (fix->value)
5567 == CODE_LABEL_NUMBER (mp->value)))
5568 && rtx_equal_p (fix->value, mp->value))
5569 {
5570 /* More than one fix references this entry. */
5571 mp->refcount++;
5572 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
5573 }
5574
5575 /* Note the insertion point if necessary. */
5576 if (max_mp == NULL
5577 && mp->max_address > max_address)
5578 max_mp = mp;
5579 }
5580
5581 /* The value is not currently in the minipool, so we need to create
5582 a new entry for it. If MAX_MP is NULL, the entry will be put on
5583 the end of the list since the placement is less constrained than
5584 any existing entry. Otherwise, we insert the new fix before
5585 MAX_MP and, if necessary, adjust the constraints on the other
5586 entries. */
5587 mp = xmalloc (sizeof (* mp));
5588 mp->fix_size = fix->fix_size;
5589 mp->mode = fix->mode;
5590 mp->value = fix->value;
5591 mp->refcount = 1;
5592 /* Not yet required for a backwards ref. */
5593 mp->min_address = -65536;
5594
5595 if (max_mp == NULL)
5596 {
5597 mp->max_address = max_address;
5598 mp->next = NULL;
5599 mp->prev = minipool_vector_tail;
5600
5601 if (mp->prev == NULL)
5602 {
5603 minipool_vector_head = mp;
5604 minipool_vector_label = gen_label_rtx ();
5605 }
5606 else
5607 mp->prev->next = mp;
5608
5609 minipool_vector_tail = mp;
5610 }
5611 else
5612 {
5613 if (max_address > max_mp->max_address - mp->fix_size)
5614 mp->max_address = max_mp->max_address - mp->fix_size;
5615 else
5616 mp->max_address = max_address;
5617
5618 mp->next = max_mp;
5619 mp->prev = max_mp->prev;
5620 max_mp->prev = mp;
5621 if (mp->prev != NULL)
5622 mp->prev->next = mp;
5623 else
5624 minipool_vector_head = mp;
5625 }
5626
5627 /* Save the new entry. */
5628 max_mp = mp;
5629
5630 /* Scan over the preceding entries and adjust their addresses as
5631 required. */
5632 while (mp->prev != NULL
5633 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5634 {
5635 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5636 mp = mp->prev;
5637 }
5638
5639 return max_mp;
5640 }
5641
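/* The backwards-reference counterpart of move_minipool_fix_forward_ref:
   move the minipool entry MP to just after MIN_MP and tighten its
   minimum-address constraint to MIN_ADDRESS.  If MIN_MP is NULL, MP does
   not move, but its constraint may still need raising; the offsets and
   constraints of the following entries are readjusted either way. */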
5642 static Mnode *
5643 move_minipool_fix_backward_ref (mp, min_mp, min_address)
5644 Mnode * mp;
5645 Mnode * min_mp;
5646 HOST_WIDE_INT min_address;
5647 {
5648 HOST_WIDE_INT offset;
5649
5650 /* This should never be true, and the code below assumes these are
5651 different. */
5652 if (mp == min_mp)
5653 abort ();
5654
5655 if (min_mp == NULL)
5656 {
5657 if (min_address > mp->min_address)
5658 mp->min_address = min_address;
5659 }
5660 else
5661 {
5662 /* We will adjust this below if it is too loose. */
5663 mp->min_address = min_address;
5664
5665 /* Unlink MP from its current position. Since min_mp is non-null,
5666 mp->next must be non-null. */
5667 mp->next->prev = mp->prev;
5668 if (mp->prev != NULL)
5669 mp->prev->next = mp->next;
5670 else
5671 minipool_vector_head = mp->next;
5672
5673 /* Reinsert it after MIN_MP. */
5674 mp->prev = min_mp;
5675 mp->next = min_mp->next;
5676 min_mp->next = mp;
5677 if (mp->next != NULL)
5678 mp->next->prev = mp;
5679 else
5680 minipool_vector_tail = mp;
5681 }
5682
5683 min_mp = mp;
5684
5685 offset = 0;
5686 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5687 {
5688 mp->offset = offset;
5689 if (mp->refcount > 0)
5690 offset += mp->fix_size;
5691
5692 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
5693 mp->next->min_address = mp->min_address + mp->fix_size;
5694 }
5695
5696 return min_mp;
5697 }
5698
5699 /* Add a constant to the minipool for a backward reference. Returns the
5700 node added or NULL if the constant will not fit in this pool.
5701
5702 Note that the code for insertion for a backwards reference can be
5703 somewhat confusing because the calculated offsets for each fix do
5704 not take into account the size of the pool (which is still under
5705 construction). */
5706
5707 static Mnode *
5708 add_minipool_backward_ref (fix)
5709 Mfix * fix;
5710 {
5711 /* If set, min_mp is the last pool_entry that has a lower constraint
5712 than the one we are trying to add. */
5713 Mnode * min_mp = NULL;
5714 /* This can be negative, since it is only a constraint. */
5715 HOST_WIDE_INT min_address = fix->address - fix->backwards;
5716 Mnode * mp;
5717
5718 /* If we can't reach the current pool from this insn, or if we can't
5719 insert this entry at the end of the pool without pushing other
5720 fixes out of range, then we don't try. This ensures that we
5721 can't fail later on. */
5722 if (min_address >= minipool_barrier->address
5723 || (minipool_vector_tail->min_address + fix->fix_size
5724 >= minipool_barrier->address))
5725 return NULL;
5726
5727 /* Scan the pool to see if a constant with the same value has
5728 already been added. While we are doing this, also note the
5729 location where we must insert the constant if it doesn't already
5730 exist. */
5731 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
5732 {
5733 if (GET_CODE (fix->value) == GET_CODE (mp->value)
5734 && fix->mode == mp->mode
5735 && (GET_CODE (fix->value) != CODE_LABEL
5736 || (CODE_LABEL_NUMBER (fix->value)
5737 == CODE_LABEL_NUMBER (mp->value)))
5738 && rtx_equal_p (fix->value, mp->value)
5739 /* Check that there is enough slack to move this entry to the
5740 end of the table (this is conservative). */
5741 && (mp->max_address
5742 > (minipool_barrier->address
5743 + minipool_vector_tail->offset
5744 + minipool_vector_tail->fix_size)))
5745 {
5746 mp->refcount++;
5747 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
5748 }
5749
5750 if (min_mp != NULL)
5751 mp->min_address += fix->fix_size;
5752 else
5753 {
5754 /* Note the insertion point if necessary. */
5755 if (mp->min_address < min_address)
5756 min_mp = mp;
5757 else if (mp->max_address
5758 < minipool_barrier->address + mp->offset + fix->fix_size)
5759 {
5760 /* Inserting before this entry would push the fix beyond
5761 its maximum address (which can happen if we have
5762 re-located a forwards fix); force the new fix to come
5763 after it. */
5764 min_mp = mp;
5765 min_address = mp->min_address + fix->fix_size;
5766 }
5767 }
5768 }
5769
5770 /* We need to create a new entry. */
5771 mp = xmalloc (sizeof (* mp));
5772 mp->fix_size = fix->fix_size;
5773 mp->mode = fix->mode;
5774 mp->value = fix->value;
5775 mp->refcount = 1;
5776 mp->max_address = minipool_barrier->address + 65536;
5777
5778 mp->min_address = min_address;
5779
5780 if (min_mp == NULL)
5781 {
5782 mp->prev = NULL;
5783 mp->next = minipool_vector_head;
5784
5785 if (mp->next == NULL)
5786 {
5787 minipool_vector_tail = mp;
5788 minipool_vector_label = gen_label_rtx ();
5789 }
5790 else
5791 mp->next->prev = mp;
5792
5793 minipool_vector_head = mp;
5794 }
5795 else
5796 {
5797 mp->next = min_mp->next;
5798 mp->prev = min_mp;
5799 min_mp->next = mp;
5800
5801 if (mp->next != NULL)
5802 mp->next->prev = mp;
5803 else
5804 minipool_vector_tail = mp;
5805 }
5806
5807 /* Save the new entry. */
5808 min_mp = mp;
5809
5810 if (mp->prev)
5811 mp = mp->prev;
5812 else
5813 mp->offset = 0;
5814
5815 /* Scan over the following entries and adjust their offsets. */
5816 while (mp->next != NULL)
5817 {
5818 if (mp->next->min_address < mp->min_address + mp->fix_size)
5819 mp->next->min_address = mp->min_address + mp->fix_size;
5820
5821 if (mp->refcount)
5822 mp->next->offset = mp->offset + mp->fix_size;
5823 else
5824 mp->next->offset = mp->offset;
5825
5826 mp = mp->next;
5827 }
5828
5829 return min_mp;
5830 }
5831
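/* Assign an offset to every entry in the minipool, assuming the pool
   will be placed at BARRIER.  Entries with a zero refcount keep
   their place in the chain but occupy no space: for example, a live
   4-byte entry followed by a dead entry and a live 8-byte entry
   receive offsets 0, 4 and 4 respectively.  */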
5832 static void
5833 assign_minipool_offsets (barrier)
5834 Mfix * barrier;
5835 {
5836 HOST_WIDE_INT offset = 0;
5837 Mnode * mp;
5838
5839 minipool_barrier = barrier;
5840
5841 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5842 {
5843 mp->offset = offset;
5844
5845 if (mp->refcount > 0)
5846 offset += mp->fix_size;
5847 }
5848 }
5849
5850 /* Output the literal table. */
5851 static void
5852 dump_minipool (scan)
5853 rtx scan;
5854 {
5855 Mnode * mp;
5856 Mnode * nmp;
5857
5858 if (rtl_dump_file)
5859 fprintf (rtl_dump_file,
5860 ";; Emitting minipool after insn %u; address %ld\n",
5861 INSN_UID (scan), (unsigned long) minipool_barrier->address);
5862
5863 scan = emit_label_after (gen_label_rtx (), scan);
5864 scan = emit_insn_after (gen_align_4 (), scan);
5865 scan = emit_label_after (minipool_vector_label, scan);
5866
5867 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
5868 {
5869 if (mp->refcount > 0)
5870 {
5871 if (rtl_dump_file)
5872 {
5873 fprintf (rtl_dump_file,
5874 ";; Offset %u, min %ld, max %ld ",
5875 (unsigned) mp->offset, (unsigned long) mp->min_address,
5876 (unsigned long) mp->max_address);
5877 arm_print_value (rtl_dump_file, mp->value);
5878 fputc ('\n', rtl_dump_file);
5879 }
5880
5881 switch (mp->fix_size)
5882 {
5883 #ifdef HAVE_consttable_1
5884 case 1:
5885 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
5886 break;
5887
5888 #endif
5889 #ifdef HAVE_consttable_2
5890 case 2:
5891 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
5892 break;
5893
5894 #endif
5895 #ifdef HAVE_consttable_4
5896 case 4:
5897 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
5898 break;
5899
5900 #endif
5901 #ifdef HAVE_consttable_8
5902 case 8:
5903 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
5904 break;
5905
5906 #endif
5907 default:
5908 abort ();
5909 break;
5910 }
5911 }
5912
5913 nmp = mp->next;
5914 free (mp);
5915 }
5916
5917 minipool_vector_head = minipool_vector_tail = NULL;
5918 scan = emit_insn_after (gen_consttable_end (), scan);
5919 scan = emit_barrier_after (scan);
5920 }
5921
5922 /* Return the cost of forcibly inserting a barrier after INSN. */
5923
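/* Lower costs mark better barrier locations.  For example, a
   JUMP_INSN immediately followed by a CODE_LABEL costs
   50 - 20 - 10 == 20, the cheapest value this function can return.  */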
5924 static int
5925 arm_barrier_cost (insn)
5926 rtx insn;
5927 {
5928 /* Basing the location of the pool on the loop depth is preferable,
5929 but at the moment, the basic block information seems to be
5930 corrupt by this stage of the compilation. */
5931 int base_cost = 50;
5932 rtx next = next_nonnote_insn (insn);
5933
5934 if (next != NULL && GET_CODE (next) == CODE_LABEL)
5935 base_cost -= 20;
5936
5937 switch (GET_CODE (insn))
5938 {
5939 case CODE_LABEL:
5940 /* It will always be better to place the table before the label, rather
5941 than after it. */
5942 return 50;
5943
5944 case INSN:
5945 case CALL_INSN:
5946 return base_cost;
5947
5948 case JUMP_INSN:
5949 return base_cost - 10;
5950
5951 default:
5952 return base_cost + 10;
5953 }
5954 }
5955
5956 /* Find the best place in the insn stream in the range
5957 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
5958 Create the barrier by inserting a jump and add a new fix entry for
5959 it. */
5960
5961 static Mfix *
5962 create_fix_barrier (fix, max_address)
5963 Mfix * fix;
5964 HOST_WIDE_INT max_address;
5965 {
5966 HOST_WIDE_INT count = 0;
5967 rtx barrier;
5968 rtx from = fix->insn;
5969 rtx selected = from;
5970 int selected_cost;
5971 HOST_WIDE_INT selected_address;
5972 Mfix * new_fix;
5973 HOST_WIDE_INT max_count = max_address - fix->address;
5974 rtx label = gen_label_rtx ();
5975
5976 selected_cost = arm_barrier_cost (from);
5977 selected_address = fix->address;
5978
5979 while (from && count < max_count)
5980 {
5981 rtx tmp;
5982 int new_cost;
5983
5984 /* This code shouldn't have been called if there was a natural barrier
5985 within range. */
5986 if (GET_CODE (from) == BARRIER)
5987 abort ();
5988
5989 /* Count the length of this insn. */
5990 count += get_attr_length (from);
5991
5992 /* If there is a jump table, add its length. */
5993 tmp = is_jump_table (from);
5994 if (tmp != NULL)
5995 {
5996 count += get_jump_table_size (tmp);
5997
5998 /* Jump tables aren't in a basic block, so base the cost on
5999 the dispatch insn. If we select this location, we will
6000 still put the pool after the table. */
6001 new_cost = arm_barrier_cost (from);
6002
6003 if (count < max_count && new_cost <= selected_cost)
6004 {
6005 selected = tmp;
6006 selected_cost = new_cost;
6007 selected_address = fix->address + count;
6008 }
6009
6010 /* Continue after the dispatch table. */
6011 from = NEXT_INSN (tmp);
6012 continue;
6013 }
6014
6015 new_cost = arm_barrier_cost (from);
6016
6017 if (count < max_count && new_cost <= selected_cost)
6018 {
6019 selected = from;
6020 selected_cost = new_cost;
6021 selected_address = fix->address + count;
6022 }
6023
6024 from = NEXT_INSN (from);
6025 }
6026
6027 /* Create a new JUMP_INSN that branches around a barrier. */
6028 from = emit_jump_insn_after (gen_jump (label), selected);
6029 JUMP_LABEL (from) = label;
6030 barrier = emit_barrier_after (from);
6031 emit_label_after (label, barrier);
6032
6033 /* Create a minipool barrier entry for the new barrier. */
6034 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
6035 new_fix->insn = barrier;
6036 new_fix->address = selected_address;
6037 new_fix->next = fix->next;
6038 fix->next = new_fix;
6039
6040 return new_fix;
6041 }
6042
6043 /* Record that there is a natural barrier in the insn stream at
6044 ADDRESS. */
6045 static void
6046 push_minipool_barrier (insn, address)
6047 rtx insn;
6048 HOST_WIDE_INT address;
6049 {
6050 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6051
6052 fix->insn = insn;
6053 fix->address = address;
6054
6055 fix->next = NULL;
6056 if (minipool_fix_head != NULL)
6057 minipool_fix_tail->next = fix;
6058 else
6059 minipool_fix_head = fix;
6060
6061 minipool_fix_tail = fix;
6062 }
6063
6064 /* Record INSN, which will need fixing up to load a value from the
6065 minipool. ADDRESS is the offset of the insn from the start of the
6066 function; LOC is a pointer to the part of the insn which requires
6067 fixing; VALUE is the constant that must be loaded, which is of type
6068 MODE. */
6069 static void
6070 push_minipool_fix (insn, address, loc, mode, value)
6071 rtx insn;
6072 HOST_WIDE_INT address;
6073 rtx * loc;
6074 enum machine_mode mode;
6075 rtx value;
6076 {
6077 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6078
6079 #ifdef AOF_ASSEMBLER
6080 /* PIC symbol references need to be converted into offsets into the
6081 based area. */
6082 /* XXX This shouldn't be done here. */
6083 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
6084 value = aof_pic_entry (value);
6085 #endif /* AOF_ASSEMBLER */
6086
6087 fix->insn = insn;
6088 fix->address = address;
6089 fix->loc = loc;
6090 fix->mode = mode;
6091 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
6092 fix->value = value;
6093 fix->forwards = get_attr_pool_range (insn);
6094 fix->backwards = get_attr_neg_pool_range (insn);
6095 fix->minipool = NULL;
6096
6097 /* If an insn doesn't have a range defined for it, then it isn't
6098 expecting to be reworked by this code. Better to abort now than
6099 to generate duff assembly code. */
6100 if (fix->forwards == 0 && fix->backwards == 0)
6101 abort ();
6102
6103 if (rtl_dump_file)
6104 {
6105 fprintf (rtl_dump_file,
6106 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
6107 GET_MODE_NAME (mode),
6108 INSN_UID (insn), (unsigned long) address,
6109 -1 * (long)fix->backwards, (long)fix->forwards);
6110 arm_print_value (rtl_dump_file, fix->value);
6111 fprintf (rtl_dump_file, "\n");
6112 }
6113
6114 /* Add it to the chain of fixes. */
6115 fix->next = NULL;
6116
6117 if (minipool_fix_head != NULL)
6118 minipool_fix_tail->next = fix;
6119 else
6120 minipool_fix_head = fix;
6121
6122 minipool_fix_tail = fix;
6123 }
6124
6125 /* Scan INSN and note any of its operands that need fixing. */
6126
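/* For example, an SImode move insn whose constant source was only
   accepted through a memory-reference alternative is recorded here
   as a fix; arm_reorg below later rewrites the operand to load the
   value from a minipool entry instead.  */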
6127 static void
6128 note_invalid_constants (insn, address)
6129 rtx insn;
6130 HOST_WIDE_INT address;
6131 {
6132 int opno;
6133
6134 extract_insn (insn);
6135
6136 if (!constrain_operands (1))
6137 fatal_insn_not_found (insn);
6138
6139 /* Fill in recog_op_alt with information about the constraints of this
6140 insn. */
6141 preprocess_constraints ();
6142
6143 for (opno = 0; opno < recog_data.n_operands; opno++)
6144 {
6145 /* Things we need to fix can only occur in inputs. */
6146 if (recog_data.operand_type[opno] != OP_IN)
6147 continue;
6148
6149 /* If this alternative is a memory reference, then any mention
6150 of constants in this alternative is really to fool reload
6151 into allowing us to accept one there. We need to fix them up
6152 now so that we output the right code. */
6153 if (recog_op_alt[opno][which_alternative].memory_ok)
6154 {
6155 rtx op = recog_data.operand[opno];
6156
6157 if (CONSTANT_P (op))
6158 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6159 recog_data.operand_mode[opno], op);
6160 #if 0
6161 /* RWE: Now we look correctly at the operands for the insn,
6162 this shouldn't be needed any more. */
6163 #ifndef AOF_ASSEMBLER
6164 /* XXX Is this still needed? */
6165 else if (GET_CODE (op) == UNSPEC && XINT (op, 1) == UNSPEC_PIC_SYM)
6166 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6167 recog_data.operand_mode[opno],
6168 XVECEXP (op, 0, 0));
6169 #endif
6170 #endif
6171 else if (GET_CODE (op) == MEM
6172 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
6173 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
6174 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6175 recog_data.operand_mode[opno],
6176 get_pool_constant (XEXP (op, 0)));
6177 }
6178 }
6179 }
6180
6181 void
6182 arm_reorg (first)
6183 rtx first;
6184 {
6185 rtx insn;
6186 HOST_WIDE_INT address = 0;
6187 Mfix * fix;
6188
6189 minipool_fix_head = minipool_fix_tail = NULL;
6190
6191 /* The first insn must always be a note, or the code below won't
6192 scan it properly. */
6193 if (GET_CODE (first) != NOTE)
6194 abort ();
6195
6196 /* Scan all the insns and record the operands that will need fixing. */
6197 for (insn = next_nonnote_insn (first); insn; insn = next_nonnote_insn (insn))
6198 {
6199 if (GET_CODE (insn) == BARRIER)
6200 push_minipool_barrier (insn, address);
6201 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
6202 || GET_CODE (insn) == JUMP_INSN)
6203 {
6204 rtx table;
6205
6206 note_invalid_constants (insn, address);
6207 address += get_attr_length (insn);
6208
6209 /* If the insn is a vector jump, add the size of the table
6210 and skip the table. */
6211 if ((table = is_jump_table (insn)) != NULL)
6212 {
6213 address += get_jump_table_size (table);
6214 insn = table;
6215 }
6216 }
6217 }
6218
6219 fix = minipool_fix_head;
6220
6221 /* Now scan the fixups and perform the required changes. */
6222 while (fix)
6223 {
6224 Mfix * ftmp;
6225 Mfix * fdel;
6226 Mfix * last_added_fix;
6227 Mfix * last_barrier = NULL;
6228 Mfix * this_fix;
6229
6230 /* Skip any further barriers before the next fix. */
6231 while (fix && GET_CODE (fix->insn) == BARRIER)
6232 fix = fix->next;
6233
6234 /* No more fixes. */
6235 if (fix == NULL)
6236 break;
6237
6238 last_added_fix = NULL;
6239
6240 for (ftmp = fix; ftmp; ftmp = ftmp->next)
6241 {
6242 if (GET_CODE (ftmp->insn) == BARRIER)
6243 {
6244 if (ftmp->address >= minipool_vector_head->max_address)
6245 break;
6246
6247 last_barrier = ftmp;
6248 }
6249 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
6250 break;
6251
6252 last_added_fix = ftmp; /* Keep track of the last fix added. */
6253 }
6254
6255 /* If we found a barrier, drop back to that; any fixes that we
6256 could have reached but come after the barrier will now go in
6257 the next mini-pool. */
6258 if (last_barrier != NULL)
6259 {
6260 /* Reduce the refcount for those fixes that won't go into this
6261 pool after all. */
6262 for (fdel = last_barrier->next;
6263 fdel && fdel != ftmp;
6264 fdel = fdel->next)
6265 {
6266 fdel->minipool->refcount--;
6267 fdel->minipool = NULL;
6268 }
6269
6270 ftmp = last_barrier;
6271 }
6272 else
6273 {
6274 /* ftmp is the first fix that we can't fit into this pool and
6275 there are no natural barriers that we could use. Insert a
6276 new barrier in the code somewhere between the previous
6277 fix and this one, and arrange to jump around it. */
6278 HOST_WIDE_INT max_address;
6279
6280 /* The last item on the list of fixes must be a barrier, so
6281 we can never run off the end of the list of fixes without
6282 last_barrier being set. */
6283 if (ftmp == NULL)
6284 abort ();
6285
6286 max_address = minipool_vector_head->max_address;
6287 /* Check that there isn't another fix that is in range that
6288 we couldn't fit into this pool because the pool was
6289 already too large: we need to put the pool before such an
6290 instruction. */
6291 if (ftmp->address < max_address)
6292 max_address = ftmp->address;
6293
6294 last_barrier = create_fix_barrier (last_added_fix, max_address);
6295 }
6296
6297 assign_minipool_offsets (last_barrier);
6298
6299 while (ftmp)
6300 {
6301 if (GET_CODE (ftmp->insn) != BARRIER
6302 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
6303 == NULL))
6304 break;
6305
6306 ftmp = ftmp->next;
6307 }
6308
6309 /* Scan over the fixes we have identified for this pool, fixing them
6310 up and adding the constants to the pool itself. */
6311 for (this_fix = fix; this_fix && ftmp != this_fix;
6312 this_fix = this_fix->next)
6313 if (GET_CODE (this_fix->insn) != BARRIER)
6314 {
6315 rtx addr
6316 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
6317 minipool_vector_label),
6318 this_fix->minipool->offset);
6319 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
6320 }
6321
6322 dump_minipool (last_barrier->insn);
6323 fix = ftmp;
6324 }
6325
6326 /* From now on we must synthesize any constants that we can't handle
6327 directly. This can happen if the RTL gets split during final
6328 instruction generation. */
6329 after_arm_reorg = 1;
6330
6331 /* Free the minipool memory. */
6332 obstack_free (&minipool_obstack, minipool_startobj);
6333 }
6334 \f
6335 /* Routines to output assembly language. */
6336
6337 /* If the rtx is the correct value then return the string of the number.
6338 In this way we can ensure that valid double constants are generated even
6339 when cross compiling. */
6340
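/* The eight entries of values_fpa[] are the FPA's valid immediate
   constants 0, 1, 2, 3, 4, 5, 0.5 and 10, as set up by
   init_fpa_table; any other CONST_DOUBLE reaching this routine
   aborts.  */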
6341 const char *
6342 fp_immediate_constant (x)
6343 rtx x;
6344 {
6345 REAL_VALUE_TYPE r;
6346 int i;
6347
6348 if (!fpa_consts_inited)
6349 init_fpa_table ();
6350
6351 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6352 for (i = 0; i < 8; i++)
6353 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
6354 return strings_fpa[i];
6355
6356 abort ();
6357 }
6358
6359 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
6360
6361 static const char *
6362 fp_const_from_val (r)
6363 REAL_VALUE_TYPE * r;
6364 {
6365 int i;
6366
6367 if (!fpa_consts_inited)
6368 init_fpa_table ();
6369
6370 for (i = 0; i < 8; i++)
6371 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
6372 return strings_fpa[i];
6373
6374 abort ();
6375 }
6376
6377 /* Output the operands of a LDM/STM instruction to STREAM.
6378 MASK is the ARM register set mask of which only bits 0-15 are important.
6379 REG is the base register, either the frame pointer or the stack pointer,
6380 INSTR is the possibly suffixed load or store instruction. */
6381
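/* For example, a MASK of 0x4030 with INSTR "ldmfd\t%r!" and REG 13
   prints "ldmfd sp!, {r4, r5, lr}", with a trailing "^" appended in
   26-bit (non-APCS-32) mode.  */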
6382 static void
6383 print_multi_reg (stream, instr, reg, mask)
6384 FILE * stream;
6385 const char * instr;
6386 int reg;
6387 int mask;
6388 {
6389 int i;
6390 int not_first = FALSE;
6391
6392 fputc ('\t', stream);
6393 asm_fprintf (stream, instr, reg);
6394 fputs (", {", stream);
6395
6396 for (i = 0; i <= LAST_ARM_REGNUM; i++)
6397 if (mask & (1 << i))
6398 {
6399 if (not_first)
6400 fprintf (stream, ", ");
6401
6402 asm_fprintf (stream, "%r", i);
6403 not_first = TRUE;
6404 }
6405
6406 fprintf (stream, "}%s\n", TARGET_APCS_32 ? "" : "^");
6407 }
6408
6409 /* Output a 'call' insn. */
6410
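/* For a non-interworking call through r0 the sequence emitted is
       mov     lr, pc
       mov     pc, r0
   which works because reading the PC yields the address two
   instructions ahead, i.e. exactly the correct return address.  */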
6411 const char *
6412 output_call (operands)
6413 rtx * operands;
6414 {
6415 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
6416
6417 if (REGNO (operands[0]) == LR_REGNUM)
6418 {
6419 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
6420 output_asm_insn ("mov%?\t%0, %|lr", operands);
6421 }
6422
6423 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6424
6425 if (TARGET_INTERWORK)
6426 output_asm_insn ("bx%?\t%0", operands);
6427 else
6428 output_asm_insn ("mov%?\t%|pc, %0", operands);
6429
6430 return "";
6431 }
6432
6433 static int
6434 eliminate_lr2ip (x)
6435 rtx * x;
6436 {
6437 int something_changed = 0;
6438 rtx x0 = * x;
6439 int code = GET_CODE (x0);
6440 int i, j;
6441 const char * fmt;
6442
6443 switch (code)
6444 {
6445 case REG:
6446 if (REGNO (x0) == LR_REGNUM)
6447 {
6448 *x = gen_rtx_REG (SImode, IP_REGNUM);
6449 return 1;
6450 }
6451 return 0;
6452 default:
6453 /* Scan through the sub-elements and change any references there. */
6454 fmt = GET_RTX_FORMAT (code);
6455
6456 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6457 if (fmt[i] == 'e')
6458 something_changed |= eliminate_lr2ip (&XEXP (x0, i));
6459 else if (fmt[i] == 'E')
6460 for (j = 0; j < XVECLEN (x0, i); j++)
6461 something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
6462
6463 return something_changed;
6464 }
6465 }
6466
6467 /* Output a 'call' insn that is a reference in memory. */
6468
6469 const char *
6470 output_call_mem (operands)
6471 rtx * operands;
6472 {
6473 operands[0] = copy_rtx (operands[0]); /* Be ultra careful. */
6474 /* Handle calls using lr by using ip (which may be clobbered in subr anyway). */
6475 if (eliminate_lr2ip (&operands[0]))
6476 output_asm_insn ("mov%?\t%|ip, %|lr", operands);
6477
6478 if (TARGET_INTERWORK)
6479 {
6480 output_asm_insn ("ldr%?\t%|ip, %0", operands);
6481 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6482 output_asm_insn ("bx%?\t%|ip", operands);
6483 }
6484 else
6485 {
6486 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6487 output_asm_insn ("ldr%?\t%|pc, %0", operands);
6488 }
6489
6490 return "";
6491 }
6492
6493
6494 /* Output a move from arm registers to an fpu register.
6495 OPERANDS[0] is an fpu register.
6496 OPERANDS[1] is the first register of an arm register pair. */
6497
6498 const char *
6499 output_mov_long_double_fpu_from_arm (operands)
6500 rtx * operands;
6501 {
6502 int arm_reg0 = REGNO (operands[1]);
6503 rtx ops[3];
6504
6505 if (arm_reg0 == IP_REGNUM)
6506 abort ();
6507
6508 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6509 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6510 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
6511
6512 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
6513 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
6514
6515 return "";
6516 }
6517
6518 /* Output a move from an fpu register to arm registers.
6519 OPERANDS[0] is the first register of an arm register pair.
6520 OPERANDS[1] is an fpu register. */
6521
6522 const char *
6523 output_mov_long_double_arm_from_fpu (operands)
6524 rtx * operands;
6525 {
6526 int arm_reg0 = REGNO (operands[0]);
6527 rtx ops[3];
6528
6529 if (arm_reg0 == IP_REGNUM)
6530 abort ();
6531
6532 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6533 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6534 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
6535
6536 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
6537 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
6538 return "";
6539 }
6540
6541 /* Output a move from arm registers to arm registers of a long double.
6542 OPERANDS[0] is the destination.
6543 OPERANDS[1] is the source. */
6544
6545 const char *
6546 output_mov_long_double_arm_from_arm (operands)
6547 rtx * operands;
6548 {
6549 /* We have to be careful here because the two might overlap. */
6550 int dest_start = REGNO (operands[0]);
6551 int src_start = REGNO (operands[1]);
6552 rtx ops[2];
6553 int i;
6554
6555 if (dest_start < src_start)
6556 {
6557 for (i = 0; i < 3; i++)
6558 {
6559 ops[0] = gen_rtx_REG (SImode, dest_start + i);
6560 ops[1] = gen_rtx_REG (SImode, src_start + i);
6561 output_asm_insn ("mov%?\t%0, %1", ops);
6562 }
6563 }
6564 else
6565 {
6566 for (i = 2; i >= 0; i--)
6567 {
6568 ops[0] = gen_rtx_REG (SImode, dest_start + i);
6569 ops[1] = gen_rtx_REG (SImode, src_start + i);
6570 output_asm_insn ("mov%?\t%0, %1", ops);
6571 }
6572 }
6573
6574 return "";
6575 }
6576
6577
6578 /* Output a move from arm registers to an fpu register.
6579 OPERANDS[0] is an fpu register.
6580 OPERANDS[1] is the first register of an arm register pair. */
6581
6582 const char *
6583 output_mov_double_fpu_from_arm (operands)
6584 rtx * operands;
6585 {
6586 int arm_reg0 = REGNO (operands[1]);
6587 rtx ops[2];
6588
6589 if (arm_reg0 == IP_REGNUM)
6590 abort ();
6591
6592 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6593 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6594 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
6595 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
6596 return "";
6597 }
6598
6599 /* Output a move from an fpu register to arm registers.
6600 OPERANDS[0] is the first register of an arm register pair.
6601 OPERANDS[1] is an fpu register. */
6602
6603 const char *
6604 output_mov_double_arm_from_fpu (operands)
6605 rtx * operands;
6606 {
6607 int arm_reg0 = REGNO (operands[0]);
6608 rtx ops[2];
6609
6610 if (arm_reg0 == IP_REGNUM)
6611 abort ();
6612
6613 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6614 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6615 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
6616 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
6617 return "";
6618 }
6619
6620 /* Output a move between double words.
6621 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
6622 or MEM<-REG and all MEMs must be offsettable addresses. */
6623
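/* For example, a REG<-MEM move whose address is a plain base
   register becomes a single "ldmia rbase, {rlo, rhi}", while
   REG<-REG is expanded to two MOVs ordered so that the second
   source register is read before it can be overwritten.  */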
6624 const char *
6625 output_move_double (operands)
6626 rtx * operands;
6627 {
6628 enum rtx_code code0 = GET_CODE (operands[0]);
6629 enum rtx_code code1 = GET_CODE (operands[1]);
6630 rtx otherops[3];
6631
6632 if (code0 == REG)
6633 {
6634 int reg0 = REGNO (operands[0]);
6635
6636 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
6637
6638 if (code1 == REG)
6639 {
6640 int reg1 = REGNO (operands[1]);
6641 if (reg1 == IP_REGNUM)
6642 abort ();
6643
6644 /* Ensure the second source is not overwritten. */
6645 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
6646 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
6647 else
6648 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
6649 }
6650 else if (code1 == CONST_DOUBLE)
6651 {
6652 if (GET_MODE (operands[1]) == DFmode)
6653 {
6654 REAL_VALUE_TYPE r;
6655 long l[2];
6656
6657 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
6658 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
6659 otherops[1] = GEN_INT (l[1]);
6660 operands[1] = GEN_INT (l[0]);
6661 }
6662 else if (GET_MODE (operands[1]) != VOIDmode)
6663 abort ();
6664 else if (WORDS_BIG_ENDIAN)
6665 {
6666 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
6667 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
6668 }
6669 else
6670 {
6671 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
6672 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
6673 }
6674
6675 output_mov_immediate (operands);
6676 output_mov_immediate (otherops);
6677 }
6678 else if (code1 == CONST_INT)
6679 {
6680 #if HOST_BITS_PER_WIDE_INT > 32
6681 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
6682 what the upper word is. */
6683 if (WORDS_BIG_ENDIAN)
6684 {
6685 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
6686 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
6687 }
6688 else
6689 {
6690 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
6691 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
6692 }
6693 #else
6694 /* Sign extend the intval into the high-order word. */
6695 if (WORDS_BIG_ENDIAN)
6696 {
6697 otherops[1] = operands[1];
6698 operands[1] = (INTVAL (operands[1]) < 0
6699 ? constm1_rtx : const0_rtx);
6700 }
6701 else
6702 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
6703 #endif
6704 output_mov_immediate (otherops);
6705 output_mov_immediate (operands);
6706 }
6707 else if (code1 == MEM)
6708 {
6709 switch (GET_CODE (XEXP (operands[1], 0)))
6710 {
6711 case REG:
6712 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
6713 break;
6714
6715 case PRE_INC:
6716 abort (); /* Should never happen now. */
6717 break;
6718
6719 case PRE_DEC:
6720 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
6721 break;
6722
6723 case POST_INC:
6724 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
6725 break;
6726
6727 case POST_DEC:
6728 abort (); /* Should never happen now. */
6729 break;
6730
6731 case LABEL_REF:
6732 case CONST:
6733 output_asm_insn ("adr%?\t%0, %1", operands);
6734 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
6735 break;
6736
6737 default:
6738 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
6739 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
6740 {
6741 otherops[0] = operands[0];
6742 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
6743 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
6744
6745 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
6746 {
6747 if (GET_CODE (otherops[2]) == CONST_INT)
6748 {
6749 switch (INTVAL (otherops[2]))
6750 {
6751 case -8:
6752 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
6753 return "";
6754 case -4:
6755 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
6756 return "";
6757 case 4:
6758 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
6759 return "";
6760 }
6761
6762 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
6763 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
6764 else
6765 output_asm_insn ("add%?\t%0, %1, %2", otherops);
6766 }
6767 else
6768 output_asm_insn ("add%?\t%0, %1, %2", otherops);
6769 }
6770 else
6771 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
6772
6773 return "ldm%?ia\t%0, %M0";
6774 }
6775 else
6776 {
6777 otherops[1] = adjust_address (operands[1], VOIDmode, 4);
6778 /* Take care of overlapping base/data reg. */
6779 if (reg_mentioned_p (operands[0], operands[1]))
6780 {
6781 output_asm_insn ("ldr%?\t%0, %1", otherops);
6782 output_asm_insn ("ldr%?\t%0, %1", operands);
6783 }
6784 else
6785 {
6786 output_asm_insn ("ldr%?\t%0, %1", operands);
6787 output_asm_insn ("ldr%?\t%0, %1", otherops);
6788 }
6789 }
6790 }
6791 }
6792 else
6793 abort (); /* Constraints should prevent this. */
6794 }
6795 else if (code0 == MEM && code1 == REG)
6796 {
6797 if (REGNO (operands[1]) == IP_REGNUM)
6798 abort ();
6799
6800 switch (GET_CODE (XEXP (operands[0], 0)))
6801 {
6802 case REG:
6803 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
6804 break;
6805
6806 case PRE_INC:
6807 abort (); /* Should never happen now. */
6808 break;
6809
6810 case PRE_DEC:
6811 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
6812 break;
6813
6814 case POST_INC:
6815 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
6816 break;
6817
6818 case POST_DEC:
6819 abort (); /* Should never happen now. */
6820 break;
6821
6822 case PLUS:
6823 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
6824 {
6825 switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
6826 {
6827 case -8:
6828 output_asm_insn ("stm%?db\t%m0, %M1", operands);
6829 return "";
6830
6831 case -4:
6832 output_asm_insn ("stm%?da\t%m0, %M1", operands);
6833 return "";
6834
6835 case 4:
6836 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
6837 return "";
6838 }
6839 }
6840 /* Fall through */
6841
6842 default:
6843 otherops[0] = adjust_address (operands[0], VOIDmode, 4);
6844 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
6845 output_asm_insn ("str%?\t%1, %0", operands);
6846 output_asm_insn ("str%?\t%1, %0", otherops);
6847 }
6848 }
6849 else
6850 /* Constraints should prevent this. */
6851 abort ();
6852
6853 return "";
6854 }
6855
6856
6857 /* Output an arbitrary MOV reg, #n.
6858 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
6859
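/* For example, #0xFFFFFFFE is not a valid ARM immediate but its
   complement #1 is, so it is emitted as a single MVN; a value such
   as #0x00FF00FF fails both tests and is built up with MOV/ORR by
   output_multi_immediate below.  */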
6860 const char *
6861 output_mov_immediate (operands)
6862 rtx * operands;
6863 {
6864 HOST_WIDE_INT n = INTVAL (operands[1]);
6865
6866 /* Try to use one MOV. */
6867 if (const_ok_for_arm (n))
6868 output_asm_insn ("mov%?\t%0, %1", operands);
6869
6870 /* Try to use one MVN. */
6871 else if (const_ok_for_arm (~n))
6872 {
6873 operands[1] = GEN_INT (~n);
6874 output_asm_insn ("mvn%?\t%0, %1", operands);
6875 }
6876 else
6877 {
6878 int n_ones = 0;
6879 int i;
6880
6881 /* If all else fails, make it out of ORRs or BICs as appropriate. */
6882 for (i = 0; i < 32; i ++)
6883 if (n & 1 << i)
6884 n_ones ++;
6885
6886 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
6887 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
6888 else
6889 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
6890 }
6891
6892 return "";
6893 }
6894
6895 /* Output an ADD r, s, #n where n may be too big for one instruction.
6896 If adding zero to one register, output nothing. */
6897
6898 const char *
6899 output_add_immediate (operands)
6900 rtx * operands;
6901 {
6902 HOST_WIDE_INT n = INTVAL (operands[2]);
6903
6904 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
6905 {
6906 if (n < 0)
6907 output_multi_immediate (operands,
6908 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
6909 -n);
6910 else
6911 output_multi_immediate (operands,
6912 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
6913 n);
6914 }
6915
6916 return "";
6917 }
6918
6919 /* Output a multiple immediate operation.
6920 OPERANDS is the vector of operands referred to in the output patterns.
6921 INSTR1 is the output pattern to use for the first constant.
6922 INSTR2 is the output pattern to use for subsequent constants.
6923 IMMED_OP is the index of the constant slot in OPERANDS.
6924 N is the constant value. */
6925
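/* Continuing the example above, N == 0x00FF00FF with the MOV/ORR
   patterns produces
       mov     r0, #255            @ 0x000000FF
       orr     r0, r0, #16711680   @ 0x00FF0000
   one rotated 8-bit chunk per instruction.  */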
6926 static const char *
6927 output_multi_immediate (operands, instr1, instr2, immed_op, n)
6928 rtx * operands;
6929 const char * instr1;
6930 const char * instr2;
6931 int immed_op;
6932 HOST_WIDE_INT n;
6933 {
6934 #if HOST_BITS_PER_WIDE_INT > 32
6935 n &= 0xffffffff;
6936 #endif
6937
6938 if (n == 0)
6939 {
6940 /* Quick and easy output. */
6941 operands[immed_op] = const0_rtx;
6942 output_asm_insn (instr1, operands);
6943 }
6944 else
6945 {
6946 int i;
6947 const char * instr = instr1;
6948
6949 /* Note that n is never zero here (which would give no output). */
6950 for (i = 0; i < 32; i += 2)
6951 {
6952 if (n & (3 << i))
6953 {
6954 operands[immed_op] = GEN_INT (n & (255 << i));
6955 output_asm_insn (instr, operands);
6956 instr = instr2;
6957 i += 6;
6958 }
6959 }
6960 }
6961
6962 return "";
6963 }
6964
6965 /* Return the appropriate ARM instruction for the operation code.
6966 The returned result should not be overwritten. OP is the rtx of the
6967 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
6968 was shifted. */
6969
6970 const char *
6971 arithmetic_instr (op, shift_first_arg)
6972 rtx op;
6973 int shift_first_arg;
6974 {
6975 switch (GET_CODE (op))
6976 {
6977 case PLUS:
6978 return "add";
6979
6980 case MINUS:
6981 return shift_first_arg ? "rsb" : "sub";
6982
6983 case IOR:
6984 return "orr";
6985
6986 case XOR:
6987 return "eor";
6988
6989 case AND:
6990 return "and";
6991
6992 default:
6993 abort ();
6994 }
6995 }
6996
6997 /* Ensure valid constant shifts and return the appropriate shift mnemonic
6998 for the operation code. The returned result should not be overwritten.
6999 OP is the rtx code of the shift.
7000 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
7001 constant shift amount otherwise. */
7002
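/* For example, (ashift (reg) (const_int 3)) returns "asl" with
   *AMOUNTP == 3, while (mult (reg) (const_int 8)) also returns
   "asl" with the amount rewritten to int_log2 (8) == 3.  */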
7003 static const char *
7004 shift_op (op, amountp)
7005 rtx op;
7006 HOST_WIDE_INT *amountp;
7007 {
7008 const char * mnem;
7009 enum rtx_code code = GET_CODE (op);
7010
7011 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
7012 *amountp = -1;
7013 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
7014 *amountp = INTVAL (XEXP (op, 1));
7015 else
7016 abort ();
7017
7018 switch (code)
7019 {
7020 case ASHIFT:
7021 mnem = "asl";
7022 break;
7023
7024 case ASHIFTRT:
7025 mnem = "asr";
7026 break;
7027
7028 case LSHIFTRT:
7029 mnem = "lsr";
7030 break;
7031
7032 case ROTATERT:
7033 mnem = "ror";
7034 break;
7035
7036 case MULT:
7037 /* We never have to worry about the amount being other than a
7038 power of 2, since this case can never be reloaded from a reg. */
7039 if (*amountp != -1)
7040 *amountp = int_log2 (*amountp);
7041 else
7042 abort ();
7043 return "asl";
7044
7045 default:
7046 abort ();
7047 }
7048
7049 if (*amountp != -1)
7050 {
7051 /* This is not 100% correct, but follows from the desire to merge
7052 multiplication by a power of 2 with the recognizer for a
7053 shift. >=32 is not a valid shift for "asl", so we must try and
7054 output a shift that produces the correct arithmetical result.
7055 Using lsr #32 is identical except for the fact that the carry bit
7056 is not set correctly if we set the flags; but we never use the
7057 carry bit from such an operation, so we can ignore that. */
7058 if (code == ROTATERT)
7059 /* Rotate is just modulo 32. */
7060 *amountp &= 31;
7061 else if (*amountp != (*amountp & 31))
7062 {
7063 if (code == ASHIFT)
7064 mnem = "lsr";
7065 *amountp = 32;
7066 }
7067
7068 /* Shifts of 0 are no-ops. */
7069 if (*amountp == 0)
7070 return NULL;
7071 }
7072
7073 return mnem;
7074 }
7075
7076 /* Obtain the shift count from the power of two POWER. */
7077
7078 static HOST_WIDE_INT
7079 int_log2 (power)
7080 HOST_WIDE_INT power;
7081 {
7082 HOST_WIDE_INT shift = 0;
7083
7084 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
7085 {
7086 if (shift > 31)
7087 abort ();
7088 shift ++;
7089 }
7090
7091 return shift;
7092 }
7093
7094 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
7095 /bin/as is horribly restrictive. */
7096 #define MAX_ASCII_LEN 51
7097
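/* For example, a 60-character string of plain printable characters
   is emitted as two .ascii directives: a new directive is started
   as soon as the escaped output on the current line reaches
   MAX_ASCII_LEN (51) characters.  */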
7098 void
7099 output_ascii_pseudo_op (stream, p, len)
7100 FILE * stream;
7101 const unsigned char * p;
7102 int len;
7103 {
7104 int i;
7105 int len_so_far = 0;
7106
7107 fputs ("\t.ascii\t\"", stream);
7108
7109 for (i = 0; i < len; i++)
7110 {
7111 int c = p[i];
7112
7113 if (len_so_far >= MAX_ASCII_LEN)
7114 {
7115 fputs ("\"\n\t.ascii\t\"", stream);
7116 len_so_far = 0;
7117 }
7118
7119 switch (c)
7120 {
7121 case TARGET_TAB:
7122 fputs ("\\t", stream);
7123 len_so_far += 2;
7124 break;
7125
7126 case TARGET_FF:
7127 fputs ("\\f", stream);
7128 len_so_far += 2;
7129 break;
7130
7131 case TARGET_BS:
7132 fputs ("\\b", stream);
7133 len_so_far += 2;
7134 break;
7135
7136 case TARGET_CR:
7137 fputs ("\\r", stream);
7138 len_so_far += 2;
7139 break;
7140
7141 case TARGET_NEWLINE:
7142 fputs ("\\n", stream);
7143 c = p [i + 1];
7144 if ((c >= ' ' && c <= '~')
7145 || c == TARGET_TAB)
7146 /* This is a good place for a line break. */
7147 len_so_far = MAX_ASCII_LEN;
7148 else
7149 len_so_far += 2;
7150 break;
7151
7152 case '\"':
7153 case '\\':
7154 putc ('\\', stream);
7155 len_so_far++;
7156 /* drop through. */
7157
7158 default:
7159 if (c >= ' ' && c <= '~')
7160 {
7161 putc (c, stream);
7162 len_so_far++;
7163 }
7164 else
7165 {
7166 fprintf (stream, "\\%03o", c);
7167 len_so_far += 4;
7168 }
7169 break;
7170 }
7171 }
7172
7173 fputs ("\"\n", stream);
7174 }
7175 \f
7176 /* Compute the register save mask for registers 0 through 12
7177 inclusive. This code is used by both arm_compute_save_reg_mask
7178 and arm_compute_initial_elimination_offset. */
7179
7180 static unsigned long
7181 arm_compute_save_reg0_reg12_mask ()
7182 {
7183 unsigned long func_type = arm_current_func_type ();
7184 unsigned int save_reg_mask = 0;
7185 unsigned int reg;
7186
7187 if (IS_INTERRUPT (func_type))
7188 {
7189 unsigned int max_reg;
7190 /* Interrupt functions must not corrupt any registers,
7191 even call clobbered ones. If this is a leaf function
7192 we can just examine the registers used by the RTL, but
7193 otherwise we have to assume that whatever function is
7194 called might clobber anything, and so we have to save
7195 all the call-clobbered registers as well. */
7196 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
7197 /* FIQ handlers have registers r8 - r12 banked, so
7198 we only need to check r0 - r7. Normal ISRs only
7199 bank r14 and r15, so we must check up to r12.
7200 r13 is the stack pointer which is always preserved,
7201 so we do not need to consider it here. */
7202 max_reg = 7;
7203 else
7204 max_reg = 12;
7205
7206 for (reg = 0; reg <= max_reg; reg++)
7207 if (regs_ever_live[reg]
7208 || (! current_function_is_leaf && call_used_regs [reg]))
7209 save_reg_mask |= (1 << reg);
7210 }
7211 else
7212 {
7213 /* In the normal case we only need to save those registers
7214 which are call saved and which are used by this function. */
7215 for (reg = 0; reg <= 10; reg++)
7216 if (regs_ever_live[reg] && ! call_used_regs [reg])
7217 save_reg_mask |= (1 << reg);
7218
7219 /* Handle the frame pointer as a special case. */
7220 if (! TARGET_APCS_FRAME
7221 && ! frame_pointer_needed
7222 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
7223 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
7224 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
7225
7226 /* If we aren't loading the PIC register,
7227 don't stack it even though it may be live. */
7228 if (flag_pic
7229 && ! TARGET_SINGLE_PIC_BASE
7230 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
7231 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
7232 }
7233
7234 return save_reg_mask;
7235 }
7236
7237 /* Compute a bit mask of which registers need to be
7238 saved on the stack for the current function. */
7239
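/* For example, a normal function that uses r4 and makes calls ends
   up with r4 and LR in the mask; if it also needs a frame, FP, IP,
   LR and PC are added so that the standard APCS frame can be
   popped in one go on exit.  */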
7240 static unsigned long
7241 arm_compute_save_reg_mask ()
7242 {
7243 unsigned int save_reg_mask = 0;
7244 unsigned long func_type = arm_current_func_type ();
7245
7246 if (IS_NAKED (func_type))
7247 /* This should never really happen. */
7248 return 0;
7249
7250 /* If we are creating a stack frame, then we must save the frame pointer,
7251 IP (which will hold the old stack pointer), LR and the PC. */
7252 if (frame_pointer_needed)
7253 save_reg_mask |=
7254 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
7255 | (1 << IP_REGNUM)
7256 | (1 << LR_REGNUM)
7257 | (1 << PC_REGNUM);
7258
7259 /* Volatile functions do not return, so there
7260 is no need to save any other registers. */
7261 if (IS_VOLATILE (func_type))
7262 return save_reg_mask;
7263
7264 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
7265
7266 /* Decide if we need to save the link register.
7267 Interrupt routines have their own banked link register,
7268 so they never need to save it.
7269 Otherwise if we do not use the link register we do not need to save
7270 it. If we are pushing other registers onto the stack however, we
7271 can save an instruction in the epilogue by pushing the link register
7272 now and then popping it back into the PC. This incurs extra memory
7273 accesses though, so we only do it when optimising for size, and only
7274 if we know that we will not need a fancy return sequence. */
7275 if (regs_ever_live [LR_REGNUM]
7276 || (save_reg_mask
7277 && optimize_size
7278 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
7279 save_reg_mask |= 1 << LR_REGNUM;
7280
7281 if (cfun->machine->lr_save_eliminated)
7282 save_reg_mask &= ~ (1 << LR_REGNUM);
7283
7284 return save_reg_mask;
7285 }
7286
7287 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
7288 everything bar the final return instruction. */
7289
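/* For example, a normal APCS-32 function that pushed {r4, lr}
   returns with a single "ldmfd sp!, {r4, pc}", the saved LR being
   popped straight into the PC.  */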
7290 const char *
7291 output_return_instruction (operand, really_return, reverse)
7292 rtx operand;
7293 int really_return;
7294 int reverse;
7295 {
7296 char conditional[10];
7297 char instr[100];
7298 int reg;
7299 unsigned long live_regs_mask;
7300 unsigned long func_type;
7301
7302 func_type = arm_current_func_type ();
7303
7304 if (IS_NAKED (func_type))
7305 return "";
7306
7307 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
7308 {
7309 /* If this function was declared non-returning, and we have found a tail
7310 call, then we have to trust that the called function won't return. */
7311 if (really_return)
7312 {
7313 rtx ops[2];
7314
7315 /* Otherwise, trap an attempted return by aborting. */
7316 ops[0] = operand;
7317 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
7318 : "abort");
7319 assemble_external_libcall (ops[1]);
7320 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
7321 }
7322
7323 return "";
7324 }
7325
7326 if (current_function_calls_alloca && !really_return)
7327 abort ();
7328
7329 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
7330
7331 return_used_this_function = 1;
7332
7333 live_regs_mask = arm_compute_save_reg_mask ();
7334
7335 if (live_regs_mask)
7336 {
7337 const char * return_reg;
7338
7339 /* If we do not have any special requirements for function exit
7340 (e.g. interworking or an ISR) then we can load the return address
7341 directly into the PC. Otherwise we must load it into LR. */
7342 if (really_return
7343 && ! TARGET_INTERWORK)
7344 return_reg = reg_names[PC_REGNUM];
7345 else
7346 return_reg = reg_names[LR_REGNUM];
7347
7348 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
7349 /* There are two possible reasons for the IP register being saved.
7350 Either a stack frame was created, in which case IP contains the
7351 old stack pointer, or an ISR routine corrupted it. If this is an
7352 ISR routine then just restore IP, otherwise restore IP into SP. */
7353 if (! IS_INTERRUPT (func_type))
7354 {
7355 live_regs_mask &= ~ (1 << IP_REGNUM);
7356 live_regs_mask |= (1 << SP_REGNUM);
7357 }
7358
7359 /* On some ARM architectures it is faster to use LDR rather than
7360 LDM to load a single register. On other architectures, the
7361 cost is the same. In 26 bit mode, or for exception handlers,
7362 we have to use LDM to load the PC so that the CPSR is also
7363 restored. */
7364 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
7365 {
7366 if (live_regs_mask == (unsigned int)(1 << reg))
7367 break;
7368 }
7369 if (reg <= LAST_ARM_REGNUM
7370 && (reg != LR_REGNUM
7371 || ! really_return
7372 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
7373 {
7374 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
7375 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
7376 }
7377 else
7378 {
7379 char *p;
7380 int first = 1;
7381
7382 /* Generate the load multiple instruction to restore the registers. */
7383 if (frame_pointer_needed)
7384 sprintf (instr, "ldm%sea\t%%|fp, {", conditional);
7385 else if (live_regs_mask & (1 << SP_REGNUM))
7386 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
7387 else
7388 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
7389
7390 p = instr + strlen (instr);
7391
7392 for (reg = 0; reg <= SP_REGNUM; reg++)
7393 if (live_regs_mask & (1 << reg))
7394 {
7395 int l = strlen (reg_names[reg]);
7396
7397 if (first)
7398 first = 0;
7399 else
7400 {
7401 memcpy (p, ", ", 2);
7402 p += 2;
7403 }
7404
7405 memcpy (p, "%|", 2);
7406 memcpy (p + 2, reg_names[reg], l);
7407 p += l + 2;
7408 }
7409
7410 if (live_regs_mask & (1 << LR_REGNUM))
7411 {
7412 int l = strlen (return_reg);
7413
7414 if (! first)
7415 {
7416 memcpy (p, ", ", 2);
7417 p += 2;
7418 }
7419
7420 memcpy (p, "%|", 2);
7421 memcpy (p + 2, return_reg, l);
7422 strcpy (p + 2 + l, ((TARGET_APCS_32
7423 && !IS_INTERRUPT (func_type))
7424 || !really_return)
7425 ? "}" : "}^");
7426 }
7427 else
7428 strcpy (p, "}");
7429 }
7430
7431 output_asm_insn (instr, & operand);
7432
7433 /* See if we need to generate an extra instruction to
7434 perform the actual function return. */
7435 if (really_return
7436 && func_type != ARM_FT_INTERWORKED
7437 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
7438 {
7439 /* The return has already been handled
7440 by loading the LR into the PC. */
7441 really_return = 0;
7442 }
7443 }
7444
7445 if (really_return)
7446 {
7447 switch ((int) ARM_FUNC_TYPE (func_type))
7448 {
7449 case ARM_FT_ISR:
7450 case ARM_FT_FIQ:
7451 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
7452 break;
7453
7454 case ARM_FT_INTERWORKED:
7455 sprintf (instr, "bx%s\t%%|lr", conditional);
7456 break;
7457
7458 case ARM_FT_EXCEPTION:
7459 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
7460 break;
7461
7462 default:
7463 /* ARMv5 implementations always provide BX, so interworking
7464 is the default unless APCS-26 is in use. */
7465 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
7466 sprintf (instr, "bx%s\t%%|lr", conditional);
7467 else
7468 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
7469 conditional, TARGET_APCS_32 ? "" : "s");
7470 break;
7471 }
7472
7473 output_asm_insn (instr, & operand);
7474 }
7475
7476 return "";
7477 }
7478
7479 /* Write the function name into the code section, directly preceding
7480 the function prologue.
7481
7482 Code will be output similar to this:
7483 t0
7484 .ascii "arm_poke_function_name", 0
7485 .align
7486 t1
7487 .word 0xff000000 + (t1 - t0)
7488 arm_poke_function_name
7489 mov ip, sp
7490 stmfd sp!, {fp, ip, lr, pc}
7491 sub fp, ip, #4
7492
7493 When performing a stack backtrace, code can inspect the value
7494 of 'pc' stored at 'fp' + 0. If the trace function then looks
7495 at location pc - 12 and the top 8 bits are set, then we know
7496 that there is a function name embedded immediately preceding this
7497 location, whose length is given by ((pc[-3]) & ~0xff000000).
7498
7499 We assume that pc is declared as a pointer to an unsigned long.
7500
7501 It is of no benefit to output the function name if we are assembling
7502 a leaf function. These function types will not contain a stack
7503 backtrace structure, therefore it is not possible to determine the
7504 function name. */
7505
7506 void
7507 arm_poke_function_name (stream, name)
7508 FILE * stream;
7509 const char * name;
7510 {
7511 unsigned long alignlength;
7512 unsigned long length;
7513 rtx x;
7514
7515 length = strlen (name) + 1;
7516 alignlength = ROUND_UP (length);
7517
7518 ASM_OUTPUT_ASCII (stream, name, length);
7519 ASM_OUTPUT_ALIGN (stream, 2);
7520 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
7521 assemble_aligned_integer (UNITS_PER_WORD, x);
7522 }
7523
7524 /* Place some comments into the assembler stream
7525 describing the current function. */
7526
7527 static void
7528 arm_output_function_prologue (f, frame_size)
7529 FILE * f;
7530 HOST_WIDE_INT frame_size;
7531 {
7532 unsigned long func_type;
7533
7534 if (!TARGET_ARM)
7535 {
7536 thumb_output_function_prologue (f, frame_size);
7537 return;
7538 }
7539
7540 /* Sanity check. */
7541 if (arm_ccfsm_state || arm_target_insn)
7542 abort ();
7543
7544 func_type = arm_current_func_type ();
7545
7546 switch ((int) ARM_FUNC_TYPE (func_type))
7547 {
7548 default:
7549 case ARM_FT_NORMAL:
7550 break;
7551 case ARM_FT_INTERWORKED:
7552 asm_fprintf (f, "\t%@ Function supports interworking.\n");
7553 break;
7554 case ARM_FT_EXCEPTION_HANDLER:
7555 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
7556 break;
7557 case ARM_FT_ISR:
7558 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
7559 break;
7560 case ARM_FT_FIQ:
7561 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
7562 break;
7563 case ARM_FT_EXCEPTION:
7564 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
7565 break;
7566 }
7567
7568 if (IS_NAKED (func_type))
7569 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
7570
7571 if (IS_VOLATILE (func_type))
7572 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
7573
7574 if (IS_NESTED (func_type))
7575 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
7576
7577 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %d\n",
7578 current_function_args_size,
7579 current_function_pretend_args_size, frame_size);
7580
7581 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
7582 frame_pointer_needed,
7583 cfun->machine->uses_anonymous_args);
7584
7585 if (cfun->machine->lr_save_eliminated)
7586 asm_fprintf (f, "\t%@ link register save eliminated.\n");
7587
7588 #ifdef AOF_ASSEMBLER
7589 if (flag_pic)
7590 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
7591 #endif
7592
7593 return_used_this_function = 0;
7594 }
7595
7596 const char *
7597 arm_output_epilogue (really_return)
7598 int really_return;
7599 {
7600 int reg;
7601 unsigned long saved_regs_mask;
7602 unsigned long func_type;
7603 /* Floats_offset is the offset from the "virtual" frame. In an APCS
7604 frame that is $fp + 4 for a non-variadic function. */
7605 int floats_offset = 0;
7606 rtx operands[3];
7607 int frame_size = arm_get_frame_size ();
7608 FILE * f = asm_out_file;
7609 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
7610
7611 /* If we have already generated the return instruction
7612 then it is futile to generate anything else. */
7613 if (use_return_insn (FALSE) && return_used_this_function)
7614 return "";
7615
7616 func_type = arm_current_func_type ();
7617
7618 if (IS_NAKED (func_type))
7619 /* Naked functions don't have epilogues. */
7620 return "";
7621
7622 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
7623 {
7624 rtx op;
7625
7626 /* A volatile function should never return. Call abort. */
7627 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
7628 assemble_external_libcall (op);
7629 output_asm_insn ("bl\t%a0", &op);
7630
7631 return "";
7632 }
7633
7634 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
7635 && ! really_return)
7636 /* If we are throwing an exception, then we really must
7637 be doing a return, so we can't tail-call. */
7638 abort ();
7639
7640 saved_regs_mask = arm_compute_save_reg_mask ();
7641
7642 /* XXX We should adjust floats_offset for any anonymous args, and then
7643 re-adjust vfp_offset below to compensate. */
7644
7645 /* Compute how far away the floats will be. */
7646 for (reg = 0; reg <= LAST_ARM_REGNUM; reg ++)
7647 if (saved_regs_mask & (1 << reg))
7648 floats_offset += 4;
7649
7650 if (frame_pointer_needed)
7651 {
7652 int vfp_offset = 4;
7653
7654 if (arm_fpu_arch == FP_SOFT2)
7655 {
7656 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
7657 if (regs_ever_live[reg] && !call_used_regs[reg])
7658 {
7659 floats_offset += 12;
7660 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
7661 reg, FP_REGNUM, floats_offset - vfp_offset);
7662 }
7663 }
7664 else
7665 {
7666 int start_reg = LAST_ARM_FP_REGNUM;
7667
7668 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
7669 {
7670 if (regs_ever_live[reg] && !call_used_regs[reg])
7671 {
7672 floats_offset += 12;
7673
7674 /* We can't unstack more than four registers at once. */
7675 if (start_reg - reg == 3)
7676 {
7677 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
7678 reg, FP_REGNUM, floats_offset - vfp_offset);
7679 start_reg = reg - 1;
7680 }
7681 }
7682 else
7683 {
7684 if (reg != start_reg)
7685 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
7686 reg + 1, start_reg - reg,
7687 FP_REGNUM, floats_offset - vfp_offset);
7688 start_reg = reg - 1;
7689 }
7690 }
7691
7692 /* Just in case the last register checked also needs unstacking. */
7693 if (reg != start_reg)
7694 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
7695 reg + 1, start_reg - reg,
7696 FP_REGNUM, floats_offset - vfp_offset);
7697 }
7698
7699 /* saved_regs_mask should contain the IP, which at the time of stack
7700 frame generation actually contains the old stack pointer. So a
7701 quick way to unwind the stack is just to pop the IP register directly
7702 into the stack pointer. */
7703 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
7704 abort ();
7705 saved_regs_mask &= ~ (1 << IP_REGNUM);
7706 saved_regs_mask |= (1 << SP_REGNUM);
7707
7708 /* There are two registers left in saved_regs_mask - LR and PC. We
7709 only need to restore the LR register (the return address), but to
7710 save time we can load it directly into the PC, unless we need a
7711 special function exit sequence, or we are not really returning. */
7712 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
7713 /* Delete the LR from the register mask, so that the LR on
7714 the stack is loaded into the PC in the register mask. */
7715 saved_regs_mask &= ~ (1 << LR_REGNUM);
7716 else
7717 saved_regs_mask &= ~ (1 << PC_REGNUM);
7718
7719 print_multi_reg (f, "ldmea\t%r", FP_REGNUM, saved_regs_mask);
7720
7721 if (IS_INTERRUPT (func_type))
7722 /* Interrupt handlers will have pushed the
7723 IP onto the stack, so restore it now. */
7724 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, 1 << IP_REGNUM);
7725 }
7726 else
7727 {
7728 /* Restore stack pointer if necessary. */
7729 if (frame_size + current_function_outgoing_args_size != 0)
7730 {
7731 operands[0] = operands[1] = stack_pointer_rtx;
7732 operands[2] = GEN_INT (frame_size
7733 + current_function_outgoing_args_size);
7734 output_add_immediate (operands);
7735 }
7736
7737 if (arm_fpu_arch == FP_SOFT2)
7738 {
7739 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
7740 if (regs_ever_live[reg] && !call_used_regs[reg])
7741 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
7742 reg, SP_REGNUM);
7743 }
7744 else
7745 {
7746 int start_reg = FIRST_ARM_FP_REGNUM;
7747
7748 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
7749 {
7750 if (regs_ever_live[reg] && !call_used_regs[reg])
7751 {
7752 if (reg - start_reg == 3)
7753 {
7754 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
7755 start_reg, SP_REGNUM);
7756 start_reg = reg + 1;
7757 }
7758 }
7759 else
7760 {
7761 if (reg != start_reg)
7762 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
7763 start_reg, reg - start_reg,
7764 SP_REGNUM);
7765
7766 start_reg = reg + 1;
7767 }
7768 }
7769
7770 /* Just in case the last register checked also needs unstacking. */
7771 if (reg != start_reg)
7772 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
7773 start_reg, reg - start_reg, SP_REGNUM);
7774 }
7775
7776 /* If we can, restore the LR into the PC. */
7777 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
7778 && really_return
7779 && current_function_pretend_args_size == 0
7780 && saved_regs_mask & (1 << LR_REGNUM))
7781 {
7782 saved_regs_mask &= ~ (1 << LR_REGNUM);
7783 saved_regs_mask |= (1 << PC_REGNUM);
7784 }
7785
7786 /* Load the registers off the stack. If we only have one register
7787 to load, use the LDR instruction - it is faster. */
7788 if (saved_regs_mask == (1 << LR_REGNUM))
7789 {
7790 /* The exception handler ignores the LR, so we do
7791 not really need to load it off the stack. */
7792 if (eh_ofs)
7793 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
7794 else
7795 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
7796 }
7797 else if (saved_regs_mask)
7798 {
7799 if (saved_regs_mask & (1 << SP_REGNUM))
7800 /* Note - write back to the stack register is not enabled
7801 (i.e. "ldmfd sp!..."). We know that the stack pointer is
7802 in the list of registers and if we add writeback the
7803 instruction becomes UNPREDICTABLE. */
7804 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
7805 else
7806 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
7807 }
7808
7809 if (current_function_pretend_args_size)
7810 {
7811 /* Unwind the pre-pushed regs. */
7812 operands[0] = operands[1] = stack_pointer_rtx;
7813 operands[2] = GEN_INT (current_function_pretend_args_size);
7814 output_add_immediate (operands);
7815 }
7816 }
7817
7818 #if 0
7819 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER)
7820 /* Adjust the stack to remove the exception handler stuff. */
7821 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
7822 REGNO (eh_ofs));
7823 #endif
7824
7825 if (! really_return
7826 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
7827 && current_function_pretend_args_size == 0
7828 && saved_regs_mask & (1 << PC_REGNUM)))
7829 return "";
7830
7831 /* Generate the return instruction. */
7832 switch ((int) ARM_FUNC_TYPE (func_type))
7833 {
7834 case ARM_FT_EXCEPTION_HANDLER:
7835 /* Even in 26-bit mode we do a mov (rather than a movs)
7836 because we don't have the PSR bits set in the address. */
7837 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
7838 break;
7839
7840 case ARM_FT_ISR:
7841 case ARM_FT_FIQ:
7842 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
7843 break;
7844
7845 case ARM_FT_EXCEPTION:
7846 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7847 break;
7848
7849 case ARM_FT_INTERWORKED:
7850 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
7851 break;
7852
7853 default:
7854 if (frame_pointer_needed)
7855 /* If we used the frame pointer then the return address
7856 will have been loaded off the stack directly into the
7857 PC, so there is no need to issue a MOV instruction
7858 here. */
7859 ;
7860 else if (current_function_pretend_args_size == 0
7861 && (saved_regs_mask & (1 << LR_REGNUM)))
7862 /* Similarly we may have been able to load LR into the PC
7863 even if we did not create a stack frame. */
7864 ;
7865 else if (TARGET_APCS_32)
7866 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7867 else
7868 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7869 break;
7870 }
7871
7872 return "";
7873 }
7874
7875 static void
7876 arm_output_function_epilogue (file, frame_size)
7877 FILE *file ATTRIBUTE_UNUSED;
7878 HOST_WIDE_INT frame_size;
7879 {
7880 if (TARGET_THUMB)
7881 {
7882 /* ??? Probably not safe to set this here, since it assumes that a
7883 function will be emitted as assembly immediately after we generate
7884 RTL for it. This does not happen for inline functions. */
7885 return_used_this_function = 0;
7886 }
7887 else
7888 {
7889 /* We need to take into account any stack-frame rounding. */
7890 frame_size = arm_get_frame_size ();
7891
7892 if (use_return_insn (FALSE)
7893 && return_used_this_function
7894 && (frame_size + current_function_outgoing_args_size) != 0
7895 && !frame_pointer_needed)
7896 abort ();
7897
7898 /* Reset the ARM-specific per-function variables. */
7899 after_arm_reorg = 0;
7900 }
7901 }
7902
7903 /* Generate and emit an insn that we will recognize as a push_multi.
7904 Unfortunately, since this insn does not reflect very well the actual
7905 semantics of the operation, we need to annotate the insn for the benefit
7906 of DWARF2 frame unwind information. */
7907
7908 static rtx
7909 emit_multi_reg_push (mask)
7910 int mask;
7911 {
7912 int num_regs = 0;
7913 int num_dwarf_regs;
7914 int i, j;
7915 rtx par;
7916 rtx dwarf;
7917 int dwarf_par_index;
7918 rtx tmp, reg;
7919
7920 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7921 if (mask & (1 << i))
7922 num_regs++;
7923
7924 if (num_regs == 0 || num_regs > 16)
7925 abort ();
7926
7927 /* We don't record the PC in the dwarf frame information. */
7928 num_dwarf_regs = num_regs;
7929 if (mask & (1 << PC_REGNUM))
7930 num_dwarf_regs--;
7931
7932 /* For the body of the insn we are going to generate an UNSPEC in
7933 parallel with several USEs. This allows the insn to be recognized
7934 by the push_multi pattern in the arm.md file. The insn looks
7935 something like this:
7936
7937 (parallel [
7938 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
7939 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
7940 (use (reg:SI 11 fp))
7941 (use (reg:SI 12 ip))
7942 (use (reg:SI 14 lr))
7943 (use (reg:SI 15 pc))
7944 ])
7945
7946 For the frame note however, we try to be more explicit and actually
7947 show each register being stored into the stack frame, plus a (single)
7948 decrement of the stack pointer. We do it this way in order to be
7949 friendly to the stack unwinding code, which only wants to see a single
7950 stack decrement per instruction. The RTL we generate for the note looks
7951 something like this:
7952
7953 (sequence [
7954 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
7955 (set (mem:SI (reg:SI sp)) (reg:SI r4))
7956 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
7957 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
7958 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
7959 ])
7960
7961 This sequence is used both by the code to support stack unwinding for
7962 exceptions handlers and the code to generate dwarf2 frame debugging. */
7963
7964 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
7965 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
7966 dwarf_par_index = 1;
7967
7968 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7969 {
7970 if (mask & (1 << i))
7971 {
7972 reg = gen_rtx_REG (SImode, i);
7973
7974 XVECEXP (par, 0, 0)
7975 = gen_rtx_SET (VOIDmode,
7976 gen_rtx_MEM (BLKmode,
7977 gen_rtx_PRE_DEC (BLKmode,
7978 stack_pointer_rtx)),
7979 gen_rtx_UNSPEC (BLKmode,
7980 gen_rtvec (1, reg),
7981 UNSPEC_PUSH_MULT));
7982
7983 if (i != PC_REGNUM)
7984 {
7985 tmp = gen_rtx_SET (VOIDmode,
7986 gen_rtx_MEM (SImode, stack_pointer_rtx),
7987 reg);
7988 RTX_FRAME_RELATED_P (tmp) = 1;
7989 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
7990 dwarf_par_index++;
7991 }
7992
7993 break;
7994 }
7995 }
7996
7997 for (j = 1, i++; j < num_regs; i++)
7998 {
7999 if (mask & (1 << i))
8000 {
8001 reg = gen_rtx_REG (SImode, i);
8002
8003 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
8004
8005 if (i != PC_REGNUM)
8006 {
8007 tmp = gen_rtx_SET (VOIDmode,
8008 gen_rtx_MEM (SImode,
8009 plus_constant (stack_pointer_rtx,
8010 4 * j)),
8011 reg);
8012 RTX_FRAME_RELATED_P (tmp) = 1;
8013 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
8014 }
8015
8016 j++;
8017 }
8018 }
8019
8020 par = emit_insn (par);
8021
8022 tmp = gen_rtx_SET (SImode,
8023 stack_pointer_rtx,
8024 gen_rtx_PLUS (SImode,
8025 stack_pointer_rtx,
8026 GEN_INT (-4 * num_regs)));
8027 RTX_FRAME_RELATED_P (tmp) = 1;
8028 XVECEXP (dwarf, 0, 0) = tmp;
8029
8030 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8031 REG_NOTES (par));
8032 return par;
8033 }
8034
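/* Generate and emit an insn that stores COUNT FPA registers, starting at
   BASE_REG, onto the stack (an SFM-style store-multiple).  As with
   emit_multi_reg_push, the insn is annotated with a REG_FRAME_RELATED_EXPR
   note that describes each individual store for the DWARF2 unwinder.  */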
8035 static rtx
8036 emit_sfm (base_reg, count)
8037 int base_reg;
8038 int count;
8039 {
8040 rtx par;
8041 rtx dwarf;
8042 rtx tmp, reg;
8043 int i;
8044
8045 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8046 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8047
8048 reg = gen_rtx_REG (XFmode, base_reg++);
8049
8050 XVECEXP (par, 0, 0)
8051 = gen_rtx_SET (VOIDmode,
8052 gen_rtx_MEM (BLKmode,
8053 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8054 gen_rtx_UNSPEC (BLKmode,
8055 gen_rtvec (1, reg),
8056 UNSPEC_PUSH_MULT));
8057 tmp
8058 = gen_rtx_SET (VOIDmode,
8059 gen_rtx_MEM (XFmode,
8060 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8061 reg);
8062 RTX_FRAME_RELATED_P (tmp) = 1;
8063 XVECEXP (dwarf, 0, count - 1) = tmp;
8064
8065 for (i = 1; i < count; i++)
8066 {
8067 reg = gen_rtx_REG (XFmode, base_reg++);
8068 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8069
8070 tmp = gen_rtx_SET (VOIDmode,
8071 gen_rtx_MEM (XFmode,
8072 gen_rtx_PRE_DEC (BLKmode,
8073 stack_pointer_rtx)),
8074 reg);
8075 RTX_FRAME_RELATED_P (tmp) = 1;
8076 XVECEXP (dwarf, 0, count - i - 1) = tmp;
8077 }
8078
8079 par = emit_insn (par);
8080 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8081 REG_NOTES (par));
8082 return par;
8083 }
8084
8085 /* Compute the distance from register FROM to register TO.
8086 These can be the arg pointer (26), the soft frame pointer (25),
8087 the stack pointer (13) or the hard frame pointer (11).
8088 Typical stack layout looks like this:
8089
8090 old stack pointer -> | |
8091 ----
8092 | | \
8093 | | saved arguments for
8094 | | vararg functions
8095 | | /
8096 --
8097 hard FP & arg pointer -> | | \
8098 | | stack
8099 | | frame
8100 | | /
8101 --
8102 | | \
8103 | | call saved
8104 | | registers
8105 soft frame pointer -> | | /
8106 --
8107 | | \
8108 | | local
8109 | | variables
8110 | | /
8111 --
8112 | | \
8113 | | outgoing
8114 | | arguments
8115 current stack pointer -> | | /
8116 --
8117
8118 For a given function some or all of these stack components
8119 may not be needed, giving rise to the possibility of
8120 eliminating some of the registers.
8121
8122 The values returned by this function must reflect the behavior
8123 of arm_expand_prologue() and arm_compute_save_reg_mask().
8124
8125 The sign of the number returned reflects the direction of stack
8126 growth, so the values are positive for all eliminations except
8127 from the soft frame pointer to the hard frame pointer. */
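/* For example (sizes illustrative): with a frame pointer, sixteen bytes
   of call-saved core registers, eight bytes of locals and no outgoing
   arguments, the ARG_POINTER to STACK_POINTER case below computes
   16 + 16 + 8 + 0 - 4 = 36.  */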
8128
8129 unsigned int
8130 arm_compute_initial_elimination_offset (from, to)
8131 unsigned int from;
8132 unsigned int to;
8133 {
8134 unsigned int local_vars = arm_get_frame_size ();
8135 unsigned int outgoing_args = current_function_outgoing_args_size;
8136 unsigned int stack_frame;
8137 unsigned int call_saved_registers;
8138 unsigned long func_type;
8139
8140 func_type = arm_current_func_type ();
8141
8142 /* Volatile functions never return, so there is
8143 no need to save call saved registers. */
8144 call_saved_registers = 0;
8145 if (! IS_VOLATILE (func_type))
8146 {
8147 unsigned int reg_mask;
8148 unsigned int reg;
8149
8150 /* Make sure that we compute which registers will be saved
8151 on the stack using the same algorithm that is used by
8152 arm_compute_save_reg_mask(). */
8153 reg_mask = arm_compute_save_reg0_reg12_mask ();
8154
8155 /* Now count the number of bits set in reg_mask.
8156 For each set bit we need 4 bytes of stack space. */
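      /* The expression (reg_mask & -reg_mask) isolates the lowest set bit,
	 so each pass of this loop strips one register from the mask.  For
	 example (mask value illustrative): 0x16 -> 0x14 -> 0x10 -> 0, which
	 is three iterations and hence 12 bytes of stack.  */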
8157 while (reg_mask)
8158 {
8159 call_saved_registers += 4;
8160 reg_mask = reg_mask & ~ (reg_mask & - reg_mask);
8161 }
8162
8163 if (regs_ever_live[LR_REGNUM]
8164 /* If a stack frame is going to be created, the LR will
8165 be saved as part of that, so we do not need to allow
8166 for it here. */
8167 && ! frame_pointer_needed)
8168 call_saved_registers += 4;
8169
8170 /* If the hard floating point registers are going to be
8171 used then they must be saved on the stack as well.
8172 Each register occupies 12 bytes of stack space. */
8173 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg ++)
8174 if (regs_ever_live[reg] && ! call_used_regs[reg])
8175 call_saved_registers += 12;
8176 }
8177
8178 /* The stack frame contains 4 registers - the old frame pointer,
8179 the old stack pointer, the return address and PC of the start
8180 of the function. */
8181 stack_frame = frame_pointer_needed ? 16 : 0;
8182
8183 /* OK, now we have enough information to compute the distances.
8184 There must be an entry in these switch tables for each pair
8185 of registers in ELIMINABLE_REGS, even if some of the entries
8186 seem to be redundant or useless. */
8187 switch (from)
8188 {
8189 case ARG_POINTER_REGNUM:
8190 switch (to)
8191 {
8192 case THUMB_HARD_FRAME_POINTER_REGNUM:
8193 return 0;
8194
8195 case FRAME_POINTER_REGNUM:
8196 /* This is the reverse of the soft frame pointer
8197 to hard frame pointer elimination below. */
8198 if (call_saved_registers == 0 && stack_frame == 0)
8199 return 0;
8200 return (call_saved_registers + stack_frame - 4);
8201
8202 case ARM_HARD_FRAME_POINTER_REGNUM:
8203 /* If there is no stack frame then the hard
8204 frame pointer and the arg pointer coincide. */
8205 if (stack_frame == 0 && call_saved_registers != 0)
8206 return 0;
8207 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
8208 return (frame_pointer_needed
8209 && current_function_needs_context
8210 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
8211
8212 case STACK_POINTER_REGNUM:
8213 /* If nothing has been pushed on the stack at all
8214 then this will return -4. This *is* correct! */
8215 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
8216
8217 default:
8218 abort ();
8219 }
8220 break;
8221
8222 case FRAME_POINTER_REGNUM:
8223 switch (to)
8224 {
8225 case THUMB_HARD_FRAME_POINTER_REGNUM:
8226 return 0;
8227
8228 case ARM_HARD_FRAME_POINTER_REGNUM:
8229 /* The hard frame pointer points to the top entry in the
8230 stack frame. The soft frame pointer to the bottom entry
8231 in the stack frame. If there is no stack frame at all,
8232 then they are identical. */
8233 if (call_saved_registers == 0 && stack_frame == 0)
8234 return 0;
8235 return - (call_saved_registers + stack_frame - 4);
8236
8237 case STACK_POINTER_REGNUM:
8238 return local_vars + outgoing_args;
8239
8240 default:
8241 abort ();
8242 }
8243 break;
8244
8245 default:
8246 /* You cannot eliminate from the stack pointer.
8247 In theory you could eliminate from the hard frame
8248 pointer to the stack pointer, but this will never
8249 happen, since if a stack frame is not needed the
8250 hard frame pointer will never be used. */
8251 abort ();
8252 }
8253 }
8254
8255 /* Calculate the size of the stack frame, taking into account any
8256 padding that is required to ensure stack-alignment. */
8257
8258 HOST_WIDE_INT
8259 arm_get_frame_size ()
8260 {
8261 int regno;
8262
8263 int base_size = ROUND_UP (get_frame_size ());
8264 int entry_size = 0;
8265 unsigned long func_type = arm_current_func_type ();
8266 int leaf;
8267
8268 if (! TARGET_ARM)
8269 abort ();
8270
8271 if (! TARGET_ATPCS)
8272 return base_size;
8273
8274 /* We need to know if we are a leaf function. Unfortunately, it
8275 is possible to be called after start_sequence has been called,
8276 which causes get_insns to return the insns for the sequence,
8277 not the function, causing leaf_function_p to return
8278 an incorrect result.
8279
8280 To work around this, we cache the computed frame size. This
8281 works because we will only be calling RTL expanders that need
8282 to know about leaf functions once reload has completed, and the
8283 frame size cannot be changed after that time, so we can safely
8284 use the cached value. */
8285
8286 if (reload_completed)
8287 return cfun->machine->frame_size;
8288
8289 leaf = leaf_function_p ();
8290
8291 /* A leaf function does not need any stack alignment if it has nothing
8292 on the stack. */
8293 if (leaf && base_size == 0)
8294 {
8295 cfun->machine->frame_size = 0;
8296 return 0;
8297 }
8298
8299 /* We know that SP will be word aligned on entry, and we must
8300 preserve that condition at any subroutine call. But those are
8301 the only constraints. */
8302
8303 /* Space for variadic functions. */
8304 if (current_function_pretend_args_size)
8305 entry_size += current_function_pretend_args_size;
8306
8307 /* Space for saved registers. */
8308 entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;
8309
8310 /* Space for saved FPA registers. */
8311 if (! IS_VOLATILE (func_type))
8312 {
8313 for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
8314 if (regs_ever_live[regno] && ! call_used_regs[regno])
8315 entry_size += 12;
8316 }
8317
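  /* The ATPCS requires the stack to be doubleword (8 byte) aligned, so if
     the combined size is only word aligned, pad the local variable area
     by one word.  For example (sizes illustrative): entry_size 20 +
     base_size 8 = 28, which the first test below rounds up to 32.  */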
8318 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
8319 base_size += 4;
8320 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
8321 abort ();
8322
8323 cfun->machine->frame_size = base_size;
8324
8325 return base_size;
8326 }
8327
8328 /* Generate the prologue instructions for entry into an ARM function. */
8329
8330 void
8331 arm_expand_prologue ()
8332 {
8333 int reg;
8334 rtx amount;
8335 rtx insn;
8336 rtx ip_rtx;
8337 unsigned long live_regs_mask;
8338 unsigned long func_type;
8339 int fp_offset = 0;
8340 int saved_pretend_args = 0;
8341 unsigned int args_to_push;
8342
8343 func_type = arm_current_func_type ();
8344
8345 /* Naked functions don't have prologues. */
8346 if (IS_NAKED (func_type))
8347 return;
8348
8349 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
8350 args_to_push = current_function_pretend_args_size;
8351
8352 /* Compute which registers we will have to save onto the stack. */
8353 live_regs_mask = arm_compute_save_reg_mask ();
8354
8355 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
8356
8357 if (frame_pointer_needed)
8358 {
8359 if (IS_INTERRUPT (func_type))
8360 {
8361 /* Interrupt functions must not corrupt any registers.
8362 Creating a frame pointer, however, corrupts the IP
8363 register, so we must push it first. */
8364 insn = emit_multi_reg_push (1 << IP_REGNUM);
8365
8366 /* Do not set RTX_FRAME_RELATED_P on this insn.
8367 The dwarf stack unwinding code only wants to see one
8368 stack decrement per function, and this is not it. If
8369 this instruction is labeled as being part of the frame
8370 creation sequence then dwarf2out_frame_debug_expr will
8371 abort when it encounters the assignment of IP to FP
8372 later on, since the use of SP here establishes SP as
8373 the CFA register and not IP.
8374
8375 Anyway this instruction is not really part of the stack
8376 frame creation although it is part of the prologue. */
8377 }
8378 else if (IS_NESTED (func_type))
8379 {
8380 /* The static chain register is the same as the IP register
8381 used as a scratch register during stack frame creation.
8382 To get around this, we need to find somewhere to store IP
8383 whilst the frame is being created. We try the following
8384 places in order:
8385
8386 1. The last argument register.
8387 2. A slot on the stack above the frame. (This only
8388 works if the function is not a varargs function).
8389 3. Register r3, after pushing the argument registers
8390 onto the stack.
8391
8392 Note - we only need to tell the dwarf2 backend about the SP
8393 adjustment in the second variant; the static chain register
8394 doesn't need to be unwound, as it doesn't contain a value
8395 inherited from the caller. */
8396
8397 if (regs_ever_live[3] == 0)
8398 {
8399 insn = gen_rtx_REG (SImode, 3);
8400 insn = gen_rtx_SET (SImode, insn, ip_rtx);
8401 insn = emit_insn (insn);
8402 }
8403 else if (args_to_push == 0)
8404 {
8405 rtx dwarf;
8406 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
8407 insn = gen_rtx_MEM (SImode, insn);
8408 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
8409 insn = emit_insn (insn);
8410
8411 fp_offset = 4;
8412
8413 /* Just tell the dwarf backend that we adjusted SP. */
8414 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8415 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8416 GEN_INT (-fp_offset)));
8417 RTX_FRAME_RELATED_P (insn) = 1;
8418 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8419 dwarf, REG_NOTES (insn));
8420 }
8421 else
8422 {
8423 /* Store the args on the stack. */
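	      /* The mask selects the argument registers holding the pretend
		 args.  For example, if args_to_push is 8 then
		 (0xf0 >> 2) & 0xf == 0xc, i.e. push {r2, r3}.  */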
8424 if (cfun->machine->uses_anonymous_args)
8425 insn = emit_multi_reg_push
8426 ((0xf0 >> (args_to_push / 4)) & 0xf);
8427 else
8428 insn = emit_insn
8429 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8430 GEN_INT (- args_to_push)));
8431
8432 RTX_FRAME_RELATED_P (insn) = 1;
8433
8434 saved_pretend_args = 1;
8435 fp_offset = args_to_push;
8436 args_to_push = 0;
8437
8438 /* Now reuse r3 to preserve IP. */
8439 insn = gen_rtx_REG (SImode, 3);
8440 insn = gen_rtx_SET (SImode, insn, ip_rtx);
8441 (void) emit_insn (insn);
8442 }
8443 }
8444
8445 if (fp_offset)
8446 {
8447 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
8448 insn = gen_rtx_SET (SImode, ip_rtx, insn);
8449 }
8450 else
8451 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
8452
8453 insn = emit_insn (insn);
8454 RTX_FRAME_RELATED_P (insn) = 1;
8455 }
8456
8457 if (args_to_push)
8458 {
8459 /* Push the argument registers, or reserve space for them. */
8460 if (cfun->machine->uses_anonymous_args)
8461 insn = emit_multi_reg_push
8462 ((0xf0 >> (args_to_push / 4)) & 0xf);
8463 else
8464 insn = emit_insn
8465 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8466 GEN_INT (- args_to_push)));
8467 RTX_FRAME_RELATED_P (insn) = 1;
8468 }
8469
8470 /* If this is an interrupt service routine, and the link register is
8471 going to be pushed, subtracting four now will mean that the
8472 function return can be done with a single instruction. */
8473 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
8474 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8475 {
8476 emit_insn (gen_rtx_SET (SImode,
8477 gen_rtx_REG (SImode, LR_REGNUM),
8478 gen_rtx_PLUS (SImode,
8479 gen_rtx_REG (SImode, LR_REGNUM),
8480 GEN_INT (-4))));
8481 }
8482
8483 if (live_regs_mask)
8484 {
8485 insn = emit_multi_reg_push (live_regs_mask);
8486 RTX_FRAME_RELATED_P (insn) = 1;
8487 }
8488
8489 if (! IS_VOLATILE (func_type))
8490 {
8491 /* Save any floating point call-saved registers used by this function. */
8492 if (arm_fpu_arch == FP_SOFT2)
8493 {
8494 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
8495 if (regs_ever_live[reg] && !call_used_regs[reg])
8496 {
8497 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
8498 insn = gen_rtx_MEM (XFmode, insn);
8499 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
8500 gen_rtx_REG (XFmode, reg)));
8501 RTX_FRAME_RELATED_P (insn) = 1;
8502 }
8503 }
8504 else
8505 {
8506 int start_reg = LAST_ARM_FP_REGNUM;
8507
8508 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
8509 {
8510 if (regs_ever_live[reg] && !call_used_regs[reg])
8511 {
8512 if (start_reg - reg == 3)
8513 {
8514 insn = emit_sfm (reg, 4);
8515 RTX_FRAME_RELATED_P (insn) = 1;
8516 start_reg = reg - 1;
8517 }
8518 }
8519 else
8520 {
8521 if (start_reg != reg)
8522 {
8523 insn = emit_sfm (reg + 1, start_reg - reg);
8524 RTX_FRAME_RELATED_P (insn) = 1;
8525 }
8526 start_reg = reg - 1;
8527 }
8528 }
8529
8530 if (start_reg != reg)
8531 {
8532 insn = emit_sfm (reg + 1, start_reg - reg);
8533 RTX_FRAME_RELATED_P (insn) = 1;
8534 }
8535 }
8536 }
8537
8538 if (frame_pointer_needed)
8539 {
8540 /* Create the new frame pointer. */
8541 insn = GEN_INT (-(4 + args_to_push + fp_offset));
8542 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
8543 RTX_FRAME_RELATED_P (insn) = 1;
8544
8545 if (IS_NESTED (func_type))
8546 {
8547 /* Recover the static chain register. */
8548 if (regs_ever_live [3] == 0
8549 || saved_pretend_args)
8550 insn = gen_rtx_REG (SImode, 3);
8551 else /* if (current_function_pretend_args_size == 0) */
8552 {
8553 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx, GEN_INT (4));
8554 insn = gen_rtx_MEM (SImode, insn);
8555 }
8556
8557 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
8558 /* Add a USE to stop propagate_one_insn() from barfing. */
8559 emit_insn (gen_prologue_use (ip_rtx));
8560 }
8561 }
8562
8563 amount = GEN_INT (-(arm_get_frame_size ()
8564 + current_function_outgoing_args_size));
8565
8566 if (amount != const0_rtx)
8567 {
8568 /* This add can produce multiple insns for a large constant, so we
8569 need to get tricky. */
8570 rtx last = get_last_insn ();
8571 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8572 amount));
8573 do
8574 {
8575 last = last ? NEXT_INSN (last) : get_insns ();
8576 RTX_FRAME_RELATED_P (last) = 1;
8577 }
8578 while (last != insn);
8579
8580 /* If the frame pointer is needed, emit a special barrier that
8581 will prevent the scheduler from moving stores to the frame
8582 before the stack adjustment. */
8583 if (frame_pointer_needed)
8584 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
8585 hard_frame_pointer_rtx));
8586 }
8587
8588 /* If we are profiling, make sure no instructions are scheduled before
8589 the call to mcount. Similarly if the user has requested no
8590 scheduling in the prologue. */
8591 if (current_function_profile || TARGET_NO_SCHED_PRO)
8592 emit_insn (gen_blockage ());
8593
8594 /* If the link register is being kept alive, with the return address in it,
8595 then make sure that it does not get reused by the ce2 pass. */
8596 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
8597 {
8598 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
8599 cfun->machine->lr_save_eliminated = 1;
8600 }
8601 }
8602 \f
8603 /* If CODE is 'd', then X is a condition operand and the instruction
8604 should only be executed if the condition is true.
8605 If CODE is 'D', then X is a condition operand and the instruction
8606 should only be executed if the condition is false: however, if the mode
8607 of the comparison is CCFPEmode, then always execute the instruction -- we
8608 do this because in these circumstances !GE does not necessarily imply LT;
8609 in these cases the instruction pattern will take care to make sure that
8610 an instruction containing %d will follow, thereby undoing the effects of
8611 doing this instruction unconditionally.
8612 If CODE is 'N' then X is a floating point operand that must be negated
8613 before output.
8614 If CODE is 'B' then output a bitwise inverted value of X (a const int).
8615 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
8616
8617 void
8618 arm_print_operand (stream, x, code)
8619 FILE * stream;
8620 rtx x;
8621 int code;
8622 {
8623 switch (code)
8624 {
8625 case '@':
8626 fputs (ASM_COMMENT_START, stream);
8627 return;
8628
8629 case '_':
8630 fputs (user_label_prefix, stream);
8631 return;
8632
8633 case '|':
8634 fputs (REGISTER_PREFIX, stream);
8635 return;
8636
8637 case '?':
8638 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
8639 {
8640 if (TARGET_THUMB || current_insn_predicate != NULL)
8641 abort ();
8642
8643 fputs (arm_condition_codes[arm_current_cc], stream);
8644 }
8645 else if (current_insn_predicate)
8646 {
8647 enum arm_cond_code code;
8648
8649 if (TARGET_THUMB)
8650 abort ();
8651
8652 code = get_arm_condition_code (current_insn_predicate);
8653 fputs (arm_condition_codes[code], stream);
8654 }
8655 return;
8656
8657 case 'N':
8658 {
8659 REAL_VALUE_TYPE r;
8660 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8661 r = REAL_VALUE_NEGATE (r);
8662 fprintf (stream, "%s", fp_const_from_val (&r));
8663 }
8664 return;
8665
8666 case 'B':
8667 if (GET_CODE (x) == CONST_INT)
8668 {
8669 HOST_WIDE_INT val;
8670 val = ARM_SIGN_EXTEND (~INTVAL (x));
8671 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
8672 }
8673 else
8674 {
8675 putc ('~', stream);
8676 output_addr_const (stream, x);
8677 }
8678 return;
8679
8680 case 'i':
8681 fprintf (stream, "%s", arithmetic_instr (x, 1));
8682 return;
8683
8684 case 'I':
8685 fprintf (stream, "%s", arithmetic_instr (x, 0));
8686 return;
8687
8688 case 'S':
8689 {
8690 HOST_WIDE_INT val;
8691 const char * shift = shift_op (x, &val);
8692
8693 if (shift)
8694 {
8695 fprintf (stream, ", %s ", shift_op (x, &val));
8696 if (val == -1)
8697 arm_print_operand (stream, XEXP (x, 1), 0);
8698 else
8699 {
8700 fputc ('#', stream);
8701 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
8702 }
8703 }
8704 }
8705 return;
8706
8707 /* An explanation of the 'Q', 'R' and 'H' register operands:
8708
8709 In a pair of registers containing a DI or DF value the 'Q'
8710 operand returns the register number of the register containing
8711 the least significant part of the value. The 'R' operand returns
8712 the register number of the register containing the most
8713 significant part of the value.
8714
8715 The 'H' operand returns the higher of the two register numbers.
8716 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
8717 same as the 'Q' operand, since the most significant part of the
8718 value is held in the lower number register. The reverse is true
8719 on systems where WORDS_BIG_ENDIAN is false.
8720
8721 The purpose of these operands is to distinguish between cases
8722 where the endian-ness of the values is important (for example
8723 when they are added together), and cases where the endian-ness
8724 is irrelevant, but the order of register operations is important.
8725 For example when loading a value from memory into a register
8726 pair, the endian-ness does not matter. Provided that the value
8727 from the lower memory address is put into the lower numbered
8728 register, and the value from the higher address is put into the
8729 higher numbered register, the load will work regardless of whether
8730 the value being loaded is big-wordian or little-wordian. The
8731 order of the two register loads can matter however, if the address
8732 of the memory location is actually held in one of the registers
8733 being overwritten by the load. */
8734 case 'Q':
8735 if (REGNO (x) > LAST_ARM_REGNUM)
8736 abort ();
8737 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
8738 return;
8739
8740 case 'R':
8741 if (REGNO (x) > LAST_ARM_REGNUM)
8742 abort ();
8743 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
8744 return;
8745
8746 case 'H':
8747 if (REGNO (x) > LAST_ARM_REGNUM)
8748 abort ();
8749 asm_fprintf (stream, "%r", REGNO (x) + 1);
8750 return;
8751
8752 case 'm':
8753 asm_fprintf (stream, "%r",
8754 GET_CODE (XEXP (x, 0)) == REG
8755 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
8756 return;
8757
8758 case 'M':
8759 asm_fprintf (stream, "{%r-%r}",
8760 REGNO (x),
8761 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
8762 return;
8763
8764 case 'd':
8765 /* CONST_TRUE_RTX means always -- that's the default. */
8766 if (x == const_true_rtx)
8767 return;
8768
8769 if (TARGET_ARM)
8770 fputs (arm_condition_codes[get_arm_condition_code (x)],
8771 stream);
8772 else
8773 fputs (thumb_condition_code (x, 0), stream);
8774 return;
8775
8776 case 'D':
8777 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
8778 want to do that. */
8779 if (x == const_true_rtx)
8780 abort ();
8781
8782 if (TARGET_ARM)
8783 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
8784 (get_arm_condition_code (x))],
8785 stream);
8786 else
8787 fputs (thumb_condition_code (x, 1), stream);
8788 return;
8789
8790 default:
8791 if (x == 0)
8792 abort ();
8793
8794 if (GET_CODE (x) == REG)
8795 asm_fprintf (stream, "%r", REGNO (x));
8796 else if (GET_CODE (x) == MEM)
8797 {
8798 output_memory_reference_mode = GET_MODE (x);
8799 output_address (XEXP (x, 0));
8800 }
8801 else if (GET_CODE (x) == CONST_DOUBLE)
8802 fprintf (stream, "#%s", fp_immediate_constant (x));
8803 else if (GET_CODE (x) == NEG)
8804 abort (); /* This should never happen now. */
8805 else
8806 {
8807 fputc ('#', stream);
8808 output_addr_const (stream, x);
8809 }
8810 }
8811 }
8812 \f
8813 #ifndef AOF_ASSEMBLER
8814 /* Target hook for assembling integer objects. The ARM version needs to
8815 handle word-sized values specially. */
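/* For example, under -fpic a word-sized LABEL_REF emitted into the
   constant table produces something like "\t.word\t.L3(GOTOFF)" (label
   name illustrative); symbols that do not qualify for GOTOFF get a
   (GOT) relocation instead.  */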
8816
8817 static bool
8818 arm_assemble_integer (x, size, aligned_p)
8819 rtx x;
8820 unsigned int size;
8821 int aligned_p;
8822 {
8823 if (size == UNITS_PER_WORD && aligned_p)
8824 {
8825 fputs ("\t.word\t", asm_out_file);
8826 output_addr_const (asm_out_file, x);
8827
8828 /* Mark symbols as position independent. We only do this in the
8829 .text segment, not in the .data segment. */
8830 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
8831 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
8832 {
8833 if (GET_CODE (x) == SYMBOL_REF
8834 && (CONSTANT_POOL_ADDRESS_P (x)
8835 || ENCODED_SHORT_CALL_ATTR_P (XSTR (x, 0))))
8836 fputs ("(GOTOFF)", asm_out_file);
8837 else if (GET_CODE (x) == LABEL_REF)
8838 fputs ("(GOTOFF)", asm_out_file);
8839 else
8840 fputs ("(GOT)", asm_out_file);
8841 }
8842 fputc ('\n', asm_out_file);
8843 return true;
8844 }
8845
8846 return default_assemble_integer (x, size, aligned_p);
8847 }
8848 #endif
8849 \f
8850 /* A finite state machine takes care of noticing whether or not instructions
8851 can be conditionally executed, thus decreasing execution time and code
8852 size by deleting branch instructions. The fsm is controlled by
8853 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
8854
8855 /* The states of the fsm controlling condition codes are:
8856 0: normal, do nothing special
8857 1: make ASM_OUTPUT_OPCODE not output this instruction
8858 2: make ASM_OUTPUT_OPCODE not output this instruction
8859 3: make instructions conditional
8860 4: make instructions conditional
8861
8862 State transitions (state->state by whom under condition):
8863 0 -> 1 final_prescan_insn if the `target' is a label
8864 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
8865 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
8866 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
8867 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
8868 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
8869 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
8870 (the target insn is arm_target_insn).
8871
8872 If the jump clobbers the conditions then we use states 2 and 4.
8873
8874 A similar thing can be done with conditional return insns.
8875
8876 XXX In case the `target' is an unconditional branch, this conditionalising
8877 of the instructions always reduces code size, but not always execution
8878 time. But then, I want to reduce the code size to somewhere near what
8879 /bin/cc produces. */
8880
8881 /* Returns the index of the ARM condition code string in
8882 `arm_condition_codes'. COMPARISON should be an rtx like
8883 `(eq (...) (...))'. */
8884
8885 static enum arm_cond_code
8886 get_arm_condition_code (comparison)
8887 rtx comparison;
8888 {
8889 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
8890 int code;
8891 enum rtx_code comp_code = GET_CODE (comparison);
8892
8893 if (GET_MODE_CLASS (mode) != MODE_CC)
8894 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
8895 XEXP (comparison, 1));
8896
8897 switch (mode)
8898 {
8899 case CC_DNEmode: code = ARM_NE; goto dominance;
8900 case CC_DEQmode: code = ARM_EQ; goto dominance;
8901 case CC_DGEmode: code = ARM_GE; goto dominance;
8902 case CC_DGTmode: code = ARM_GT; goto dominance;
8903 case CC_DLEmode: code = ARM_LE; goto dominance;
8904 case CC_DLTmode: code = ARM_LT; goto dominance;
8905 case CC_DGEUmode: code = ARM_CS; goto dominance;
8906 case CC_DGTUmode: code = ARM_HI; goto dominance;
8907 case CC_DLEUmode: code = ARM_LS; goto dominance;
8908 case CC_DLTUmode: code = ARM_CC;
8909
8910 dominance:
8911 if (comp_code != EQ && comp_code != NE)
8912 abort ();
8913
8914 if (comp_code == EQ)
8915 return ARM_INVERSE_CONDITION_CODE (code);
8916 return code;
8917
8918 case CC_NOOVmode:
8919 switch (comp_code)
8920 {
8921 case NE: return ARM_NE;
8922 case EQ: return ARM_EQ;
8923 case GE: return ARM_PL;
8924 case LT: return ARM_MI;
8925 default: abort ();
8926 }
8927
8928 case CC_Zmode:
8929 switch (comp_code)
8930 {
8931 case NE: return ARM_NE;
8932 case EQ: return ARM_EQ;
8933 default: abort ();
8934 }
8935
8936 case CCFPEmode:
8937 case CCFPmode:
8938 /* These encodings assume that AC=1 in the FPA system control
8939 byte. This allows us to handle all cases except UNEQ and
8940 LTGT. */
8941 switch (comp_code)
8942 {
8943 case GE: return ARM_GE;
8944 case GT: return ARM_GT;
8945 case LE: return ARM_LS;
8946 case LT: return ARM_MI;
8947 case NE: return ARM_NE;
8948 case EQ: return ARM_EQ;
8949 case ORDERED: return ARM_VC;
8950 case UNORDERED: return ARM_VS;
8951 case UNLT: return ARM_LT;
8952 case UNLE: return ARM_LE;
8953 case UNGT: return ARM_HI;
8954 case UNGE: return ARM_PL;
8955 /* UNEQ and LTGT do not have a representation. */
8956 case UNEQ: /* Fall through. */
8957 case LTGT: /* Fall through. */
8958 default: abort ();
8959 }
8960
8961 case CC_SWPmode:
8962 switch (comp_code)
8963 {
8964 case NE: return ARM_NE;
8965 case EQ: return ARM_EQ;
8966 case GE: return ARM_LE;
8967 case GT: return ARM_LT;
8968 case LE: return ARM_GE;
8969 case LT: return ARM_GT;
8970 case GEU: return ARM_LS;
8971 case GTU: return ARM_CC;
8972 case LEU: return ARM_CS;
8973 case LTU: return ARM_HI;
8974 default: abort ();
8975 }
8976
8977 case CC_Cmode:
8978 switch (comp_code)
8979 {
8980 case LTU: return ARM_CS;
8981 case GEU: return ARM_CC;
8982 default: abort ();
8983 }
8984
8985 case CCmode:
8986 switch (comp_code)
8987 {
8988 case NE: return ARM_NE;
8989 case EQ: return ARM_EQ;
8990 case GE: return ARM_GE;
8991 case GT: return ARM_GT;
8992 case LE: return ARM_LE;
8993 case LT: return ARM_LT;
8994 case GEU: return ARM_CS;
8995 case GTU: return ARM_HI;
8996 case LEU: return ARM_LS;
8997 case LTU: return ARM_CC;
8998 default: abort ();
8999 }
9000
9001 default: abort ();
9002 }
9003
9004 abort ();
9005 }
9006
9007
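/* Implement the conditional execution state machine described above:
   called by final for each insn, this decides whether an upcoming
   branch can be eliminated by conditionalising the instructions that
   it would have skipped.  */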
9008 void
9009 arm_final_prescan_insn (insn)
9010 rtx insn;
9011 {
9012 /* BODY will hold the body of INSN. */
9013 rtx body = PATTERN (insn);
9014
9015 /* This will be 1 if trying to repeat the trick, and things need to be
9016 reversed if it appears to fail. */
9017 int reverse = 0;
9018
9019 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
9020 a branch is taken, even if the rtl suggests otherwise. It also
9021 means that we have to grub around within the jump expression to find
9022 out what the conditions are when the jump isn't taken. */
9023 int jump_clobbers = 0;
9024
9025 /* If we start with a return insn, we only succeed if we find another one. */
9026 int seeking_return = 0;
9027
9028 /* START_INSN will hold the insn from where we start looking. This is the
9029 first insn after the following code_label if REVERSE is true. */
9030 rtx start_insn = insn;
9031
9032 /* If in state 4, check if the target branch is reached, in order to
9033 change back to state 0. */
9034 if (arm_ccfsm_state == 4)
9035 {
9036 if (insn == arm_target_insn)
9037 {
9038 arm_target_insn = NULL;
9039 arm_ccfsm_state = 0;
9040 }
9041 return;
9042 }
9043
9044 /* If in state 3, it is possible to repeat the trick, if this insn is an
9045 unconditional branch to a label, and immediately following this branch
9046 is the previous target label which is only used once, and the label this
9047 branch jumps to is not too far off. */
9048 if (arm_ccfsm_state == 3)
9049 {
9050 if (simplejump_p (insn))
9051 {
9052 start_insn = next_nonnote_insn (start_insn);
9053 if (GET_CODE (start_insn) == BARRIER)
9054 {
9055 /* XXX Isn't this always a barrier? */
9056 start_insn = next_nonnote_insn (start_insn);
9057 }
9058 if (GET_CODE (start_insn) == CODE_LABEL
9059 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
9060 && LABEL_NUSES (start_insn) == 1)
9061 reverse = TRUE;
9062 else
9063 return;
9064 }
9065 else if (GET_CODE (body) == RETURN)
9066 {
9067 start_insn = next_nonnote_insn (start_insn);
9068 if (GET_CODE (start_insn) == BARRIER)
9069 start_insn = next_nonnote_insn (start_insn);
9070 if (GET_CODE (start_insn) == CODE_LABEL
9071 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
9072 && LABEL_NUSES (start_insn) == 1)
9073 {
9074 reverse = TRUE;
9075 seeking_return = 1;
9076 }
9077 else
9078 return;
9079 }
9080 else
9081 return;
9082 }
9083
9084 if (arm_ccfsm_state != 0 && !reverse)
9085 abort ();
9086 if (GET_CODE (insn) != JUMP_INSN)
9087 return;
9088
9089 /* This jump might be paralleled with a clobber of the condition codes;
9090 the jump should always come first. */
9091 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
9092 body = XVECEXP (body, 0, 0);
9093
9094 #if 0
9095 /* If this is a conditional return then we don't want to know. */
9096 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
9097 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
9098 && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
9099 || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
9100 return;
9101 #endif
9102
9103 if (reverse
9104 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
9105 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
9106 {
9107 int insns_skipped;
9108 int fail = FALSE, succeed = FALSE;
9109 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
9110 int then_not_else = TRUE;
9111 rtx this_insn = start_insn, label = 0;
9112
9113 /* If the jump cannot be done with one instruction, we cannot
9114 conditionally execute the instruction in the inverse case. */
9115 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
9116 {
9117 jump_clobbers = 1;
9118 return;
9119 }
9120
9121 /* Register the insn jumped to. */
9122 if (reverse)
9123 {
9124 if (!seeking_return)
9125 label = XEXP (SET_SRC (body), 0);
9126 }
9127 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
9128 label = XEXP (XEXP (SET_SRC (body), 1), 0);
9129 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
9130 {
9131 label = XEXP (XEXP (SET_SRC (body), 2), 0);
9132 then_not_else = FALSE;
9133 }
9134 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
9135 seeking_return = 1;
9136 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
9137 {
9138 seeking_return = 1;
9139 then_not_else = FALSE;
9140 }
9141 else
9142 abort ();
9143
9144 /* See how many insns this branch skips, and what kind of insns. If all
9145 insns are okay, and the label or unconditional branch to the same
9146 label is not too far away, succeed. */
9147 for (insns_skipped = 0;
9148 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
9149 {
9150 rtx scanbody;
9151
9152 this_insn = next_nonnote_insn (this_insn);
9153 if (!this_insn)
9154 break;
9155
9156 switch (GET_CODE (this_insn))
9157 {
9158 case CODE_LABEL:
9159 /* Succeed if it is the target label, otherwise fail since
9160 control falls in from somewhere else. */
9161 if (this_insn == label)
9162 {
9163 if (jump_clobbers)
9164 {
9165 arm_ccfsm_state = 2;
9166 this_insn = next_nonnote_insn (this_insn);
9167 }
9168 else
9169 arm_ccfsm_state = 1;
9170 succeed = TRUE;
9171 }
9172 else
9173 fail = TRUE;
9174 break;
9175
9176 case BARRIER:
9177 /* Succeed if the following insn is the target label.
9178 Otherwise fail.
9179 If return insns are used then the last insn in a function
9180 will be a barrier. */
9181 this_insn = next_nonnote_insn (this_insn);
9182 if (this_insn && this_insn == label)
9183 {
9184 if (jump_clobbers)
9185 {
9186 arm_ccfsm_state = 2;
9187 this_insn = next_nonnote_insn (this_insn);
9188 }
9189 else
9190 arm_ccfsm_state = 1;
9191 succeed = TRUE;
9192 }
9193 else
9194 fail = TRUE;
9195 break;
9196
9197 case CALL_INSN:
9198 /* If using 32-bit addresses the cc is not preserved over
9199 calls. */
9200 if (TARGET_APCS_32)
9201 {
9202 /* Succeed if the following insn is the target label,
9203 or if the following two insns are a barrier and
9204 the target label. */
9205 this_insn = next_nonnote_insn (this_insn);
9206 if (this_insn && GET_CODE (this_insn) == BARRIER)
9207 this_insn = next_nonnote_insn (this_insn);
9208
9209 if (this_insn && this_insn == label
9210 && insns_skipped < max_insns_skipped)
9211 {
9212 if (jump_clobbers)
9213 {
9214 arm_ccfsm_state = 2;
9215 this_insn = next_nonnote_insn (this_insn);
9216 }
9217 else
9218 arm_ccfsm_state = 1;
9219 succeed = TRUE;
9220 }
9221 else
9222 fail = TRUE;
9223 }
9224 break;
9225
9226 case JUMP_INSN:
9227 /* If this is an unconditional branch to the same label, succeed.
9228 If it is to another label, do nothing. If it is conditional,
9229 fail. */
9230 /* XXX Probably, the tests for SET and the PC are unnecessary. */
9231
9232 scanbody = PATTERN (this_insn);
9233 if (GET_CODE (scanbody) == SET
9234 && GET_CODE (SET_DEST (scanbody)) == PC)
9235 {
9236 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
9237 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
9238 {
9239 arm_ccfsm_state = 2;
9240 succeed = TRUE;
9241 }
9242 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
9243 fail = TRUE;
9244 }
9245 /* Fail if a conditional return is undesirable (e.g. on a
9246 StrongARM), but still allow this if optimizing for size. */
9247 else if (GET_CODE (scanbody) == RETURN
9248 && !use_return_insn (TRUE)
9249 && !optimize_size)
9250 fail = TRUE;
9251 else if (GET_CODE (scanbody) == RETURN
9252 && seeking_return)
9253 {
9254 arm_ccfsm_state = 2;
9255 succeed = TRUE;
9256 }
9257 else if (GET_CODE (scanbody) == PARALLEL)
9258 {
9259 switch (get_attr_conds (this_insn))
9260 {
9261 case CONDS_NOCOND:
9262 break;
9263 default:
9264 fail = TRUE;
9265 break;
9266 }
9267 }
9268 else
9269 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
9270
9271 break;
9272
9273 case INSN:
9274 /* Instructions using or affecting the condition codes make it
9275 fail. */
9276 scanbody = PATTERN (this_insn);
9277 if (!(GET_CODE (scanbody) == SET
9278 || GET_CODE (scanbody) == PARALLEL)
9279 || get_attr_conds (this_insn) != CONDS_NOCOND)
9280 fail = TRUE;
9281 break;
9282
9283 default:
9284 break;
9285 }
9286 }
9287 if (succeed)
9288 {
9289 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
9290 arm_target_label = CODE_LABEL_NUMBER (label);
9291 else if (seeking_return || arm_ccfsm_state == 2)
9292 {
9293 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
9294 {
9295 this_insn = next_nonnote_insn (this_insn);
9296 if (this_insn && (GET_CODE (this_insn) == BARRIER
9297 || GET_CODE (this_insn) == CODE_LABEL))
9298 abort ();
9299 }
9300 if (!this_insn)
9301 {
9302 /* Oh dear! We ran off the end... give up. */
9303 recog (PATTERN (insn), insn, NULL);
9304 arm_ccfsm_state = 0;
9305 arm_target_insn = NULL;
9306 return;
9307 }
9308 arm_target_insn = this_insn;
9309 }
9310 else
9311 abort ();
9312 if (jump_clobbers)
9313 {
9314 if (reverse)
9315 abort ();
9316 arm_current_cc =
9317 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
9318 0), 0), 1));
9319 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
9320 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9321 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
9322 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9323 }
9324 else
9325 {
9326 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
9327 what it was. */
9328 if (!reverse)
9329 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
9330 0));
9331 }
9332
9333 if (reverse || then_not_else)
9334 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9335 }
9336
9337 /* Restore recog_data (getting the attributes of other insns can
9338 destroy this array, but final.c assumes that it remains intact
9339 across this call; since the insn has been recognized already we
9340 call recog direct). */
9341 recog (PATTERN (insn), insn, NULL);
9342 }
9343 }
9344
9345 /* Returns true if REGNO is a valid register
9346 for holding a quantity of type MODE. */
9347
9348 int
9349 arm_hard_regno_mode_ok (regno, mode)
9350 unsigned int regno;
9351 enum machine_mode mode;
9352 {
9353 if (GET_MODE_CLASS (mode) == MODE_CC)
9354 return regno == CC_REGNUM;
9355
9356 if (TARGET_THUMB)
9357 /* For the Thumb we only allow values bigger than SImode in
9358 registers 0 - 6, so that there is always a second low
9359 register available to hold the upper part of the value.
9360 We probably ought to ensure that the register is the
9361 start of an even numbered register pair. */
9362 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
9363
9364 if (regno <= LAST_ARM_REGNUM)
9365 /* We allow any value to be stored in the general registers. */
9366 return 1;
9367
9368 if ( regno == FRAME_POINTER_REGNUM
9369 || regno == ARG_POINTER_REGNUM)
9370 /* We only allow integers in the fake hard registers. */
9371 return GET_MODE_CLASS (mode) == MODE_INT;
9372
9373 /* The only registers left are the FPU registers
9374 which we only allow to hold FP values. */
9375 return GET_MODE_CLASS (mode) == MODE_FLOAT
9376 && regno >= FIRST_ARM_FP_REGNUM
9377 && regno <= LAST_ARM_FP_REGNUM;
9378 }
9379
9380 int
9381 arm_regno_class (regno)
9382 int regno;
9383 {
9384 if (TARGET_THUMB)
9385 {
9386 if (regno == STACK_POINTER_REGNUM)
9387 return STACK_REG;
9388 if (regno == CC_REGNUM)
9389 return CC_REG;
9390 if (regno < 8)
9391 return LO_REGS;
9392 return HI_REGS;
9393 }
9394
9395 if ( regno <= LAST_ARM_REGNUM
9396 || regno == FRAME_POINTER_REGNUM
9397 || regno == ARG_POINTER_REGNUM)
9398 return GENERAL_REGS;
9399
9400 if (regno == CC_REGNUM)
9401 return NO_REGS;
9402
9403 return FPU_REGS;
9404 }
9405
9406 /* Handle a special case when computing the offset
9407 of an argument from the frame pointer. */
9408
9409 int
9410 arm_debugger_arg_offset (value, addr)
9411 int value;
9412 rtx addr;
9413 {
9414 rtx insn;
9415
9416 /* We are only interested if dbxout_parms() failed to compute the offset. */
9417 if (value != 0)
9418 return 0;
9419
9420 /* We can only cope with the case where the address is held in a register. */
9421 if (GET_CODE (addr) != REG)
9422 return 0;
9423
9424 /* If we are using the frame pointer to point at the argument, then
9425 an offset of 0 is correct. */
9426 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
9427 return 0;
9428
9429 /* If we are using the stack pointer to point at the
9430 argument, then an offset of 0 is correct. */
9431 if ((TARGET_THUMB || !frame_pointer_needed)
9432 && REGNO (addr) == SP_REGNUM)
9433 return 0;
9434
9435 /* Oh dear. The argument is pointed to by a register rather
9436 than being held in a register, or being stored at a known
9437 offset from the frame pointer. Since GDB only understands
9438 those two kinds of argument we must translate the address
9439 held in the register into an offset from the frame pointer.
9440 We do this by searching through the insns for the function
9441 looking to see where this register gets its value. If the
9442 register is initialized from the frame pointer plus an offset
9443 then we are in luck and we can continue, otherwise we give up.
9444
9445 This code is exercised by producing debugging information
9446 for a function with arguments like this:
9447
9448 double func (double a, double b, int c, double d) {return d;}
9449
9450 Without this code the stab for parameter 'd' will be set to
9451 an offset of 0 from the frame pointer, rather than 8. */
9452
9453 /* The if() statement says:
9454
9455 If the insn is a normal instruction
9456 and if the insn is setting the value in a register
9457 and if the register being set is the register holding the address of the argument
9458 and if the address is computed by an addition
9459 that involves adding to a register
9460 which is the frame pointer
9461 a constant integer
9462
9463 then... */
9464
9465 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9466 {
9467 if ( GET_CODE (insn) == INSN
9468 && GET_CODE (PATTERN (insn)) == SET
9469 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
9470 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
9471 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
9472 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
9473 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
9474 )
9475 {
9476 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
9477
9478 break;
9479 }
9480 }
9481
9482 if (value == 0)
9483 {
9484 debug_rtx (addr);
9485 warning ("unable to compute real location of stacked parameter");
9486 value = 8; /* XXX magic hack */
9487 }
9488
9489 return value;
9490 }
9491
9492 #define def_builtin(NAME, TYPE, CODE) \
9493 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE)
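/* For example, the def_builtin call below declares "int __builtin_clz (int)"
   as a machine-specific builtin with function code ARM_BUILTIN_CLZ; calls
   to it are expanded by arm_expand_builtin.  */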
9494
9495 void
9496 arm_init_builtins ()
9497 {
9498 tree endlink = void_list_node;
9499 tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
9500 tree pchar_type_node = build_pointer_type (char_type_node);
9501
9502 tree int_ftype_int, void_ftype_pchar;
9503
9504 /* void func (char *) */
9505 void_ftype_pchar
9506 = build_function_type_list (void_type_node, pchar_type_node, NULL_TREE);
9507
9508 /* int func (int) */
9509 int_ftype_int
9510 = build_function_type (integer_type_node, int_endlink);
9511
9512 /* Initialize arm V5 builtins. */
9513 if (arm_arch5)
9514 def_builtin ("__builtin_clz", int_ftype_int, ARM_BUILTIN_CLZ);
9515 }
9516
9517 /* Expand an expression EXP that calls a built-in function,
9518 with result going to TARGET if that's convenient
9519 (and in mode MODE if that's convenient).
9520 SUBTARGET may be used as the target for computing one of EXP's operands.
9521 IGNORE is nonzero if the value is to be ignored. */
9522
9523 rtx
9524 arm_expand_builtin (exp, target, subtarget, mode, ignore)
9525 tree exp;
9526 rtx target;
9527 rtx subtarget ATTRIBUTE_UNUSED;
9528 enum machine_mode mode ATTRIBUTE_UNUSED;
9529 int ignore ATTRIBUTE_UNUSED;
9530 {
9531 enum insn_code icode;
9532 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9533 tree arglist = TREE_OPERAND (exp, 1);
9534 tree arg0;
9535 rtx op0, pat;
9536 enum machine_mode tmode, mode0;
9537 int fcode = DECL_FUNCTION_CODE (fndecl);
9538
9539 switch (fcode)
9540 {
9541 default:
9542 break;
9543
9544 case ARM_BUILTIN_CLZ:
9545 icode = CODE_FOR_clz;
9546 arg0 = TREE_VALUE (arglist);
9547 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
9548 tmode = insn_data[icode].operand[0].mode;
9549 mode0 = insn_data[icode].operand[1].mode;
9550
9551 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9552 op0 = copy_to_mode_reg (mode0, op0);
9553 if (target == 0
9554 || GET_MODE (target) != tmode
9555 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9556 target = gen_reg_rtx (tmode);
9557 pat = GEN_FCN (icode) (target, op0);
9558 if (! pat)
9559 return 0;
9560 emit_insn (pat);
9561 return target;
9562 }
9563
9564 /* @@@ Should really do something sensible here. */
9565 return NULL_RTX;
9566 }
9567 \f
9568 /* Recursively search through all of the blocks in a function
9569 checking to see if any of the variables created in that
9570 function match the RTX called 'orig'. If they do then
9571 replace them with the RTX called 'new'. */
9572
9573 static void
9574 replace_symbols_in_block (block, orig, new)
9575 tree block;
9576 rtx orig;
9577 rtx new;
9578 {
9579 for (; block; block = BLOCK_CHAIN (block))
9580 {
9581 tree sym;
9582
9583 if (!TREE_USED (block))
9584 continue;
9585
9586 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
9587 {
9588 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
9589 || DECL_IGNORED_P (sym)
9590 || TREE_CODE (sym) != VAR_DECL
9591 || DECL_EXTERNAL (sym)
9592 || !rtx_equal_p (DECL_RTL (sym), orig)
9593 )
9594 continue;
9595
9596 SET_DECL_RTL (sym, new);
9597 }
9598
9599 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
9600 }
9601 }
9602
9603 /* Return the number (counting from 0) of
9604 the least significant set bit in MASK. */
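/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3
   is the least significant bit set in 0b11000.  */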
9605
9606 #ifdef __GNUC__
9607 inline
9608 #endif
9609 static int
9610 number_of_first_bit_set (mask)
9611 int mask;
9612 {
9613 int bit;
9614
9615 for (bit = 0;
9616 (mask & (1 << bit)) == 0;
9617 ++bit)
9618 continue;
9619
9620 return bit;
9621 }
9622
9623 /* Generate code to return from a thumb function.
9624 If 'reg_containing_return_addr' is -1, then the return address is
9625 actually on the stack, at the stack pointer. */
9626 static void
9627 thumb_exit (f, reg_containing_return_addr, eh_ofs)
9628 FILE * f;
9629 int reg_containing_return_addr;
9630 rtx eh_ofs;
9631 {
9632 unsigned regs_available_for_popping;
9633 unsigned regs_to_pop;
9634 int pops_needed;
9635 unsigned available;
9636 unsigned required;
9637 int mode;
9638 int size;
9639 int restore_a4 = FALSE;
9640
9641 /* Compute the registers we need to pop. */
9642 regs_to_pop = 0;
9643 pops_needed = 0;
9644
9645 /* There is an assumption here that, if eh_ofs is not NULL, the
9646 normal return address will have been pushed. */
9647 if (reg_containing_return_addr == -1 || eh_ofs)
9648 {
9649 /* When we are generating a return for __builtin_eh_return,
9650 reg_containing_return_addr must specify the return regno. */
9651 if (eh_ofs && reg_containing_return_addr == -1)
9652 abort ();
9653
9654 regs_to_pop |= 1 << LR_REGNUM;
9655 ++pops_needed;
9656 }
9657
9658 if (TARGET_BACKTRACE)
9659 {
9660 /* Restore the (ARM) frame pointer and stack pointer. */
9661 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
9662 pops_needed += 2;
9663 }
9664
9665 /* If there is nothing to pop then just emit the BX instruction and
9666 return. */
9667 if (pops_needed == 0)
9668 {
9669 if (eh_ofs)
9670 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9671
9672 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9673 return;
9674 }
9675 /* Otherwise, if we are not supporting interworking, have not created
9676 a backtrace structure, and the function was not entered in ARM mode,
9677 then just pop the return address straight into the PC. */
9678 else if (!TARGET_INTERWORK
9679 && !TARGET_BACKTRACE
9680 && !is_called_in_ARM_mode (current_function_decl))
9681 {
9682 if (eh_ofs)
9683 {
9684 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
9685 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9686 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9687 }
9688 else
9689 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
9690
9691 return;
9692 }
9693
9694 /* Find out how many of the (return) argument registers we can corrupt. */
9695 regs_available_for_popping = 0;
9696
9697 /* If returning via __builtin_eh_return, the bottom three registers
9698 all contain information needed for the return. */
9699 if (eh_ofs)
9700 size = 12;
9701 else
9702 {
9703 #ifdef RTX_CODE
9704 /* We can deduce the registers used from the function's
9705 return value. This is more reliable than examining
9706 regs_ever_live[] because that will be set if the register is
9707 ever used in the function, not just if the register is used
9708 to hold a return value. */
9709
9710 if (current_function_return_rtx != 0)
9711 mode = GET_MODE (current_function_return_rtx);
9712 else
9713 #endif
9714 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9715
9716 size = GET_MODE_SIZE (mode);
9717
9718 if (size == 0)
9719 {
9720 /* In a void function we can use any argument register.
9721 In a function that returns a structure on the stack
9722 we can use the second and third argument registers. */
9723 if (mode == VOIDmode)
9724 regs_available_for_popping =
9725 (1 << ARG_REGISTER (1))
9726 | (1 << ARG_REGISTER (2))
9727 | (1 << ARG_REGISTER (3));
9728 else
9729 regs_available_for_popping =
9730 (1 << ARG_REGISTER (2))
9731 | (1 << ARG_REGISTER (3));
9732 }
9733 else if (size <= 4)
9734 regs_available_for_popping =
9735 (1 << ARG_REGISTER (2))
9736 | (1 << ARG_REGISTER (3));
9737 else if (size <= 8)
9738 regs_available_for_popping =
9739 (1 << ARG_REGISTER (3));
9740 }
9741
9742 /* Match registers to be popped with registers into which we pop them. */
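/* The (X & -X) idiom below isolates the lowest set bit of X, so each
   iteration retires one required register together with one register
   that is available to receive it; e.g. for X = 0x0c the first
   iteration clears bit 2 and the next clears bit 3.  */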
9743 for (available = regs_available_for_popping,
9744 required = regs_to_pop;
9745 required != 0 && available != 0;
9746 available &= ~(available & - available),
9747 required &= ~(required & - required))
9748 --pops_needed;
9749
9750 /* If we have any popping registers left over, remove them. */
9751 if (available > 0)
9752 regs_available_for_popping &= ~available;
9753
9754 /* Otherwise if we need another popping register we can use
9755 the fourth argument register. */
9756 else if (pops_needed)
9757 {
9758 /* If we have not found any free argument registers and
9759 reg a4 contains the return address, we must move it. */
9760 if (regs_available_for_popping == 0
9761 && reg_containing_return_addr == LAST_ARG_REGNUM)
9762 {
9763 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
9764 reg_containing_return_addr = LR_REGNUM;
9765 }
9766 else if (size > 12)
9767 {
9768 /* Register a4 is being used to hold part of the return value,
9769 but we have dire need of a free, low register. */
9770 restore_a4 = TRUE;
9771
9772 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
9773 }
9774
9775 if (reg_containing_return_addr != LAST_ARG_REGNUM)
9776 {
9777 /* The fourth argument register is available. */
9778 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
9779
9780 --pops_needed;
9781 }
9782 }
9783
9784 /* Pop as many registers as we can. */
9785 thumb_pushpop (f, regs_available_for_popping, FALSE);
9786
9787 /* Process the registers we popped. */
9788 if (reg_containing_return_addr == -1)
9789 {
9790 /* The return address was popped into the lowest numbered register. */
9791 regs_to_pop &= ~(1 << LR_REGNUM);
9792
9793 reg_containing_return_addr =
9794 number_of_first_bit_set (regs_available_for_popping);
9795
9796 /* Remove this register from the mask of available registers, so that
9797 the return address will not be corrupted by further pops. */
9798 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
9799 }
9800
9801 /* If we popped other registers then handle them here. */
9802 if (regs_available_for_popping)
9803 {
9804 int frame_pointer;
9805
9806 /* Work out which register currently contains the frame pointer. */
9807 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
9808
9809 /* Move it into the correct place. */
9810 asm_fprintf (f, "\tmov\t%r, %r\n",
9811 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
9812
9813 /* (Temporarily) remove it from the mask of popped registers. */
9814 regs_available_for_popping &= ~(1 << frame_pointer);
9815 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
9816
9817 if (regs_available_for_popping)
9818 {
9819 int stack_pointer;
9820
9821 /* We popped the stack pointer as well,
9822 find the register that contains it. */
9823 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
9824
9825 /* Move it into the stack register. */
9826 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
9827
9828 /* At this point we have popped all necessary registers, so
9829 do not worry about restoring regs_available_for_popping
9830 to its correct value:
9831
9832 assert (pops_needed == 0)
9833 assert (regs_available_for_popping == (1 << frame_pointer))
9834 assert (regs_to_pop == (1 << STACK_POINTER)) */
9835 }
9836 else
9837 {
9838 /* Since we have just moved the popped value into the frame
9839 pointer, the popping register is available for reuse, and
9840 we know that we still have the stack pointer left to pop. */
9841 regs_available_for_popping |= (1 << frame_pointer);
9842 }
9843 }
9844
9845 /* If we still have registers left on the stack, but we no longer have
9846 any registers into which we can pop them, then we must move the return
9847 address into the link register and make available the register that
9848 contained it. */
9849 if (regs_available_for_popping == 0 && pops_needed > 0)
9850 {
9851 regs_available_for_popping |= 1 << reg_containing_return_addr;
9852
9853 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
9854 reg_containing_return_addr);
9855
9856 reg_containing_return_addr = LR_REGNUM;
9857 }
9858
9859 /* If we have registers left on the stack then pop some more.
9860 We know that at most we will want to pop FP and SP. */
9861 if (pops_needed > 0)
9862 {
9863 int popped_into;
9864 int move_to;
9865
9866 thumb_pushpop (f, regs_available_for_popping, FALSE);
9867
9868 /* We have popped either FP or SP.
9869 Move whichever one it is into the correct register. */
9870 popped_into = number_of_first_bit_set (regs_available_for_popping);
9871 move_to = number_of_first_bit_set (regs_to_pop);
9872
9873 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
9874
9875 regs_to_pop &= ~(1 << move_to);
9876
9877 --pops_needed;
9878 }
9879
9880 /* If we still have not popped everything then we must have only
9881 had one register available to us and we are now popping the SP. */
9882 if (pops_needed > 0)
9883 {
9884 int popped_into;
9885
9886 thumb_pushpop (f, regs_available_for_popping, FALSE);
9887
9888 popped_into = number_of_first_bit_set (regs_available_for_popping);
9889
9890 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
9891 /*
9892 assert (regs_to_pop == (1 << STACK_POINTER))
9893 assert (pops_needed == 1)
9894 */
9895 }
9896
9897 /* If necessary restore the a4 register. */
9898 if (restore_a4)
9899 {
9900 if (reg_containing_return_addr != LR_REGNUM)
9901 {
9902 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
9903 reg_containing_return_addr = LR_REGNUM;
9904 }
9905
9906 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
9907 }
9908
9909 if (eh_ofs)
9910 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9911
9912 /* Return to caller. */
9913 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9914 }
9915
9916 /* Emit code to push or pop registers to or from the stack. */
9917
9918 static void
9919 thumb_pushpop (f, mask, push)
9920 FILE * f;
9921 int mask;
9922 int push;
9923 {
9924 int regno;
9925 int lo_mask = mask & 0xFF;
9926
9927 if (lo_mask == 0 && !push && (mask & (1 << 15)))
9928 {
9929 /* Special case. Do not generate a POP PC statement here; do it in
9930 thumb_exit(). */
9931 thumb_exit (f, -1, NULL_RTX);
9932 return;
9933 }
9934
9935 fprintf (f, "\t%s\t{", push ? "push" : "pop");
9936
9937 /* Look at the low registers first. */
9938 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
9939 {
9940 if (lo_mask & 1)
9941 {
9942 asm_fprintf (f, "%r", regno);
9943
9944 if ((lo_mask & ~1) != 0)
9945 fprintf (f, ", ");
9946 }
9947 }
9948
9949 if (push && (mask & (1 << LR_REGNUM)))
9950 {
9951 /* Catch pushing the LR. */
9952 if (mask & 0xFF)
9953 fprintf (f, ", ");
9954
9955 asm_fprintf (f, "%r", LR_REGNUM);
9956 }
9957 else if (!push && (mask & (1 << PC_REGNUM)))
9958 {
9959 /* Catch popping the PC. */
9960 if (TARGET_INTERWORK || TARGET_BACKTRACE)
9961 {
9962 /* The PC is never popped directly; instead
9963 it is popped into r3 and then BX is used. */
9964 fprintf (f, "}\n");
9965
9966 thumb_exit (f, -1, NULL_RTX);
9967
9968 return;
9969 }
9970 else
9971 {
9972 if (mask & 0xFF)
9973 fprintf (f, ", ");
9974
9975 asm_fprintf (f, "%r", PC_REGNUM);
9976 }
9977 }
9978
9979 fprintf (f, "}\n");
9980 }
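
/* As an illustration, thumb_pushpop (f, 0x4030, TRUE), where 0x4030
   selects r4, r5 and lr (bit 14), emits:

	push	{r4, r5, lr}

   A pop of the PC, by contrast, may be diverted to thumb_exit ()
   as handled above.  */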
9981 \f
9982 void
9983 thumb_final_prescan_insn (insn)
9984 rtx insn;
9985 {
9986 if (flag_print_asm_name)
9987 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
9988 INSN_ADDRESSES (INSN_UID (insn)));
9989 }
9990
9991 int
9992 thumb_shiftable_const (val)
9993 unsigned HOST_WIDE_INT val;
9994 {
9995 unsigned HOST_WIDE_INT mask = 0xff;
9996 int i;
9997
9998 if (val == 0) /* XXX */
9999 return 0;
10000
10001 for (i = 0; i < 25; i++)
10002 if ((val & (mask << i)) == val)
10003 return 1;
10004
10005 return 0;
10006 }
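
/* For example, 0x1fe0 (0xff << 5) is accepted, since it can be built
   with an 8-bit move followed by a left shift, whereas 0x101 is
   rejected: its set bits span nine positions and cannot fit in any
   single shifted 8-bit window.  */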
10007
10008 /* Returns nonzero if the current function contains,
10009 or might contain, a far jump. */
10010
10011 int
10012 thumb_far_jump_used_p (in_prologue)
10013 int in_prologue;
10014 {
10015 rtx insn;
10016
10017 /* This test is only important for leaf functions. */
10018 /* assert (!leaf_function_p ()); */
10019
10020 /* If we have already decided that far jumps may be used,
10021 do not bother checking again, and always return true even if
10022 it turns out that they are not being used. Once we have made
10023 the decision that far jumps are present (and that hence the link
10024 register will be pushed onto the stack) we cannot go back on it. */
10025 if (cfun->machine->far_jump_used)
10026 return 1;
10027
10028 /* If this function is not being called from the prologue/epilogue
10029 generation code then it must be being called from the
10030 INITIAL_ELIMINATION_OFFSET macro. */
10031 if (!in_prologue)
10032 {
10033 /* In this case we know that we are being asked about the elimination
10034 of the arg pointer register. If that register is not being used,
10035 then there are no arguments on the stack, and we do not have to
10036 worry that a far jump might force the prologue to push the link
10037 register, changing the stack offsets. In this case we can just
10038 return false, since the presence of far jumps in the function will
10039 not affect stack offsets.
10040
10041 If the arg pointer is live (or if it was live, but has now been
10042 eliminated and so set to dead) then we do have to test to see if
10043 the function might contain a far jump. This test can lead to some
10044 false negatives, since before reload is completed, the length of
10045 branch instructions is not known, so gcc defaults to returning their
10046 longest length, which in turn sets the far jump attribute to true.
10047
10048 A false negative will not result in bad code being generated, but it
10049 will result in a needless push and pop of the link register. We
10050 hope that this does not occur too often. */
10051 if (regs_ever_live [ARG_POINTER_REGNUM])
10052 cfun->machine->arg_pointer_live = 1;
10053 else if (!cfun->machine->arg_pointer_live)
10054 return 0;
10055 }
10056
10057 /* Check to see if the function contains a branch
10058 insn with the far jump attribute set. */
10059 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10060 {
10061 if (GET_CODE (insn) == JUMP_INSN
10062 /* Ignore tablejump patterns. */
10063 && GET_CODE (PATTERN (insn)) != ADDR_VEC
10064 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
10065 && get_attr_far_jump (insn) == FAR_JUMP_YES
10066 )
10067 {
10068 /* Record the fact that we have decided that
10069 the function does use far jumps. */
10070 cfun->machine->far_jump_used = 1;
10071 return 1;
10072 }
10073 }
10074
10075 return 0;
10076 }
10077
10078 /* Return nonzero if FUNC must be entered in ARM mode. */
10079
10080 int
10081 is_called_in_ARM_mode (func)
10082 tree func;
10083 {
10084 if (TREE_CODE (func) != FUNCTION_DECL)
10085 abort ();
10086
10087 /* Ignore the problem of functions whose address is taken. */
10088 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
10089 return TRUE;
10090
10091 #ifdef ARM_PE
10092 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
10093 #else
10094 return FALSE;
10095 #endif
10096 }
10097
10098 /* The bits which aren't usefully expanded as rtl. */
10099
10100 const char *
10101 thumb_unexpanded_epilogue ()
10102 {
10103 int regno;
10104 int live_regs_mask = 0;
10105 int high_regs_pushed = 0;
10106 int leaf_function = leaf_function_p ();
10107 int had_to_push_lr;
10108 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
10109
10110 if (return_used_this_function)
10111 return "";
10112
10113 if (IS_NAKED (arm_current_func_type ()))
10114 return "";
10115
10116 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10117 if (THUMB_REG_PUSHED_P (regno))
10118 live_regs_mask |= 1 << regno;
10119
10120 for (regno = 8; regno < 13; regno++)
10121 if (THUMB_REG_PUSHED_P (regno))
10122 high_regs_pushed++;
10123
10124 /* The prologue may have pushed some high registers to use as
10125 work registers; e.g. the testsuite file:
10126 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
10127 compiles to produce:
10128 push {r4, r5, r6, r7, lr}
10129 mov r7, r9
10130 mov r6, r8
10131 push {r6, r7}
10132 as part of the prologue. We have to undo that pushing here. */
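
/* For the push sequence above, and assuming r3 and r4 are the free
   low registers, the loop below emits the matching undo sequence:

	pop	{r3, r4}
	mov	r8, r3
	mov	r9, r4  */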
10133
10134 if (high_regs_pushed)
10135 {
10136 int mask = live_regs_mask;
10137 int next_hi_reg;
10138 int size;
10139 int mode;
10140
10141 #ifdef RTX_CODE
10142 /* We can deduce the registers used from the function's return value.
10143 This is more reliable than examining regs_ever_live[] because that
10144 will be set if the register is ever used in the function, not just if
10145 the register is used to hold a return value. */
10146
10147 if (current_function_return_rtx != 0)
10148 mode = GET_MODE (current_function_return_rtx);
10149 else
10150 #endif
10151 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10152
10153 size = GET_MODE_SIZE (mode);
10154
10155 /* Unless we are returning a type of size > 12, register r3 is
10156 available. */
10157 if (size < 13)
10158 mask |= 1 << 3;
10159
10160 if (mask == 0)
10161 /* Oh dear! We have no low registers into which we can pop
10162 high registers! */
10163 internal_error
10164 ("no low registers available for popping high registers");
10165
10166 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
10167 if (THUMB_REG_PUSHED_P (next_hi_reg))
10168 break;
10169
10170 while (high_regs_pushed)
10171 {
10172 /* Find lo register(s) into which the high register(s) can
10173 be popped. */
10174 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10175 {
10176 if (mask & (1 << regno))
10177 high_regs_pushed--;
10178 if (high_regs_pushed == 0)
10179 break;
10180 }
10181
10182 mask &= (2 << regno) - 1; /* A no-op if regno == 8. */
10183
10184 /* Pop the values into the low register(s). */
10185 thumb_pushpop (asm_out_file, mask, 0);
10186
10187 /* Move the value(s) into the high registers. */
10188 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10189 {
10190 if (mask & (1 << regno))
10191 {
10192 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
10193 regno);
10194
10195 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
10196 if (THUMB_REG_PUSHED_P (next_hi_reg))
10197 break;
10198 }
10199 }
10200 }
10201 }
10202
10203 had_to_push_lr = (live_regs_mask || !leaf_function
10204 || thumb_far_jump_used_p (1));
10205
10206 if (TARGET_BACKTRACE
10207 && ((live_regs_mask & 0xFF) == 0)
10208 && regs_ever_live [LAST_ARG_REGNUM] != 0)
10209 {
10210 /* The stack backtrace structure creation code had to
10211 push R7 in order to get a work register, so we pop
10212 it now. */
10213 live_regs_mask |= (1 << LAST_LO_REGNUM);
10214 }
10215
10216 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
10217 {
10218 if (had_to_push_lr
10219 && !is_called_in_ARM_mode (current_function_decl)
10220 && !eh_ofs)
10221 live_regs_mask |= 1 << PC_REGNUM;
10222
10223 /* Either no argument registers were pushed or a backtrace
10224 structure was created which includes an adjusted stack
10225 pointer, so just pop everything. */
10226 if (live_regs_mask)
10227 thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
10228
10229 if (eh_ofs)
10230 thumb_exit (asm_out_file, 2, eh_ofs);
10231 /* We have either just popped the return address into the
10232 PC, or it was kept in LR for the entire function, or
10233 it is still on the stack because we do not want to
10234 return by doing a pop {pc}. */
10235 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
10236 thumb_exit (asm_out_file,
10237 (had_to_push_lr
10238 && is_called_in_ARM_mode (current_function_decl)) ?
10239 -1 : LR_REGNUM, NULL_RTX);
10240 }
10241 else
10242 {
10243 /* Pop everything but the return address. */
10244 live_regs_mask &= ~(1 << PC_REGNUM);
10245
10246 if (live_regs_mask)
10247 thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
10248
10249 if (had_to_push_lr)
10250 /* Get the return address into a temporary register. */
10251 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0);
10252
10253 /* Remove the argument registers that were pushed onto the stack. */
10254 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
10255 SP_REGNUM, SP_REGNUM,
10256 current_function_pretend_args_size);
10257
10258 if (eh_ofs)
10259 thumb_exit (asm_out_file, 2, eh_ofs);
10260 else
10261 thumb_exit (asm_out_file,
10262 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
10263 }
10264
10265 return "";
10266 }
10267
10268 /* Functions to save and restore machine-specific function data. */
10269
10270 static struct machine_function *
10271 arm_init_machine_status ()
10272 {
10273 struct machine_function *machine;
10274 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
10275
10276 #if ARM_FT_UNKNOWN != 0
10277 machine->func_type = ARM_FT_UNKNOWN;
10278 #endif
10279 return machine;
10280 }
10281
10282 /* Return an RTX indicating where the return address to the
10283 calling function can be found. */
10284
10285 rtx
10286 arm_return_addr (count, frame)
10287 int count;
10288 rtx frame ATTRIBUTE_UNUSED;
10289 {
10290 if (count != 0)
10291 return NULL_RTX;
10292
10293 if (TARGET_APCS_32)
10294 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
10295 else
10296 {
10297 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
10298 GEN_INT (RETURN_ADDR_MASK26));
10299 return get_func_hard_reg_initial_val (cfun, lr);
10300 }
10301 }
10302
10303 /* Do anything needed before RTL is emitted for each function. */
10304
10305 void
10306 arm_init_expanders ()
10307 {
10308 /* Arrange to initialize and mark the machine per-function status. */
10309 init_machine_status = arm_init_machine_status;
10310 }
10311
10312 HOST_WIDE_INT
10313 thumb_get_frame_size ()
10314 {
10315 int regno;
10316
10317 int base_size = ROUND_UP (get_frame_size ());
10318 int count_regs = 0;
10319 int entry_size = 0;
10320 int leaf;
10321
10322 if (! TARGET_THUMB)
10323 abort ();
10324
10325 if (! TARGET_ATPCS)
10326 return base_size;
10327
10328 /* We need to know if we are a leaf function. Unfortunately, it
10329 is possible to be called after start_sequence has been called,
10330 which causes get_insns to return the insns for the sequence,
10331 not the function, which will cause leaf_function_p to return
10332 the incorrect result.
10333
10334 To work around this, we cache the computed frame size. This
10335 works because we will only be calling RTL expanders that need
10336 to know about leaf functions once reload has completed, and the
10337 frame size cannot be changed after that time, so we can safely
10338 use the cached value. */
10339
10340 if (reload_completed)
10341 return cfun->machine->frame_size;
10342
10343 leaf = leaf_function_p ();
10344
10345 /* A leaf function does not need any stack alignment if it has nothing
10346 on the stack. */
10347 if (leaf && base_size == 0)
10348 {
10349 cfun->machine->frame_size = 0;
10350 return 0;
10351 }
10352
10353 /* We know that SP will be word aligned on entry, and we must
10354 preserve that condition at any subroutine call. But those are
10355 the only constraints. */
10356
10357 /* Space for variadic functions. */
10358 if (current_function_pretend_args_size)
10359 entry_size += current_function_pretend_args_size;
10360
10361 /* Space for pushed lo registers. */
10362 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10363 if (THUMB_REG_PUSHED_P (regno))
10364 count_regs++;
10365
10366 /* Space for backtrace structure. */
10367 if (TARGET_BACKTRACE)
10368 {
10369 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
10370 entry_size += 20;
10371 else
10372 entry_size += 16;
10373 }
10374
10375 if (count_regs || !leaf || thumb_far_jump_used_p (1))
10376 count_regs++; /* LR */
10377
10378 entry_size += count_regs * 4;
10379 count_regs = 0;
10380
10381 /* Space for pushed hi regs. */
10382 for (regno = 8; regno < 13; regno++)
10383 if (THUMB_REG_PUSHED_P (regno))
10384 count_regs++;
10385
10386 entry_size += count_regs * 4;
10387
10388 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10389 base_size += 4;
10390 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10391 abort ();
10392
10393 cfun->machine->frame_size = base_size;
10394
10395 return base_size;
10396 }
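
/* For instance, under ATPCS a non-leaf function that pushes r4-r7 and
   lr (entry_size = 20) around 8 bytes of locals, with no outgoing
   arguments, sees (20 + 8 + 0) & 7 == 4, so base_size is padded from
   8 to 12 and the 32 byte total keeps SP 8-byte aligned at calls.  */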
10397
10398 /* Generate the rest of a function's prologue. */
10399
10400 void
10401 thumb_expand_prologue ()
10402 {
10403 HOST_WIDE_INT amount = (thumb_get_frame_size ()
10404 + current_function_outgoing_args_size);
10405 unsigned long func_type;
10406
10407 func_type = arm_current_func_type ();
10408
10409 /* Naked functions don't have prologues. */
10410 if (IS_NAKED (func_type))
10411 return;
10412
10413 if (IS_INTERRUPT (func_type))
10414 {
10415 error ("interrupt Service Routines cannot be coded in Thumb mode");
10416 return;
10417 }
10418
10419 if (frame_pointer_needed)
10420 emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
10421
10422 if (amount)
10423 {
10424 amount = ROUND_UP (amount);
10425
10426 if (amount < 512)
10427 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10428 GEN_INT (- amount)));
10429 else
10430 {
10431 int regno;
10432 rtx reg;
10433
10434 /* The stack decrement is too big for an immediate value in a single
10435 insn. In theory we could issue multiple subtracts, but after
10436 three of them it becomes more space efficient to place the full
10437 value in the constant pool and load into a register. (Also the
10438 ARM debugger really likes to see only one stack decrement per
10439 function). So instead we look for a scratch register into which
10440 we can load the decrement, and then we subtract this from the
10441 stack pointer. Unfortunately on the thumb the only available
10442 scratch registers are the argument registers, and we cannot use
10443 these as they may hold arguments to the function. Instead we
10444 attempt to locate a call preserved register which is used by this
10445 function. If we can find one, then we know that it will have
10446 been pushed at the start of the prologue and so we can corrupt
10447 it now. */
10448 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
10449 if (THUMB_REG_PUSHED_P (regno)
10450 && !(frame_pointer_needed
10451 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
10452 break;
10453
10454 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
10455 {
10456 rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
10457
10458 /* Choose an arbitrary, non-argument low register. */
10459 reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
10460
10461 /* Save it by copying it into a high, scratch register. */
10462 emit_insn (gen_movsi (spare, reg));
10463 /* Add a USE to stop propagate_one_insn() from barfing. */
10464 emit_insn (gen_prologue_use (spare));
10465
10466 /* Decrement the stack. */
10467 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
10468 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10469 reg));
10470
10471 /* Restore the low register's original value. */
10472 emit_insn (gen_movsi (reg, spare));
10473
10474 /* Emit a USE of the restored scratch register, so that flow
10475 analysis will not consider the restore redundant. The
10476 register won't be used again in this function and isn't
10477 restored by the epilogue. */
10478 emit_insn (gen_prologue_use (reg));
10479 }
10480 else
10481 {
10482 reg = gen_rtx (REG, SImode, regno);
10483
10484 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
10485 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10486 reg));
10487 }
10488 }
10489 }
10490
10491 if (current_function_profile || TARGET_NO_SCHED_PRO)
10492 emit_insn (gen_blockage ());
10493 }
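
/* For a decrement of, say, 1024 bytes, and assuming r4 is a pushed
   call-saved register that may be corrupted, the RTL expanded above
   amounts to something like (.Lc being an illustrative literal-pool
   entry holding -1024):

	ldr	r4, .Lc
	add	sp, sp, r4

   rather than a chain of immediate subtracts.  */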
10494
10495 void
10496 thumb_expand_epilogue ()
10497 {
10498 HOST_WIDE_INT amount = (thumb_get_frame_size ()
10499 + current_function_outgoing_args_size);
10500
10501 /* Naked functions don't have epilogues. */
10502 if (IS_NAKED (arm_current_func_type ()))
10503 return;
10504
10505 if (frame_pointer_needed)
10506 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
10507 else if (amount)
10508 {
10509 amount = ROUND_UP (amount);
10510
10511 if (amount < 512)
10512 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10513 GEN_INT (amount)));
10514 else
10515 {
10516 /* r3 is always free in the epilogue. */
10517 rtx reg = gen_rtx (REG, SImode, LAST_ARG_REGNUM);
10518
10519 emit_insn (gen_movsi (reg, GEN_INT (amount)));
10520 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
10521 }
10522 }
10523
10524 /* Emit a USE (stack_pointer_rtx), so that
10525 the stack adjustment will not be deleted. */
10526 emit_insn (gen_prologue_use (stack_pointer_rtx));
10527
10528 if (current_function_profile || TARGET_NO_SCHED_PRO)
10529 emit_insn (gen_blockage ());
10530 }
10531
10532 static void
10533 thumb_output_function_prologue (f, size)
10534 FILE * f;
10535 HOST_WIDE_INT size ATTRIBUTE_UNUSED;
10536 {
10537 int live_regs_mask = 0;
10538 int high_regs_pushed = 0;
10539 int regno;
10540
10541 if (IS_NAKED (arm_current_func_type ()))
10542 return;
10543
10544 if (is_called_in_ARM_mode (current_function_decl))
10545 {
10546 const char * name;
10547
10548 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
10549 abort ();
10550 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
10551 abort ();
10552 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
10553
10554 /* Generate code sequence to switch us into Thumb mode. */
10555 /* The .code 32 directive has already been emitted by
10556 ASM_DECLARE_FUNCTION_NAME. */
10557 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
10558 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
10559
10560 /* Generate a label, so that the debugger will notice the
10561 change in instruction sets. This label is also used by
10562 the assembler to bypass the ARM code when this function
10563 is called from a Thumb encoded function elsewhere in the
10564 same file. Hence the definition of STUB_NAME here must
10565 agree with the definition in gas/config/tc-arm.c */
10566
10567 #define STUB_NAME ".real_start_of"
10568
10569 fprintf (f, "\t.code\t16\n");
10570 #ifdef ARM_PE
10571 if (arm_dllexport_name_p (name))
10572 name = arm_strip_name_encoding (name);
10573 #endif
10574 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
10575 fprintf (f, "\t.thumb_func\n");
10576 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
10577 }
10578
10579 if (current_function_pretend_args_size)
10580 {
10581 if (cfun->machine->uses_anonymous_args)
10582 {
10583 int num_pushes;
10584
10585 fprintf (f, "\tpush\t{");
10586
10587 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
10588
10589 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
10590 regno <= LAST_ARG_REGNUM;
10591 regno++)
10592 asm_fprintf (f, "%r%s", regno,
10593 regno == LAST_ARG_REGNUM ? "" : ", ");
10594
10595 fprintf (f, "}\n");
10596 }
10597 else
10598 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
10599 SP_REGNUM, SP_REGNUM,
10600 current_function_pretend_args_size);
10601 }
10602
10603 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10604 if (THUMB_REG_PUSHED_P (regno))
10605 live_regs_mask |= 1 << regno;
10606
10607 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
10608 live_regs_mask |= 1 << LR_REGNUM;
10609
10610 if (TARGET_BACKTRACE)
10611 {
10612 int offset;
10613 int work_register = 0;
10614 int wr;
10615
10616 /* We have been asked to create a stack backtrace structure.
10617 The code looks like this:
10618
10619 0 .align 2
10620 0 func:
10621 0 sub SP, #16 Reserve space for 4 registers.
10622 2 push {R7} Get a work register.
10623 4 add R7, SP, #20 Get the stack pointer before the push.
10624 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
10625 8 mov R7, PC Get hold of the start of this code plus 12.
10626 10 str R7, [SP, #16] Store it.
10627 12 mov R7, FP Get hold of the current frame pointer.
10628 14 str R7, [SP, #4] Store it.
10629 16 mov R7, LR Get hold of the current return address.
10630 18 str R7, [SP, #12] Store it.
10631 20 add R7, SP, #16 Point at the start of the backtrace structure.
10632 22 mov FP, R7 Put this value into the frame pointer. */
10633
10634 if ((live_regs_mask & 0xFF) == 0)
10635 {
10636 /* See if the a4 register is free. */
10637
10638 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
10639 work_register = LAST_ARG_REGNUM;
10640 else /* We must push a register of our own. */
10641 live_regs_mask |= (1 << LAST_LO_REGNUM);
10642 }
10643
10644 if (work_register == 0)
10645 {
10646 /* Select a register from the list that will be pushed to
10647 use as our work register. */
10648 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
10649 if ((1 << work_register) & live_regs_mask)
10650 break;
10651 }
10652
10653 asm_fprintf
10654 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
10655 SP_REGNUM, SP_REGNUM);
10656
10657 if (live_regs_mask)
10658 thumb_pushpop (f, live_regs_mask, 1);
10659
10660 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
10661 if (wr & live_regs_mask)
10662 offset += 4;
10663
10664 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
10665 offset + 16 + current_function_pretend_args_size);
10666
10667 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10668 offset + 4);
10669
10670 /* Make sure that the instruction fetching the PC is in the right place
10671 to calculate "start of backtrace creation code + 12". */
10672 if (live_regs_mask)
10673 {
10674 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
10675 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10676 offset + 12);
10677 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
10678 ARM_HARD_FRAME_POINTER_REGNUM);
10679 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10680 offset);
10681 }
10682 else
10683 {
10684 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
10685 ARM_HARD_FRAME_POINTER_REGNUM);
10686 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10687 offset);
10688 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
10689 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10690 offset + 12);
10691 }
10692
10693 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
10694 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10695 offset + 8);
10696 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
10697 offset + 12);
10698 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
10699 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
10700 }
10701 else if (live_regs_mask)
10702 thumb_pushpop (f, live_regs_mask, 1);
10703
10704 for (regno = 8; regno < 13; regno++)
10705 if (THUMB_REG_PUSHED_P (regno))
10706 high_regs_pushed++;
10707
10708 if (high_regs_pushed)
10709 {
10710 int pushable_regs = 0;
10711 int mask = live_regs_mask & 0xff;
10712 int next_hi_reg;
10713
10714 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
10715 if (THUMB_REG_PUSHED_P (next_hi_reg))
10716 break;
10717
10718 pushable_regs = mask;
10719
10720 if (pushable_regs == 0)
10721 {
10722 /* Desperation time -- this probably will never happen. */
10723 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
10724 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
10725 mask = 1 << LAST_ARG_REGNUM;
10726 }
10727
10728 while (high_regs_pushed > 0)
10729 {
10730 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
10731 {
10732 if (mask & (1 << regno))
10733 {
10734 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
10735
10736 high_regs_pushed--;
10737
10738 if (high_regs_pushed)
10739 {
10740 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
10741 next_hi_reg--)
10742 if (THUMB_REG_PUSHED_P (next_hi_reg))
10743 break;
10744 }
10745 else
10746 {
10747 mask &= ~((1 << regno) - 1);
10748 break;
10749 }
10750 }
10751 }
10752
10753 thumb_pushpop (f, mask, 1);
10754 }
10755
10756 if (pushable_regs == 0
10757 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
10758 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
10759 }
10760 }
10761
10762 /* Handle the case of a double word load into a low register from
10763 a computed memory address. The computed address may involve a
10764 register which is overwritten by the load. */
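
/* For example, loading the pair r0/r1 from the address held in r0
   must load the high word first:

	ldr	r1, [r0, #4]
	ldr	r0, [r0]

   so that the base register is not clobbered before its final use.  */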
10765
10766 const char *
10767 thumb_load_double_from_address (operands)
10768 rtx *operands;
10769 {
10770 rtx addr;
10771 rtx base;
10772 rtx offset;
10773 rtx arg1;
10774 rtx arg2;
10775
10776 if (GET_CODE (operands[0]) != REG)
10777 abort ();
10778
10779 if (GET_CODE (operands[1]) != MEM)
10780 abort ();
10781
10782 /* Get the memory address. */
10783 addr = XEXP (operands[1], 0);
10784
10785 /* Work out how the memory address is computed. */
10786 switch (GET_CODE (addr))
10787 {
10788 case REG:
10789 operands[2] = gen_rtx (MEM, SImode,
10790 plus_constant (XEXP (operands[1], 0), 4));
10791
10792 if (REGNO (operands[0]) == REGNO (addr))
10793 {
10794 output_asm_insn ("ldr\t%H0, %2", operands);
10795 output_asm_insn ("ldr\t%0, %1", operands);
10796 }
10797 else
10798 {
10799 output_asm_insn ("ldr\t%0, %1", operands);
10800 output_asm_insn ("ldr\t%H0, %2", operands);
10801 }
10802 break;
10803
10804 case CONST:
10805 /* Compute <address> + 4 for the high order load. */
10806 operands[2] = gen_rtx (MEM, SImode,
10807 plus_constant (XEXP (operands[1], 0), 4));
10808
10809 output_asm_insn ("ldr\t%0, %1", operands);
10810 output_asm_insn ("ldr\t%H0, %2", operands);
10811 break;
10812
10813 case PLUS:
10814 arg1 = XEXP (addr, 0);
10815 arg2 = XEXP (addr, 1);
10816
10817 if (CONSTANT_P (arg1))
10818 base = arg2, offset = arg1;
10819 else
10820 base = arg1, offset = arg2;
10821
10822 if (GET_CODE (base) != REG)
10823 abort ();
10824
10825 /* Catch the case of <address> = <reg> + <reg> */
10826 if (GET_CODE (offset) == REG)
10827 {
10828 int reg_offset = REGNO (offset);
10829 int reg_base = REGNO (base);
10830 int reg_dest = REGNO (operands[0]);
10831
10832 /* Add the base and offset registers together into the
10833 higher destination register. */
10834 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
10835 reg_dest + 1, reg_base, reg_offset);
10836
10837 /* Load the lower destination register from the address in
10838 the higher destination register. */
10839 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
10840 reg_dest, reg_dest + 1);
10841
10842 /* Load the higher destination register from its own address
10843 plus 4. */
10844 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
10845 reg_dest + 1, reg_dest + 1);
10846 }
10847 else
10848 {
10849 /* Compute <address> + 4 for the high order load. */
10850 operands[2] = gen_rtx (MEM, SImode,
10851 plus_constant (XEXP (operands[1], 0), 4));
10852
10853 /* If the computed address is held in the low order register
10854 then load the high order register first, otherwise always
10855 load the low order register first. */
10856 if (REGNO (operands[0]) == REGNO (base))
10857 {
10858 output_asm_insn ("ldr\t%H0, %2", operands);
10859 output_asm_insn ("ldr\t%0, %1", operands);
10860 }
10861 else
10862 {
10863 output_asm_insn ("ldr\t%0, %1", operands);
10864 output_asm_insn ("ldr\t%H0, %2", operands);
10865 }
10866 }
10867 break;
10868
10869 case LABEL_REF:
10870 /* With no registers to worry about we can just load the value
10871 directly. */
10872 operands[2] = gen_rtx (MEM, SImode,
10873 plus_constant (XEXP (operands[1], 0), 4));
10874
10875 output_asm_insn ("ldr\t%H0, %2", operands);
10876 output_asm_insn ("ldr\t%0, %1", operands);
10877 break;
10878
10879 default:
10880 abort ();
10881 break;
10882 }
10883
10884 return "";
10885 }
10886
10887
10888 const char *
10889 thumb_output_move_mem_multiple (n, operands)
10890 int n;
10891 rtx * operands;
10892 {
10893 rtx tmp;
10894
10895 switch (n)
10896 {
10897 case 2:
10898 if (REGNO (operands[4]) > REGNO (operands[5]))
10899 {
10900 tmp = operands[4];
10901 operands[4] = operands[5];
10902 operands[5] = tmp;
10903 }
10904 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
10905 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
10906 break;
10907
10908 case 3:
10909 if (REGNO (operands[4]) > REGNO (operands[5]))
10910 {
10911 tmp = operands[4];
10912 operands[4] = operands[5];
10913 operands[5] = tmp;
10914 }
10915 if (REGNO (operands[5]) > REGNO (operands[6]))
10916 {
10917 tmp = operands[5];
10918 operands[5] = operands[6];
10919 operands[6] = tmp;
10920 }
10921 if (REGNO (operands[4]) > REGNO (operands[5]))
10922 {
10923 tmp = operands[4];
10924 operands[4] = operands[5];
10925 operands[5] = tmp;
10926 }
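
/* The three conditional exchanges above form a sorting network that
   leaves operands[4] <= operands[5] <= operands[6] by register
   number, as ldmia/stmia register lists must be ascending.  */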
10927
10928 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
10929 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
10930 break;
10931
10932 default:
10933 abort ();
10934 }
10935
10936 return "";
10937 }
10938
10939 /* Routines for generating rtl. */
10940
10941 void
10942 thumb_expand_movstrqi (operands)
10943 rtx * operands;
10944 {
10945 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
10946 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
10947 HOST_WIDE_INT len = INTVAL (operands[2]);
10948 HOST_WIDE_INT offset = 0;
10949
10950 while (len >= 12)
10951 {
10952 emit_insn (gen_movmem12b (out, in, out, in));
10953 len -= 12;
10954 }
10955
10956 if (len >= 8)
10957 {
10958 emit_insn (gen_movmem8b (out, in, out, in));
10959 len -= 8;
10960 }
10961
10962 if (len >= 4)
10963 {
10964 rtx reg = gen_reg_rtx (SImode);
10965 emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
10966 emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
10967 len -= 4;
10968 offset += 4;
10969 }
10970
10971 if (len >= 2)
10972 {
10973 rtx reg = gen_reg_rtx (HImode);
10974 emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
10975 plus_constant (in, offset))));
10976 emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
10977 reg));
10978 len -= 2;
10979 offset += 2;
10980 }
10981
10982 if (len)
10983 {
10984 rtx reg = gen_reg_rtx (QImode);
10985 emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
10986 plus_constant (in, offset))));
10987 emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
10988 reg));
10989 }
10990 }
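
/* For example, a 23 byte copy becomes one 12 byte block move and one
   8 byte block move (both of which advance the IN and OUT pointers),
   then a halfword copy at offset 0 and a byte copy at offset 2:
   23 = 12 + 8 + 2 + 1.  */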
10991
10992 int
10993 thumb_cmp_operand (op, mode)
10994 rtx op;
10995 enum machine_mode mode;
10996 {
10997 return ((GET_CODE (op) == CONST_INT
10998 && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
10999 || register_operand (op, mode));
11000 }
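
/* The limit of 256 reflects the 8-bit immediate field of the Thumb
   CMP instruction: (const_int 255) is usable directly, while 256 or
   any negative value must first be loaded into a register.  */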
11001
11002 static const char *
11003 thumb_condition_code (x, invert)
11004 rtx x;
11005 int invert;
11006 {
11007 static const char * const conds[] =
11008 {
11009 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
11010 "hi", "ls", "ge", "lt", "gt", "le"
11011 };
11012 int val;
11013
11014 switch (GET_CODE (x))
11015 {
11016 case EQ: val = 0; break;
11017 case NE: val = 1; break;
11018 case GEU: val = 2; break;
11019 case LTU: val = 3; break;
11020 case GTU: val = 8; break;
11021 case LEU: val = 9; break;
11022 case GE: val = 10; break;
11023 case LT: val = 11; break;
11024 case GT: val = 12; break;
11025 case LE: val = 13; break;
11026 default:
11027 abort ();
11028 }
11029
11030 return conds[val ^ invert];
11031 }
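
/* The table is laid out in complementary pairs, so XORing the index
   with INVERT flips a condition to its inverse; e.g. GE selects "ge"
   (index 10) normally and "lt" (index 11) when inverted.  */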
11032
11033 /* Handle storing a half-word to memory during reload. */
11034
11035 void
11036 thumb_reload_out_hi (operands)
11037 rtx * operands;
11038 {
11039 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
11040 }
11041
11042 /* Handle storing a half-word to memory during reload. */
11043
11044 void
11045 thumb_reload_in_hi (operands)
11046 rtx * operands ATTRIBUTE_UNUSED;
11047 {
11048 abort ();
11049 }
11050
11051 /* Return the length of a function name prefix
11052 that starts with the character 'c'. */
11053
11054 static int
11055 arm_get_strip_length (c)
11056 int c;
11057 {
11058 switch (c)
11059 {
11060 ARM_NAME_ENCODING_LENGTHS
11061 default: return 0;
11062 }
11063 }
11064
11065 /* Return a pointer to a function's name with any
11066 and all prefix encodings stripped from it. */
11067
11068 const char *
11069 arm_strip_name_encoding (name)
11070 const char * name;
11071 {
11072 int skip;
11073
11074 while ((skip = arm_get_strip_length (* name)))
11075 name += skip;
11076
11077 return name;
11078 }
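
/* For instance, since '*' is one of the encoded prefixes (see
   arm_asm_output_labelref below), arm_strip_name_encoding ("*foo")
   returns "foo"; stacked prefixes are stripped one per iteration.  */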
11079
11080 /* If there is a '*' anywhere in the name's prefix, then
11081 emit the stripped name verbatim, otherwise prepend an
11082 underscore if leading underscores are being used. */
11083
11084 void
11085 arm_asm_output_labelref (stream, name)
11086 FILE * stream;
11087 const char * name;
11088 {
11089 int skip;
11090 int verbatim = 0;
11091
11092 while ((skip = arm_get_strip_length (* name)))
11093 {
11094 verbatim |= (*name == '*');
11095 name += skip;
11096 }
11097
11098 if (verbatim)
11099 fputs (name, stream);
11100 else
11101 asm_fprintf (stream, "%U%s", name);
11102 }
11103
11104 rtx aof_pic_label;
11105
11106 #ifdef AOF_ASSEMBLER
11107 /* Special functions only needed when producing AOF syntax assembler. */
11108
11109 struct pic_chain
11110 {
11111 struct pic_chain * next;
11112 const char * symname;
11113 };
11114
11115 static struct pic_chain * aof_pic_chain = NULL;
11116
11117 rtx
11118 aof_pic_entry (x)
11119 rtx x;
11120 {
11121 struct pic_chain ** chainp;
11122 int offset;
11123
11124 if (aof_pic_label == NULL_RTX)
11125 {
11126 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
11127 }
11128
11129 for (offset = 0, chainp = &aof_pic_chain; *chainp;
11130 offset += 4, chainp = &(*chainp)->next)
11131 if ((*chainp)->symname == XSTR (x, 0))
11132 return plus_constant (aof_pic_label, offset);
11133
11134 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
11135 (*chainp)->next = NULL;
11136 (*chainp)->symname = XSTR (x, 0);
11137 return plus_constant (aof_pic_label, offset);
11138 }
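
/* Each distinct symbol gets a 4 byte slot, so the first symbol
   resolves to x$adcons + 0, the second to x$adcons + 4, and so on;
   aof_dump_pic_table below emits one DCD per slot.  */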
11139
11140 void
11141 aof_dump_pic_table (f)
11142 FILE * f;
11143 {
11144 struct pic_chain * chain;
11145
11146 if (aof_pic_chain == NULL)
11147 return;
11148
11149 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
11150 PIC_OFFSET_TABLE_REGNUM,
11151 PIC_OFFSET_TABLE_REGNUM);
11152 fputs ("|x$adcons|\n", f);
11153
11154 for (chain = aof_pic_chain; chain; chain = chain->next)
11155 {
11156 fputs ("\tDCD\t", f);
11157 assemble_name (f, chain->symname);
11158 fputs ("\n", f);
11159 }
11160 }
11161
11162 int arm_text_section_count = 1;
11163
11164 char *
11165 aof_text_section ()
11166 {
11167 static char buf[100];
11168 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
11169 arm_text_section_count++);
11170 if (flag_pic)
11171 strcat (buf, ", PIC, REENTRANT");
11172 return buf;
11173 }
11174
11175 static int arm_data_section_count = 1;
11176
11177 char *
11178 aof_data_section ()
11179 {
11180 static char buf[100];
11181 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
11182 return buf;
11183 }
11184
11185 /* The AOF assembler is religiously strict about declarations of
11186 imported and exported symbols, so that it is impossible to declare
11187 a function as imported near the beginning of the file, and then to
11188 export it later on. It is, however, possible to delay the decision
11189 until all the functions in the file have been compiled. To get
11190 around this, we maintain a list of the imports and exports, and
11191 delete from it any that are subsequently defined. At the end of
11192 compilation we spit the remainder of the list out before the END
11193 directive. */
11194
11195 struct import
11196 {
11197 struct import * next;
11198 const char * name;
11199 };
11200
11201 static struct import * imports_list = NULL;
11202
11203 void
11204 aof_add_import (name)
11205 const char * name;
11206 {
11207 struct import * new;
11208
11209 for (new = imports_list; new; new = new->next)
11210 if (new->name == name)
11211 return;
11212
11213 new = (struct import *) xmalloc (sizeof (struct import));
11214 new->next = imports_list;
11215 imports_list = new;
11216 new->name = name;
11217 }
11218
11219 void
11220 aof_delete_import (name)
11221 const char * name;
11222 {
11223 struct import ** old;
11224
11225 for (old = &imports_list; *old; old = & (*old)->next)
11226 {
11227 if ((*old)->name == name)
11228 {
11229 *old = (*old)->next;
11230 return;
11231 }
11232 }
11233 }
11234
11235 int arm_main_function = 0;
11236
11237 void
11238 aof_dump_imports (f)
11239 FILE * f;
11240 {
11241 /* The AOF assembler needs this to cause the startup code to be extracted
11242 from the library. Bringing in __main causes the whole thing to work
11243 automagically. */
11244 if (arm_main_function)
11245 {
11246 text_section ();
11247 fputs ("\tIMPORT __main\n", f);
11248 fputs ("\tDCD __main\n", f);
11249 }
11250
11251 /* Now dump the remaining imports. */
11252 while (imports_list)
11253 {
11254 fprintf (f, "\tIMPORT\t");
11255 assemble_name (f, imports_list->name);
11256 fputc ('\n', f);
11257 imports_list = imports_list->next;
11258 }
11259 }
11260
11261 static void
11262 aof_globalize_label (stream, name)
11263 FILE *stream;
11264 const char *name;
11265 {
11266 default_globalize_label (stream, name);
11267 if (! strcmp (name, "main"))
11268 arm_main_function = 1;
11269 }
11270 #endif /* AOF_ASSEMBLER */
11271
11272 #ifdef OBJECT_FORMAT_ELF
11273 /* Switch to an arbitrary section NAME with attributes as specified
11274 by FLAGS. ALIGN specifies any known alignment requirements for
11275 the section; 0 if the default should be used.
11276
11277 Differs from the default elf version only in the prefix character
11278 used before the section type. */
11279
11280 static void
11281 arm_elf_asm_named_section (name, flags)
11282 const char *name;
11283 unsigned int flags;
11284 {
11285 char flagchars[10], *f = flagchars;
11286
11287 if (! named_section_first_declaration (name))
11288 {
11289 fprintf (asm_out_file, "\t.section\t%s\n", name);
11290 return;
11291 }
11292
11293 if (!(flags & SECTION_DEBUG))
11294 *f++ = 'a';
11295 if (flags & SECTION_WRITE)
11296 *f++ = 'w';
11297 if (flags & SECTION_CODE)
11298 *f++ = 'x';
11299 if (flags & SECTION_SMALL)
11300 *f++ = 's';
11301 if (flags & SECTION_MERGE)
11302 *f++ = 'M';
11303 if (flags & SECTION_STRINGS)
11304 *f++ = 'S';
11305 if (flags & SECTION_TLS)
11306 *f++ = 'T';
11307 *f = '\0';
11308
11309 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
11310
11311 if (!(flags & SECTION_NOTYPE))
11312 {
11313 const char *type;
11314
11315 if (flags & SECTION_BSS)
11316 type = "nobits";
11317 else
11318 type = "progbits";
11319
11320 fprintf (asm_out_file, ",%%%s", type);
11321
11322 if (flags & SECTION_ENTSIZE)
11323 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
11324 }
11325
11326 putc ('\n', asm_out_file);
11327 }
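
/* For example, the first declaration of a writable data section named
   .foo comes out as:

	.section	.foo,"aw",%progbits

   with '%' where the default ELF hook would emit '@', since '@'
   begins a comment in ARM assembler syntax.  */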
11328 #endif
11329
11330 #ifndef ARM_PE
11331 /* Symbols in the text segment can be accessed without indirecting via the
11332 constant pool; it may take an extra binary operation, but this is still
11333 faster than indirecting via memory. Don't do this when not optimizing,
11334 since we won't be calculating all of the offsets necessary to do this
11335 simplification. */
11336
11337 static void
11338 arm_encode_section_info (decl, first)
11339 tree decl;
11340 int first;
11341 {
11342 /* This doesn't work with AOF syntax, since the string table may be in
11343 a different AREA. */
11344 #ifndef AOF_ASSEMBLER
11345 if (optimize > 0 && TREE_CONSTANT (decl)
11346 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
11347 {
11348 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
11349 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
11350 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
11351 }
11352 #endif
11353
11354 /* If we are referencing a function that is weak then encode a long call
11355 flag in the function name; otherwise, if the function is static or
11356 known to be defined in this file, then encode a short call flag. */
11357 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
11358 {
11359 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
11360 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
11361 else if (! TREE_PUBLIC (decl))
11362 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
11363 }
11364 }
11365 #endif /* !ARM_PE */
11366
11367 static void
11368 arm_internal_label (stream, prefix, labelno)
11369 FILE *stream;
11370 const char *prefix;
11371 unsigned long labelno;
11372 {
11373 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
11374 && !strcmp (prefix, "L"))
11375 {
11376 arm_ccfsm_state = 0;
11377 arm_target_insn = NULL;
11378 }
11379 default_internal_label (stream, prefix, labelno);
11380 }
11381
11382 /* Output code to add DELTA to the first argument, and then jump
11383 to FUNCTION. Used for C++ multiple inheritance. */
11384
11385 static void
11386 arm_output_mi_thunk (file, thunk, delta, vcall_offset, function)
11387 FILE *file;
11388 tree thunk ATTRIBUTE_UNUSED;
11389 HOST_WIDE_INT delta;
11390 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED;
11391 tree function;
11392 {
11393 int mi_delta = delta;
11394 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
11395 int shift = 0;
11396 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)))
11397 ? 1 : 0);
11398 if (mi_delta < 0)
11399 mi_delta = - mi_delta;
11400 while (mi_delta != 0)
11401 {
11402 if ((mi_delta & (3 << shift)) == 0)
11403 shift += 2;
11404 else
11405 {
11406 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
11407 mi_op, this_regno, this_regno,
11408 mi_delta & (0xff << shift));
11409 mi_delta &= ~(0xff << shift);
11410 shift += 8;
11411 }
11412 }
11413 fputs ("\tb\t", file);
11414 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
11415 if (NEED_PLT_RELOC)
11416 fputs ("(PLT)", file);
11417 fputc ('\n', file);
11418 }
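
/* For example, with DELTA = 291 (0x123) and a non-aggregate return
   type (so THIS lives in r0), the loop above splits the addend into
   shifted 8-bit chunks:

	add	r0, r0, #35
	add	r0, r0, #256

   followed by the tail branch to FUNCTION.  */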