/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002
   Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

/* In order to improve the layout of the prototypes below
   some short type abbreviations are defined here.  */
#define Hint   HOST_WIDE_INT
#define Mmode  enum machine_mode
#define Ulong  unsigned long
#define Ccstar const char *

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static void arm_add_gc_roots PARAMS ((void));
static int arm_gen_constant PARAMS ((enum rtx_code, Mmode, Hint, rtx, rtx, int, int));
static Ulong bit_count PARAMS ((signed int));
static int const_ok_for_op PARAMS ((Hint, enum rtx_code));
static int eliminate_lr2ip PARAMS ((rtx *));
static rtx emit_multi_reg_push PARAMS ((int));
static rtx emit_sfm PARAMS ((int, int));
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer PARAMS ((rtx, unsigned int, int));
#endif
static Ccstar fp_const_from_val PARAMS ((REAL_VALUE_TYPE *));
static arm_cc get_arm_condition_code PARAMS ((rtx));
static void init_fpa_table PARAMS ((void));
static Hint int_log2 PARAMS ((Hint));
static rtx is_jump_table PARAMS ((rtx));
static Ccstar output_multi_immediate PARAMS ((rtx *, Ccstar, Ccstar, int, Hint));
static void print_multi_reg PARAMS ((FILE *, Ccstar, int, int));
static Mmode select_dominance_cc_mode PARAMS ((rtx, rtx, Hint));
static Ccstar shift_op PARAMS ((rtx, Hint *));
static void arm_init_machine_status PARAMS ((struct function *));
static void arm_mark_machine_status PARAMS ((struct function *));
static void arm_free_machine_status PARAMS ((struct function *));
static int number_of_first_bit_set PARAMS ((int));
static void replace_symbols_in_block PARAMS ((tree, rtx, rtx));
static void thumb_exit PARAMS ((FILE *, int, rtx));
static void thumb_pushpop PARAMS ((FILE *, int, int));
static Ccstar thumb_condition_code PARAMS ((rtx, int));
static Hint get_jump_table_size PARAMS ((rtx));
static Mnode * move_minipool_fix_forward_ref PARAMS ((Mnode *, Mnode *, Hint));
static Mnode * add_minipool_forward_ref PARAMS ((Mfix *));
static Mnode * move_minipool_fix_backward_ref PARAMS ((Mnode *, Mnode *, Hint));
static Mnode * add_minipool_backward_ref PARAMS ((Mfix *));
static void assign_minipool_offsets PARAMS ((Mfix *));
static void arm_print_value PARAMS ((FILE *, rtx));
static void dump_minipool PARAMS ((rtx));
static int arm_barrier_cost PARAMS ((rtx));
static Mfix * create_fix_barrier PARAMS ((Mfix *, Hint));
static void push_minipool_barrier PARAMS ((rtx, Hint));
static void push_minipool_fix PARAMS ((rtx, Hint, rtx *, Mmode, rtx));
static void note_invalid_constants PARAMS ((rtx, Hint));
static int current_file_function_operand PARAMS ((rtx));
static Ulong arm_compute_save_reg0_reg12_mask PARAMS ((void));
static Ulong arm_compute_save_reg_mask PARAMS ((void));
static Ulong arm_isr_value PARAMS ((tree));
static Ulong arm_compute_func_type PARAMS ((void));
static tree arm_handle_fndecl_attribute PARAMS ((tree *, tree, tree, int, bool *));
static tree arm_handle_isr_attribute PARAMS ((tree *, tree, tree, int, bool *));
static void arm_output_function_epilogue PARAMS ((FILE *, Hint));
static void arm_output_function_prologue PARAMS ((FILE *, Hint));
static void thumb_output_function_prologue PARAMS ((FILE *, Hint));
static int arm_comp_type_attributes PARAMS ((tree, tree));
static void arm_set_default_type_attributes PARAMS ((tree));
static int arm_adjust_cost PARAMS ((rtx, rtx, rtx, int));
#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section PARAMS ((const char *, unsigned int));
#endif
#ifndef ARM_PE
static void arm_encode_section_info PARAMS ((tree, int));
#endif

#undef Hint
#undef Mmode
#undef Ulong
#undef Ccstar
\f
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#else
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char * minipool_startobj;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* What type of floating point are we tuning for?  */
enum floating_point_type arm_fpu;

/* What type of floating point instructions are available?  */
enum floating_point_type arm_fpu_arch;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfp=... option.  */
const char * target_fp_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)   /* Has external co-processor bus.  */
#define FL_FAST_MULT  (1 << 1)   /* Fast multiply.  */
#define FL_MODE26     (1 << 2)   /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)   /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)   /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)   /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)   /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)   /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)   /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)   /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)  /* XScale.  */

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static int insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  Note - there is an overlap with FL_FAST_MULT.  For some
   hardware we want to be able to generate the multiply instructions, but to
   tune as if they were not present in the architecture.  */
static int tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this is an "M" variant of the processor.  */
int arm_fast_multiply = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is an XScale.  */
int arm_is_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output; this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
\f
/* Initialization code.  */

struct processors
{
  const char *const name;
  const unsigned int flags;
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */

  {"arm2", FL_CO_PROC | FL_MODE26 },
  {"arm250", FL_CO_PROC | FL_MODE26 },
  {"arm3", FL_CO_PROC | FL_MODE26 },
  {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm610", FL_MODE26 | FL_MODE32 },
  {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  /* arm7m doesn't exist on its own, only in combination with D (and I),
     but those don't alter the code, so the name arm7m is sometimes used.  */
  {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm710", FL_MODE26 | FL_MODE32 },
  {"arm710t", FL_MODE26 | FL_MODE32 | FL_THUMB },
  {"arm720", FL_MODE26 | FL_MODE32 },
  {"arm720t", FL_MODE26 | FL_MODE32 | FL_THUMB },
  {"arm740t", FL_MODE26 | FL_MODE32 | FL_THUMB },
  {"arm710c", FL_MODE26 | FL_MODE32 },
  {"arm7100", FL_MODE26 | FL_MODE32 },
  {"arm7500", FL_MODE26 | FL_MODE32 },
  /* Doesn't have an external co-proc, but does have embedded fpu.  */
  {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm940t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9e", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"arm10tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
  {"arm1020t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
  {"xscale", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE },

  {NULL, 0}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */

  { "armv2",   FL_CO_PROC | FL_MODE26 },
  { "armv2a",  FL_CO_PROC | FL_MODE26 },
  { "armv3",   FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  { "armv3m",  FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  { "armv4",   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  { "armv5",   FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { "armv5t",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
  { NULL, 0 }
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line,
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string	  name		processors  */
  { NULL,	"-mcpu=",	all_cores  },
  { NULL,	"-march=",	all_architectures },
  { NULL,	"-mtune=",	all_cores }
};

/* Return the number of bits set in VALUE.  */
static unsigned long
bit_count (value)
     signed int value;
{
  unsigned long count = 0;

  while (value)
    {
      value &= ~(value & -value);
      ++count;
    }

  return count;
}
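
/* The loop above is Kernighan's trick: VALUE & -VALUE isolates the
   lowest set bit, so each iteration clears exactly one bit and the
   loop runs once per set bit.  A minimal standalone sketch of the
   same idea (illustrative only and kept out of the build; the helper
   name is not part of this file):  */
#if 0
static unsigned long
popcount_sketch (unsigned int value)
{
  unsigned long count = 0;

  while (value)
    {
      value &= value - 1;	/* Clear the lowest set bit.  */
      ++count;
    }

  return count;			/* popcount_sketch (0xF0) == 4.  */
}
#endif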

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options ()
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		if (i == 2)
		  tune_flags = sel->flags;
		else
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning ("switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      static const struct cpu_default
      {
	const int cpu;
	const char *const name;
      }
      cpu_defaults[] =
      {
	{ TARGET_CPU_arm2,      "arm2" },
	{ TARGET_CPU_arm6,      "arm6" },
	{ TARGET_CPU_arm610,    "arm610" },
	{ TARGET_CPU_arm710,    "arm710" },
	{ TARGET_CPU_arm7m,     "arm7m" },
	{ TARGET_CPU_arm7500fe, "arm7500fe" },
	{ TARGET_CPU_arm7tdmi,  "arm7tdmi" },
	{ TARGET_CPU_arm8,      "arm8" },
	{ TARGET_CPU_arm810,    "arm810" },
	{ TARGET_CPU_arm9,      "arm9" },
	{ TARGET_CPU_strongarm, "strongarm" },
	{ TARGET_CPU_xscale,    "xscale" },
	{ TARGET_CPU_generic,   "arm" },
	{ 0, 0 }
      };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
	abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
	if (streq (def->name, sel->name))
	  break;

      if (sel->name == NULL)
	abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
	 switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* Force apcs-32 to be used for interworking.  */
	  target_flags |= ARM_FLAG_APCS_32;

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}
      else if (!TARGET_APCS_32)
	sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & (sought | insn_flags)) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned int current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 Unfortunately this does not work with multilibing.  We
		 need to be able to support multilibs for -mapcs-26 and for
		 -mthumb-interwork and there is no CPU that can support both
		 options.  Instead if we cannot find a cpu that has both the
		 characteristics of the default cpu and the given command line
		 options we scan the array again looking for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned int count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      if (best_fit == NULL)
		abort ();
	      else
		sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}
    }

  /* If tuning has not been specified, tune for whichever processor or
     architecture has been selected.  */
  if (tune_flags == 0)
    tune_flags = insn_flags;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
	 user, so issue a warning message.  If the user has specified
	 "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
	warning ("target CPU does not support APCS-32");
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
	warning ("interworking forces APCS-32 to be used");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialise boolean versions of the flags, for use in the arm.md file.  */
  arm_fast_multiply = (insn_flags & FL_FAST_MULT) != 0;
  arm_arch4         = (insn_flags & FL_ARCH4) != 0;
  arm_arch5         = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e        = (insn_flags & FL_ARCH5E) != 0;
  arm_is_xscale     = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched      = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong     = (tune_flags & FL_STRONG) != 0;
  thumb_code        = (TARGET_ARM == 0);
  arm_is_6_or_7     = (((tune_flags & (FL_MODE26 | FL_MODE32))
			&& !(tune_flags & FL_ARCH4))) != 0;

  /* Default value for floating point code... if no co-processor
     bus, then schedule for emulated floating point.  Otherwise,
     assume the user has an FPA.
     Note: this does not prevent use of floating point instructions,
     -msoft-float does that.  */
  arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;

  if (target_fp_name)
    {
      if (streq (target_fp_name, "2"))
	arm_fpu_arch = FP_SOFT2;
      else if (streq (target_fp_name, "3"))
	arm_fpu_arch = FP_SOFT3;
      else
	error ("invalid floating point emulation option: -mfpe-%s",
	       target_fp_name);
    }
  else
    arm_fpu_arch = FP_DEFAULT;

  if (TARGET_FPE && arm_fpu != FP_HARD)
    arm_fpu = FP_SOFT2;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32)
	arm_structure_size_boundary = size;
      else
	warning ("structure size boundary can only be set to 8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  /* If optimizing for space, don't synthesize constants.
     For processors with load scheduling, it never costs more than 2 cycles
     to load a constant, and the load scheduler may well reduce that to 1.  */
  if (optimize_size || (tune_flags & FL_LDSCHED))
    arm_constant_limit = 1;

  if (arm_is_xscale)
    arm_constant_limit = 2;

  /* If optimizing for size, bump the number of instructions that we
     are prepared to conditionally execute (even on a StrongARM).
     Otherwise for the StrongARM, which has early execution of branches,
     a sequence that is worth skipping is shorter.  */
  if (optimize_size)
    max_insns_skipped = 6;
  else if (arm_is_strong)
    max_insns_skipped = 3;

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots ()
{
  ggc_add_rtx_root (&arm_compare_op0, 1);
  ggc_add_rtx_root (&arm_compare_op1, 1);
  ggc_add_rtx_root (&arm_target_insn, 1); /* Not sure this is really a root.  */

  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
\f
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (argument)
     tree argument;
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognised interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type ()
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
	a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
	type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
	type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type ()
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
\f
/* Return 1 if it is possible to return using a single instruction.  */

int
use_return_insn (iscond)
     int iscond;
{
  int regno;
  unsigned int func_type;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ().  */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if there is no frame pointer and there is a stack adjustment.  */
      || ((get_frame_size () + current_function_outgoing_args_size != 0)
	  && !frame_pointer_needed))
    return 0;

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  Similarly, on StrongARM, conditional returns are expensive
     if they aren't taken and registers have been stacked.  */
  if (iscond && arm_is_strong && frame_pointer_needed)
    return 0;

  if ((iscond && arm_is_strong)
      || TARGET_INTERWORK)
    {
      for (regno = 0; regno <= LAST_ARM_REGNUM; regno++)
	if (regs_ever_live[regno] && !call_used_regs[regno])
	  return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* Can't be done if any of the FPU regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT)
    for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (i)
     HOST_WIDE_INT i;
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
	return TRUE;
      mask =
	(mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
		       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
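
/* The widening-mask loop above implements the ARM data-processing
   immediate rule: a constant is valid iff it is an 8-bit value rotated
   right by an even amount within the 32-bit word.  A more direct
   standalone sketch of the same test (illustrative only, assuming
   32-bit unsigned arithmetic; kept out of the build):  */
#if 0
static int
is_arm_immediate_sketch (unsigned int x)
{
  int rot;

  /* Rotating X left by ROT undoes a rotate-right encoding by ROT;
     only even rotations are encodable.  */
  for (rot = 0; rot < 32; rot += 2)
    {
      unsigned int v = rot ? ((x << rot) | (x >> (32 - rot))) : x;

      if ((v & ~(unsigned int) 0xFF) == 0)
	return 1;
    }

  return 0;	/* E.g. 0xFF0 is valid (0xFF ror 28); 0x101 is not.  */
}
#endif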

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (i, code)
     HOST_WIDE_INT i;
     enum rtx_code code;
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
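
/* For example, (PLUS ... -1) is accepted even though #-1 is not itself
   encodable, because the compiler can emit SUB rd, rn, #1 in place of
   ADD rd, rn, #-1; similarly AND accepts ~C because the operation can
   be done with BIC and the inverted constant.  */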

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required; it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (code, mode, val, target, source, subtargets)
     enum rtx_code code;
     enum machine_mode mode;
     HOST_WIDE_INT val;
     rtx target;
     rtx source;
     int subtargets;
{
  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
	  && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
	 constants by pushing them into memory so we must synthesise
	 them in-line, regardless of the cost.  This is only likely to
	 be more costly on chips that have load delay slots and we are
	 compiling without running the scheduler (so no splitting
	 occurred before the final instruction emission).

	 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
	  && (arm_gen_constant (code, mode, val, target, source, 1, 0)
	      > arm_constant_limit + (code != SET)))
	{
	  if (code == SET)
	    {
	      /* Currently SET is the only monadic value for CODE; all
		 the rest are dyadic.  */
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
	      return 1;
	    }
	  else
	    {
	      rtx temp = subtargets ? gen_reg_rtx (mode) : target;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
	      /* For MINUS, the value is subtracted from, since we never
		 have subtraction of a constant.  */
	      if (code == MINUS)
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx_MINUS (mode, temp, source)));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx (code, mode, source, temp)));
	      return 2;
	    }
	}
    }

  return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
}

static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;

  do
    {
      int end;

      if (i <= 0)
	i += 32;
      if (remainder & (3 << (i - 2)))
	{
	  end = i - 8;
	  if (end < 0)
	    end += 32;
	  temp1 = remainder & ((0x0ff << end)
			       | ((i < end) ? (0xff >> (32 - end)) : 0));
	  remainder &= ~temp1;
	  num_insns++;
	  i -= 6;
	}
      i -= 2;
    }
  while (remainder);

  return num_insns;
}
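
/* Note that the do/while above is a generate-nothing copy of the
   emission loop at the bottom of arm_gen_constant below; the two must
   be kept in step or the insn counts used to pick a starting bit will
   be wrong.  */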

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (code, mode, val, target, source, subtargets, generate)
     enum rtx_code code;
     enum machine_mode mode;
     HOST_WIDE_INT val;
     rtx target;
     rtx source;
     int subtargets;
     int generate;
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    GEN_INT (ARM_SIGN_EXTEND (val))));
	  return 1;
	}
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      break;

    case AND:
      if (remainder == 0)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_NOT (mode, source)));
	  return 1;
	}

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
	 passed as (source + (-val)).  */
      if (remainder == 0)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_NEG (mode, source)));
	  return 1;
	}
      if (const_ok_for_arm (val))
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_MINUS (mode, GEN_INT (val),
						   source)));
	  return 1;
	}
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
	emit_insn (gen_rtx_SET (VOIDmode, target,
				(source ? gen_rtx (code, mode, source,
						   GEN_INT (val))
				 : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
	clear_sign_bit_copies++;
      else
	break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
	set_sign_bit_copies++;
      else
	break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
	clear_zero_bit_copies++;
      else
	break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
	set_zero_bit_copies++;
      else
	break;
    }

  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
	 to be negative.  This is a good way of doing it, since the shift
	 may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
	{
	  if (const_ok_for_arm
	      (temp1 = ARM_SIGN_EXTEND (remainder
					<< (set_sign_bit_copies - 1))))
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  emit_insn (gen_rtx_SET (VOIDmode, new_src,
					  GEN_INT (temp1)));
		  emit_insn (gen_ashrsi3 (target, new_src,
					  GEN_INT (set_sign_bit_copies - 1)));
		}
	      return 2;
	    }
	  /* For an inverted constant, we will need to set the low bits;
	     these will be shifted out of harm's way.  */
	  temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
	  if (const_ok_for_arm (~temp1))
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  emit_insn (gen_rtx_SET (VOIDmode, new_src,
					  GEN_INT (temp1)));
		  emit_insn (gen_ashrsi3 (target, new_src,
					  GEN_INT (set_sign_bit_copies - 1)));
		}
	      return 2;
	    }
	}

      /* See if we can generate this by setting the bottom (or the top)
	 16 bits, and then shifting these into the other half of the
	 word.  We only look for the simplest cases; to do more would cost
	 too much.  Be careful, however, not to generate this when the
	 alternative would take fewer insns.  */
      if (val & 0xffff0000)
	{
	  temp1 = remainder & 0xffff0000;
	  temp2 = remainder & 0x0000ffff;

	  /* Overlaps outside this range are best done using other methods.  */
	  for (i = 9; i < 24; i++)
	    {
	      if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
		  && !const_ok_for_arm (temp2))
		{
		  rtx new_src = (subtargets
				 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
				 : target);
		  insns = arm_gen_constant (code, mode, temp2, new_src,
					    source, subtargets, generate);
		  source = new_src;
		  if (generate)
		    emit_insn (gen_rtx_SET
			       (VOIDmode, target,
				gen_rtx_IOR (mode,
					     gen_rtx_ASHIFT (mode, source,
							     GEN_INT (i)),
					     source)));
		  return insns + 1;
		}
	    }

	  /* Don't duplicate cases already considered.  */
	  for (i = 17; i < 24; i++)
	    {
	      if (((temp1 | (temp1 >> i)) == remainder)
		  && !const_ok_for_arm (temp1))
		{
		  rtx new_src = (subtargets
				 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
				 : target);
		  insns = arm_gen_constant (code, mode, temp1, new_src,
					    source, subtargets, generate);
		  source = new_src;
		  if (generate)
		    emit_insn
		      (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_IOR
				    (mode,
				     gen_rtx_LSHIFTRT (mode, source,
						       GEN_INT (i)),
				     source)));
		  return insns + 1;
		}
	    }
	}
      break;

    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
	 single instruction, and we can find a temporary to put it in,
	 then this can be done in two instructions instead of 3-4.  */
      if (subtargets
	  /* TARGET can't be NULL if SUBTARGETS is 0.  */
	  || (reload_completed && !reg_mentioned_p (target, source)))
	{
	  if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
	    {
	      if (generate)
		{
		  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

		  emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
		  emit_insn (gen_rtx_SET (VOIDmode, target,
					  gen_rtx (code, mode, source, sub)));
		}
	      return 2;
	    }
	}

      if (code == XOR)
	break;

      if (set_sign_bit_copies > 8
	  && (val & (-1 << (32 - set_sign_bit_copies))) == val)
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (set_sign_bit_copies);

	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode,
						   gen_rtx_ASHIFT (mode,
								   source,
								   shift))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode,
						   gen_rtx_LSHIFTRT (mode, sub,
								     shift))));
	    }
	  return 2;
	}

      if (set_zero_bit_copies > 8
	  && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (set_zero_bit_copies);

	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode,
						   gen_rtx_LSHIFTRT (mode,
								     source,
								     shift))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode,
						   gen_rtx_ASHIFT (mode, sub,
								   shift))));
	    }
	  return 2;
	}

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode, source)));
	      source = sub;
	      if (subtargets)
		sub = gen_reg_rtx (mode);
	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_AND (mode, source,
						   GEN_INT (temp1))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode, sub)));
	    }
	  return 3;
	}
      break;

    case AND:
      /* See if two shifts will do 2 or more insns' worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
	{
	  HOST_WIDE_INT shift_mask = ((0xffffffff
				       << (32 - clear_sign_bit_copies))
				      & 0xffffffff);

	  if ((remainder | shift_mask) != 0xffffffff)
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    new_src, source, subtargets, 1);
		  source = new_src;
		}
	      else
		{
		  rtx targ = subtargets ? NULL_RTX : target;
		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    targ, source, subtargets, 0);
		}
	    }

	  if (generate)
	    {
	      rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (clear_sign_bit_copies);

	      emit_insn (gen_ashlsi3 (new_src, source, shift));
	      emit_insn (gen_lshrsi3 (target, new_src, shift));
	    }

	  return insns + 2;
	}

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
	{
	  HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

	  if ((remainder | shift_mask) != 0xffffffff)
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    new_src, source, subtargets, 1);
		  source = new_src;
		}
	      else
		{
		  rtx targ = subtargets ? NULL_RTX : target;

		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    targ, source, subtargets, 0);
		}
	    }

	  if (generate)
	    {
	      rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (clear_zero_bit_copies);

	      emit_insn (gen_lshrsi3 (new_src, source, shift));
	      emit_insn (gen_ashlsi3 (target, new_src, shift));
	    }

	  return insns + 2;
	}

      break;

    default:
      break;
    }

  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

  if (code == AND || (can_invert && num_bits_set > 16))
    remainder = (~remainder) & 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try and find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary; we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.  */
  {
    int best_start = 0;
    int best_consecutive_zeros = 0;

    for (i = 0; i < 32; i += 2)
      {
	int consecutive_zeros = 0;

	if (!(remainder & (3 << i)))
	  {
	    while ((i < 32) && !(remainder & (3 << i)))
	      {
		consecutive_zeros += 2;
		i += 2;
	      }
	    if (consecutive_zeros > best_consecutive_zeros)
	      {
		best_consecutive_zeros = consecutive_zeros;
		best_start = i - consecutive_zeros;
	      }
	    i -= 2;
	  }
      }

    /* So long as it won't require any more insns to do so, it's
       desirable to emit a small constant (in bits 0...9) in the last
       insn.  This way there is more chance that it can be combined with
       a later addressing insn to form a pre-indexed load or store
       operation.  Consider:

	       *((volatile int *)0xe0000100) = 1;
	       *((volatile int *)0xe0000110) = 2;

       We want this to wind up as:

		mov rA, #0xe0000000
		mov rB, #1
		str rB, [rA, #0x100]
		mov rB, #2
		str rB, [rA, #0x110]

       rather than having to synthesize both large constants from scratch.

       Therefore, we calculate how many insns would be required to emit
       the constant starting from `best_start', and also starting from
       zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
       yield a shorter sequence, we may as well use zero.  */
    if (best_start != 0
	&& ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
	&& (count_insns_for_constant (remainder, 0) <=
	    count_insns_for_constant (remainder, best_start)))
      best_start = 0;

    /* Now start emitting the insns.  */
    i = best_start;
    do
      {
	int end;

	if (i <= 0)
	  i += 32;
	if (remainder & (3 << (i - 2)))
	  {
	    end = i - 8;
	    if (end < 0)
	      end += 32;
	    temp1 = remainder & ((0x0ff << end)
				 | ((i < end) ? (0xff >> (32 - end)) : 0));
	    remainder &= ~temp1;

	    if (generate)
	      {
		rtx new_src, temp1_rtx;

		if (code == SET || code == MINUS)
		  {
		    new_src = (subtargets ? gen_reg_rtx (mode) : target);
		    if (can_invert && code != MINUS)
		      temp1 = ~temp1;
		  }
		else
		  {
		    if (remainder && subtargets)
		      new_src = gen_reg_rtx (mode);
		    else
		      new_src = target;
		    if (can_invert)
		      temp1 = ~temp1;
		    else if (can_negate)
		      temp1 = -temp1;
		  }

		temp1 = trunc_int_for_mode (temp1, mode);
		temp1_rtx = GEN_INT (temp1);

		if (code == SET)
		  ;
		else if (code == MINUS)
		  temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
		else
		  temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);

		emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
		source = new_src;
	      }

	    if (code == SET)
	      {
		can_invert = 0;
		code = PLUS;
	      }
	    else if (code == MINUS)
	      code = PLUS;

	    insns++;
	    i -= 6;
	  }
	i -= 2;
      }
    while (remainder);
  }

  return insns;
}
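
/* As an example of the worst case above, (SET ... 0x12345678) is
   synthesized with four instructions, one 8-bit rotated field at a
   time, along the lines of:

	mov	rd, #0x12000000
	orr	rd, rd, #0x340000
	orr	rd, rd, #0x5600
	orr	rd, rd, #0x78  */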

/* Canonicalize a comparison so that we are more likely to recognize it.
   This can be done for a few constant compares, where we can make the
   immediate value easier to load.  */

enum rtx_code
arm_canonicalize_comparison (code, op1)
     enum rtx_code code;
     rtx * op1;
{
  unsigned HOST_WIDE_INT i = INTVAL (*op1);

  switch (code)
    {
    case EQ:
    case NE:
      return code;

    case GT:
    case LE:
      if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
	  && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
	{
	  *op1 = GEN_INT (i + 1);
	  return code == GT ? GE : LT;
	}
      break;

    case GE:
    case LT:
      if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
	  && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
	{
	  *op1 = GEN_INT (i - 1);
	  return code == GE ? GT : LE;
	}
      break;

    case GTU:
    case LEU:
      if (i != ~((unsigned HOST_WIDE_INT) 0)
	  && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
	{
	  *op1 = GEN_INT (i + 1);
	  return code == GTU ? GEU : LTU;
	}
      break;

    case GEU:
    case LTU:
      if (i != 0
	  && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
	{
	  *op1 = GEN_INT (i - 1);
	  return code == GEU ? GTU : LEU;
	}
      break;

    default:
      abort ();
    }

  return code;
}
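
/* For example, (GT x #0xffffff) would need the unencodable constant
   0xffffff, but adding one gives the equivalent (GE x #0x1000000), and
   1 << 24 is a valid rotated 8-bit immediate, so the cheaper form is
   chosen.  */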
1750
1751 /* Decide whether a type should be returned in memory (true)
1752 or in a register (false). This is called by the macro
1753 RETURN_IN_MEMORY. */
1754
1755 int
1756 arm_return_in_memory (type)
1757 tree type;
1758 {
1759 if (!AGGREGATE_TYPE_P (type))
1760 /* All simple types are returned in registers. */
1761 return 0;
1762
1763 /* For the arm-wince targets we choose to be compitable with Microsoft's
1764 ARM and Thumb compilers, which always return aggregates in memory. */
1765 #ifndef ARM_WINCE
1766 /* All structures/unions bigger than one word are returned in memory.
1767 Also catch the case where int_size_in_bytes returns -1. In this case
1768 the aggregate is either huge or of varaible size, and in either case
1769 we will want to return it via memory and not in a register. */
1770 if (((unsigned int) int_size_in_bytes (type)) > UNITS_PER_WORD)
1771 return 1;
1772
1773 if (TREE_CODE (type) == RECORD_TYPE)
1774 {
1775 tree field;
1776
1777 /* For a struct the APCS says that we only return in a register
1778 if the type is 'integer like' and every addressable element
1779 has an offset of zero. For practical purposes this means
1780 that the structure can have at most one non bit-field element
1781 and that this element must be the first one in the structure. */
1782
1783 /* Find the first field, ignoring non FIELD_DECL things which will
1784 have been created by C++. */
1785 for (field = TYPE_FIELDS (type);
1786 field && TREE_CODE (field) != FIELD_DECL;
1787 field = TREE_CHAIN (field))
1788 continue;
1789
1790 if (field == NULL)
1791 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1792
1793 /* Check that the first field is valid for returning in a register. */
1794
1795 /* ... Floats are not allowed */
1796 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1797 return 1;
1798
1799 /* ... Aggregates that are not themselves valid for returning in
1800 a register are not allowed. */
1801 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1802 return 1;
1803
1804 /* Now check the remaining fields, if any. Only bitfields are allowed,
1805 since they are not addressable. */
1806 for (field = TREE_CHAIN (field);
1807 field;
1808 field = TREE_CHAIN (field))
1809 {
1810 if (TREE_CODE (field) != FIELD_DECL)
1811 continue;
1812
1813 if (!DECL_BIT_FIELD_TYPE (field))
1814 return 1;
1815 }
1816
1817 return 0;
1818 }
1819
1820 if (TREE_CODE (type) == UNION_TYPE)
1821 {
1822 tree field;
1823
1824 /* Unions can be returned in registers if every element is
1825 integral, or can be returned in an integer register. */
1826 for (field = TYPE_FIELDS (type);
1827 field;
1828 field = TREE_CHAIN (field))
1829 {
1830 if (TREE_CODE (field) != FIELD_DECL)
1831 continue;
1832
1833 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1834 return 1;
1835
1836 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1837 return 1;
1838 }
1839
1840 return 0;
1841 }
1842 #endif /* not ARM_WINCE */
1843
1844 /* Return all other types in memory. */
1845 return 1;
1846 }
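
/* Illustrative declarations (a sketch; not part of the build) showing
   how the APCS rules above classify return values on non-wince targets:  */
#if 0
struct one_word { int i; };             /* integer-like: in a register  */
struct two_words { int i; int j; };     /* wider than a word: memory    */
struct has_float { float f; };          /* FP first field: memory       */
union int_union { int i; unsigned u; }; /* all integral: register       */
#endif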
1847
1848 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1849 for a call to a function whose data type is FNTYPE.
1850 For a library call, FNTYPE is NULL. */
1851 void
1852 arm_init_cumulative_args (pcum, fntype, libname, indirect)
1853 CUMULATIVE_ARGS * pcum;
1854 tree fntype;
1855 rtx libname ATTRIBUTE_UNUSED;
1856 int indirect ATTRIBUTE_UNUSED;
1857 {
1858 /* On the ARM, the offset starts at 0. */
1859 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype))) ? 1 : 0);
1860
1861 pcum->call_cookie = CALL_NORMAL;
1862
1863 if (TARGET_LONG_CALLS)
1864 pcum->call_cookie = CALL_LONG;
1865
1866 /* Check for long call/short call attributes. The attributes
1867 override any command line option. */
1868 if (fntype)
1869 {
1870 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
1871 pcum->call_cookie = CALL_SHORT;
1872 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
1873 pcum->call_cookie = CALL_LONG;
1874 }
1875 }
1876
1877 /* Determine where to put an argument to a function.
1878 Value is zero to push the argument on the stack,
1879 or a hard register in which to store the argument.
1880
1881 MODE is the argument's machine mode.
1882 TYPE is the data type of the argument (as a tree).
1883 This is null for libcalls where that information may
1884 not be available.
1885 CUM is a variable of type CUMULATIVE_ARGS which gives info about
1886 the preceding args and about the function being called.
1887 NAMED is nonzero if this argument is a named parameter
1888 (otherwise it is an extra parameter matching an ellipsis). */
1889
1890 rtx
1891 arm_function_arg (pcum, mode, type, named)
1892 CUMULATIVE_ARGS * pcum;
1893 enum machine_mode mode;
1894 tree type ATTRIBUTE_UNUSED;
1895 int named;
1896 {
1897 if (mode == VOIDmode)
1898 /* Compute operand 2 of the call insn. */
1899 return GEN_INT (pcum->call_cookie);
1900
1901 if (!named || pcum->nregs >= NUM_ARG_REGS)
1902 return NULL_RTX;
1903
1904 return gen_rtx_REG (mode, pcum->nregs);
1905 }
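
/* Sketch of the resulting convention, assuming NUM_ARG_REGS is 4 (r0-r3)
   and no aggregate return value: for

       extern int f (int a, int b, int c, int d, int e);

   a..d are assigned r0..r3 by successive calls to this function, while
   the query for e sees pcum->nregs >= NUM_ARG_REGS and returns NULL_RTX,
   so e is pushed on the stack.  */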
1906 \f
1907 /* Encode the current state of the #pragma [no_]long_calls. */
1908 typedef enum
1909 {
1910 OFF, /* No #pragma [no_]long_calls is in effect. */
1911 LONG, /* #pragma long_calls is in effect. */
1912 SHORT /* #pragma no_long_calls is in effect. */
1913 } arm_pragma_enum;
1914
1915 static arm_pragma_enum arm_pragma_long_calls = OFF;
1916
1917 void
1918 arm_pr_long_calls (pfile)
1919 cpp_reader * pfile ATTRIBUTE_UNUSED;
1920 {
1921 arm_pragma_long_calls = LONG;
1922 }
1923
1924 void
1925 arm_pr_no_long_calls (pfile)
1926 cpp_reader * pfile ATTRIBUTE_UNUSED;
1927 {
1928 arm_pragma_long_calls = SHORT;
1929 }
1930
1931 void
1932 arm_pr_long_calls_off (pfile)
1933 cpp_reader * pfile ATTRIBUTE_UNUSED;
1934 {
1935 arm_pragma_long_calls = OFF;
1936 }
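
/* Example source (illustrative only) that exercises the three handlers:  */
#if 0
#pragma long_calls
extern void far_func (void);    /* will get a long_call attribute   */
#pragma no_long_calls
extern void near_func (void);   /* will get a short_call attribute  */
#pragma long_calls_off
extern void plain_func (void);  /* back to the command-line default */
#endif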
1937 \f
1938 /* Table of machine attributes. */
1939 const struct attribute_spec arm_attribute_table[] =
1940 {
1941 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1942 /* Function calls made to this symbol must be done indirectly, because
1943 it may lie outside of the 26 bit addressing range of a normal function
1944 call. */
1945 { "long_call", 0, 0, false, true, true, NULL },
1946 /* Whereas these functions are always known to reside within the 26 bit
1947 addressing range. */
1948 { "short_call", 0, 0, false, true, true, NULL },
1949 /* Interrupt Service Routines have special prologue and epilogue requirements. */
1950 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
1951 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
1952 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
1953 #ifdef ARM_PE
1954 /* ARM/PE has three new attributes:
1955 interfacearm - ?
1956 dllexport - for exporting a function/variable that will live in a dll
1957 dllimport - for importing a function/variable from a dll
1958
1959 Microsoft allows multiple declspecs in one __declspec, separating
1960 them with spaces. We do NOT support this. Instead, use __declspec
1961 multiple times.
1962 */
1963 { "dllimport", 0, 0, true, false, false, NULL },
1964 { "dllexport", 0, 0, true, false, false, NULL },
1965 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
1966 #endif
1967 { NULL, 0, 0, false, false, false, NULL }
1968 };
1969
1970 /* Handle an attribute requiring a FUNCTION_DECL;
1971 arguments as in struct attribute_spec.handler. */
1972
1973 static tree
1974 arm_handle_fndecl_attribute (node, name, args, flags, no_add_attrs)
1975 tree * node;
1976 tree name;
1977 tree args ATTRIBUTE_UNUSED;
1978 int flags ATTRIBUTE_UNUSED;
1979 bool * no_add_attrs;
1980 {
1981 if (TREE_CODE (*node) != FUNCTION_DECL)
1982 {
1983 warning ("`%s' attribute only applies to functions",
1984 IDENTIFIER_POINTER (name));
1985 *no_add_attrs = true;
1986 }
1987
1988 return NULL_TREE;
1989 }
1990
1991 /* Handle an "interrupt" or "isr" attribute;
1992 arguments as in struct attribute_spec.handler. */
1993
1994 static tree
1995 arm_handle_isr_attribute (node, name, args, flags, no_add_attrs)
1996 tree * node;
1997 tree name;
1998 tree args;
1999 int flags;
2000 bool * no_add_attrs;
2001 {
2002 if (DECL_P (*node))
2003 {
2004 if (TREE_CODE (*node) != FUNCTION_DECL)
2005 {
2006 warning ("`%s' attribute only applies to functions",
2007 IDENTIFIER_POINTER (name));
2008 *no_add_attrs = true;
2009 }
2010 /* FIXME: the argument, if any, is checked for type attributes;
2011 should it be checked for decl ones? */
2012 }
2013 else
2014 {
2015 if (TREE_CODE (*node) == FUNCTION_TYPE
2016 || TREE_CODE (*node) == METHOD_TYPE)
2017 {
2018 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2019 {
2020 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2021 *no_add_attrs = true;
2022 }
2023 }
2024 else if (TREE_CODE (*node) == POINTER_TYPE
2025 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2026 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2027 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2028 {
2029 *node = build_type_copy (*node);
2030 TREE_TYPE (*node) = build_type_attribute_variant
2031 (TREE_TYPE (*node),
2032 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2033 *no_add_attrs = true;
2034 }
2035 else
2036 {
2037 /* Possibly pass this attribute on from the type to a decl. */
2038 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2039 | (int) ATTR_FLAG_FUNCTION_NEXT
2040 | (int) ATTR_FLAG_ARRAY_NEXT))
2041 {
2042 *no_add_attrs = true;
2043 return tree_cons (name, args, NULL_TREE);
2044 }
2045 else
2046 {
2047 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2048 }
2049 }
2050 }
2051
2052 return NULL_TREE;
2053 }
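
/* Declarations of the kind the two handlers above accept (illustrative;
   the argument string, if given, must name an interrupt type the
   backend recognizes):  */
#if 0
void irq_handler (void) __attribute__ ((interrupt ("IRQ")));
void bare_entry (void) __attribute__ ((naked));
#endif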
2054
2055 /* Return 0 if the attributes for two types are incompatible, 1 if they
2056 are compatible, and 2 if they are nearly compatible (which causes a
2057 warning to be generated). */
2058
2059 static int
2060 arm_comp_type_attributes (type1, type2)
2061 tree type1;
2062 tree type2;
2063 {
2064 int l1, l2, s1, s2;
2065
2066 /* Check for mismatch of non-default calling convention. */
2067 if (TREE_CODE (type1) != FUNCTION_TYPE)
2068 return 1;
2069
2070 /* Check for mismatched call attributes. */
2071 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2072 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2073 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2074 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2075
2076 /* Only bother to check if an attribute is defined. */
2077 if (l1 | l2 | s1 | s2)
2078 {
2079 /* If one type has an attribute, the other must have the same attribute. */
2080 if ((l1 != l2) || (s1 != s2))
2081 return 0;
2082
2083 /* Disallow mixed attributes. */
2084 if ((l1 & s2) || (l2 & s1))
2085 return 0;
2086 }
2087
2088 /* Check for mismatched ISR attribute. */
2089 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2090 if (! l1)
2091 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2092 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2093 if (! l2)
2094 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2095 if (l1 != l2)
2096 return 0;
2097
2098 return 1;
2099 }
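
/* For instance (a sketch), these two function pointer types compare as
   incompatible under the checks above, since exactly one of them
   carries long_call:  */
#if 0
typedef void (*long_fn) (void) __attribute__ ((long_call));
typedef void (*plain_fn) (void);
#endif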
2100
2101 /* Encode long_call or short_call attribute by prefixing
2102 symbol name in DECL with a special character FLAG. */
2103
2104 void
2105 arm_encode_call_attribute (decl, flag)
2106 tree decl;
2107 int flag;
2108 {
2109 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2110 int len = strlen (str);
2111 char * newstr;
2112
2113 /* Do not allow weak functions to be treated as short call. */
2114 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2115 return;
2116
2117 newstr = alloca (len + 2);
2118 newstr[0] = flag;
2119 strcpy (newstr + 1, str);
2120
2121 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2122 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2123 }
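
/* For example, with FLAG equal to LONG_CALL_FLAG_CHAR the assembler name
   "foo" is rewritten as the flag character followed by "foo"; the prefix
   is later recognized by ENCODED_LONG_CALL_ATTR_P.  The concrete flag
   characters are defined in the target header.  */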
2124
2125 /* Assigns default attributes to a newly defined type. This is used to
2126 set short_call/long_call attributes for function types of
2127 functions defined inside corresponding #pragma scopes. */
2128
2129 static void
2130 arm_set_default_type_attributes (type)
2131 tree type;
2132 {
2133 /* Add __attribute__ ((long_call)) to all functions when inside
2134 #pragma long_calls, or __attribute__ ((short_call)) when inside
2135 #pragma no_long_calls. */
2136 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2137 {
2138 tree type_attr_list, attr_name;
2139 type_attr_list = TYPE_ATTRIBUTES (type);
2140
2141 if (arm_pragma_long_calls == LONG)
2142 attr_name = get_identifier ("long_call");
2143 else if (arm_pragma_long_calls == SHORT)
2144 attr_name = get_identifier ("short_call");
2145 else
2146 return;
2147
2148 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2149 TYPE_ATTRIBUTES (type) = type_attr_list;
2150 }
2151 }
2152 \f
2153 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2154 defined within the current compilation unit. If this cannot be
2155 determined, then 0 is returned. */
2156
2157 static int
2158 current_file_function_operand (sym_ref)
2159 rtx sym_ref;
2160 {
2161 /* This is a bit of a fib. A function will have a short call flag
2162 applied to its name if it has the short call attribute, or it has
2163 already been defined within the current compilation unit. */
2164 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2165 return 1;
2166
2167 /* The current function is always defined within the current compilation
2168 unit. If it is a weak definition, however, then this may not be the real
2169 definition of the function, and so we have to say no. */
2170 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2171 && !DECL_WEAK (current_function_decl))
2172 return 1;
2173
2174 /* We cannot make the determination - default to returning 0. */
2175 return 0;
2176 }
2177
2178 /* Return non-zero if a 32 bit "long_call" should be generated for
2179 this call. We generate a long_call if the function:
2180
2181 a. has an __attribute__ ((long_call))
2182 or b. is within the scope of a #pragma long_calls
2183 or c. the -mlong-calls command line switch has been specified
2184
2185 However we do not generate a long call if the function:
2186
2187 d. has an __attribute__ ((short_call))
2188 or e. is inside the scope of a #pragma no_long_calls
2189 or f. has an __attribute__ ((section))
2190 or g. is defined within the current compilation unit.
2191
2192 This function will be called by C fragments contained in the machine
2193 description file. CALL_REF and CALL_COOKIE correspond to the matched
2194 rtl operands. CALL_SYMBOL is used to distinguish between
2195 two different callers of the function. It is set to 1 in the
2196 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2197 and "call_value" patterns. This is because of the difference in the
2198 SYM_REFs passed by these patterns. */
2199
2200 int
2201 arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
2202 rtx sym_ref;
2203 int call_cookie;
2204 int call_symbol;
2205 {
2206 if (!call_symbol)
2207 {
2208 if (GET_CODE (sym_ref) != MEM)
2209 return 0;
2210
2211 sym_ref = XEXP (sym_ref, 0);
2212 }
2213
2214 if (GET_CODE (sym_ref) != SYMBOL_REF)
2215 return 0;
2216
2217 if (call_cookie & CALL_SHORT)
2218 return 0;
2219
2220 if (TARGET_LONG_CALLS && flag_function_sections)
2221 return 1;
2222
2223 if (current_file_function_operand (sym_ref))
2224 return 0;
2225
2226 return (call_cookie & CALL_LONG)
2227 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2228 || TARGET_LONG_CALLS;
2229 }
2230
2231 /* Return non-zero if it is ok to make a tail-call to DECL. */
2232
2233 int
2234 arm_function_ok_for_sibcall (decl)
2235 tree decl;
2236 {
2237 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2238
2239 /* Never tailcall something for which we have no decl, or if we
2240 are in Thumb mode. */
2241 if (decl == NULL || TARGET_THUMB)
2242 return 0;
2243
2244 /* Get the calling method. */
2245 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2246 call_type = CALL_SHORT;
2247 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2248 call_type = CALL_LONG;
2249
2250 /* Cannot tail-call to long calls, since these are out of range of
2251 a branch instruction. However, if not compiling PIC, we know
2252 we can reach the symbol if it is in this compilation unit. */
2253 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2254 return 0;
2255
2256 /* If we are interworking and the function is not declared static
2257 then we can't tail-call it unless we know that it exists in this
2258 compilation unit (since it might be a Thumb routine). */
2259 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2260 return 0;
2261
2262 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2263 if (IS_INTERRUPT (arm_current_func_type ()))
2264 return 0;
2265
2266 /* Everything else is ok. */
2267 return 1;
2268 }
2269
2270 \f
2271 int
2272 legitimate_pic_operand_p (x)
2273 rtx x;
2274 {
2275 if (CONSTANT_P (x)
2276 && flag_pic
2277 && (GET_CODE (x) == SYMBOL_REF
2278 || (GET_CODE (x) == CONST
2279 && GET_CODE (XEXP (x, 0)) == PLUS
2280 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2281 return 0;
2282
2283 return 1;
2284 }
2285
2286 rtx
2287 legitimize_pic_address (orig, mode, reg)
2288 rtx orig;
2289 enum machine_mode mode;
2290 rtx reg;
2291 {
2292 if (GET_CODE (orig) == SYMBOL_REF
2293 || GET_CODE (orig) == LABEL_REF)
2294 {
2295 #ifndef AOF_ASSEMBLER
2296 rtx pic_ref, address;
2297 #endif
2298 rtx insn;
2299 int subregs = 0;
2300
2301 if (reg == 0)
2302 {
2303 if (no_new_pseudos)
2304 abort ();
2305 else
2306 reg = gen_reg_rtx (Pmode);
2307
2308 subregs = 1;
2309 }
2310
2311 #ifdef AOF_ASSEMBLER
2312 /* The AOF assembler can generate relocations for these directly, and
2313 understands that the PIC register has to be added into the offset. */
2314 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2315 #else
2316 if (subregs)
2317 address = gen_reg_rtx (Pmode);
2318 else
2319 address = reg;
2320
2321 if (TARGET_ARM)
2322 emit_insn (gen_pic_load_addr_arm (address, orig));
2323 else
2324 emit_insn (gen_pic_load_addr_thumb (address, orig));
2325
2326 if ((GET_CODE (orig) == LABEL_REF
2327 || (GET_CODE (orig) == SYMBOL_REF &&
2328 ENCODED_SHORT_CALL_ATTR_P (XSTR (orig, 0))))
2329 && NEED_GOT_RELOC)
2330 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2331 else
2332 {
2333 pic_ref = gen_rtx_MEM (Pmode,
2334 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2335 address));
2336 RTX_UNCHANGING_P (pic_ref) = 1;
2337 }
2338
2339 insn = emit_move_insn (reg, pic_ref);
2340 #endif
2341 current_function_uses_pic_offset_table = 1;
2342 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2343 by loop. */
2344 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2345 REG_NOTES (insn));
2346 return reg;
2347 }
2348 else if (GET_CODE (orig) == CONST)
2349 {
2350 rtx base, offset;
2351
2352 if (GET_CODE (XEXP (orig, 0)) == PLUS
2353 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2354 return orig;
2355
2356 if (reg == 0)
2357 {
2358 if (no_new_pseudos)
2359 abort ();
2360 else
2361 reg = gen_reg_rtx (Pmode);
2362 }
2363
2364 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2365 {
2366 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2367 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2368 base == reg ? 0 : reg);
2369 }
2370 else
2371 abort ();
2372
2373 if (GET_CODE (offset) == CONST_INT)
2374 {
2375 /* The base register doesn't really matter, we only want to
2376 test the index for the appropriate mode. */
2377 ARM_GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
2378
2379 if (!no_new_pseudos)
2380 offset = force_reg (Pmode, offset);
2381 else
2382 abort ();
2383
2384 win:
2385 if (GET_CODE (offset) == CONST_INT)
2386 return plus_constant (base, INTVAL (offset));
2387 }
2388
2389 if (GET_MODE_SIZE (mode) > 4
2390 && (GET_MODE_CLASS (mode) == MODE_INT
2391 || TARGET_SOFT_FLOAT))
2392 {
2393 emit_insn (gen_addsi3 (reg, base, offset));
2394 return reg;
2395 }
2396
2397 return gen_rtx_PLUS (Pmode, base, offset);
2398 }
2399
2400 return orig;
2401 }
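
/* Rough shape of the ARM-state code emitted above for a global symbol
   (register names are illustrative, not what the allocator will pick):

       ldr   r3, .Lpool      @ gen_pic_load_addr_arm: GOT offset of the symbol
       ldr   r3, [rPIC, r3]  @ the wrapping MEM: fetch the address via the GOT

   Label refs and short-call symbols skip the GOT load; their offsets are
   simply added to the PIC register.  */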
2402
2403 /* Generate code to load the PIC register. PROLOGUE is true if
2404 called from arm_expand_prologue (in which case we want the
2405 generated insns at the start of the function); false if called
2406 by an exception receiver that needs the PIC register reloaded
2407 (in which case the insns are just dumped at the current location). */
2408
2409 void
2410 arm_finalize_pic (prologue)
2411 int prologue ATTRIBUTE_UNUSED;
2412 {
2413 #ifndef AOF_ASSEMBLER
2414 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2415 rtx global_offset_table;
2416
2417 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2418 return;
2419
2420 if (!flag_pic)
2421 abort ();
2422
2423 start_sequence ();
2424 l1 = gen_label_rtx ();
2425
2426 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2427 /* On the ARM the PC register contains 'dot + 8' at the time of the
2428 addition, on the Thumb it is 'dot + 4'. */
2429 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2430 if (GOT_PCREL)
2431 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2432 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2433 else
2434 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2435
2436 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2437
2438 if (TARGET_ARM)
2439 {
2440 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2441 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2442 }
2443 else
2444 {
2445 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2446 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2447 }
2448
2449 seq = gen_sequence ();
2450 end_sequence ();
2451 if (prologue)
2452 emit_insn_after (seq, get_insns ());
2453 else
2454 emit_insn (seq);
2455
2456 /* Need to emit this whether or not we obey regdecls,
2457 since setjmp/longjmp can cause life info to screw up. */
2458 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2459 #endif /* AOF_ASSEMBLER */
2460 }
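
/* The sequence constructed above assembles to something like this in ARM
   state (a sketch; the pipeline offset of 8 is why pic_tmp adds 8):

           ldr  rPIC, .Lc        @ constant: _GLOBAL_OFFSET_TABLE_ - (.L1 + 8)
       .L1: add rPIC, pc, rPIC   @ pc reads as .L1 + 8 here, giving the GOT base

   On Thumb the same scheme is used with a pipeline offset of 4.  */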
2461
2462 #define REG_OR_SUBREG_REG(X) \
2463 (GET_CODE (X) == REG \
2464 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
2465
2466 #define REG_OR_SUBREG_RTX(X) \
2467 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
2468
2469 #ifndef COSTS_N_INSNS
2470 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
2471 #endif
2472
2473 int
2474 arm_rtx_costs (x, code, outer)
2475 rtx x;
2476 enum rtx_code code;
2477 enum rtx_code outer;
2478 {
2479 enum machine_mode mode = GET_MODE (x);
2480 enum rtx_code subcode;
2481 int extra_cost;
2482
2483 if (TARGET_THUMB)
2484 {
2485 switch (code)
2486 {
2487 case ASHIFT:
2488 case ASHIFTRT:
2489 case LSHIFTRT:
2490 case ROTATERT:
2491 case PLUS:
2492 case MINUS:
2493 case COMPARE:
2494 case NEG:
2495 case NOT:
2496 return COSTS_N_INSNS (1);
2497
2498 case MULT:
2499 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2500 {
2501 int cycles = 0;
2502 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
2503
2504 while (i)
2505 {
2506 i >>= 2;
2507 cycles++;
2508 }
2509 return COSTS_N_INSNS (2) + cycles;
2510 }
2511 return COSTS_N_INSNS (1) + 16;
2512
2513 case SET:
2514 return (COSTS_N_INSNS (1)
2515 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
2516 + (GET_CODE (SET_DEST (x)) == MEM)));
2517
2518 case CONST_INT:
2519 if (outer == SET)
2520 {
2521 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
2522 return 0;
2523 if (thumb_shiftable_const (INTVAL (x)))
2524 return COSTS_N_INSNS (2);
2525 return COSTS_N_INSNS (3);
2526 }
2527 else if (outer == PLUS
2528 && INTVAL (x) < 256 && INTVAL (x) > -256)
2529 return 0;
2530 else if (outer == COMPARE
2531 && (unsigned HOST_WIDE_INT) INTVAL (x) < 256)
2532 return 0;
2533 else if (outer == ASHIFT || outer == ASHIFTRT
2534 || outer == LSHIFTRT)
2535 return 0;
2536 return COSTS_N_INSNS (2);
2537
2538 case CONST:
2539 case CONST_DOUBLE:
2540 case LABEL_REF:
2541 case SYMBOL_REF:
2542 return COSTS_N_INSNS (3);
2543
2544 case UDIV:
2545 case UMOD:
2546 case DIV:
2547 case MOD:
2548 return 100;
2549
2550 case TRUNCATE:
2551 return 99;
2552
2553 case AND:
2554 case XOR:
2555 case IOR:
2556 /* XXX guess. */
2557 return 8;
2558
2559 case ADDRESSOF:
2560 case MEM:
2561 /* XXX another guess. */
2562 /* Memory costs quite a lot for the first word, but subsequent words
2563 load at the equivalent of a single insn each. */
2564 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
2565 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
2566 ? 4 : 0));
2567
2568 case IF_THEN_ELSE:
2569 /* XXX a guess. */
2570 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
2571 return 14;
2572 return 2;
2573
2574 case ZERO_EXTEND:
2575 /* XXX still guessing. */
2576 switch (GET_MODE (XEXP (x, 0)))
2577 {
2578 case QImode:
2579 return (1 + (mode == DImode ? 4 : 0)
2580 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2581
2582 case HImode:
2583 return (4 + (mode == DImode ? 4 : 0)
2584 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2585
2586 case SImode:
2587 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2588
2589 default:
2590 return 99;
2591 }
2592
2593 default:
2594 return 99;
2595 #if 0
2596 case FFS:
2597 case FLOAT:
2598 case FIX:
2599 case UNSIGNED_FIX:
2600 /* XXX guess */
2601 fprintf (stderr, "unexpected code for thumb in rtx_costs: %s\n",
2602 rtx_name[code]);
2603 abort ();
2604 #endif
2605 }
2606 }
2607
2608 switch (code)
2609 {
2610 case MEM:
2611 /* Memory costs quite a lot for the first word, but subsequent words
2612 load at the equivalent of a single insn each. */
2613 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
2614 + (GET_CODE (x) == SYMBOL_REF
2615 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
2616
2617 case DIV:
2618 case MOD:
2619 return 100;
2620
2621 case ROTATE:
2622 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
2623 return 4;
2624 /* Fall through */
2625 case ROTATERT:
2626 if (mode != SImode)
2627 return 8;
2628 /* Fall through */
2629 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
2630 if (mode == DImode)
2631 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
2632 + ((GET_CODE (XEXP (x, 0)) == REG
2633 || (GET_CODE (XEXP (x, 0)) == SUBREG
2634 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
2635 ? 0 : 8));
2636 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
2637 || (GET_CODE (XEXP (x, 0)) == SUBREG
2638 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
2639 ? 0 : 4)
2640 + ((GET_CODE (XEXP (x, 1)) == REG
2641 || (GET_CODE (XEXP (x, 1)) == SUBREG
2642 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
2643 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
2644 ? 0 : 4));
2645
2646 case MINUS:
2647 if (mode == DImode)
2648 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
2649 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
2650 || (GET_CODE (XEXP (x, 0)) == CONST_INT
2651 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
2652 ? 0 : 8));
2653
2654 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2655 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2656 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2657 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
2658 ? 0 : 8)
2659 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
2660 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
2661 && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
2662 ? 0 : 8));
2663
2664 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
2665 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
2666 && REG_OR_SUBREG_REG (XEXP (x, 1))))
2667 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
2668 || subcode == ASHIFTRT || subcode == LSHIFTRT
2669 || subcode == ROTATE || subcode == ROTATERT
2670 || (subcode == MULT
2671 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
2672 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
2673 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
2674 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
2675 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
2676 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
2677 && REG_OR_SUBREG_REG (XEXP (x, 0))))
2678 return 1;
2679 /* Fall through */
2680
2681 case PLUS:
2682 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2683 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
2684 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2685 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2686 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
2687 ? 0 : 8));
2688
2689 /* Fall through */
2690 case AND: case XOR: case IOR:
2691 extra_cost = 0;
2692
2693 /* Normally the frame registers will be spilt into reg+const during
2694 reload, so it is a bad idea to combine them with other instructions,
2695 since then they might not be moved outside of loops. As a compromise
2696 we allow integration with ops that have a constant as their second
2697 operand. */
2698 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
2699 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
2700 && GET_CODE (XEXP (x, 1)) != CONST_INT)
2701 || (REG_OR_SUBREG_REG (XEXP (x, 1))
2702 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
2703 extra_cost = 4;
2704
2705 if (mode == DImode)
2706 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
2707 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2708 || (GET_CODE (XEXP (x, 1)) == CONST_INT
2709 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
2710 ? 0 : 8));
2711
2712 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
2713 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
2714 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2715 || (GET_CODE (XEXP (x, 1)) == CONST_INT
2716 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
2717 ? 0 : 4));
2718
2719 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
2720 return (1 + extra_cost
2721 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
2722 || subcode == LSHIFTRT || subcode == ASHIFTRT
2723 || subcode == ROTATE || subcode == ROTATERT
2724 || (subcode == MULT
2725 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2726 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
2727 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
2728 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
2729 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
2730 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
2731 ? 0 : 4));
2732
2733 return 8;
2734
2735 case MULT:
2736 /* There is no point basing this on the tuning, since it is always the
2737 fast variant if it exists at all. */
2738 if (arm_fast_multiply && mode == DImode
2739 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
2740 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2741 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
2742 return 8;
2743
2744 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2745 || mode == DImode)
2746 return 30;
2747
2748 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2749 {
2750 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
2751 & (unsigned HOST_WIDE_INT) 0xffffffff);
2752 int add_cost = const_ok_for_arm (i) ? 4 : 8;
2753 int j;
2754
2755 /* Tune as appropriate. */
2756 int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
2757
2758 for (j = 0; i && j < 32; j += booth_unit_size)
2759 {
2760 i >>= booth_unit_size;
2761 add_cost += 2;
2762 }
2763
2764 return add_cost;
2765 }
2766
2767 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
2768 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
2769 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
2770
2771 case TRUNCATE:
2772 if (arm_fast_multiply && mode == SImode
2773 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
2774 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
2775 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
2776 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
2777 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
2778 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
2779 return 8;
2780 return 99;
2781
2782 case NEG:
2783 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2784 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
2785 /* Fall through */
2786 case NOT:
2787 if (mode == DImode)
2788 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
2789
2790 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
2791
2792 case IF_THEN_ELSE:
2793 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
2794 return 14;
2795 return 2;
2796
2797 case COMPARE:
2798 return 1;
2799
2800 case ABS:
2801 return 4 + (mode == DImode ? 4 : 0);
2802
2803 case SIGN_EXTEND:
2804 if (GET_MODE (XEXP (x, 0)) == QImode)
2805 return (4 + (mode == DImode ? 4 : 0)
2806 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2807 /* Fall through */
2808 case ZERO_EXTEND:
2809 switch (GET_MODE (XEXP (x, 0)))
2810 {
2811 case QImode:
2812 return (1 + (mode == DImode ? 4 : 0)
2813 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2814
2815 case HImode:
2816 return (4 + (mode == DImode ? 4 : 0)
2817 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2818
2819 case SImode:
2820 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2821
2822 default:
2823 break;
2824 }
2825 abort ();
2826
2827 case CONST_INT:
2828 if (const_ok_for_arm (INTVAL (x)))
2829 return outer == SET ? 2 : -1;
2830 else if (outer == AND
2831 && const_ok_for_arm (~INTVAL (x)))
2832 return -1;
2833 else if ((outer == COMPARE
2834 || outer == PLUS || outer == MINUS)
2835 && const_ok_for_arm (-INTVAL (x)))
2836 return -1;
2837 else
2838 return 5;
2839
2840 case CONST:
2841 case LABEL_REF:
2842 case SYMBOL_REF:
2843 return 6;
2844
2845 case CONST_DOUBLE:
2846 if (const_double_rtx_ok_for_fpu (x))
2847 return outer == SET ? 2 : -1;
2848 else if ((outer == COMPARE || outer == PLUS)
2849 && neg_const_double_rtx_ok_for_fpu (x))
2850 return -1;
2851 return 7;
2852
2853 default:
2854 return 99;
2855 }
2856 }
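
/* Worked Thumb example for the MULT-by-constant case above: multiplying
   by 0x55 shifts i right two bits at a time (0x55, 0x15, 0x5, 0x1, 0),
   giving cycles == 4; with the fallback COSTS_N_INSNS definition above
   (which tm.h may override) that is (2 * 4 - 2) + 4 == 10.  */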
2857
2858 static int
2859 arm_adjust_cost (insn, link, dep, cost)
2860 rtx insn;
2861 rtx link;
2862 rtx dep;
2863 int cost;
2864 {
2865 rtx i_pat, d_pat;
2866
2867 /* Some true dependencies can have a higher cost depending
2868 on precisely how certain input operands are used. */
2869 if (arm_is_xscale
2870 && REG_NOTE_KIND (link) == 0
2871 && recog_memoized (insn) >= 0
2872 && recog_memoized (dep) >= 0)
2873 {
2874 int shift_opnum = get_attr_shift (insn);
2875 enum attr_type attr_type = get_attr_type (dep);
2876
2877 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
2878 operand for INSN. If we have a shifted input operand and the
2879 instruction we depend on is another ALU instruction, then we may
2880 have to account for an additional stall. */
2881 if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
2882 {
2883 rtx shifted_operand;
2884 int opno;
2885
2886 /* Get the shifted operand. */
2887 extract_insn (insn);
2888 shifted_operand = recog_data.operand[shift_opnum];
2889
2890 /* Iterate over all the operands in DEP. If we write an operand
2891 that overlaps with SHIFTED_OPERAND, then we have to increase the
2892 cost of this dependency. */
2893 extract_insn (dep);
2894 preprocess_constraints ();
2895 for (opno = 0; opno < recog_data.n_operands; opno++)
2896 {
2897 /* We can ignore strict inputs. */
2898 if (recog_data.operand_type[opno] == OP_IN)
2899 continue;
2900
2901 if (reg_overlap_mentioned_p (recog_data.operand[opno],
2902 shifted_operand))
2903 return 2;
2904 }
2905 }
2906 }
2907
2908 /* XXX This is not strictly true for the FPA. */
2909 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
2910 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
2911 return 0;
2912
2913 /* Call insns don't incur a stall, even if they follow a load. */
2914 if (REG_NOTE_KIND (link) == 0
2915 && GET_CODE (insn) == CALL_INSN)
2916 return 1;
2917
2918 if ((i_pat = single_set (insn)) != NULL
2919 && GET_CODE (SET_SRC (i_pat)) == MEM
2920 && (d_pat = single_set (dep)) != NULL
2921 && GET_CODE (SET_DEST (d_pat)) == MEM)
2922 {
2923 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
2924 /* This is a load after a store, there is no conflict if the load reads
2925 from a cached area. Assume that loads from the stack, and from the
2926 constant pool are cached, and that others will miss. This is a
2927 hack. */
2928
2929 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
2930 || reg_mentioned_p (stack_pointer_rtx, src_mem)
2931 || reg_mentioned_p (frame_pointer_rtx, src_mem)
2932 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
2933 return 1;
2934 }
2935
2936 return cost;
2937 }
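
/* XScale example of the extra stall modelled above (illustrative):

       mov  r1, r5, lsr #3       @ DEP writes r1
       add  r0, r2, r1, lsl #2   @ INSN reads r1 in its shifter operand

   The result of DEP feeds the shifter of INSN, so the dependency cost is
   raised to 2 instead of the default.  */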
2938
2939 /* This code has been fixed for cross compilation. */
2940
2941 static int fpa_consts_inited = 0;
2942
2943 static const char * const strings_fpa[8] =
2944 {
2945 "0", "1", "2", "3",
2946 "4", "5", "0.5", "10"
2947 };
2948
2949 static REAL_VALUE_TYPE values_fpa[8];
2950
2951 static void
2952 init_fpa_table ()
2953 {
2954 int i;
2955 REAL_VALUE_TYPE r;
2956
2957 for (i = 0; i < 8; i++)
2958 {
2959 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
2960 values_fpa[i] = r;
2961 }
2962
2963 fpa_consts_inited = 1;
2964 }
2965
2966 /* Return TRUE if rtx X is a valid immediate FPU constant. */
2967
2968 int
2969 const_double_rtx_ok_for_fpu (x)
2970 rtx x;
2971 {
2972 REAL_VALUE_TYPE r;
2973 int i;
2974
2975 if (!fpa_consts_inited)
2976 init_fpa_table ();
2977
2978 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
2979 if (REAL_VALUE_MINUS_ZERO (r))
2980 return 0;
2981
2982 for (i = 0; i < 8; i++)
2983 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
2984 return 1;
2985
2986 return 0;
2987 }
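
/* So, for example, (const_double 2.0) and (const_double 0.5) satisfy the
   test above, while 0.25 and -0.0 do not; -1.0 also fails here but does
   satisfy the negated variant below.  */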
2988
2989 /* Return TRUE if rtx X is a valid immediate FPU constant when negated. */
2990
2991 int
2992 neg_const_double_rtx_ok_for_fpu (x)
2993 rtx x;
2994 {
2995 REAL_VALUE_TYPE r;
2996 int i;
2997
2998 if (!fpa_consts_inited)
2999 init_fpa_table ();
3000
3001 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3002 r = REAL_VALUE_NEGATE (r);
3003 if (REAL_VALUE_MINUS_ZERO (r))
3004 return 0;
3005
3006 for (i = 0; i < 8; i++)
3007 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3008 return 1;
3009
3010 return 0;
3011 }
3012 \f
3013 /* Predicates for `match_operand' and `match_operator'. */
3014
3015 /* s_register_operand is the same as register_operand, but it doesn't accept
3016 (SUBREG (MEM)...).
3017
3018 This function exists because at the time it was put in it led to better
3019 code. SUBREG(MEM) always needs a reload in the places where
3020 s_register_operand is used, and this seemed to lead to excessive
3021 reloading. */
3022
3023 int
3024 s_register_operand (op, mode)
3025 rtx op;
3026 enum machine_mode mode;
3027 {
3028 if (GET_MODE (op) != mode && mode != VOIDmode)
3029 return 0;
3030
3031 if (GET_CODE (op) == SUBREG)
3032 op = SUBREG_REG (op);
3033
3034 /* We don't consider registers whose class is NO_REGS
3035 to be a register operand. */
3036 /* XXX might have to check for lo regs only for thumb ??? */
3037 return (GET_CODE (op) == REG
3038 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3039 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3040 }
3041
3042 /* A hard register operand (even before reload). */
3043
3044 int
3045 arm_hard_register_operand (op, mode)
3046 rtx op;
3047 enum machine_mode mode;
3048 {
3049 if (GET_MODE (op) != mode && mode != VOIDmode)
3050 return 0;
3051
3052 return (GET_CODE (op) == REG
3053 && REGNO (op) < FIRST_PSEUDO_REGISTER);
3054 }
3055
3056 /* Only accept reg, subreg(reg), const_int. */
3057
3058 int
3059 reg_or_int_operand (op, mode)
3060 rtx op;
3061 enum machine_mode mode;
3062 {
3063 if (GET_CODE (op) == CONST_INT)
3064 return 1;
3065
3066 if (GET_MODE (op) != mode && mode != VOIDmode)
3067 return 0;
3068
3069 if (GET_CODE (op) == SUBREG)
3070 op = SUBREG_REG (op);
3071
3072 /* We don't consider registers whose class is NO_REGS
3073 to be a register operand. */
3074 return (GET_CODE (op) == REG
3075 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3076 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3077 }
3078
3079 /* Return 1 if OP is an item in memory, given that we are in reload. */
3080
3081 int
3082 arm_reload_memory_operand (op, mode)
3083 rtx op;
3084 enum machine_mode mode ATTRIBUTE_UNUSED;
3085 {
3086 int regno = true_regnum (op);
3087
3088 return (!CONSTANT_P (op)
3089 && (regno == -1
3090 || (GET_CODE (op) == REG
3091 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
3092 }
3093
3094 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
3095 memory access (architecture V4).
3096 MODE is QImode if called when computing constraints, or VOIDmode when
3097 emitting patterns. In this latter case we cannot use memory_operand()
3098 because it will fail on badly formed MEMs, which is precisely what we are
3099 trying to catch. */
3100
3101 int
3102 bad_signed_byte_operand (op, mode)
3103 rtx op;
3104 enum machine_mode mode ATTRIBUTE_UNUSED;
3105 {
3106 #if 0
3107 if ((mode == QImode && !memory_operand (op, mode)) || GET_CODE (op) != MEM)
3108 return 0;
3109 #endif
3110 if (GET_CODE (op) != MEM)
3111 return 0;
3112
3113 op = XEXP (op, 0);
3114
3115 /* A sum of anything more complex than reg + reg or reg + const is bad. */
3116 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3117 && (!s_register_operand (XEXP (op, 0), VOIDmode)
3118 || (!s_register_operand (XEXP (op, 1), VOIDmode)
3119 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
3120 return 1;
3121
3122 /* Big constants are also bad. */
3123 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
3124 && (INTVAL (XEXP (op, 1)) > 0xff
3125 || -INTVAL (XEXP (op, 1)) > 0xff))
3126 return 1;
3127
3128 /* Everything else is good, or will automatically be made so. */
3129 return 0;
3130 }
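
/* Illustrative results (V4 ldrsb has no reg+shifted-reg or long-offset
   addressing forms, hence the tests above):

       (mem (plus (reg) (const_int 0xff)))   -> 0  offset fits
       (mem (plus (reg) (const_int 0x100)))  -> 1  offset too large
       (mem (plus (reg) (mult (reg) ...)))   -> 1  scaled index not valid  */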
3131
3132 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
3133
3134 int
3135 arm_rhs_operand (op, mode)
3136 rtx op;
3137 enum machine_mode mode;
3138 {
3139 return (s_register_operand (op, mode)
3140 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
3141 }
3142
3143 /* Return TRUE for valid operands for the
3144 rhs of an ARM instruction, or a load. */
3145
3146 int
3147 arm_rhsm_operand (op, mode)
3148 rtx op;
3149 enum machine_mode mode;
3150 {
3151 return (s_register_operand (op, mode)
3152 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
3153 || memory_operand (op, mode));
3154 }
3155
3156 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
3157 constant that is valid when negated. */
3158
3159 int
3160 arm_add_operand (op, mode)
3161 rtx op;
3162 enum machine_mode mode;
3163 {
3164 if (TARGET_THUMB)
3165 return thumb_cmp_operand (op, mode);
3166
3167 return (s_register_operand (op, mode)
3168 || (GET_CODE (op) == CONST_INT
3169 && (const_ok_for_arm (INTVAL (op))
3170 || const_ok_for_arm (-INTVAL (op)))));
3171 }
3172
3173 int
3174 arm_not_operand (op, mode)
3175 rtx op;
3176 enum machine_mode mode;
3177 {
3178 return (s_register_operand (op, mode)
3179 || (GET_CODE (op) == CONST_INT
3180 && (const_ok_for_arm (INTVAL (op))
3181 || const_ok_for_arm (~INTVAL (op)))));
3182 }
3183
3184 /* Return TRUE if the operand is a memory reference which contains an
3185 offsettable address. */
3186
3187 int
3188 offsettable_memory_operand (op, mode)
3189 rtx op;
3190 enum machine_mode mode;
3191 {
3192 if (mode == VOIDmode)
3193 mode = GET_MODE (op);
3194
3195 return (mode == GET_MODE (op)
3196 && GET_CODE (op) == MEM
3197 && offsettable_address_p (reload_completed | reload_in_progress,
3198 mode, XEXP (op, 0)));
3199 }
3200
3201 /* Return TRUE if the operand is a memory reference which is, or can be
3202 made, word aligned by adjusting the offset. */
3203
3204 int
3205 alignable_memory_operand (op, mode)
3206 rtx op;
3207 enum machine_mode mode;
3208 {
3209 rtx reg;
3210
3211 if (mode == VOIDmode)
3212 mode = GET_MODE (op);
3213
3214 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
3215 return 0;
3216
3217 op = XEXP (op, 0);
3218
3219 return ((GET_CODE (reg = op) == REG
3220 || (GET_CODE (op) == SUBREG
3221 && GET_CODE (reg = SUBREG_REG (op)) == REG)
3222 || (GET_CODE (op) == PLUS
3223 && GET_CODE (XEXP (op, 1)) == CONST_INT
3224 && (GET_CODE (reg = XEXP (op, 0)) == REG
3225 || (GET_CODE (XEXP (op, 0)) == SUBREG
3226 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
3227 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
3228 }
3229
3230 /* Similar to s_register_operand, but does not allow hard integer
3231 registers. */
3232
3233 int
3234 f_register_operand (op, mode)
3235 rtx op;
3236 enum machine_mode mode;
3237 {
3238 if (GET_MODE (op) != mode && mode != VOIDmode)
3239 return 0;
3240
3241 if (GET_CODE (op) == SUBREG)
3242 op = SUBREG_REG (op);
3243
3244 /* We don't consider registers whose class is NO_REGS
3245 to be a register operand. */
3246 return (GET_CODE (op) == REG
3247 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3248 || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
3249 }
3250
3251 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
3252
3253 int
3254 fpu_rhs_operand (op, mode)
3255 rtx op;
3256 enum machine_mode mode;
3257 {
3258 if (s_register_operand (op, mode))
3259 return TRUE;
3260
3261 if (GET_MODE (op) != mode && mode != VOIDmode)
3262 return FALSE;
3263
3264 if (GET_CODE (op) == CONST_DOUBLE)
3265 return const_double_rtx_ok_for_fpu (op);
3266
3267 return FALSE;
3268 }
3269
3270 int
3271 fpu_add_operand (op, mode)
3272 rtx op;
3273 enum machine_mode mode;
3274 {
3275 if (s_register_operand (op, mode))
3276 return TRUE;
3277
3278 if (GET_MODE (op) != mode && mode != VOIDmode)
3279 return FALSE;
3280
3281 if (GET_CODE (op) == CONST_DOUBLE)
3282 return (const_double_rtx_ok_for_fpu (op)
3283 || neg_const_double_rtx_ok_for_fpu (op));
3284
3285 return FALSE;
3286 }
3287
3288 /* Return nonzero if OP is a constant power of two. */
3289
3290 int
3291 power_of_two_operand (op, mode)
3292 rtx op;
3293 enum machine_mode mode ATTRIBUTE_UNUSED;
3294 {
3295 if (GET_CODE (op) == CONST_INT)
3296 {
3297 HOST_WIDE_INT value = INTVAL (op);
3298
3299 return value != 0 && (value & (value - 1)) == 0;
3300 }
3301
3302 return FALSE;
3303 }
3304
3305 /* Return TRUE for a valid operand of a DImode operation.
3306 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
3307 Note that this disallows MEM(REG+REG), but allows
3308 MEM(PRE/POST_INC/DEC(REG)). */
3309
3310 int
3311 di_operand (op, mode)
3312 rtx op;
3313 enum machine_mode mode;
3314 {
3315 if (s_register_operand (op, mode))
3316 return TRUE;
3317
3318 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
3319 return FALSE;
3320
3321 if (GET_CODE (op) == SUBREG)
3322 op = SUBREG_REG (op);
3323
3324 switch (GET_CODE (op))
3325 {
3326 case CONST_DOUBLE:
3327 case CONST_INT:
3328 return TRUE;
3329
3330 case MEM:
3331 return memory_address_p (DImode, XEXP (op, 0));
3332
3333 default:
3334 return FALSE;
3335 }
3336 }
3337
3338 /* Like di_operand, but don't accept constants. */
3339
3340 int
3341 nonimmediate_di_operand (op, mode)
3342 rtx op;
3343 enum machine_mode mode;
3344 {
3345 if (s_register_operand (op, mode))
3346 return TRUE;
3347
3348 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
3349 return FALSE;
3350
3351 if (GET_CODE (op) == SUBREG)
3352 op = SUBREG_REG (op);
3353
3354 if (GET_CODE (op) == MEM)
3355 return memory_address_p (DImode, XEXP (op, 0));
3356
3357 return FALSE;
3358 }
3359
3360 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
3361 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
3362 Note that this disallows MEM(REG+REG), but allows
3363 MEM(PRE/POST_INC/DEC(REG)). */
3364
3365 int
3366 soft_df_operand (op, mode)
3367 rtx op;
3368 enum machine_mode mode;
3369 {
3370 if (s_register_operand (op, mode))
3371 return TRUE;
3372
3373 if (mode != VOIDmode && GET_MODE (op) != mode)
3374 return FALSE;
3375
3376 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
3377 return FALSE;
3378
3379 if (GET_CODE (op) == SUBREG)
3380 op = SUBREG_REG (op);
3381
3382 switch (GET_CODE (op))
3383 {
3384 case CONST_DOUBLE:
3385 return TRUE;
3386
3387 case MEM:
3388 return memory_address_p (DFmode, XEXP (op, 0));
3389
3390 default:
3391 return FALSE;
3392 }
3393 }
3394
3395 /* Like soft_df_operand, but don't accept constants. */
3396
3397 int
3398 nonimmediate_soft_df_operand (op, mode)
3399 rtx op;
3400 enum machine_mode mode;
3401 {
3402 if (s_register_operand (op, mode))
3403 return TRUE;
3404
3405 if (mode != VOIDmode && GET_MODE (op) != mode)
3406 return FALSE;
3407
3408 if (GET_CODE (op) == SUBREG)
3409 op = SUBREG_REG (op);
3410
3411 if (GET_CODE (op) == MEM)
3412 return memory_address_p (DFmode, XEXP (op, 0));
3413 return FALSE;
3414 }
3415
3416 /* Return TRUE for valid index operands. */
3417
3418 int
3419 index_operand (op, mode)
3420 rtx op;
3421 enum machine_mode mode;
3422 {
3423 return (s_register_operand (op, mode)
3424 || (immediate_operand (op, mode)
3425 && (GET_CODE (op) != CONST_INT
3426 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
3427 }
3428
3429 /* Return TRUE for valid shifts by a constant. This also accepts any
3430 power of two on the (somewhat overly relaxed) assumption that the
3431 shift operator in this case was a mult. */
3432
3433 int
3434 const_shift_operand (op, mode)
3435 rtx op;
3436 enum machine_mode mode;
3437 {
3438 return (power_of_two_operand (op, mode)
3439 || (immediate_operand (op, mode)
3440 && (GET_CODE (op) != CONST_INT
3441 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
3442 }
3443
3444 /* Return TRUE for arithmetic operators which can be combined with a multiply
3445 (shift). */
3446
3447 int
3448 shiftable_operator (x, mode)
3449 rtx x;
3450 enum machine_mode mode;
3451 {
3452 enum rtx_code code;
3453
3454 if (GET_MODE (x) != mode)
3455 return FALSE;
3456
3457 code = GET_CODE (x);
3458
3459 return (code == PLUS || code == MINUS
3460 || code == IOR || code == XOR || code == AND);
3461 }
3462
3463 /* Return TRUE for binary logical operators. */
3464
3465 int
3466 logical_binary_operator (x, mode)
3467 rtx x;
3468 enum machine_mode mode;
3469 {
3470 enum rtx_code code;
3471
3472 if (GET_MODE (x) != mode)
3473 return FALSE;
3474
3475 code = GET_CODE (x);
3476
3477 return (code == IOR || code == XOR || code == AND);
3478 }
3479
3480 /* Return TRUE for shift operators. */
3481
3482 int
3483 shift_operator (x, mode)
3484 rtx x;
3485 enum machine_mode mode;
3486 {
3487 enum rtx_code code;
3488
3489 if (GET_MODE (x) != mode)
3490 return FALSE;
3491
3492 code = GET_CODE (x);
3493
3494 if (code == MULT)
3495 return power_of_two_operand (XEXP (x, 1), mode);
3496
3497 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
3498 || code == ROTATERT);
3499 }
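
/* Note the MULT special case above: (mult (reg) (const_int 8)) counts as
   a shift operator because 8 is a power of two -- the RTL optimizers
   canonicalize `reg << 3' to this form in some contexts, and the output
   routines print it as `lsl #3'.  */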
3500
3501 /* Return TRUE if x is EQ or NE. */
3502
3503 int
3504 equality_operator (x, mode)
3505 rtx x;
3506 enum machine_mode mode ATTRIBUTE_UNUSED;
3507 {
3508 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
3509 }
3510
3511 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
3512
3513 int
3514 arm_comparison_operator (x, mode)
3515 rtx x;
3516 enum machine_mode mode;
3517 {
3518 return (comparison_operator (x, mode)
3519 && GET_CODE (x) != LTGT
3520 && GET_CODE (x) != UNEQ);
3521 }
3522
3523 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
3524
3525 int
3526 minmax_operator (x, mode)
3527 rtx x;
3528 enum machine_mode mode;
3529 {
3530 enum rtx_code code = GET_CODE (x);
3531
3532 if (GET_MODE (x) != mode)
3533 return FALSE;
3534
3535 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
3536 }
3537
3538 /* Return TRUE if this is the condition code register; if we aren't given
3539 a mode, accept any class CCmode register. */
3540
3541 int
3542 cc_register (x, mode)
3543 rtx x;
3544 enum machine_mode mode;
3545 {
3546 if (mode == VOIDmode)
3547 {
3548 mode = GET_MODE (x);
3549
3550 if (GET_MODE_CLASS (mode) != MODE_CC)
3551 return FALSE;
3552 }
3553
3554 if ( GET_MODE (x) == mode
3555 && GET_CODE (x) == REG
3556 && REGNO (x) == CC_REGNUM)
3557 return TRUE;
3558
3559 return FALSE;
3560 }
3561
3562 /* Return TRUE if this is the condition code register; if we aren't given
3563 a mode, accept any class CCmode register which indicates a dominance
3564 expression. */
3565
3566 int
3567 dominant_cc_register (x, mode)
3568 rtx x;
3569 enum machine_mode mode;
3570 {
3571 if (mode == VOIDmode)
3572 {
3573 mode = GET_MODE (x);
3574
3575 if (GET_MODE_CLASS (mode) != MODE_CC)
3576 return FALSE;
3577 }
3578
3579 if ( mode != CC_DNEmode && mode != CC_DEQmode
3580 && mode != CC_DLEmode && mode != CC_DLTmode
3581 && mode != CC_DGEmode && mode != CC_DGTmode
3582 && mode != CC_DLEUmode && mode != CC_DLTUmode
3583 && mode != CC_DGEUmode && mode != CC_DGTUmode)
3584 return FALSE;
3585
3586 return cc_register (x, mode);
3587 }
3588
3589 /* Return TRUE if X references a SYMBOL_REF. */
3590
3591 int
3592 symbol_mentioned_p (x)
3593 rtx x;
3594 {
3595 const char * fmt;
3596 int i;
3597
3598 if (GET_CODE (x) == SYMBOL_REF)
3599 return 1;
3600
3601 fmt = GET_RTX_FORMAT (GET_CODE (x));
3602
3603 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3604 {
3605 if (fmt[i] == 'E')
3606 {
3607 int j;
3608
3609 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3610 if (symbol_mentioned_p (XVECEXP (x, i, j)))
3611 return 1;
3612 }
3613 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
3614 return 1;
3615 }
3616
3617 return 0;
3618 }
3619
3620 /* Return TRUE if X references a LABEL_REF. */
3621
3622 int
3623 label_mentioned_p (x)
3624 rtx x;
3625 {
3626 const char * fmt;
3627 int i;
3628
3629 if (GET_CODE (x) == LABEL_REF)
3630 return 1;
3631
3632 fmt = GET_RTX_FORMAT (GET_CODE (x));
3633 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3634 {
3635 if (fmt[i] == 'E')
3636 {
3637 int j;
3638
3639 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3640 if (label_mentioned_p (XVECEXP (x, i, j)))
3641 return 1;
3642 }
3643 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
3644 return 1;
3645 }
3646
3647 return 0;
3648 }
3649
3650 enum rtx_code
3651 minmax_code (x)
3652 rtx x;
3653 {
3654 enum rtx_code code = GET_CODE (x);
3655
3656 if (code == SMAX)
3657 return GE;
3658 else if (code == SMIN)
3659 return LE;
3660 else if (code == UMIN)
3661 return LEU;
3662 else if (code == UMAX)
3663 return GEU;
3664
3665 abort ();
3666 }
3667
3668 /* Return 1 if memory locations are adjacent. */
3669
3670 int
3671 adjacent_mem_locations (a, b)
3672 rtx a, b;
3673 {
3674 if ((GET_CODE (XEXP (a, 0)) == REG
3675 || (GET_CODE (XEXP (a, 0)) == PLUS
3676 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
3677 && (GET_CODE (XEXP (b, 0)) == REG
3678 || (GET_CODE (XEXP (b, 0)) == PLUS
3679 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
3680 {
3681 int val0 = 0, val1 = 0;
3682 int reg0, reg1;
3683
3684 if (GET_CODE (XEXP (a, 0)) == PLUS)
3685 {
3686 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
3687 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
3688 }
3689 else
3690 reg0 = REGNO (XEXP (a, 0));
3691
3692 if (GET_CODE (XEXP (b, 0)) == PLUS)
3693 {
3694 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
3695 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
3696 }
3697 else
3698 reg1 = REGNO (XEXP (b, 0));
3699
3700 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
3701 }
3702 return 0;
3703 }
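
/* Examples for the predicate above (the addresses must use the same base
   register and be exactly one word apart, in either order):

       (mem (reg 4))  /  (mem (plus (reg 4) (const_int 4)))  -> 1
       (mem (reg 4))  /  (mem (plus (reg 5) (const_int 4)))  -> 0  bases differ
       (mem (reg 4))  /  (mem (plus (reg 4) (const_int 8)))  -> 0  gap is 8  */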
3704
3705 /* Return 1 if OP is a load multiple operation. It is known to be
3706 parallel and the first section will be tested. */
3707
3708 int
3709 load_multiple_operation (op, mode)
3710 rtx op;
3711 enum machine_mode mode ATTRIBUTE_UNUSED;
3712 {
3713 HOST_WIDE_INT count = XVECLEN (op, 0);
3714 int dest_regno;
3715 rtx src_addr;
3716 HOST_WIDE_INT i = 1, base = 0;
3717 rtx elt;
3718
3719 if (count <= 1
3720 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
3721 return 0;
3722
3723 /* Check to see if this might be a write-back. */
3724 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
3725 {
3726 i++;
3727 base = 1;
3728
3729 /* Now check it more carefully. */
3730 if (GET_CODE (SET_DEST (elt)) != REG
3731 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
3732 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
3733 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
3734 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
3735 return 0;
3736 }
3737
3738 /* Perform a quick check so we don't blow up below. */
3739 if (count <= i
3740 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
3741 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
3742 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
3743 return 0;
3744
3745 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
3746 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
3747
3748 for (; i < count; i++)
3749 {
3750 elt = XVECEXP (op, 0, i);
3751
3752 if (GET_CODE (elt) != SET
3753 || GET_CODE (SET_DEST (elt)) != REG
3754 || GET_MODE (SET_DEST (elt)) != SImode
3755 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
3756 || GET_CODE (SET_SRC (elt)) != MEM
3757 || GET_MODE (SET_SRC (elt)) != SImode
3758 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
3759 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
3760 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
3761 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
3762 return 0;
3763 }
3764
3765 return 1;
3766 }
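
/* Shape of a PARALLEL accepted above in the no-write-back case -- a sketch
   in RTL notation of `ldmia r0, {r4, r5}':

       (parallel [(set (reg:SI 4) (mem:SI (reg:SI 0)))
                  (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0)
                                                   (const_int 4))))])

   store_multiple_operation below accepts the mirror image.  */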
3767
3768 /* Return 1 if OP is a store multiple operation. It is known to be
3769 parallel and the first section will be tested. */
3770
3771 int
3772 store_multiple_operation (op, mode)
3773 rtx op;
3774 enum machine_mode mode ATTRIBUTE_UNUSED;
3775 {
3776 HOST_WIDE_INT count = XVECLEN (op, 0);
3777 int src_regno;
3778 rtx dest_addr;
3779 HOST_WIDE_INT i = 1, base = 0;
3780 rtx elt;
3781
3782 if (count <= 1
3783 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
3784 return 0;
3785
3786 /* Check to see if this might be a write-back. */
3787 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
3788 {
3789 i++;
3790 base = 1;
3791
3792 /* Now check it more carefully. */
3793 if (GET_CODE (SET_DEST (elt)) != REG
3794 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
3795 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
3796 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
3797 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
3798 return 0;
3799 }
3800
3801 /* Perform a quick check so we don't blow up below. */
3802 if (count <= i
3803 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
3804 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
3805 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
3806 return 0;
3807
3808 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
3809 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
3810
3811 for (; i < count; i++)
3812 {
3813 elt = XVECEXP (op, 0, i);
3814
3815 if (GET_CODE (elt) != SET
3816 || GET_CODE (SET_SRC (elt)) != REG
3817 || GET_MODE (SET_SRC (elt)) != SImode
3818 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
3819 || GET_CODE (SET_DEST (elt)) != MEM
3820 || GET_MODE (SET_DEST (elt)) != SImode
3821 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
3822 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
3823 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
3824 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
3825 return 0;
3826 }
3827
3828 return 1;
3829 }
3830
3831 int
3832 load_multiple_sequence (operands, nops, regs, base, load_offset)
3833 rtx * operands;
3834 int nops;
3835 int * regs;
3836 int * base;
3837 HOST_WIDE_INT * load_offset;
3838 {
3839 int unsorted_regs[4];
3840 HOST_WIDE_INT unsorted_offsets[4];
3841 int order[4];
3842 int base_reg = -1;
3843 int i;
3844
3845 /* Can only handle 2, 3, or 4 insns at present,
3846 though could be easily extended if required. */
3847 if (nops < 2 || nops > 4)
3848 abort ();
3849
3850 /* Loop over the operands and check that the memory references are
3851    suitable (i.e. immediate offsets from the same base register). At
3852 the same time, extract the target register, and the memory
3853 offsets. */
3854 for (i = 0; i < nops; i++)
3855 {
3856 rtx reg;
3857 rtx offset;
3858
3859 /* Convert a subreg of a mem into the mem itself. */
3860 if (GET_CODE (operands[nops + i]) == SUBREG)
3861 operands[nops + i] = alter_subreg (operands + (nops + i));
3862
3863 if (GET_CODE (operands[nops + i]) != MEM)
3864 abort ();
3865
3866 /* Don't reorder volatile memory references; it doesn't seem worth
3867 looking for the case where the order is ok anyway. */
3868 if (MEM_VOLATILE_P (operands[nops + i]))
3869 return 0;
3870
3871 offset = const0_rtx;
3872
3873 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
3874 || (GET_CODE (reg) == SUBREG
3875 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
3876 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
3877 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
3878 == REG)
3879 || (GET_CODE (reg) == SUBREG
3880 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
3881 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
3882 == CONST_INT)))
3883 {
3884 if (i == 0)
3885 {
3886 base_reg = REGNO (reg);
3887 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
3888 ? REGNO (operands[i])
3889 : REGNO (SUBREG_REG (operands[i])));
3890 order[0] = 0;
3891 }
3892 else
3893 {
3894 if (base_reg != (int) REGNO (reg))
3895 /* Not addressed from the same base register. */
3896 return 0;
3897
3898 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
3899 ? REGNO (operands[i])
3900 : REGNO (SUBREG_REG (operands[i])));
3901 if (unsorted_regs[i] < unsorted_regs[order[0]])
3902 order[0] = i;
3903 }
3904
3905 /* If it isn't an integer register, or if it overwrites the
3906 base register but isn't the last insn in the list, then
3907 we can't do this. */
3908 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
3909 || (i != nops - 1 && unsorted_regs[i] == base_reg))
3910 return 0;
3911
3912 unsorted_offsets[i] = INTVAL (offset);
3913 }
3914 else
3915 /* Not a suitable memory address. */
3916 return 0;
3917 }
3918
3919 /* All the useful information has now been extracted from the
3920 operands into unsorted_regs and unsorted_offsets; additionally,
3921 order[0] has been set to the lowest numbered register in the
3922 list. Sort the registers into order, and check that the memory
3923 offsets are ascending and adjacent. */
3924
3925 for (i = 1; i < nops; i++)
3926 {
3927 int j;
3928
3929 order[i] = order[i - 1];
3930 for (j = 0; j < nops; j++)
3931 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
3932 && (order[i] == order[i - 1]
3933 || unsorted_regs[j] < unsorted_regs[order[i]]))
3934 order[i] = j;
3935
3936       /* Have we found a suitable register? If not, one must be used more
3937 	 than once. */
3938 if (order[i] == order[i - 1])
3939 return 0;
3940
3941       /* Are the memory offsets adjacent and ascending? */
3942 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
3943 return 0;
3944 }
3945
3946 if (base)
3947 {
3948 *base = base_reg;
3949
3950 for (i = 0; i < nops; i++)
3951 regs[i] = unsorted_regs[order[i]];
3952
3953 *load_offset = unsorted_offsets[order[0]];
3954 }
3955
3956 if (unsorted_offsets[order[0]] == 0)
3957 return 1; /* ldmia */
3958
3959 if (unsorted_offsets[order[0]] == 4)
3960 return 2; /* ldmib */
3961
3962 if (unsorted_offsets[order[nops - 1]] == 0)
3963 return 3; /* ldmda */
3964
3965 if (unsorted_offsets[order[nops - 1]] == -4)
3966 return 4; /* ldmdb */
3967
3968 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
3969 if the offset isn't small enough. The reason 2 ldrs are faster
3970      is that these ARMs are able to do more than one cache access
3971 in a single cycle. The ARM9 and StrongARM have Harvard caches,
3972 whilst the ARM8 has a double bandwidth cache. This means that
3973 these cores can do both an instruction fetch and a data fetch in
3974 a single cycle, so the trick of calculating the address into a
3975 scratch register (one of the result regs) and then doing a load
3976 multiple actually becomes slower (and no smaller in code size).
3977 That is the transformation
3978
3979 ldr rd1, [rbase + offset]
3980 ldr rd2, [rbase + offset + 4]
3981
3982 to
3983
3984 add rd1, rbase, offset
3985 ldmia rd1, {rd1, rd2}
3986
3987 produces worse code -- '3 cycles + any stalls on rd2' instead of
3988 '2 cycles + any stalls on rd2'. On ARMs with only one cache
3989 access per cycle, the first sequence could never complete in less
3990 than 6 cycles, whereas the ldm sequence would only take 5 and
3991 would make better use of sequential accesses if not hitting the
3992 cache.
3993
3994 We cheat here and test 'arm_ld_sched' which we currently know to
3995 only be true for the ARM8, ARM9 and StrongARM. If this ever
3996 changes, then the test below needs to be reworked. */
3997 if (nops == 2 && arm_ld_sched)
3998 return 0;
3999
4000   /* Can't do it without setting up the offset; only do this if it takes
4001 no more than one insn. */
4002 return (const_ok_for_arm (unsorted_offsets[order[0]])
4003 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
4004 }
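
/* A worked example (illustrative, not from the original source):
   loads of r0-r3 from [rbase], [rbase, #4], [rbase, #8] and
   [rbase, #12] have sorted offsets 0, 4, 8, 12; the lowest is 0,
   so the routine returns 1 (ldmia).  Offsets 4, 8, 12, 16 give
   2 (ldmib); -12, -8, -4, 0 give 3 (ldmda); and -16, -12, -8, -4
   give 4 (ldmdb).  Any other starting offset falls through to the
   code above, returning 5 (ldmia after an add/sub to set up the
   base) when the offset fits in a single insn, and 0 otherwise.  */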
4005
4006 const char *
4007 emit_ldm_seq (operands, nops)
4008 rtx * operands;
4009 int nops;
4010 {
4011 int regs[4];
4012 int base_reg;
4013 HOST_WIDE_INT offset;
4014 char buf[100];
4015 int i;
4016
4017 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4018 {
4019 case 1:
4020 strcpy (buf, "ldm%?ia\t");
4021 break;
4022
4023 case 2:
4024 strcpy (buf, "ldm%?ib\t");
4025 break;
4026
4027 case 3:
4028 strcpy (buf, "ldm%?da\t");
4029 break;
4030
4031 case 4:
4032 strcpy (buf, "ldm%?db\t");
4033 break;
4034
4035 case 5:
4036 if (offset >= 0)
4037 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4038 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4039 (long) offset);
4040 else
4041 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4042 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4043 (long) -offset);
4044 output_asm_insn (buf, operands);
4045 base_reg = regs[0];
4046 strcpy (buf, "ldm%?ia\t");
4047 break;
4048
4049 default:
4050 abort ();
4051 }
4052
4053 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4054 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4055
4056 for (i = 1; i < nops; i++)
4057 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4058 reg_names[regs[i]]);
4059
4060 strcat (buf, "}\t%@ phole ldm");
4061
4062 output_asm_insn (buf, operands);
4063 return "";
4064 }
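
/* For illustration (hypothetical operands): with base_reg r4,
   regs = {r0, r1} and offset 8, the case-5 path above would emit
   roughly

	add	r0, r4, #8
	ldmia	r0, {r0, r1}	@ phole ldm

   reusing the first destination register as the temporary base, so
   no extra scratch register is required.  */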
4065
4066 int
4067 store_multiple_sequence (operands, nops, regs, base, load_offset)
4068 rtx * operands;
4069 int nops;
4070 int * regs;
4071 int * base;
4072 HOST_WIDE_INT * load_offset;
4073 {
4074 int unsorted_regs[4];
4075 HOST_WIDE_INT unsorted_offsets[4];
4076 int order[4];
4077 int base_reg = -1;
4078 int i;
4079
4080 /* Can only handle 2, 3, or 4 insns at present, though could be easily
4081 extended if required. */
4082 if (nops < 2 || nops > 4)
4083 abort ();
4084
4085 /* Loop over the operands and check that the memory references are
4086    suitable (i.e. immediate offsets from the same base register). At
4087 the same time, extract the target register, and the memory
4088 offsets. */
4089 for (i = 0; i < nops; i++)
4090 {
4091 rtx reg;
4092 rtx offset;
4093
4094 /* Convert a subreg of a mem into the mem itself. */
4095 if (GET_CODE (operands[nops + i]) == SUBREG)
4096 operands[nops + i] = alter_subreg (operands + (nops + i));
4097
4098 if (GET_CODE (operands[nops + i]) != MEM)
4099 abort ();
4100
4101 /* Don't reorder volatile memory references; it doesn't seem worth
4102 looking for the case where the order is ok anyway. */
4103 if (MEM_VOLATILE_P (operands[nops + i]))
4104 return 0;
4105
4106 offset = const0_rtx;
4107
4108 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4109 || (GET_CODE (reg) == SUBREG
4110 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4111 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4112 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4113 == REG)
4114 || (GET_CODE (reg) == SUBREG
4115 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4116 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4117 == CONST_INT)))
4118 {
4119 if (i == 0)
4120 {
4121 base_reg = REGNO (reg);
4122 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4123 ? REGNO (operands[i])
4124 : REGNO (SUBREG_REG (operands[i])));
4125 order[0] = 0;
4126 }
4127 else
4128 {
4129 if (base_reg != (int) REGNO (reg))
4130 /* Not addressed from the same base register. */
4131 return 0;
4132
4133 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
4134 ? REGNO (operands[i])
4135 : REGNO (SUBREG_REG (operands[i])));
4136 if (unsorted_regs[i] < unsorted_regs[order[0]])
4137 order[0] = i;
4138 }
4139
4140 /* If it isn't an integer register, then we can't do this. */
4141 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
4142 return 0;
4143
4144 unsorted_offsets[i] = INTVAL (offset);
4145 }
4146 else
4147 /* Not a suitable memory address. */
4148 return 0;
4149 }
4150
4151 /* All the useful information has now been extracted from the
4152 operands into unsorted_regs and unsorted_offsets; additionally,
4153 order[0] has been set to the lowest numbered register in the
4154 list. Sort the registers into order, and check that the memory
4155 offsets are ascending and adjacent. */
4156
4157 for (i = 1; i < nops; i++)
4158 {
4159 int j;
4160
4161 order[i] = order[i - 1];
4162 for (j = 0; j < nops; j++)
4163 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4164 && (order[i] == order[i - 1]
4165 || unsorted_regs[j] < unsorted_regs[order[i]]))
4166 order[i] = j;
4167
4168       /* Have we found a suitable register? If not, one must be used more
4169 	 than once. */
4170 if (order[i] == order[i - 1])
4171 return 0;
4172
4173       /* Are the memory offsets adjacent and ascending? */
4174 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4175 return 0;
4176 }
4177
4178 if (base)
4179 {
4180 *base = base_reg;
4181
4182 for (i = 0; i < nops; i++)
4183 regs[i] = unsorted_regs[order[i]];
4184
4185 *load_offset = unsorted_offsets[order[0]];
4186 }
4187
4188 if (unsorted_offsets[order[0]] == 0)
4189 return 1; /* stmia */
4190
4191 if (unsorted_offsets[order[0]] == 4)
4192 return 2; /* stmib */
4193
4194 if (unsorted_offsets[order[nops - 1]] == 0)
4195 return 3; /* stmda */
4196
4197 if (unsorted_offsets[order[nops - 1]] == -4)
4198 return 4; /* stmdb */
4199
4200 return 0;
4201 }
4202
4203 const char *
4204 emit_stm_seq (operands, nops)
4205 rtx * operands;
4206 int nops;
4207 {
4208 int regs[4];
4209 int base_reg;
4210 HOST_WIDE_INT offset;
4211 char buf[100];
4212 int i;
4213
4214 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4215 {
4216 case 1:
4217 strcpy (buf, "stm%?ia\t");
4218 break;
4219
4220 case 2:
4221 strcpy (buf, "stm%?ib\t");
4222 break;
4223
4224 case 3:
4225 strcpy (buf, "stm%?da\t");
4226 break;
4227
4228 case 4:
4229 strcpy (buf, "stm%?db\t");
4230 break;
4231
4232 default:
4233 abort ();
4234 }
4235
4236 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4237 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4238
4239 for (i = 1; i < nops; i++)
4240 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4241 reg_names[regs[i]]);
4242
4243 strcat (buf, "}\t%@ phole stm");
4244
4245 output_asm_insn (buf, operands);
4246 return "";
4247 }
4248
4249 int
4250 multi_register_push (op, mode)
4251 rtx op;
4252 enum machine_mode mode ATTRIBUTE_UNUSED;
4253 {
4254 if (GET_CODE (op) != PARALLEL
4255 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
4256 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
4257 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
4258 return 0;
4259
4260 return 1;
4261 }
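
/* The shape matched above is roughly (see emit_multi_reg_push for
   the generator; only the first vector element is checked here):

	(parallel
	  [(set (mem:BLK ...)
		(unspec:BLK [(reg ...)] UNSPEC_PUSH_MULT))
	   ...])

   The remaining elements describe the individual register stores.  */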
4262 \f
4263 /* Routines for use in generating RTL. */
4264
4265 rtx
4266 arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
4267 in_struct_p, scalar_p)
4268 int base_regno;
4269 int count;
4270 rtx from;
4271 int up;
4272 int write_back;
4273 int unchanging_p;
4274 int in_struct_p;
4275 int scalar_p;
4276 {
4277 int i = 0, j;
4278 rtx result;
4279 int sign = up ? 1 : -1;
4280 rtx mem;
4281
4282 /* XScale has load-store double instructions, but they have stricter
4283      alignment requirements than load-store multiple, so we cannot
4284 use them.
4285
4286 For XScale ldm requires 2 + NREGS cycles to complete and blocks
4287 the pipeline until completion.
4288
4289 NREGS CYCLES
4290 1 3
4291 2 4
4292 3 5
4293 4 6
4294
4295 An ldr instruction takes 1-3 cycles, but does not block the
4296 pipeline.
4297
4298 NREGS CYCLES
4299 1 1-3
4300 2 2-6
4301 3 3-9
4302 4 4-12
4303
4304 Best case ldr will always win. However, the more ldr instructions
4305 we issue, the less likely we are to be able to schedule them well.
4306 Using ldr instructions also increases code size.
4307
4308 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
4309 for counts of 3 or 4 regs. */
4310 if (arm_is_xscale && count <= 2 && ! optimize_size)
4311 {
4312 rtx seq;
4313
4314 start_sequence ();
4315
4316 for (i = 0; i < count; i++)
4317 {
4318 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
4319 RTX_UNCHANGING_P (mem) = unchanging_p;
4320 MEM_IN_STRUCT_P (mem) = in_struct_p;
4321 MEM_SCALAR_P (mem) = scalar_p;
4322 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
4323 }
4324
4325 if (write_back)
4326 emit_move_insn (from, plus_constant (from, count * 4 * sign));
4327
4328 seq = gen_sequence ();
4329 end_sequence ();
4330
4331 return seq;
4332 }
4333
4334 result = gen_rtx_PARALLEL (VOIDmode,
4335 rtvec_alloc (count + (write_back ? 1 : 0)));
4336 if (write_back)
4337 {
4338 XVECEXP (result, 0, 0)
4339 = gen_rtx_SET (GET_MODE (from), from,
4340 plus_constant (from, count * 4 * sign));
4341 i = 1;
4342 count++;
4343 }
4344
4345 for (j = 0; i < count; i++, j++)
4346 {
4347 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
4348 RTX_UNCHANGING_P (mem) = unchanging_p;
4349 MEM_IN_STRUCT_P (mem) = in_struct_p;
4350 MEM_SCALAR_P (mem) = scalar_p;
4351 XVECEXP (result, 0, i)
4352 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
4353 }
4354
4355 return result;
4356 }
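
/* For illustration (hypothetical arguments): a call such as
   arm_gen_load_multiple (0, 2, from, TRUE, TRUE, ...) on a
   non-XScale target builds roughly

	(parallel
	  [(set (reg from) (plus (reg from) (const_int 8)))
	   (set (reg:SI 0) (mem:SI (reg from)))
	   (set (reg:SI 1) (mem:SI (plus (reg from) (const_int 4))))])

   with the write-back SET first, followed by one SET per register
   loaded.  */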
4357
4358 rtx
4359 arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
4360 in_struct_p, scalar_p)
4361 int base_regno;
4362 int count;
4363 rtx to;
4364 int up;
4365 int write_back;
4366 int unchanging_p;
4367 int in_struct_p;
4368 int scalar_p;
4369 {
4370 int i = 0, j;
4371 rtx result;
4372 int sign = up ? 1 : -1;
4373 rtx mem;
4374
4375 /* See arm_gen_load_multiple for discussion of
4376 the pros/cons of ldm/stm usage for XScale. */
4377 if (arm_is_xscale && count <= 2 && ! optimize_size)
4378 {
4379 rtx seq;
4380
4381 start_sequence ();
4382
4383 for (i = 0; i < count; i++)
4384 {
4385 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
4386 RTX_UNCHANGING_P (mem) = unchanging_p;
4387 MEM_IN_STRUCT_P (mem) = in_struct_p;
4388 MEM_SCALAR_P (mem) = scalar_p;
4389 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
4390 }
4391
4392 if (write_back)
4393 emit_move_insn (to, plus_constant (to, count * 4 * sign));
4394
4395 seq = gen_sequence ();
4396 end_sequence ();
4397
4398 return seq;
4399 }
4400
4401 result = gen_rtx_PARALLEL (VOIDmode,
4402 rtvec_alloc (count + (write_back ? 1 : 0)));
4403 if (write_back)
4404 {
4405 XVECEXP (result, 0, 0)
4406 = gen_rtx_SET (GET_MODE (to), to,
4407 plus_constant (to, count * 4 * sign));
4408 i = 1;
4409 count++;
4410 }
4411
4412 for (j = 0; i < count; i++, j++)
4413 {
4414 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
4415 RTX_UNCHANGING_P (mem) = unchanging_p;
4416 MEM_IN_STRUCT_P (mem) = in_struct_p;
4417 MEM_SCALAR_P (mem) = scalar_p;
4418
4419 XVECEXP (result, 0, i)
4420 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
4421 }
4422
4423 return result;
4424 }
4425
4426 int
4427 arm_gen_movstrqi (operands)
4428 rtx * operands;
4429 {
4430 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
4431 int i;
4432 rtx src, dst;
4433 rtx st_src, st_dst, fin_src, fin_dst;
4434 rtx part_bytes_reg = NULL;
4435 rtx mem;
4436 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
4437 int dst_scalar_p, src_scalar_p;
4438
4439 if (GET_CODE (operands[2]) != CONST_INT
4440 || GET_CODE (operands[3]) != CONST_INT
4441 || INTVAL (operands[2]) > 64
4442 || INTVAL (operands[3]) & 3)
4443 return 0;
4444
4445 st_dst = XEXP (operands[0], 0);
4446 st_src = XEXP (operands[1], 0);
4447
4448 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
4449 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
4450 dst_scalar_p = MEM_SCALAR_P (operands[0]);
4451 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
4452 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
4453 src_scalar_p = MEM_SCALAR_P (operands[1]);
4454
4455 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
4456 fin_src = src = copy_to_mode_reg (SImode, st_src);
4457
4458 in_words_to_go = NUM_INTS (INTVAL (operands[2]));
4459 out_words_to_go = INTVAL (operands[2]) / 4;
4460 last_bytes = INTVAL (operands[2]) & 3;
4461
4462 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
4463 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
4464
4465 for (i = 0; in_words_to_go >= 2; i+=4)
4466 {
4467 if (in_words_to_go > 4)
4468 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
4469 src_unchanging_p,
4470 src_in_struct_p,
4471 src_scalar_p));
4472 else
4473 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
4474 FALSE, src_unchanging_p,
4475 src_in_struct_p, src_scalar_p));
4476
4477 if (out_words_to_go)
4478 {
4479 if (out_words_to_go > 4)
4480 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
4481 dst_unchanging_p,
4482 dst_in_struct_p,
4483 dst_scalar_p));
4484 else if (out_words_to_go != 1)
4485 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
4486 dst, TRUE,
4487 (last_bytes == 0
4488 ? FALSE : TRUE),
4489 dst_unchanging_p,
4490 dst_in_struct_p,
4491 dst_scalar_p));
4492 else
4493 {
4494 mem = gen_rtx_MEM (SImode, dst);
4495 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4496 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4497 MEM_SCALAR_P (mem) = dst_scalar_p;
4498 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
4499 if (last_bytes != 0)
4500 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
4501 }
4502 }
4503
4504 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
4505 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
4506 }
4507
4508 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
4509 if (out_words_to_go)
4510 {
4511 rtx sreg;
4512
4513 mem = gen_rtx_MEM (SImode, src);
4514 RTX_UNCHANGING_P (mem) = src_unchanging_p;
4515 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
4516 MEM_SCALAR_P (mem) = src_scalar_p;
4517 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
4518 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
4519
4520 mem = gen_rtx_MEM (SImode, dst);
4521 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4522 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4523 MEM_SCALAR_P (mem) = dst_scalar_p;
4524 emit_move_insn (mem, sreg);
4525 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
4526 in_words_to_go--;
4527
4528 if (in_words_to_go) /* Sanity check */
4529 abort ();
4530 }
4531
4532 if (in_words_to_go)
4533 {
4534 if (in_words_to_go < 0)
4535 abort ();
4536
4537 mem = gen_rtx_MEM (SImode, src);
4538 RTX_UNCHANGING_P (mem) = src_unchanging_p;
4539 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
4540 MEM_SCALAR_P (mem) = src_scalar_p;
4541 part_bytes_reg = copy_to_mode_reg (SImode, mem);
4542 }
4543
4544 if (last_bytes && part_bytes_reg == NULL)
4545 abort ();
4546
4547 if (BYTES_BIG_ENDIAN && last_bytes)
4548 {
4549 rtx tmp = gen_reg_rtx (SImode);
4550
4551 /* The bytes we want are in the top end of the word. */
4552 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
4553 GEN_INT (8 * (4 - last_bytes))));
4554 part_bytes_reg = tmp;
4555
4556 while (last_bytes)
4557 {
4558 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
4559 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4560 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4561 MEM_SCALAR_P (mem) = dst_scalar_p;
4562 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
4563
4564 if (--last_bytes)
4565 {
4566 tmp = gen_reg_rtx (SImode);
4567 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
4568 part_bytes_reg = tmp;
4569 }
4570 }
4571
4572 }
4573 else
4574 {
4575 if (last_bytes > 1)
4576 {
4577 mem = gen_rtx_MEM (HImode, dst);
4578 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4579 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4580 MEM_SCALAR_P (mem) = dst_scalar_p;
4581 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
4582 last_bytes -= 2;
4583 if (last_bytes)
4584 {
4585 rtx tmp = gen_reg_rtx (SImode);
4586
4587 emit_insn (gen_addsi3 (dst, dst, GEN_INT (2)));
4588 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
4589 part_bytes_reg = tmp;
4590 }
4591 }
4592
4593 if (last_bytes)
4594 {
4595 mem = gen_rtx_MEM (QImode, dst);
4596 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4597 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4598 MEM_SCALAR_P (mem) = dst_scalar_p;
4599 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
4600 }
4601 }
4602
4603 return 1;
4604 }
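
/* A worked example (assuming NUM_INTS rounds a byte count up to
   whole words): copying 10 aligned bytes gives in_words_to_go = 3,
   out_words_to_go = 2 and last_bytes = 2.  The main loop then loads
   three words and stores two of them; the tail code stores the
   remaining two bytes as a halfword taken from part_bytes_reg.  */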
4605
4606 /* Generate a memory reference for a half word, such that it will be loaded
4607 into the top 16 bits of the word. We can assume that the address is
4608 known to be alignable and of the form reg, or plus (reg, const). */
4609
4610 rtx
4611 arm_gen_rotated_half_load (memref)
4612 rtx memref;
4613 {
4614 HOST_WIDE_INT offset = 0;
4615 rtx base = XEXP (memref, 0);
4616
4617 if (GET_CODE (base) == PLUS)
4618 {
4619 offset = INTVAL (XEXP (base, 1));
4620 base = XEXP (base, 0);
4621 }
4622
4623 /* If we aren't allowed to generate unaligned addresses, then fail. */
4624 if (TARGET_MMU_TRAPS
4625 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
4626 return NULL;
4627
4628 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
4629
4630 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
4631 return base;
4632
4633 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
4634 }
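
/* For illustration: on a little-endian target (and when
   TARGET_MMU_TRAPS does not force a failure above), a halfword at
   offset 2 within its word already occupies the top 16 bits once the
   containing word is loaded, so the MEM is returned directly; a
   halfword at offset 0 gets the (rotate ... 16) wrapper so that it
   too ends up in the top half of the register.  */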
4635
4636 /* Select a dominance comparison mode if possible. We support three forms.
4637 COND_OR == 0 => (X && Y)
4638    COND_OR == 1 => ((! X) || Y)
4639 COND_OR == 2 => (X || Y)
4640    If we are unable to support a dominance comparison we return CCmode.
4641 This will then fail to match for the RTL expressions that generate this
4642 call. */
4643
4644 static enum machine_mode
4645 select_dominance_cc_mode (x, y, cond_or)
4646 rtx x;
4647 rtx y;
4648 HOST_WIDE_INT cond_or;
4649 {
4650 enum rtx_code cond1, cond2;
4651 int swapped = 0;
4652
4653 /* Currently we will probably get the wrong result if the individual
4654 comparisons are not simple. This also ensures that it is safe to
4655 reverse a comparison if necessary. */
4656 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
4657 != CCmode)
4658 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
4659 != CCmode))
4660 return CCmode;
4661
4662 /* The if_then_else variant of this tests the second condition if the
4663 first passes, but is true if the first fails. Reverse the first
4664 condition to get a true "inclusive-or" expression. */
4665 if (cond_or == 1)
4666 cond1 = reverse_condition (cond1);
4667
4668 /* If the comparisons are not equal, and one doesn't dominate the other,
4669 then we can't do this. */
4670 if (cond1 != cond2
4671 && !comparison_dominates_p (cond1, cond2)
4672 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
4673 return CCmode;
4674
4675 if (swapped)
4676 {
4677 enum rtx_code temp = cond1;
4678 cond1 = cond2;
4679 cond2 = temp;
4680 }
4681
4682 switch (cond1)
4683 {
4684 case EQ:
4685 if (cond2 == EQ || !cond_or)
4686 return CC_DEQmode;
4687
4688 switch (cond2)
4689 {
4690 case LE: return CC_DLEmode;
4691 case LEU: return CC_DLEUmode;
4692 case GE: return CC_DGEmode;
4693 case GEU: return CC_DGEUmode;
4694 default: break;
4695 }
4696
4697 break;
4698
4699 case LT:
4700 if (cond2 == LT || !cond_or)
4701 return CC_DLTmode;
4702 if (cond2 == LE)
4703 return CC_DLEmode;
4704 if (cond2 == NE)
4705 return CC_DNEmode;
4706 break;
4707
4708 case GT:
4709 if (cond2 == GT || !cond_or)
4710 return CC_DGTmode;
4711 if (cond2 == GE)
4712 return CC_DGEmode;
4713 if (cond2 == NE)
4714 return CC_DNEmode;
4715 break;
4716
4717 case LTU:
4718 if (cond2 == LTU || !cond_or)
4719 return CC_DLTUmode;
4720 if (cond2 == LEU)
4721 return CC_DLEUmode;
4722 if (cond2 == NE)
4723 return CC_DNEmode;
4724 break;
4725
4726 case GTU:
4727 if (cond2 == GTU || !cond_or)
4728 return CC_DGTUmode;
4729 if (cond2 == GEU)
4730 return CC_DGEUmode;
4731 if (cond2 == NE)
4732 return CC_DNEmode;
4733 break;
4734
4735 /* The remaining cases only occur when both comparisons are the
4736 same. */
4737 case NE:
4738 return CC_DNEmode;
4739
4740 case LE:
4741 return CC_DLEmode;
4742
4743 case GE:
4744 return CC_DGEmode;
4745
4746 case LEU:
4747 return CC_DLEUmode;
4748
4749 case GEU:
4750 return CC_DGEUmode;
4751
4752 default:
4753 break;
4754 }
4755
4756 abort ();
4757 }
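
/* A worked example: for (x < y) || (x <= y), i.e. COND_OR == 2 with
   cond1 == LT and cond2 == LE, LT dominates LE (LT being true
   implies LE is true), so the switch above returns CC_DLEmode; a
   single LE test of the resulting CC register then covers both
   conditions.  */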
4758
4759 enum machine_mode
4760 arm_select_cc_mode (op, x, y)
4761 enum rtx_code op;
4762 rtx x;
4763 rtx y;
4764 {
4765   /* All floating point compares return CCFPmode if it is an equality
4766      or unordered comparison, and CCFPEmode otherwise. */
4767 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
4768 {
4769 switch (op)
4770 {
4771 case EQ:
4772 case NE:
4773 case UNORDERED:
4774 case ORDERED:
4775 case UNLT:
4776 case UNLE:
4777 case UNGT:
4778 case UNGE:
4779 case UNEQ:
4780 case LTGT:
4781 return CCFPmode;
4782
4783 case LT:
4784 case LE:
4785 case GT:
4786 case GE:
4787 return CCFPEmode;
4788
4789 default:
4790 abort ();
4791 }
4792 }
4793
4794 /* A compare with a shifted operand. Because of canonicalization, the
4795 comparison will have to be swapped when we emit the assembler. */
4796 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
4797 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4798 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
4799 || GET_CODE (x) == ROTATERT))
4800 return CC_SWPmode;
4801
4802 /* This is a special case that is used by combine to allow a
4803 comparison of a shifted byte load to be split into a zero-extend
4804 followed by a comparison of the shifted integer (only valid for
4805 equalities and unsigned inequalities). */
4806 if (GET_MODE (x) == SImode
4807 && GET_CODE (x) == ASHIFT
4808 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
4809 && GET_CODE (XEXP (x, 0)) == SUBREG
4810 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
4811 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
4812 && (op == EQ || op == NE
4813 || op == GEU || op == GTU || op == LTU || op == LEU)
4814 && GET_CODE (y) == CONST_INT)
4815 return CC_Zmode;
4816
4817   /* A construct for a conditional compare: if the false arm contains
4818      0, then both conditions must be true; otherwise either condition
4819 must be true. Not all conditions are possible, so CCmode is
4820 returned if it can't be done. */
4821 if (GET_CODE (x) == IF_THEN_ELSE
4822 && (XEXP (x, 2) == const0_rtx
4823 || XEXP (x, 2) == const1_rtx)
4824 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4825 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4826 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
4827 INTVAL (XEXP (x, 2)));
4828
4829 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
4830 if (GET_CODE (x) == AND
4831 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4832 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4833 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1), 0);
4834
4835 if (GET_CODE (x) == IOR
4836 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4837 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4838 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1), 2);
4839
4840   /* For an operation that sets the condition codes as a side-effect, the
4841      V flag is not set correctly, so we can only use comparisons where
4842      this doesn't matter.  (For LT and GE we can use "mi" and "pl"
4843      instead.)  */
4844 if (GET_MODE (x) == SImode
4845 && y == const0_rtx
4846 && (op == EQ || op == NE || op == LT || op == GE)
4847 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
4848 || GET_CODE (x) == AND || GET_CODE (x) == IOR
4849 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
4850 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
4851 || GET_CODE (x) == LSHIFTRT
4852 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4853 || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
4854 return CC_NOOVmode;
4855
4856 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
4857 return CC_Zmode;
4858
4859 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
4860 && GET_CODE (x) == PLUS
4861 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
4862 return CC_Cmode;
4863
4864 return CCmode;
4865 }
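
/* A worked example: for the unsigned comparison (a + b) < b, i.e.
   op == LTU with x == (plus a b) and y == b, the final test above
   matches and CC_Cmode is returned; the carry flag set by the
   addition already says whether it wrapped, which is the usual
   idiom in multi-word arithmetic.  */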
4866
4867 /* X and Y are two things to compare using CODE. Emit the compare insn and
4868    return the rtx for the CC register in the proper mode, as chosen by
4869    SELECT_CC_MODE. */
4870
4871 rtx
4872 arm_gen_compare_reg (code, x, y)
4873 enum rtx_code code;
4874 rtx x, y;
4875 {
4876 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
4877 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
4878
4879 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
4880 gen_rtx_COMPARE (mode, x, y)));
4881
4882 return cc_reg;
4883 }
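
/* For illustration (a hypothetical caller, not code from this
   file): a conditional branch is typically built by testing the
   returned register against zero with the same comparison code,
   e.g.

	rtx cc = arm_gen_compare_reg (code, x, y);
	... gen_rtx_IF_THEN_ELSE (VOIDmode,
				  gen_rtx_fmt_ee (code, VOIDmode,
						  cc, const0_rtx),
				  label, pc_rtx) ...

   so the compare is emitted once and later insns refer to it via
   the CC register and its mode.  */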
4884
4885 void
4886 arm_reload_in_hi (operands)
4887 rtx * operands;
4888 {
4889 rtx ref = operands[1];
4890 rtx base, scratch;
4891 HOST_WIDE_INT offset = 0;
4892
4893 if (GET_CODE (ref) == SUBREG)
4894 {
4895 offset = SUBREG_BYTE (ref);
4896 ref = SUBREG_REG (ref);
4897 }
4898
4899 if (GET_CODE (ref) == REG)
4900 {
4901 /* We have a pseudo which has been spilt onto the stack; there
4902 are two cases here: the first where there is a simple
4903 stack-slot replacement and a second where the stack-slot is
4904 out of range, or is used as a subreg. */
4905 if (reg_equiv_mem[REGNO (ref)])
4906 {
4907 ref = reg_equiv_mem[REGNO (ref)];
4908 base = find_replacement (&XEXP (ref, 0));
4909 }
4910 else
4911 /* The slot is out of range, or was dressed up in a SUBREG. */
4912 base = reg_equiv_address[REGNO (ref)];
4913 }
4914 else
4915 base = find_replacement (&XEXP (ref, 0));
4916
4917 /* Handle the case where the address is too complex to be offset by 1. */
4918 if (GET_CODE (base) == MINUS
4919 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
4920 {
4921 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
4922
4923 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
4924 base = base_plus;
4925 }
4926 else if (GET_CODE (base) == PLUS)
4927 {
4928 /* The addend must be CONST_INT, or we would have dealt with it above. */
4929 HOST_WIDE_INT hi, lo;
4930
4931 offset += INTVAL (XEXP (base, 1));
4932 base = XEXP (base, 0);
4933
4934 /* Rework the address into a legal sequence of insns. */
4935 /* Valid range for lo is -4095 -> 4095 */
4936 lo = (offset >= 0
4937 ? (offset & 0xfff)
4938 : -((-offset) & 0xfff));
4939
4940 /* Corner case, if lo is the max offset then we would be out of range
4941 once we have added the additional 1 below, so bump the msb into the
4942 pre-loading insn(s). */
4943 if (lo == 4095)
4944 lo &= 0x7ff;
4945
4946 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
4947 ^ (HOST_WIDE_INT) 0x80000000)
4948 - (HOST_WIDE_INT) 0x80000000);
4949
4950 if (hi + lo != offset)
4951 abort ();
4952
4953 if (hi != 0)
4954 {
4955 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
4956
4957 /* Get the base address; addsi3 knows how to handle constants
4958 that require more than one insn. */
4959 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
4960 base = base_plus;
4961 offset = lo;
4962 }
4963 }
4964
4965 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
4966 emit_insn (gen_zero_extendqisi2 (scratch,
4967 gen_rtx_MEM (QImode,
4968 plus_constant (base,
4969 offset))));
4970 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
4971 gen_rtx_MEM (QImode,
4972 plus_constant (base,
4973 offset + 1))));
4974 if (!BYTES_BIG_ENDIAN)
4975 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
4976 gen_rtx_IOR (SImode,
4977 gen_rtx_ASHIFT
4978 (SImode,
4979 gen_rtx_SUBREG (SImode, operands[0], 0),
4980 GEN_INT (8)),
4981 scratch)));
4982 else
4983 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
4984 gen_rtx_IOR (SImode,
4985 gen_rtx_ASHIFT (SImode, scratch,
4986 GEN_INT (8)),
4987 gen_rtx_SUBREG (SImode, operands[0],
4988 0))));
4989 }
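
/* For illustration, on a little-endian target the sequence emitted
   above corresponds to roughly

	ldrb	rscratch, [rbase, #offset]
	ldrb	rdest, [rbase, #offset + 1]
	orr	rdest, rscratch, rdest, lsl #8

   i.e. the halfword is assembled from two byte loads, so no
   unaligned halfword access is ever issued.  */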
4990
4991 /* Handle storing a half-word to memory during reload by synthesising as two
4992 byte stores. Take care not to clobber the input values until after we
4993 have moved them somewhere safe. This code assumes that if the DImode
4994 scratch in operands[2] overlaps either the input value or output address
4995 in some way, then that value must die in this insn (we absolutely need
4996 two scratch registers for some corner cases). */
4997
4998 void
4999 arm_reload_out_hi (operands)
5000 rtx * operands;
5001 {
5002 rtx ref = operands[0];
5003 rtx outval = operands[1];
5004 rtx base, scratch;
5005 HOST_WIDE_INT offset = 0;
5006
5007 if (GET_CODE (ref) == SUBREG)
5008 {
5009 offset = SUBREG_BYTE (ref);
5010 ref = SUBREG_REG (ref);
5011 }
5012
5013 if (GET_CODE (ref) == REG)
5014 {
5015 /* We have a pseudo which has been spilt onto the stack; there
5016 are two cases here: the first where there is a simple
5017 stack-slot replacement and a second where the stack-slot is
5018 out of range, or is used as a subreg. */
5019 if (reg_equiv_mem[REGNO (ref)])
5020 {
5021 ref = reg_equiv_mem[REGNO (ref)];
5022 base = find_replacement (&XEXP (ref, 0));
5023 }
5024 else
5025 /* The slot is out of range, or was dressed up in a SUBREG. */
5026 base = reg_equiv_address[REGNO (ref)];
5027 }
5028 else
5029 base = find_replacement (&XEXP (ref, 0));
5030
5031 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5032
5033 /* Handle the case where the address is too complex to be offset by 1. */
5034 if (GET_CODE (base) == MINUS
5035 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5036 {
5037 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5038
5039 /* Be careful not to destroy OUTVAL. */
5040 if (reg_overlap_mentioned_p (base_plus, outval))
5041 {
5042 	  /* Updating base_plus might destroy outval; see if we can
5043 	     swap the scratch and base_plus. */
5044 if (!reg_overlap_mentioned_p (scratch, outval))
5045 {
5046 rtx tmp = scratch;
5047 scratch = base_plus;
5048 base_plus = tmp;
5049 }
5050 else
5051 {
5052 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5053
5054 	      /* Be conservative and copy OUTVAL into the scratch now;
5055 		 this should only be necessary if outval is a subreg
5056 		 of something larger than a word. */
5057 /* XXX Might this clobber base? I can't see how it can,
5058 since scratch is known to overlap with OUTVAL, and
5059 must be wider than a word. */
5060 emit_insn (gen_movhi (scratch_hi, outval));
5061 outval = scratch_hi;
5062 }
5063 }
5064
5065 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5066 base = base_plus;
5067 }
5068 else if (GET_CODE (base) == PLUS)
5069 {
5070 /* The addend must be CONST_INT, or we would have dealt with it above. */
5071 HOST_WIDE_INT hi, lo;
5072
5073 offset += INTVAL (XEXP (base, 1));
5074 base = XEXP (base, 0);
5075
5076 /* Rework the address into a legal sequence of insns. */
5077 /* Valid range for lo is -4095 -> 4095 */
5078 lo = (offset >= 0
5079 ? (offset & 0xfff)
5080 : -((-offset) & 0xfff));
5081
5082 /* Corner case, if lo is the max offset then we would be out of range
5083 once we have added the additional 1 below, so bump the msb into the
5084 pre-loading insn(s). */
5085 if (lo == 4095)
5086 lo &= 0x7ff;
5087
5088 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5089 ^ (HOST_WIDE_INT) 0x80000000)
5090 - (HOST_WIDE_INT) 0x80000000);
5091
5092 if (hi + lo != offset)
5093 abort ();
5094
5095 if (hi != 0)
5096 {
5097 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5098
5099 /* Be careful not to destroy OUTVAL. */
5100 if (reg_overlap_mentioned_p (base_plus, outval))
5101 {
5102 	      /* Updating base_plus might destroy outval; see if we
5103 		 can swap the scratch and base_plus. */
5104 if (!reg_overlap_mentioned_p (scratch, outval))
5105 {
5106 rtx tmp = scratch;
5107 scratch = base_plus;
5108 base_plus = tmp;
5109 }
5110 else
5111 {
5112 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5113
5114 		  /* Be conservative and copy outval into scratch now;
5115 		     this should only be necessary if outval is a
5116 		     subreg of something larger than a word. */
5117 /* XXX Might this clobber base? I can't see how it
5118 can, since scratch is known to overlap with
5119 outval. */
5120 emit_insn (gen_movhi (scratch_hi, outval));
5121 outval = scratch_hi;
5122 }
5123 }
5124
5125 /* Get the base address; addsi3 knows how to handle constants
5126 that require more than one insn. */
5127 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5128 base = base_plus;
5129 offset = lo;
5130 }
5131 }
5132
5133 if (BYTES_BIG_ENDIAN)
5134 {
5135 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
5136 plus_constant (base, offset + 1)),
5137 gen_lowpart (QImode, outval)));
5138 emit_insn (gen_lshrsi3 (scratch,
5139 gen_rtx_SUBREG (SImode, outval, 0),
5140 GEN_INT (8)));
5141 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
5142 gen_lowpart (QImode, scratch)));
5143 }
5144 else
5145 {
5146 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
5147 gen_lowpart (QImode, outval)));
5148 emit_insn (gen_lshrsi3 (scratch,
5149 gen_rtx_SUBREG (SImode, outval, 0),
5150 GEN_INT (8)));
5151 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
5152 plus_constant (base, offset + 1)),
5153 gen_lowpart (QImode, scratch)));
5154 }
5155 }
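
/* For illustration, the little-endian arm of the code above emits
   the equivalent of

	strb	routval, [rbase, #offset]
	mov	rscratch, routval, lsr #8
	strb	rscratch, [rbase, #offset + 1]

   storing the low byte and then the high byte, again avoiding any
   unaligned halfword access.  */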
5156 \f
5157 /* Print a symbolic form of X to the debug file, F. */
5158
5159 static void
5160 arm_print_value (f, x)
5161 FILE * f;
5162 rtx x;
5163 {
5164 switch (GET_CODE (x))
5165 {
5166 case CONST_INT:
5167 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
5168 return;
5169
5170 case CONST_DOUBLE:
5171 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
5172 return;
5173
5174 case CONST_STRING:
5175 fprintf (f, "\"%s\"", XSTR (x, 0));
5176 return;
5177
5178 case SYMBOL_REF:
5179 fprintf (f, "`%s'", XSTR (x, 0));
5180 return;
5181
5182 case LABEL_REF:
5183 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
5184 return;
5185
5186 case CONST:
5187 arm_print_value (f, XEXP (x, 0));
5188 return;
5189
5190 case PLUS:
5191 arm_print_value (f, XEXP (x, 0));
5192 fprintf (f, "+");
5193 arm_print_value (f, XEXP (x, 1));
5194 return;
5195
5196 case PC:
5197 fprintf (f, "pc");
5198 return;
5199
5200 default:
5201 fprintf (f, "????");
5202 return;
5203 }
5204 }
5205 \f
5206 /* Routines for manipulation of the constant pool. */
5207
5208 /* Arm instructions cannot load a large constant directly into a
5209 register; they have to come from a pc relative load. The constant
5210 must therefore be placed in the addressable range of the pc
5211 relative load. Depending on the precise pc relative load
5212 instruction the range is somewhere between 256 bytes and 4k. This
5213 means that we often have to dump a constant inside a function, and
5214 generate code to branch around it.
5215
5216 It is important to minimize this, since the branches will slow
5217 things down and make the code larger.
5218
5219 Normally we can hide the table after an existing unconditional
5220 branch so that there is no interruption of the flow, but in the
5221 worst case the code looks like this:
5222
5223 ldr rn, L1
5224 ...
5225 b L2
5226 align
5227 L1: .long value
5228 L2:
5229 ...
5230
5231 ldr rn, L3
5232 ...
5233 b L4
5234 align
5235 L3: .long value
5236 L4:
5237 ...
5238
5239 We fix this by performing a scan after scheduling, which notices
5240 which instructions need to have their operands fetched from the
5241 constant table and builds the table.
5242
5243 The algorithm starts by building a table of all the constants that
5244 need fixing up and all the natural barriers in the function (places
5245 where a constant table can be dropped without breaking the flow).
5246 For each fixup we note how far the pc-relative replacement will be
5247 able to reach and the offset of the instruction into the function.
5248
5249 Having built the table we then group the fixes together to form
5250 tables that are as large as possible (subject to addressing
5251 constraints) and emit each table of constants after the last
5252 barrier that is within range of all the instructions in the group.
5253 If a group does not contain a barrier, then we forcibly create one
5254 by inserting a jump instruction into the flow. Once the table has
5255 been inserted, the insns are then modified to reference the
5256 relevant entry in the pool.
5257
5258 Possible enhancements to the algorithm (not implemented) are:
5259
5260 1) For some processors and object formats, there may be benefit in
5261 aligning the pools to the start of cache lines; this alignment
5262 would need to be taken into account when calculating addressability
5263 of a pool. */
5264
5265 /* These typedefs are located at the start of this file, so that
5266 they can be used in the prototypes there. This comment is to
5267 remind readers of that fact so that the following structures
5268 can be understood more easily.
5269
5270 typedef struct minipool_node Mnode;
5271 typedef struct minipool_fixup Mfix; */
5272
5273 struct minipool_node
5274 {
5275 /* Doubly linked chain of entries. */
5276 Mnode * next;
5277 Mnode * prev;
5278   /* The maximum offset into the code at which this entry can be placed. While
5279 pushing fixes for forward references, all entries are sorted in order
5280 of increasing max_address. */
5281 HOST_WIDE_INT max_address;
5282 /* Similarly for an entry inserted for a backwards ref. */
5283 HOST_WIDE_INT min_address;
5284 /* The number of fixes referencing this entry. This can become zero
5285 if we "unpush" an entry. In this case we ignore the entry when we
5286 come to emit the code. */
5287 int refcount;
5288 /* The offset from the start of the minipool. */
5289 HOST_WIDE_INT offset;
5290   /* The value in the table. */
5291 rtx value;
5292 /* The mode of value. */
5293 enum machine_mode mode;
5294   int fix_size;	/* The size of the value, in bytes. */
5295 };
5296
5297 struct minipool_fixup
5298 {
5299 Mfix * next;
5300 rtx insn;
5301 HOST_WIDE_INT address;
5302 rtx * loc;
5303 enum machine_mode mode;
5304 int fix_size;
5305 rtx value;
5306 Mnode * minipool;
5307 HOST_WIDE_INT forwards;
5308 HOST_WIDE_INT backwards;
5309 };
5310
5311 /* Fixes less than a word need padding out to a word boundary. */
5312 #define MINIPOOL_FIX_SIZE(mode) \
5313 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
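
/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE
   (HImode) are both 4, since sub-word constants still occupy a full
   word slot in the pool, while MINIPOOL_FIX_SIZE (DImode) is 8.  */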
5314
5315 static Mnode * minipool_vector_head;
5316 static Mnode * minipool_vector_tail;
5317 static rtx minipool_vector_label;
5318
5319 /* The linked list of all minipool fixes required for this function. */
5320 Mfix * minipool_fix_head;
5321 Mfix * minipool_fix_tail;
5322 /* The fix entry for the current minipool, once it has been placed. */
5323 Mfix * minipool_barrier;
5324
5325 /* Determines if INSN is the start of a jump table. Returns the end
5326 of the TABLE or NULL_RTX. */
5327
5328 static rtx
5329 is_jump_table (insn)
5330 rtx insn;
5331 {
5332 rtx table;
5333
5334 if (GET_CODE (insn) == JUMP_INSN
5335 && JUMP_LABEL (insn) != NULL
5336 && ((table = next_real_insn (JUMP_LABEL (insn)))
5337 == next_real_insn (insn))
5338 && table != NULL
5339 && GET_CODE (table) == JUMP_INSN
5340 && (GET_CODE (PATTERN (table)) == ADDR_VEC
5341 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
5342 return table;
5343
5344 return NULL_RTX;
5345 }
5346
5347 #ifndef JUMP_TABLES_IN_TEXT_SECTION
5348 #define JUMP_TABLES_IN_TEXT_SECTION 0
5349 #endif
5350
5351 static HOST_WIDE_INT
5352 get_jump_table_size (insn)
5353 rtx insn;
5354 {
5355   /* ADDR_VECs only take room if read-only data goes into the text
5356 section. */
5357 if (JUMP_TABLES_IN_TEXT_SECTION
5358 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
5359 || 1
5360 #endif
5361 )
5362 {
5363 rtx body = PATTERN (insn);
5364 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
5365
5366 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
5367 }
5368
5369 return 0;
5370 }
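
/* A worked example: an ADDR_DIFF_VEC in SImode with eight entries
   accounts for 4 * 8 = 32 bytes when jump tables are emitted into
   the text section, and for 0 bytes when they are placed in a
   separate read-only data section.  */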
5371
5372 /* Move a minipool fix MP from its current location to before MAX_MP.
5373 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
5374    constraints may need updating. */
5375
5376 static Mnode *
5377 move_minipool_fix_forward_ref (mp, max_mp, max_address)
5378 Mnode * mp;
5379 Mnode * max_mp;
5380 HOST_WIDE_INT max_address;
5381 {
5382 /* This should never be true and the code below assumes these are
5383 different. */
5384 if (mp == max_mp)
5385 abort ();
5386
5387 if (max_mp == NULL)
5388 {
5389 if (max_address < mp->max_address)
5390 mp->max_address = max_address;
5391 }
5392 else
5393 {
5394 if (max_address > max_mp->max_address - mp->fix_size)
5395 mp->max_address = max_mp->max_address - mp->fix_size;
5396 else
5397 mp->max_address = max_address;
5398
5399 /* Unlink MP from its current position. Since max_mp is non-null,
5400 mp->prev must be non-null. */
5401 mp->prev->next = mp->next;
5402 if (mp->next != NULL)
5403 mp->next->prev = mp->prev;
5404 else
5405 minipool_vector_tail = mp->prev;
5406
5407 /* Re-insert it before MAX_MP. */
5408 mp->next = max_mp;
5409 mp->prev = max_mp->prev;
5410 max_mp->prev = mp;
5411
5412 if (mp->prev != NULL)
5413 mp->prev->next = mp;
5414 else
5415 minipool_vector_head = mp;
5416 }
5417
5418 /* Save the new entry. */
5419 max_mp = mp;
5420
5421 /* Scan over the preceding entries and adjust their addresses as
5422 required. */
5423 while (mp->prev != NULL
5424 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5425 {
5426 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5427 mp = mp->prev;
5428 }
5429
5430 return max_mp;
5431 }
5432
5433 /* Add a constant to the minipool for a forward reference. Returns the
5434 node added or NULL if the constant will not fit in this pool. */
5435
5436 static Mnode *
5437 add_minipool_forward_ref (fix)
5438 Mfix * fix;
5439 {
5440 /* If set, max_mp is the first pool_entry that has a lower
5441 constraint than the one we are trying to add. */
5442 Mnode * max_mp = NULL;
5443 HOST_WIDE_INT max_address = fix->address + fix->forwards;
5444 Mnode * mp;
5445
5446 /* If this fix's address is greater than the address of the first
5447 entry, then we can't put the fix in this pool. We subtract the
5448 size of the current fix to ensure that if the table is fully
5449      packed we still have enough room to insert this value by shuffling
5450 the other fixes forwards. */
5451 if (minipool_vector_head &&
5452 fix->address >= minipool_vector_head->max_address - fix->fix_size)
5453 return NULL;
5454
5455 /* Scan the pool to see if a constant with the same value has
5456 already been added. While we are doing this, also note the
5457 location where we must insert the constant if it doesn't already
5458 exist. */
5459 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5460 {
5461 if (GET_CODE (fix->value) == GET_CODE (mp->value)
5462 && fix->mode == mp->mode
5463 && (GET_CODE (fix->value) != CODE_LABEL
5464 || (CODE_LABEL_NUMBER (fix->value)
5465 == CODE_LABEL_NUMBER (mp->value)))
5466 && rtx_equal_p (fix->value, mp->value))
5467 {
5468 /* More than one fix references this entry. */
5469 mp->refcount++;
5470 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
5471 }
5472
5473 /* Note the insertion point if necessary. */
5474 if (max_mp == NULL
5475 && mp->max_address > max_address)
5476 max_mp = mp;
5477 }
5478
5479 /* The value is not currently in the minipool, so we need to create
5480 a new entry for it. If MAX_MP is NULL, the entry will be put on
5481 the end of the list since the placement is less constrained than
5482 any existing entry. Otherwise, we insert the new fix before
5483      MAX_MP and, if necessary, adjust the constraints on the other
5484 entries. */
5485 mp = xmalloc (sizeof (* mp));
5486 mp->fix_size = fix->fix_size;
5487 mp->mode = fix->mode;
5488 mp->value = fix->value;
5489 mp->refcount = 1;
5490 /* Not yet required for a backwards ref. */
5491 mp->min_address = -65536;
5492
5493 if (max_mp == NULL)
5494 {
5495 mp->max_address = max_address;
5496 mp->next = NULL;
5497 mp->prev = minipool_vector_tail;
5498
5499 if (mp->prev == NULL)
5500 {
5501 minipool_vector_head = mp;
5502 minipool_vector_label = gen_label_rtx ();
5503 }
5504 else
5505 mp->prev->next = mp;
5506
5507 minipool_vector_tail = mp;
5508 }
5509 else
5510 {
5511 if (max_address > max_mp->max_address - mp->fix_size)
5512 mp->max_address = max_mp->max_address - mp->fix_size;
5513 else
5514 mp->max_address = max_address;
5515
5516 mp->next = max_mp;
5517 mp->prev = max_mp->prev;
5518 max_mp->prev = mp;
5519 if (mp->prev != NULL)
5520 mp->prev->next = mp;
5521 else
5522 minipool_vector_head = mp;
5523 }
5524
5525 /* Save the new entry. */
5526 max_mp = mp;
5527
5528 /* Scan over the preceding entries and adjust their addresses as
5529 required. */
5530 while (mp->prev != NULL
5531 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5532 {
5533 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5534 mp = mp->prev;
5535 }
5536
5537 return max_mp;
5538 }
5539
5540 static Mnode *
5541 move_minipool_fix_backward_ref (mp, min_mp, min_address)
5542 Mnode * mp;
5543 Mnode * min_mp;
5544 HOST_WIDE_INT min_address;
5545 {
5546 HOST_WIDE_INT offset;
5547
5548 /* This should never be true, and the code below assumes these are
5549 different. */
5550 if (mp == min_mp)
5551 abort ();
5552
5553 if (min_mp == NULL)
5554 {
5555 if (min_address > mp->min_address)
5556 mp->min_address = min_address;
5557 }
5558 else
5559 {
5560 /* We will adjust this below if it is too loose. */
5561 mp->min_address = min_address;
5562
5563 /* Unlink MP from its current position. Since min_mp is non-null,
5564 mp->next must be non-null. */
5565 mp->next->prev = mp->prev;
5566 if (mp->prev != NULL)
5567 mp->prev->next = mp->next;
5568 else
5569 minipool_vector_head = mp->next;
5570
5571 /* Reinsert it after MIN_MP. */
5572 mp->prev = min_mp;
5573 mp->next = min_mp->next;
5574 min_mp->next = mp;
5575 if (mp->next != NULL)
5576 mp->next->prev = mp;
5577 else
5578 minipool_vector_tail = mp;
5579 }
5580
5581 min_mp = mp;
5582
5583 offset = 0;
5584 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5585 {
5586 mp->offset = offset;
5587 if (mp->refcount > 0)
5588 offset += mp->fix_size;
5589
5590 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
5591 mp->next->min_address = mp->min_address + mp->fix_size;
5592 }
5593
5594 return min_mp;
5595 }
5596
5597 /* Add a constant to the minipool for a backward reference. Returns the
5598 node added or NULL if the constant will not fit in this pool.
5599
5600 Note that the code for insertion for a backwards reference can be
5601 somewhat confusing because the calculated offsets for each fix do
5602 not take into account the size of the pool (which is still under
5603    construction). */
5604
5605 static Mnode *
5606 add_minipool_backward_ref (fix)
5607 Mfix * fix;
5608 {
5609 /* If set, min_mp is the last pool_entry that has a lower constraint
5610 than the one we are trying to add. */
5611 Mnode * min_mp = NULL;
5612 /* This can be negative, since it is only a constraint. */
5613 HOST_WIDE_INT min_address = fix->address - fix->backwards;
5614 Mnode * mp;
5615
5616 /* If we can't reach the current pool from this insn, or if we can't
5617 insert this entry at the end of the pool without pushing other
5618 fixes out of range, then we don't try. This ensures that we
5619 can't fail later on. */
5620 if (min_address >= minipool_barrier->address
5621 || (minipool_vector_tail->min_address + fix->fix_size
5622 >= minipool_barrier->address))
5623 return NULL;
5624
5625 /* Scan the pool to see if a constant with the same value has
5626 already been added. While we are doing this, also note the
5627 location where we must insert the constant if it doesn't already
5628 exist. */
5629 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
5630 {
5631 if (GET_CODE (fix->value) == GET_CODE (mp->value)
5632 && fix->mode == mp->mode
5633 && (GET_CODE (fix->value) != CODE_LABEL
5634 || (CODE_LABEL_NUMBER (fix->value)
5635 == CODE_LABEL_NUMBER (mp->value)))
5636 && rtx_equal_p (fix->value, mp->value)
5637 /* Check that there is enough slack to move this entry to the
5638 end of the table (this is conservative). */
5639 && (mp->max_address
5640 > (minipool_barrier->address
5641 + minipool_vector_tail->offset
5642 + minipool_vector_tail->fix_size)))
5643 {
5644 mp->refcount++;
5645 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
5646 }
5647
5648 if (min_mp != NULL)
5649 mp->min_address += fix->fix_size;
5650 else
5651 {
5652 /* Note the insertion point if necessary. */
5653 if (mp->min_address < min_address)
5654 min_mp = mp;
5655 else if (mp->max_address
5656 < minipool_barrier->address + mp->offset + fix->fix_size)
5657 {
5658 /* Inserting before this entry would push the fix beyond
5659 its maximum address (which can happen if we have
5660 re-located a forwards fix); force the new fix to come
5661 after it. */
5662 min_mp = mp;
5663 min_address = mp->min_address + fix->fix_size;
5664 }
5665 }
5666 }
5667
5668 /* We need to create a new entry. */
5669 mp = xmalloc (sizeof (* mp));
5670 mp->fix_size = fix->fix_size;
5671 mp->mode = fix->mode;
5672 mp->value = fix->value;
5673 mp->refcount = 1;
5674 mp->max_address = minipool_barrier->address + 65536;
5675
5676 mp->min_address = min_address;
5677
5678 if (min_mp == NULL)
5679 {
5680 mp->prev = NULL;
5681 mp->next = minipool_vector_head;
5682
5683 if (mp->next == NULL)
5684 {
5685 minipool_vector_tail = mp;
5686 minipool_vector_label = gen_label_rtx ();
5687 }
5688 else
5689 mp->next->prev = mp;
5690
5691 minipool_vector_head = mp;
5692 }
5693 else
5694 {
5695 mp->next = min_mp->next;
5696 mp->prev = min_mp;
5697 min_mp->next = mp;
5698
5699 if (mp->next != NULL)
5700 mp->next->prev = mp;
5701 else
5702 minipool_vector_tail = mp;
5703 }
5704
5705 /* Save the new entry. */
5706 min_mp = mp;
5707
5708 if (mp->prev)
5709 mp = mp->prev;
5710 else
5711 mp->offset = 0;
5712
5713 /* Scan over the following entries and adjust their offsets. */
5714 while (mp->next != NULL)
5715 {
5716 if (mp->next->min_address < mp->min_address + mp->fix_size)
5717 mp->next->min_address = mp->min_address + mp->fix_size;
5718
5719 if (mp->refcount)
5720 mp->next->offset = mp->offset + mp->fix_size;
5721 else
5722 mp->next->offset = mp->offset;
5723
5724 mp = mp->next;
5725 }
5726
5727 return min_mp;
5728 }
5729
5730 static void
5731 assign_minipool_offsets (barrier)
5732 Mfix * barrier;
5733 {
5734 HOST_WIDE_INT offset = 0;
5735 Mnode * mp;
5736
5737 minipool_barrier = barrier;
5738
5739 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5740 {
5741 mp->offset = offset;
5742
5743 if (mp->refcount > 0)
5744 offset += mp->fix_size;
5745 }
5746 }
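
/* A worked example: for live entries of sizes 4, 8 and 4 the loop
   above assigns offsets 0, 4 and 12.  An entry whose refcount has
   dropped to zero receives the current offset but contributes no
   size, since it will not be emitted.  */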
5747
5748 /* Output the literal table. */
5749 static void
5750 dump_minipool (scan)
5751 rtx scan;
5752 {
5753 Mnode * mp;
5754 Mnode * nmp;
5755
5756 if (rtl_dump_file)
5757 fprintf (rtl_dump_file,
5758 ";; Emitting minipool after insn %u; address %ld\n",
5759 INSN_UID (scan), (unsigned long) minipool_barrier->address);
5760
5761 scan = emit_label_after (gen_label_rtx (), scan);
5762 scan = emit_insn_after (gen_align_4 (), scan);
5763 scan = emit_label_after (minipool_vector_label, scan);
5764
5765 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
5766 {
5767 if (mp->refcount > 0)
5768 {
5769 if (rtl_dump_file)
5770 {
5771 fprintf (rtl_dump_file,
5772 ";; Offset %u, min %ld, max %ld ",
5773 (unsigned) mp->offset, (unsigned long) mp->min_address,
5774 (unsigned long) mp->max_address);
5775 arm_print_value (rtl_dump_file, mp->value);
5776 fputc ('\n', rtl_dump_file);
5777 }
5778
5779 switch (mp->fix_size)
5780 {
5781 #ifdef HAVE_consttable_1
5782 case 1:
5783 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
5784 break;
5785
5786 #endif
5787 #ifdef HAVE_consttable_2
5788 case 2:
5789 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
5790 break;
5791
5792 #endif
5793 #ifdef HAVE_consttable_4
5794 case 4:
5795 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
5796 break;
5797
5798 #endif
5799 #ifdef HAVE_consttable_8
5800 case 8:
5801 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
5802 break;
5803
5804 #endif
5805 default:
5806 abort ();
5807 break;
5808 }
5809 }
5810
5811 nmp = mp->next;
5812 free (mp);
5813 }
5814
5815 minipool_vector_head = minipool_vector_tail = NULL;
5816 scan = emit_insn_after (gen_consttable_end (), scan);
5817 scan = emit_barrier_after (scan);
5818 }
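
/* A worked example (illustrative): for a pool holding a single live
   SImode constant, the code above emits roughly

       .LCB:
               .align  2
       .LCP:
               .word   0x12345678

   followed by a barrier, where .LCP stands for minipool_vector_label,
   the label that the fixed-up loads reference.  The label names here
   are made up; the real ones come from gen_label_rtx.  */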
5819
5820 /* Return the cost of forcibly inserting a barrier after INSN. */
5821
5822 static int
5823 arm_barrier_cost (insn)
5824 rtx insn;
5825 {
5826 /* Basing the location of the pool on the loop depth is preferable,
5827 but at the moment, the basic block information seems to be
5828 corrupt by this stage of the compilation. */
5829 int base_cost = 50;
5830 rtx next = next_nonnote_insn (insn);
5831
5832 if (next != NULL && GET_CODE (next) == CODE_LABEL)
5833 base_cost -= 20;
5834
5835 switch (GET_CODE (insn))
5836 {
5837 case CODE_LABEL:
5838 /* It will always be better to place the table before the label, rather
5839 than after it. */
5840 return 50;
5841
5842 case INSN:
5843 case CALL_INSN:
5844 return base_cost;
5845
5846 case JUMP_INSN:
5847 return base_cost - 10;
5848
5849 default:
5850 return base_cost + 10;
5851 }
5852 }
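
/* A worked example (illustrative): an ordinary INSN whose successor is
   a CODE_LABEL costs 50 - 20 = 30, while a JUMP_INSN not followed by a
   label costs 50 - 10 = 40, so the gap just before a label is the
   preferred spot to park a pool.  */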
5853
5854 /* Find the best place in the insn stream in the range
5855 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
5856 Create the barrier by inserting a jump and add a new fix entry for
5857 it. */
5858
5859 static Mfix *
5860 create_fix_barrier (fix, max_address)
5861 Mfix * fix;
5862 HOST_WIDE_INT max_address;
5863 {
5864 HOST_WIDE_INT count = 0;
5865 rtx barrier;
5866 rtx from = fix->insn;
5867 rtx selected = from;
5868 int selected_cost;
5869 HOST_WIDE_INT selected_address;
5870 Mfix * new_fix;
5871 HOST_WIDE_INT max_count = max_address - fix->address;
5872 rtx label = gen_label_rtx ();
5873
5874 selected_cost = arm_barrier_cost (from);
5875 selected_address = fix->address;
5876
5877 while (from && count < max_count)
5878 {
5879 rtx tmp;
5880 int new_cost;
5881
5882 /* This code shouldn't have been called if there was a natural barrier
5883 within range. */
5884 if (GET_CODE (from) == BARRIER)
5885 abort ();
5886
5887 /* Count the length of this insn. */
5888 count += get_attr_length (from);
5889
5890 /* If there is a jump table, add its length. */
5891 tmp = is_jump_table (from);
5892 if (tmp != NULL)
5893 {
5894 count += get_jump_table_size (tmp);
5895
5896 /* Jump tables aren't in a basic block, so base the cost on
5897 the dispatch insn. If we select this location, we will
5898 still put the pool after the table. */
5899 new_cost = arm_barrier_cost (from);
5900
5901 if (count < max_count && new_cost <= selected_cost)
5902 {
5903 selected = tmp;
5904 selected_cost = new_cost;
5905 selected_address = fix->address + count;
5906 }
5907
5908 /* Continue after the dispatch table. */
5909 from = NEXT_INSN (tmp);
5910 continue;
5911 }
5912
5913 new_cost = arm_barrier_cost (from);
5914
5915 if (count < max_count && new_cost <= selected_cost)
5916 {
5917 selected = from;
5918 selected_cost = new_cost;
5919 selected_address = fix->address + count;
5920 }
5921
5922 from = NEXT_INSN (from);
5923 }
5924
5925 /* Create a new JUMP_INSN that branches around a barrier. */
5926 from = emit_jump_insn_after (gen_jump (label), selected);
5927 JUMP_LABEL (from) = label;
5928 barrier = emit_barrier_after (from);
5929 emit_label_after (label, barrier);
5930
5931 /* Create a minipool barrier entry for the new barrier. */
5932 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
5933 new_fix->insn = barrier;
5934 new_fix->address = selected_address;
5935 new_fix->next = fix->next;
5936 fix->next = new_fix;
5937
5938 return new_fix;
5939 }
5940
5941 /* Record that there is a natural barrier in the insn stream at
5942 ADDRESS. */
5943 static void
5944 push_minipool_barrier (insn, address)
5945 rtx insn;
5946 HOST_WIDE_INT address;
5947 {
5948 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
5949
5950 fix->insn = insn;
5951 fix->address = address;
5952
5953 fix->next = NULL;
5954 if (minipool_fix_head != NULL)
5955 minipool_fix_tail->next = fix;
5956 else
5957 minipool_fix_head = fix;
5958
5959 minipool_fix_tail = fix;
5960 }
5961
5962 /* Record INSN, which will need fixing up to load a value from the
5963 minipool. ADDRESS is the offset of the insn since the start of the
5964 function; LOC is a pointer to the part of the insn which requires
5965 fixing; VALUE is the constant that must be loaded, which is of type
5966 MODE. */
5967 static void
5968 push_minipool_fix (insn, address, loc, mode, value)
5969 rtx insn;
5970 HOST_WIDE_INT address;
5971 rtx * loc;
5972 enum machine_mode mode;
5973 rtx value;
5974 {
5975 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
5976
5977 #ifdef AOF_ASSEMBLER
5978 /* PIC symbol references need to be converted into offsets into the
5979 based area. */
5980 /* XXX This shouldn't be done here. */
5981 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
5982 value = aof_pic_entry (value);
5983 #endif /* AOF_ASSEMBLER */
5984
5985 fix->insn = insn;
5986 fix->address = address;
5987 fix->loc = loc;
5988 fix->mode = mode;
5989 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
5990 fix->value = value;
5991 fix->forwards = get_attr_pool_range (insn);
5992 fix->backwards = get_attr_neg_pool_range (insn);
5993 fix->minipool = NULL;
5994
5995 /* If an insn doesn't have a range defined for it, then it isn't
5996 expecting to be reworked by this code. Better to abort now than
5997 to generate duff assembly code. */
5998 if (fix->forwards == 0 && fix->backwards == 0)
5999 abort ();
6000
6001 if (rtl_dump_file)
6002 {
6003 fprintf (rtl_dump_file,
6004 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
6005 GET_MODE_NAME (mode),
6006 INSN_UID (insn), (unsigned long) address,
6007 -1 * (long)fix->backwards, (long)fix->forwards);
6008 arm_print_value (rtl_dump_file, fix->value);
6009 fprintf (rtl_dump_file, "\n");
6010 }
6011
6012 /* Add it to the chain of fixes. */
6013 fix->next = NULL;
6014
6015 if (minipool_fix_head != NULL)
6016 minipool_fix_tail->next = fix;
6017 else
6018 minipool_fix_head = fix;
6019
6020 minipool_fix_tail = fix;
6021 }
6022
6023 /* Scan INSN and note any of its operands that need fixing. */
6024
6025 static void
6026 note_invalid_constants (insn, address)
6027 rtx insn;
6028 HOST_WIDE_INT address;
6029 {
6030 int opno;
6031
6032 extract_insn (insn);
6033
6034 if (!constrain_operands (1))
6035 fatal_insn_not_found (insn);
6036
6037 /* Fill in recog_op_alt with information about the constraints of this
6038 insn. */
6039 preprocess_constraints ();
6040
6041 for (opno = 0; opno < recog_data.n_operands; opno++)
6042 {
6043 /* Things we need to fix can only occur in inputs. */
6044 if (recog_data.operand_type[opno] != OP_IN)
6045 continue;
6046
6047 /* If this alternative is a memory reference, then any mention
6048 of constants in this alternative is really to fool reload
6049 into allowing us to accept one there. We need to fix them up
6050 now so that we output the right code. */
6051 if (recog_op_alt[opno][which_alternative].memory_ok)
6052 {
6053 rtx op = recog_data.operand[opno];
6054
6055 if (CONSTANT_P (op))
6056 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6057 recog_data.operand_mode[opno], op);
6058 #if 0
6059 /* RWE: Now we look correctly at the operands for the insn,
6060 this shouldn't be needed any more. */
6061 #ifndef AOF_ASSEMBLER
6062 /* XXX Is this still needed? */
6063 else if (GET_CODE (op) == UNSPEC && XINT (op, 1) == UNSPEC_PIC_SYM)
6064 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6065 recog_data.operand_mode[opno],
6066 XVECEXP (op, 0, 0));
6067 #endif
6068 #endif
6069 else if (GET_CODE (op) == MEM
6070 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
6071 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
6072 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6073 recog_data.operand_mode[opno],
6074 get_pool_constant (XEXP (op, 0)));
6075 }
6076 }
6077 }
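
/* A worked example (illustrative): for an insn such as
   (set (reg:SI r0) (const_int 0x12345678)), where the constant is only
   accepted via a memory alternative, the code above records a 4-byte
   SImode fix.  arm_reorg later rewrites the operand as a pool
   reference, so the final assembly is a PC-relative load, something
   like "ldr r0, .LCP+8", with the constant placed in the pool by
   dump_minipool.  */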
6078
6079 void
6080 arm_reorg (first)
6081 rtx first;
6082 {
6083 rtx insn;
6084 HOST_WIDE_INT address = 0;
6085 Mfix * fix;
6086
6087 minipool_fix_head = minipool_fix_tail = NULL;
6088
6089 /* The first insn must always be a note, or the code below won't
6090 scan it properly. */
6091 if (GET_CODE (first) != NOTE)
6092 abort ();
6093
6094 /* Scan all the insns and record the operands that will need fixing. */
6095 for (insn = next_nonnote_insn (first); insn; insn = next_nonnote_insn (insn))
6096 {
6097 if (GET_CODE (insn) == BARRIER)
6098 push_minipool_barrier (insn, address);
6099 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
6100 || GET_CODE (insn) == JUMP_INSN)
6101 {
6102 rtx table;
6103
6104 note_invalid_constants (insn, address);
6105 address += get_attr_length (insn);
6106
6107 /* If the insn is a vector jump, add the size of the table
6108 and skip the table. */
6109 if ((table = is_jump_table (insn)) != NULL)
6110 {
6111 address += get_jump_table_size (table);
6112 insn = table;
6113 }
6114 }
6115 }
6116
6117 fix = minipool_fix_head;
6118
6119 /* Now scan the fixups and perform the required changes. */
6120 while (fix)
6121 {
6122 Mfix * ftmp;
6123 Mfix * fdel;
6124 Mfix * last_added_fix;
6125 Mfix * last_barrier = NULL;
6126 Mfix * this_fix;
6127
6128 /* Skip any further barriers before the next fix. */
6129 while (fix && GET_CODE (fix->insn) == BARRIER)
6130 fix = fix->next;
6131
6132 /* No more fixes. */
6133 if (fix == NULL)
6134 break;
6135
6136 last_added_fix = NULL;
6137
6138 for (ftmp = fix; ftmp; ftmp = ftmp->next)
6139 {
6140 if (GET_CODE (ftmp->insn) == BARRIER)
6141 {
6142 if (ftmp->address >= minipool_vector_head->max_address)
6143 break;
6144
6145 last_barrier = ftmp;
6146 }
6147 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
6148 break;
6149
6150 last_added_fix = ftmp; /* Keep track of the last fix added. */
6151 }
6152
6153 /* If we found a barrier, drop back to that; any fixes that we
6154 could have reached but come after the barrier will now go in
6155 the next mini-pool. */
6156 if (last_barrier != NULL)
6157 {
6158 /* Reduce the refcount for those fixes that won't go into this
6159 pool after all. */
6160 for (fdel = last_barrier->next;
6161 fdel && fdel != ftmp;
6162 fdel = fdel->next)
6163 {
6164 fdel->minipool->refcount--;
6165 fdel->minipool = NULL;
6166 }
6167
6168 ftmp = last_barrier;
6169 }
6170 else
6171 {
6172 /* ftmp is the first fix that we can't fit into this pool and
6173 there are no natural barriers that we could use. Insert a
6174 new barrier in the code somewhere between the previous
6175 fix and this one, and arrange to jump around it. */
6176 HOST_WIDE_INT max_address;
6177
6178 /* The last item on the list of fixes must be a barrier, so
6179 we can never run off the end of the list of fixes without
6180 last_barrier being set. */
6181 if (ftmp == NULL)
6182 abort ();
6183
6184 max_address = minipool_vector_head->max_address;
6185 /* Check that there isn't another fix that is in range that
6186 we couldn't fit into this pool because the pool was
6187 already too large: we need to put the pool before such an
6188 instruction. */
6189 if (ftmp->address < max_address)
6190 max_address = ftmp->address;
6191
6192 last_barrier = create_fix_barrier (last_added_fix, max_address);
6193 }
6194
6195 assign_minipool_offsets (last_barrier);
6196
6197 while (ftmp)
6198 {
6199 if (GET_CODE (ftmp->insn) != BARRIER
6200 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
6201 == NULL))
6202 break;
6203
6204 ftmp = ftmp->next;
6205 }
6206
6207 /* Scan over the fixes we have identified for this pool, fixing them
6208 up and adding the constants to the pool itself. */
6209 for (this_fix = fix; this_fix && ftmp != this_fix;
6210 this_fix = this_fix->next)
6211 if (GET_CODE (this_fix->insn) != BARRIER)
6212 {
6213 rtx addr
6214 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
6215 minipool_vector_label),
6216 this_fix->minipool->offset);
6217 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
6218 }
6219
6220 dump_minipool (last_barrier->insn);
6221 fix = ftmp;
6222 }
6223
6224 /* From now on we must synthesize any constants that we can't handle
6225 directly. This can happen if the RTL gets split during final
6226 instruction generation. */
6227 after_arm_reorg = 1;
6228
6229 /* Free the minipool memory. */
6230 obstack_free (&minipool_obstack, minipool_startobj);
6231 }
6232 \f
6233 /* Routines to output assembly language. */
6234
6235 /* If the rtx is the correct value then return the string of the number.
6236 In this way we can ensure that valid double constants are generated even
6237 when cross compiling. */
6238
6239 const char *
6240 fp_immediate_constant (x)
6241 rtx x;
6242 {
6243 REAL_VALUE_TYPE r;
6244 int i;
6245
6246 if (!fpa_consts_inited)
6247 init_fpa_table ();
6248
6249 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6250 for (i = 0; i < 8; i++)
6251 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
6252 return strings_fpa[i];
6253
6254 abort ();
6255 }
6256
6257 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
6258
6259 static const char *
6260 fp_const_from_val (r)
6261 REAL_VALUE_TYPE * r;
6262 {
6263 int i;
6264
6265 if (!fpa_consts_inited)
6266 init_fpa_table ();
6267
6268 for (i = 0; i < 8; i++)
6269 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
6270 return strings_fpa[i];
6271
6272 abort ();
6273 }
6274
6275 /* Output the operands of a LDM/STM instruction to STREAM.
6276 MASK is the ARM register set mask of which only bits 0-15 are important.
6277 REG is the base register, either the frame pointer or the stack pointer,
6278 INSTR is the possibly suffixed load or store instruction. */
6279
6280 static void
6281 print_multi_reg (stream, instr, reg, mask)
6282 FILE * stream;
6283 const char * instr;
6284 int reg;
6285 int mask;
6286 {
6287 int i;
6288 int not_first = FALSE;
6289
6290 fputc ('\t', stream);
6291 asm_fprintf (stream, instr, reg);
6292 fputs (", {", stream);
6293
6294 for (i = 0; i <= LAST_ARM_REGNUM; i++)
6295 if (mask & (1 << i))
6296 {
6297 if (not_first)
6298 fprintf (stream, ", ");
6299
6300 asm_fprintf (stream, "%r", i);
6301 not_first = TRUE;
6302 }
6303
6304 fprintf (stream, "}%s\n", TARGET_APCS_32 ? "" : "^");
6305 }
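
/* A worked example (illustrative):
   print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 0x4030) prints

       ldmfd   sp!, {r4, r5, lr}

   since bits 4, 5 and 14 are set in the mask; in 26-bit mode a
   trailing "^" is added so the flags are transferred as well.  */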
6306
6307 /* Output a 'call' insn. */
6308
6309 const char *
6310 output_call (operands)
6311 rtx * operands;
6312 {
6313 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
6314
6315 if (REGNO (operands[0]) == LR_REGNUM)
6316 {
6317 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
6318 output_asm_insn ("mov%?\t%0, %|lr", operands);
6319 }
6320
6321 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6322
6323 if (TARGET_INTERWORK)
6324 output_asm_insn ("bx%?\t%0", operands);
6325 else
6326 output_asm_insn ("mov%?\t%|pc, %0", operands);
6327
6328 return "";
6329 }
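
/* Note (illustrative): reading the PC on ARM yields the address of the
   current instruction plus 8, so the "mov lr, pc" above leaves LR
   pointing just past the following mov/bx -- exactly the return
   address the callee needs.  */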
6330
6331 static int
6332 eliminate_lr2ip (x)
6333 rtx * x;
6334 {
6335 int something_changed = 0;
6336 rtx x0 = * x;
6337 int code = GET_CODE (x0);
6338 int i, j;
6339 const char * fmt;
6340
6341 switch (code)
6342 {
6343 case REG:
6344 if (REGNO (x0) == LR_REGNUM)
6345 {
6346 *x = gen_rtx_REG (SImode, IP_REGNUM);
6347 return 1;
6348 }
6349 return 0;
6350 default:
6351 /* Scan through the sub-elements and change any references there. */
6352 fmt = GET_RTX_FORMAT (code);
6353
6354 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6355 if (fmt[i] == 'e')
6356 something_changed |= eliminate_lr2ip (&XEXP (x0, i));
6357 else if (fmt[i] == 'E')
6358 for (j = 0; j < XVECLEN (x0, i); j++)
6359 something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
6360
6361 return something_changed;
6362 }
6363 }
6364
6365 /* Output a 'call' insn that is a reference in memory. */
6366
6367 const char *
6368 output_call_mem (operands)
6369 rtx * operands;
6370 {
6371 operands[0] = copy_rtx (operands[0]); /* Be ultra careful. */
6372 /* Handle calls using lr by using ip (which may be clobbered in subr anyway). */
6373 if (eliminate_lr2ip (&operands[0]))
6374 output_asm_insn ("mov%?\t%|ip, %|lr", operands);
6375
6376 if (TARGET_INTERWORK)
6377 {
6378 output_asm_insn ("ldr%?\t%|ip, %0", operands);
6379 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6380 output_asm_insn ("bx%?\t%|ip", operands);
6381 }
6382 else
6383 {
6384 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6385 output_asm_insn ("ldr%?\t%|pc, %0", operands);
6386 }
6387
6388 return "";
6389 }
6390
6391
6392 /* Output a move from arm registers to an fpu register.
6393 OPERANDS[0] is an fpu register.
6394 OPERANDS[1] is the first register of an arm register pair. */
6395
6396 const char *
6397 output_mov_long_double_fpu_from_arm (operands)
6398 rtx * operands;
6399 {
6400 int arm_reg0 = REGNO (operands[1]);
6401 rtx ops[3];
6402
6403 if (arm_reg0 == IP_REGNUM)
6404 abort ();
6405
6406 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6407 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6408 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
6409
6410 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
6411 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
6412
6413 return "";
6414 }
6415
6416 /* Output a move from an fpu register to arm registers.
6417 OPERANDS[0] is the first register of an arm register pair.
6418 OPERANDS[1] is an fpu register. */
6419
6420 const char *
6421 output_mov_long_double_arm_from_fpu (operands)
6422 rtx * operands;
6423 {
6424 int arm_reg0 = REGNO (operands[0]);
6425 rtx ops[3];
6426
6427 if (arm_reg0 == IP_REGNUM)
6428 abort ();
6429
6430 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6431 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6432 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
6433
6434 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
6435 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
6436 return "";
6437 }
6438
6439 /* Output a move from arm registers to arm registers of a long double.
6440 OPERANDS[0] is the destination.
6441 OPERANDS[1] is the source. */
6442
6443 const char *
6444 output_mov_long_double_arm_from_arm (operands)
6445 rtx * operands;
6446 {
6447 /* We have to be careful here because the two might overlap. */
6448 int dest_start = REGNO (operands[0]);
6449 int src_start = REGNO (operands[1]);
6450 rtx ops[2];
6451 int i;
6452
6453 if (dest_start < src_start)
6454 {
6455 for (i = 0; i < 3; i++)
6456 {
6457 ops[0] = gen_rtx_REG (SImode, dest_start + i);
6458 ops[1] = gen_rtx_REG (SImode, src_start + i);
6459 output_asm_insn ("mov%?\t%0, %1", ops);
6460 }
6461 }
6462 else
6463 {
6464 for (i = 2; i >= 0; i--)
6465 {
6466 ops[0] = gen_rtx_REG (SImode, dest_start + i);
6467 ops[1] = gen_rtx_REG (SImode, src_start + i);
6468 output_asm_insn ("mov%?\t%0, %1", ops);
6469 }
6470 }
6471
6472 return "";
6473 }
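
/* A worked example (illustrative): moving r1-r3 into r0-r2 copies
   ascending (r0 <- r1, r1 <- r2, r2 <- r3) so that each source is read
   before it is overwritten; moving r1-r3 into r2-r4 copies descending
   (r4 <- r3, r3 <- r2, r2 <- r1) for the same reason.  */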
6474
6475
6476 /* Output a move from arm registers to an fpu register.
6477 OPERANDS[0] is an fpu register.
6478 OPERANDS[1] is the first register of an arm register pair. */
6479
6480 const char *
6481 output_mov_double_fpu_from_arm (operands)
6482 rtx * operands;
6483 {
6484 int arm_reg0 = REGNO (operands[1]);
6485 rtx ops[2];
6486
6487 if (arm_reg0 == IP_REGNUM)
6488 abort ();
6489
6490 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6491 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6492 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
6493 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
6494 return "";
6495 }
6496
6497 /* Output a move from an fpu register to arm registers.
6498 OPERANDS[0] is the first register of an arm register pair.
6499 OPERANDS[1] is an fpu register. */
6500
6501 const char *
6502 output_mov_double_arm_from_fpu (operands)
6503 rtx * operands;
6504 {
6505 int arm_reg0 = REGNO (operands[0]);
6506 rtx ops[2];
6507
6508 if (arm_reg0 == IP_REGNUM)
6509 abort ();
6510
6511 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6512 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6513 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
6514 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
6515 return "";
6516 }
6517
6518 /* Output a move between double words.
6519 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
6520 or MEM<-REG and all MEMs must be offsettable addresses. */
6521
6522 const char *
6523 output_move_double (operands)
6524 rtx * operands;
6525 {
6526 enum rtx_code code0 = GET_CODE (operands[0]);
6527 enum rtx_code code1 = GET_CODE (operands[1]);
6528 rtx otherops[3];
6529
6530 if (code0 == REG)
6531 {
6532 int reg0 = REGNO (operands[0]);
6533
6534 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
6535
6536 if (code1 == REG)
6537 {
6538 int reg1 = REGNO (operands[1]);
6539 if (reg1 == IP_REGNUM)
6540 abort ();
6541
6542 /* Ensure the second source is not overwritten. */
6543 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
6544 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
6545 else
6546 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
6547 }
6548 else if (code1 == CONST_DOUBLE)
6549 {
6550 if (GET_MODE (operands[1]) == DFmode)
6551 {
6552 REAL_VALUE_TYPE r;
6553 long l[2];
6554
6555 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
6556 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
6557 otherops[1] = GEN_INT (l[1]);
6558 operands[1] = GEN_INT (l[0]);
6559 }
6560 else if (GET_MODE (operands[1]) != VOIDmode)
6561 abort ();
6562 else if (WORDS_BIG_ENDIAN)
6563 {
6564 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
6565 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
6566 }
6567 else
6568 {
6569 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
6570 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
6571 }
6572
6573 output_mov_immediate (operands);
6574 output_mov_immediate (otherops);
6575 }
6576 else if (code1 == CONST_INT)
6577 {
6578 #if HOST_BITS_PER_WIDE_INT > 32
6579 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
6580 what the upper word is. */
6581 if (WORDS_BIG_ENDIAN)
6582 {
6583 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
6584 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
6585 }
6586 else
6587 {
6588 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
6589 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
6590 }
6591 #else
6592 /* Sign extend the intval into the high-order word. */
6593 if (WORDS_BIG_ENDIAN)
6594 {
6595 otherops[1] = operands[1];
6596 operands[1] = (INTVAL (operands[1]) < 0
6597 ? constm1_rtx : const0_rtx);
6598 }
6599 else
6600 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
6601 #endif
6602 output_mov_immediate (otherops);
6603 output_mov_immediate (operands);
6604 }
6605 else if (code1 == MEM)
6606 {
6607 switch (GET_CODE (XEXP (operands[1], 0)))
6608 {
6609 case REG:
6610 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
6611 break;
6612
6613 case PRE_INC:
6614 abort (); /* Should never happen now. */
6615 break;
6616
6617 case PRE_DEC:
6618 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
6619 break;
6620
6621 case POST_INC:
6622 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
6623 break;
6624
6625 case POST_DEC:
6626 abort (); /* Should never happen now. */
6627 break;
6628
6629 case LABEL_REF:
6630 case CONST:
6631 output_asm_insn ("adr%?\t%0, %1", operands);
6632 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
6633 break;
6634
6635 default:
6636 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
6637 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
6638 {
6639 otherops[0] = operands[0];
6640 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
6641 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
6642
6643 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
6644 {
6645 if (GET_CODE (otherops[2]) == CONST_INT)
6646 {
6647 switch (INTVAL (otherops[2]))
6648 {
6649 case -8:
6650 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
6651 return "";
6652 case -4:
6653 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
6654 return "";
6655 case 4:
6656 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
6657 return "";
6658 }
6659
6660 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
6661 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
6662 else
6663 output_asm_insn ("add%?\t%0, %1, %2", otherops);
6664 }
6665 else
6666 output_asm_insn ("add%?\t%0, %1, %2", otherops);
6667 }
6668 else
6669 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
6670
6671 return "ldm%?ia\t%0, %M0";
6672 }
6673 else
6674 {
6675 otherops[1] = adjust_address (operands[1], VOIDmode, 4);
6676 /* Take care of overlapping base/data reg. */
6677 if (reg_mentioned_p (operands[0], operands[1]))
6678 {
6679 output_asm_insn ("ldr%?\t%0, %1", otherops);
6680 output_asm_insn ("ldr%?\t%0, %1", operands);
6681 }
6682 else
6683 {
6684 output_asm_insn ("ldr%?\t%0, %1", operands);
6685 output_asm_insn ("ldr%?\t%0, %1", otherops);
6686 }
6687 }
6688 }
6689 }
6690 else
6691 abort (); /* Constraints should prevent this. */
6692 }
6693 else if (code0 == MEM && code1 == REG)
6694 {
6695 if (REGNO (operands[1]) == IP_REGNUM)
6696 abort ();
6697
6698 switch (GET_CODE (XEXP (operands[0], 0)))
6699 {
6700 case REG:
6701 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
6702 break;
6703
6704 case PRE_INC:
6705 abort (); /* Should never happen now. */
6706 break;
6707
6708 case PRE_DEC:
6709 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
6710 break;
6711
6712 case POST_INC:
6713 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
6714 break;
6715
6716 case POST_DEC:
6717 abort (); /* Should never happen now. */
6718 break;
6719
6720 case PLUS:
6721 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
6722 {
6723 switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
6724 {
6725 case -8:
6726 output_asm_insn ("stm%?db\t%m0, %M1", operands);
6727 return "";
6728
6729 case -4:
6730 output_asm_insn ("stm%?da\t%m0, %M1", operands);
6731 return "";
6732
6733 case 4:
6734 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
6735 return "";
6736 }
6737 }
6738 /* Fall through */
6739
6740 default:
6741 otherops[0] = adjust_address (operands[0], VOIDmode, 4);
6742 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
6743 output_asm_insn ("str%?\t%1, %0", operands);
6744 output_asm_insn ("str%?\t%1, %0", otherops);
6745 }
6746 }
6747 else
6748 /* Constraints should prevent this. */
6749 abort ();
6750
6751 return "";
6752 }
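
/* A worked example (illustrative): a DImode load whose address is a
   plain register, say operands (reg:DI r0) and (mem:DI (reg:SI r2)),
   comes out as

       ldmia   r2, {r0, r1}

   and the symmetric store uses stmia.  */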
6753
6754
6755 /* Output an arbitrary MOV reg, #n.
6756 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
6757
6758 const char *
6759 output_mov_immediate (operands)
6760 rtx * operands;
6761 {
6762 HOST_WIDE_INT n = INTVAL (operands[1]);
6763
6764 /* Try to use one MOV. */
6765 if (const_ok_for_arm (n))
6766 output_asm_insn ("mov%?\t%0, %1", operands);
6767
6768 /* Try to use one MVN. */
6769 else if (const_ok_for_arm (~n))
6770 {
6771 operands[1] = GEN_INT (~n);
6772 output_asm_insn ("mvn%?\t%0, %1", operands);
6773 }
6774 else
6775 {
6776 int n_ones = 0;
6777 int i;
6778
6779 /* If all else fails, make it out of ORRs or BICs as appropriate. */
6780 for (i = 0; i < 32; i ++)
6781 if (n & 1 << i)
6782 n_ones ++;
6783
6784 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
6785 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
6786 else
6787 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
6788 }
6789
6790 return "";
6791 }
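
/* A worked example (illustrative): n == 0xffffff00 cannot be encoded
   as an ARM immediate, but ~n == 0xff can, so a single

       mvn     r0, #255

   is emitted instead of a MOV/ORR sequence (register r0 is just an
   example operand).  */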
6792
6793 /* Output an ADD r, s, #n where n may be too big for one instruction.
6794 If adding zero to one register, output nothing. */
6795
6796 const char *
6797 output_add_immediate (operands)
6798 rtx * operands;
6799 {
6800 HOST_WIDE_INT n = INTVAL (operands[2]);
6801
6802 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
6803 {
6804 if (n < 0)
6805 output_multi_immediate (operands,
6806 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
6807 -n);
6808 else
6809 output_multi_immediate (operands,
6810 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
6811 n);
6812 }
6813
6814 return "";
6815 }
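
/* A worked example (illustrative): n == 1028 is not a valid ARM
   immediate, so the call above expands to two instructions,

       add     r0, r1, #4
       add     r0, r0, #1024

   built from the 8-bit chunks of the constant.  */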
6816
6817 /* Output a multiple immediate operation.
6818 OPERANDS is the vector of operands referred to in the output patterns.
6819 INSTR1 is the output pattern to use for the first constant.
6820 INSTR2 is the output pattern to use for subsequent constants.
6821 IMMED_OP is the index of the constant slot in OPERANDS.
6822 N is the constant value. */
6823
6824 static const char *
6825 output_multi_immediate (operands, instr1, instr2, immed_op, n)
6826 rtx * operands;
6827 const char * instr1;
6828 const char * instr2;
6829 int immed_op;
6830 HOST_WIDE_INT n;
6831 {
6832 #if HOST_BITS_PER_WIDE_INT > 32
6833 n &= 0xffffffff;
6834 #endif
6835
6836 if (n == 0)
6837 {
6838 /* Quick and easy output. */
6839 operands[immed_op] = const0_rtx;
6840 output_asm_insn (instr1, operands);
6841 }
6842 else
6843 {
6844 int i;
6845 const char * instr = instr1;
6846
6847 /* Note that n is never zero here (which would give no output). */
6848 for (i = 0; i < 32; i += 2)
6849 {
6850 if (n & (3 << i))
6851 {
6852 operands[immed_op] = GEN_INT (n & (255 << i));
6853 output_asm_insn (instr, operands);
6854 instr = instr2;
6855 i += 6;
6856 }
6857 }
6858 }
6859
6860 return "";
6861 }
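
/* A worked example (illustrative): n == 0x000f000f splits into the
   chunks 0xf and 0xf0000, so with the MOV/ORR patterns from
   output_mov_immediate this emits

       mov     r0, #15
       orr     r0, r0, #983040

   two instructions, each with a valid rotated-immediate operand.  */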
6862
6863 /* Return the appropriate ARM instruction for the operation code.
6864 The returned result should not be overwritten. OP is the rtx of the
6865 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
6866 was shifted. */
6867
6868 const char *
6869 arithmetic_instr (op, shift_first_arg)
6870 rtx op;
6871 int shift_first_arg;
6872 {
6873 switch (GET_CODE (op))
6874 {
6875 case PLUS:
6876 return "add";
6877
6878 case MINUS:
6879 return shift_first_arg ? "rsb" : "sub";
6880
6881 case IOR:
6882 return "orr";
6883
6884 case XOR:
6885 return "eor";
6886
6887 case AND:
6888 return "and";
6889
6890 default:
6891 abort ();
6892 }
6893 }
6894
6895 /* Ensure valid constant shifts and return the appropriate shift mnemonic
6896 for the operation code. The returned result should not be overwritten.
6897 OP is the rtx code of the shift.
6898 On exit, *AMOUNTP will be -1 if the shift is by a register, otherwise
6899 it will be the constant shift amount. */
6900
6901 static const char *
6902 shift_op (op, amountp)
6903 rtx op;
6904 HOST_WIDE_INT *amountp;
6905 {
6906 const char * mnem;
6907 enum rtx_code code = GET_CODE (op);
6908
6909 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
6910 *amountp = -1;
6911 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
6912 *amountp = INTVAL (XEXP (op, 1));
6913 else
6914 abort ();
6915
6916 switch (code)
6917 {
6918 case ASHIFT:
6919 mnem = "asl";
6920 break;
6921
6922 case ASHIFTRT:
6923 mnem = "asr";
6924 break;
6925
6926 case LSHIFTRT:
6927 mnem = "lsr";
6928 break;
6929
6930 case ROTATERT:
6931 mnem = "ror";
6932 break;
6933
6934 case MULT:
6935 /* We never have to worry about the amount being other than a
6936 power of 2, since this case can never be reloaded from a reg. */
6937 if (*amountp != -1)
6938 *amountp = int_log2 (*amountp);
6939 else
6940 abort ();
6941 return "asl";
6942
6943 default:
6944 abort ();
6945 }
6946
6947 if (*amountp != -1)
6948 {
6949 /* This is not 100% correct, but follows from the desire to merge
6950 multiplication by a power of 2 with the recognizer for a
6951 shift. >=32 is not a valid shift for "asl", so we must try and
6952 output a shift that produces the correct arithmetical result.
6953 Using lsr #32 is identical except for the fact that the carry bit
6954 is not set correctly if we set the flags; but we never use the
6955 carry bit from such an operation, so we can ignore that. */
6956 if (code == ROTATERT)
6957 /* Rotate is just modulo 32. */
6958 *amountp &= 31;
6959 else if (*amountp != (*amountp & 31))
6960 {
6961 if (code == ASHIFT)
6962 mnem = "lsr";
6963 *amountp = 32;
6964 }
6965
6966 /* Shifts of 0 are no-ops. */
6967 if (*amountp == 0)
6968 return NULL;
6969 }
6970
6971 return mnem;
6972 }
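
/* A worked example (illustrative): a MULT by 8 comes back as "asl"
   with *AMOUNTP == 3, while an ASHIFT by 40 comes back as "lsr" with
   *AMOUNTP == 32 -- the out-of-range "asl" is replaced by a shift that
   still produces the arithmetically correct (zero) result.  */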
6973
6974 /* Obtain the shift from the POWER of two. */
6975
6976 static HOST_WIDE_INT
6977 int_log2 (power)
6978 HOST_WIDE_INT power;
6979 {
6980 HOST_WIDE_INT shift = 0;
6981
6982 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
6983 {
6984 if (shift > 31)
6985 abort ();
6986 shift ++;
6987 }
6988
6989 return shift;
6990 }
6991
6992 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
6993 /bin/as is horribly restrictive. */
6994 #define MAX_ASCII_LEN 51
6995
6996 void
6997 output_ascii_pseudo_op (stream, p, len)
6998 FILE * stream;
6999 const unsigned char * p;
7000 int len;
7001 {
7002 int i;
7003 int len_so_far = 0;
7004
7005 fputs ("\t.ascii\t\"", stream);
7006
7007 for (i = 0; i < len; i++)
7008 {
7009 int c = p[i];
7010
7011 if (len_so_far >= MAX_ASCII_LEN)
7012 {
7013 fputs ("\"\n\t.ascii\t\"", stream);
7014 len_so_far = 0;
7015 }
7016
7017 switch (c)
7018 {
7019 case TARGET_TAB:
7020 fputs ("\\t", stream);
7021 len_so_far += 2;
7022 break;
7023
7024 case TARGET_FF:
7025 fputs ("\\f", stream);
7026 len_so_far += 2;
7027 break;
7028
7029 case TARGET_BS:
7030 fputs ("\\b", stream);
7031 len_so_far += 2;
7032 break;
7033
7034 case TARGET_CR:
7035 fputs ("\\r", stream);
7036 len_so_far += 2;
7037 break;
7038
7039 case TARGET_NEWLINE:
7040 fputs ("\\n", stream);
7041 c = p [i + 1];
7042 if ((c >= ' ' && c <= '~')
7043 || c == TARGET_TAB)
7044 /* This is a good place for a line break. */
7045 len_so_far = MAX_ASCII_LEN;
7046 else
7047 len_so_far += 2;
7048 break;
7049
7050 case '\"':
7051 case '\\':
7052 putc ('\\', stream);
7053 len_so_far++;
7054 /* drop through. */
7055
7056 default:
7057 if (c >= ' ' && c <= '~')
7058 {
7059 putc (c, stream);
7060 len_so_far++;
7061 }
7062 else
7063 {
7064 fprintf (stream, "\\%03o", c);
7065 len_so_far += 4;
7066 }
7067 break;
7068 }
7069 }
7070
7071 fputs ("\"\n", stream);
7072 }
7073 \f
7074 /* Compute the register save mask for registers 0 through 12
7075 inclusive. This code is used by both arm_compute_save_reg_mask
7076 and arm_compute_initial_elimination_offset. */
7077
7078 static unsigned long
7079 arm_compute_save_reg0_reg12_mask ()
7080 {
7081 unsigned long func_type = arm_current_func_type ();
7082 unsigned int save_reg_mask = 0;
7083 unsigned int reg;
7084
7085 if (IS_INTERRUPT (func_type))
7086 {
7087 unsigned int max_reg;
7088 /* Interrupt functions must not corrupt any registers,
7089 even call clobbered ones. If this is a leaf function
7090 we can just examine the registers used by the RTL, but
7091 otherwise we have to assume that whatever function is
7092 called might clobber anything, and so we have to save
7093 all the call-clobbered registers as well. */
7094 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
7095 /* FIQ handlers have registers r8 - r12 banked, so
7096 we only need to check r0 - r7. Normal ISRs only
7097 bank r14 and r15, so we must check up to r12.
7098 r13 is the stack pointer which is always preserved,
7099 so we do not need to consider it here. */
7100 max_reg = 7;
7101 else
7102 max_reg = 12;
7103
7104 for (reg = 0; reg <= max_reg; reg++)
7105 if (regs_ever_live[reg]
7106 || (! current_function_is_leaf && call_used_regs [reg]))
7107 save_reg_mask |= (1 << reg);
7108 }
7109 else
7110 {
7111 /* In the normal case we only need to save those registers
7112 which are call saved and which are used by this function. */
7113 for (reg = 0; reg <= 10; reg++)
7114 if (regs_ever_live[reg] && ! call_used_regs [reg])
7115 save_reg_mask |= (1 << reg);
7116
7117 /* Handle the frame pointer as a special case. */
7118 if (! TARGET_APCS_FRAME
7119 && ! frame_pointer_needed
7120 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
7121 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
7122 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
7123
7124 /* If we aren't loading the PIC register,
7125 don't stack it even though it may be live. */
7126 if (flag_pic
7127 && ! TARGET_SINGLE_PIC_BASE
7128 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
7129 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
7130 }
7131
7132 return save_reg_mask;
7133 }
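
/* A worked example (illustrative): a normal leaf function that uses
   only the call-saved registers r4 and r5 gets a mask of 0x30; a
   non-leaf IRQ handler must additionally assume that the call-clobbered
   registers r0-r3 and ip are trashed by its callees, so those join
   the mask as well.  */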
7134
7135 /* Compute a bit mask of which registers need to be
7136 saved on the stack for the current function. */
7137
7138 static unsigned long
7139 arm_compute_save_reg_mask ()
7140 {
7141 unsigned int save_reg_mask = 0;
7142 unsigned long func_type = arm_current_func_type ();
7143
7144 if (IS_NAKED (func_type))
7145 /* This should never really happen. */
7146 return 0;
7147
7148 /* If we are creating a stack frame, then we must save the frame pointer,
7149 IP (which will hold the old stack pointer), LR and the PC. */
7150 if (frame_pointer_needed)
7151 save_reg_mask |=
7152 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
7153 | (1 << IP_REGNUM)
7154 | (1 << LR_REGNUM)
7155 | (1 << PC_REGNUM);
7156
7157 /* Volatile functions do not return, so there
7158 is no need to save any other registers. */
7159 if (IS_VOLATILE (func_type))
7160 return save_reg_mask;
7161
7162 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
7163
7164 /* Decide if we need to save the link register.
7165 Interrupt routines have their own banked link register,
7166 so they never need to save it.
7167 Otherwise if we do not use the link register we do not need to save
7168 it. If we are pushing other registers onto the stack however, we
7169 can save an instruction in the epilogue by pushing the link register
7170 now and then popping it back into the PC. This incurs extra memory
7171 accesses though, so we only do it when optimising for size, and only
7172 if we know that we will not need a fancy return sequence. */
7173 if (regs_ever_live [LR_REGNUM]
7174 || (save_reg_mask
7175 && optimize_size
7176 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
7177 save_reg_mask |= 1 << LR_REGNUM;
7178
7179 if (cfun->machine->lr_save_eliminated)
7180 save_reg_mask &= ~ (1 << LR_REGNUM);
7181
7182 return save_reg_mask;
7183 }
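
/* A worked example (illustrative): with a frame pointer, a normal
   function always saves at least {fp, ip, lr, pc}, i.e. a mask of
   0xd800; any live call-saved registers reported by
   arm_compute_save_reg0_reg12_mask are OR-ed in on top of that.  */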
7184
7185 /* Generate a function exit sequence. If REALLY_RETURN is true, then do
7186 everything bar the final return instruction. */
7187
7188 const char *
7189 output_return_instruction (operand, really_return, reverse)
7190 rtx operand;
7191 int really_return;
7192 int reverse;
7193 {
7194 char conditional[10];
7195 char instr[100];
7196 int reg;
7197 unsigned long live_regs_mask;
7198 unsigned long func_type;
7199
7200 func_type = arm_current_func_type ();
7201
7202 if (IS_NAKED (func_type))
7203 return "";
7204
7205 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
7206 {
7207 /* If this function was declared non-returning, and we have found a tail
7208 call, then we have to trust that the called function won't return. */
7209 if (really_return)
7210 {
7211 rtx ops[2];
7212
7213 /* Otherwise, trap an attempted return by aborting. */
7214 ops[0] = operand;
7215 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
7216 : "abort");
7217 assemble_external_libcall (ops[1]);
7218 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
7219 }
7220
7221 return "";
7222 }
7223
7224 if (current_function_calls_alloca && !really_return)
7225 abort ();
7226
7227 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
7228
7229 return_used_this_function = 1;
7230
7231 live_regs_mask = arm_compute_save_reg_mask ();
7232
7233 if (live_regs_mask)
7234 {
7235 const char * return_reg;
7236
7237 /* If we do not have any special requirements for function exit
7238 (eg interworking, or ISR) then we can load the return address
7239 directly into the PC. Otherwise we must load it into LR. */
7240 if (really_return
7241 && ! TARGET_INTERWORK)
7242 return_reg = reg_names[PC_REGNUM];
7243 else
7244 return_reg = reg_names[LR_REGNUM];
7245
7246 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
7247 /* There are two possible reasons for the IP register being saved.
7248 Either a stack frame was created, in which case IP contains the
7249 old stack pointer, or an ISR routine corrupted it. If this is an
7250 ISR routine then just restore IP, otherwise restore IP into SP. */
7251 if (! IS_INTERRUPT (func_type))
7252 {
7253 live_regs_mask &= ~ (1 << IP_REGNUM);
7254 live_regs_mask |= (1 << SP_REGNUM);
7255 }
7256
7257 /* On some ARM architectures it is faster to use LDR rather than
7258 LDM to load a single register. On other architectures, the
7259 cost is the same. In 26 bit mode, or for exception handlers,
7260 we have to use LDM to load the PC so that the CPSR is also
7261 restored. */
7262 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
7263 {
7264 if (live_regs_mask == (unsigned int)(1 << reg))
7265 break;
7266 }
7267 if (reg <= LAST_ARM_REGNUM
7268 && (reg != LR_REGNUM
7269 || ! really_return
7270 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
7271 {
7272 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
7273 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
7274 }
7275 else
7276 {
7277 char *p;
7278 int first = 1;
7279
7280 /* Generate the load multiple instruction to restore the registers. */
7281 if (frame_pointer_needed)
7282 sprintf (instr, "ldm%sea\t%%|fp, {", conditional);
7283 else
7284 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
7285
7286 p = instr + strlen (instr);
7287
7288 for (reg = 0; reg <= SP_REGNUM; reg++)
7289 if (live_regs_mask & (1 << reg))
7290 {
7291 int l = strlen (reg_names[reg]);
7292
7293 if (first)
7294 first = 0;
7295 else
7296 {
7297 memcpy (p, ", ", 2);
7298 p += 2;
7299 }
7300
7301 memcpy (p, "%|", 2);
7302 memcpy (p + 2, reg_names[reg], l);
7303 p += l + 2;
7304 }
7305
7306 if (live_regs_mask & (1 << LR_REGNUM))
7307 {
7308 int l = strlen (return_reg);
7309
7310 if (! first)
7311 {
7312 memcpy (p, ", ", 2);
7313 p += 2;
7314 }
7315
7316 memcpy (p, "%|", 2);
7317 memcpy (p + 2, return_reg, l);
7318 strcpy (p + 2 + l, ((TARGET_APCS_32
7319 && !IS_INTERRUPT (func_type))
7320 || !really_return)
7321 ? "}" : "}^");
7322 }
7323 else
7324 strcpy (p, "}");
7325 }
7326
7327 output_asm_insn (instr, & operand);
7328
7329 /* See if we need to generate an extra instruction to
7330 perform the actual function return. */
7331 if (really_return
7332 && func_type != ARM_FT_INTERWORKED
7333 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
7334 {
7335 /* The return has already been handled
7336 by loading the LR into the PC. */
7337 really_return = 0;
7338 }
7339 }
7340
7341 if (really_return)
7342 {
7343 switch ((int) ARM_FUNC_TYPE (func_type))
7344 {
7345 case ARM_FT_ISR:
7346 case ARM_FT_FIQ:
7347 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
7348 break;
7349
7350 case ARM_FT_INTERWORKED:
7351 sprintf (instr, "bx%s\t%%|lr", conditional);
7352 break;
7353
7354 case ARM_FT_EXCEPTION:
7355 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
7356 break;
7357
7358 default:
7359 /* ARMv5 implementations always provide BX, so interworking
7360 is the default unless APCS-26 is in use. */
7361 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
7362 sprintf (instr, "bx%s\t%%|lr", conditional);
7363 else
7364 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
7365 conditional, TARGET_APCS_32 ? "" : "s");
7366 break;
7367 }
7368
7369 output_asm_insn (instr, & operand);
7370 }
7371
7372 return "";
7373 }
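
/* A worked example (illustrative): for a normal 32-bit-mode function
   that pushed {r4, lr}, an unconditional return comes out as the
   single instruction

       ldmfd   sp!, {r4, pc}

   with the saved LR popped straight into the PC; under APCS-26 the
   pop would instead end in "}^" so that the condition flags are
   restored too.  */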
7374
7375 /* Write the function name into the code section, directly preceding
7376 the function prologue.
7377
7378 Code will be output similar to this:
7379 t0
7380 .ascii "arm_poke_function_name", 0
7381 .align
7382 t1
7383 .word 0xff000000 + (t1 - t0)
7384 arm_poke_function_name
7385 mov ip, sp
7386 stmfd sp!, {fp, ip, lr, pc}
7387 sub fp, ip, #4
7388
7389 When performing a stack backtrace, code can inspect the value
7390 of 'pc' stored at 'fp' + 0. If the trace function then looks
7391 at location pc - 12 and the top 8 bits are set, then we know
7392 that there is a function name embedded immediately preceding this
7393 location, whose length is ((pc[-3]) & 0x00ffffff).
7394
7395 We assume that pc is declared as a pointer to an unsigned long.
7396
7397 It is of no benefit to output the function name if we are assembling
7398 a leaf function. These function types will not contain a stack
7399 backtrace structure, therefore it is not possible to determine the
7400 function name. */
7401
7402 void
7403 arm_poke_function_name (stream, name)
7404 FILE * stream;
7405 const char * name;
7406 {
7407 unsigned long alignlength;
7408 unsigned long length;
7409 rtx x;
7410
7411 length = strlen (name) + 1;
7412 alignlength = ROUND_UP (length);
7413
7414 ASM_OUTPUT_ASCII (stream, name, length);
7415 ASM_OUTPUT_ALIGN (stream, 2);
7416 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
7417 assemble_aligned_integer (UNITS_PER_WORD, x);
7418 }
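
/* A minimal sketch of the consumer side (illustrative only, not part
   of the compiler): given a frame pointer FP saved by the prologue
   shown above, a backtracer could recover the embedded name with
   something like

       unsigned long *pc = (unsigned long *) fp[0];
       if ((pc[-3] & 0xff000000) == 0xff000000)
         {
           unsigned long length = pc[-3] & 0x00ffffff;
           const char *name = (const char *) &pc[-3] - length;
           ...
         }

   where LENGTH includes the alignment padding added above.  */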
7419
7420 /* Place some comments into the assembler stream
7421 describing the current function. */
7422
7423 static void
7424 arm_output_function_prologue (f, frame_size)
7425 FILE * f;
7426 HOST_WIDE_INT frame_size;
7427 {
7428 unsigned long func_type;
7429
7430 if (!TARGET_ARM)
7431 {
7432 thumb_output_function_prologue (f, frame_size);
7433 return;
7434 }
7435
7436 /* Sanity check. */
7437 if (arm_ccfsm_state || arm_target_insn)
7438 abort ();
7439
7440 func_type = arm_current_func_type ();
7441
7442 switch ((int) ARM_FUNC_TYPE (func_type))
7443 {
7444 default:
7445 case ARM_FT_NORMAL:
7446 break;
7447 case ARM_FT_INTERWORKED:
7448 asm_fprintf (f, "\t%@ Function supports interworking.\n");
7449 break;
7450 case ARM_FT_EXCEPTION_HANDLER:
7451 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
7452 break;
7453 case ARM_FT_ISR:
7454 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
7455 break;
7456 case ARM_FT_FIQ:
7457 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
7458 break;
7459 case ARM_FT_EXCEPTION:
7460 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
7461 break;
7462 }
7463
7464 if (IS_NAKED (func_type))
7465 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
7466
7467 if (IS_VOLATILE (func_type))
7468 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
7469
7470 if (IS_NESTED (func_type))
7471 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
7472
7473 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %d\n",
7474 current_function_args_size,
7475 current_function_pretend_args_size, frame_size);
7476
7477 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
7478 frame_pointer_needed,
7479 cfun->machine->uses_anonymous_args);
7480
7481 if (cfun->machine->lr_save_eliminated)
7482 asm_fprintf (f, "\t%@ link register save eliminated.\n");
7483
7484 #ifdef AOF_ASSEMBLER
7485 if (flag_pic)
7486 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
7487 #endif
7488
7489 return_used_this_function = 0;
7490 }
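
/* A worked example (illustrative): for a small function that needs a
   frame pointer, the annotation emitted above looks something like

       @ args = 0, pretend = 0, frame = 8
       @ frame_needed = 1, uses_anonymous_args = 0

   in the assembly output.  */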
7491
7492 const char *
7493 arm_output_epilogue (really_return)
7494 int really_return;
7495 {
7496 int reg;
7497 unsigned long saved_regs_mask;
7498 unsigned long func_type;
7499 /* Floats_offset is the offset from the "virtual" frame. In an APCS
7500 frame that is $fp + 4 for a non-variadic function. */
7501 int floats_offset = 0;
7502 rtx operands[3];
7503 int frame_size = get_frame_size ();
7504 FILE * f = asm_out_file;
7505 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
7506
7507 /* If we have already generated the return instruction
7508 then it is futile to generate anything else. */
7509 if (use_return_insn (FALSE) && return_used_this_function)
7510 return "";
7511
7512 func_type = arm_current_func_type ();
7513
7514 if (IS_NAKED (func_type))
7515 /* Naked functions don't have epilogues. */
7516 return "";
7517
7518 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
7519 {
7520 rtx op;
7521
7522 /* A volatile function should never return. Call abort. */
7523 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
7524 assemble_external_libcall (op);
7525 output_asm_insn ("bl\t%a0", &op);
7526
7527 return "";
7528 }
7529
7530 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
7531 && ! really_return)
7532 /* If we are throwing an exception, then we really must
7533 be doing a return, so we can't tail-call. */
7534 abort ();
7535
7536 saved_regs_mask = arm_compute_save_reg_mask ();
7537
7538 /* XXX We should adjust floats_offset for any anonymous args, and then
7539 re-adjust vfp_offset below to compensate. */
7540
7541 /* Compute how far away the floats will be. */
7542 for (reg = 0; reg <= LAST_ARM_REGNUM; reg ++)
7543 if (saved_regs_mask & (1 << reg))
7544 floats_offset += 4;
7545
7546 if (frame_pointer_needed)
7547 {
7548 int vfp_offset = 4;
7549
7550 if (arm_fpu_arch == FP_SOFT2)
7551 {
7552 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
7553 if (regs_ever_live[reg] && !call_used_regs[reg])
7554 {
7555 floats_offset += 12;
7556 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
7557 reg, FP_REGNUM, floats_offset - vfp_offset);
7558 }
7559 }
7560 else
7561 {
7562 int start_reg = LAST_ARM_FP_REGNUM;
7563
7564 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
7565 {
7566 if (regs_ever_live[reg] && !call_used_regs[reg])
7567 {
7568 floats_offset += 12;
7569
7570 /* We can't unstack more than four registers at once. */
7571 if (start_reg - reg == 3)
7572 {
7573 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
7574 reg, FP_REGNUM, floats_offset - vfp_offset);
7575 start_reg = reg - 1;
7576 }
7577 }
7578 else
7579 {
7580 if (reg != start_reg)
7581 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
7582 reg + 1, start_reg - reg,
7583 FP_REGNUM, floats_offset - vfp_offset);
7584 start_reg = reg - 1;
7585 }
7586 }
7587
7588 /* Just in case the last register checked also needs unstacking. */
7589 if (reg != start_reg)
7590 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
7591 reg + 1, start_reg - reg,
7592 FP_REGNUM, floats_offset - vfp_offset);
7593 }
7594
7595 /* saved_regs_mask should contain the IP, which at the time of stack
7596 frame generation actually contains the old stack pointer. So a
7597 quick way to unwind the stack is just to pop the IP register directly
7598 into the stack pointer. */
7599 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
7600 abort ();
7601 saved_regs_mask &= ~ (1 << IP_REGNUM);
7602 saved_regs_mask |= (1 << SP_REGNUM);
7603
7604 /* There are two registers left in saved_regs_mask - LR and PC. We
7605 only need to restore the LR register (the return address), but to
7606 save time we can load it directly into the PC, unless we need a
7607 special function exit sequence, or we are not really returning. */
7608 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
7609 /* Delete the LR from the register mask, so that the LR on
7610 the stack is loaded into the PC in the register mask. */
7611 saved_regs_mask &= ~ (1 << LR_REGNUM);
7612 else
7613 saved_regs_mask &= ~ (1 << PC_REGNUM);
7614
7615 print_multi_reg (f, "ldmea\t%r", FP_REGNUM, saved_regs_mask);
7616
7617 if (IS_INTERRUPT (func_type))
7618 /* Interrupt handlers will have pushed the
7619 IP onto the stack, so restore it now. */
7620 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, 1 << IP_REGNUM);
7621 }
7622 else
7623 {
7624 /* Restore stack pointer if necessary. */
7625 if (frame_size + current_function_outgoing_args_size != 0)
7626 {
7627 operands[0] = operands[1] = stack_pointer_rtx;
7628 operands[2] = GEN_INT (frame_size
7629 + current_function_outgoing_args_size);
7630 output_add_immediate (operands);
7631 }
7632
7633 if (arm_fpu_arch == FP_SOFT2)
7634 {
7635 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
7636 if (regs_ever_live[reg] && !call_used_regs[reg])
7637 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
7638 reg, SP_REGNUM);
7639 }
7640 else
7641 {
7642 int start_reg = FIRST_ARM_FP_REGNUM;
7643
7644 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
7645 {
7646 if (regs_ever_live[reg] && !call_used_regs[reg])
7647 {
7648 if (reg - start_reg == 3)
7649 {
7650 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
7651 start_reg, SP_REGNUM);
7652 start_reg = reg + 1;
7653 }
7654 }
7655 else
7656 {
7657 if (reg != start_reg)
7658 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
7659 start_reg, reg - start_reg,
7660 SP_REGNUM);
7661
7662 start_reg = reg + 1;
7663 }
7664 }
7665
7666 /* Just in case the last register checked also needs unstacking. */
7667 if (reg != start_reg)
7668 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
7669 start_reg, reg - start_reg, SP_REGNUM);
7670 }
7671
7672 /* If we can, restore the LR into the PC. */
7673 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
7674 && really_return
7675 && current_function_pretend_args_size == 0
7676 && saved_regs_mask & (1 << LR_REGNUM))
7677 {
7678 saved_regs_mask &= ~ (1 << LR_REGNUM);
7679 saved_regs_mask |= (1 << PC_REGNUM);
7680 }
7681
7682 /* Load the registers off the stack. If we only have one register
7683 to load use the LDR instruction - it is faster. */
7684 if (saved_regs_mask == (1 << LR_REGNUM))
7685 {
7686 /* The exception handler ignores the LR, so we do
7687 not really need to load it off the stack. */
7688 if (eh_ofs)
7689 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
7690 else
7691 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
7692 }
7693 else if (saved_regs_mask)
7694 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
7695
7696 if (current_function_pretend_args_size)
7697 {
7698 /* Unwind the pre-pushed regs. */
7699 operands[0] = operands[1] = stack_pointer_rtx;
7700 operands[2] = GEN_INT (current_function_pretend_args_size);
7701 output_add_immediate (operands);
7702 }
7703 }
7704
7705 #if 0
7706 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER)
7707 /* Adjust the stack to remove the exception handler stuff. */
7708 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
7709 REGNO (eh_ofs));
7710 #endif
7711
7712 if (! really_return
7713 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
7714 && current_function_pretend_args_size == 0
7715 && saved_regs_mask & (1 << PC_REGNUM)))
7716 return "";
7717
7718 /* Generate the return instruction. */
7719 switch ((int) ARM_FUNC_TYPE (func_type))
7720 {
7721 case ARM_FT_EXCEPTION_HANDLER:
7722 /* Even in 26-bit mode we do a mov (rather than a movs)
7723 because we don't have the PSR bits set in the address. */
7724 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
7725 break;
7726
7727 case ARM_FT_ISR:
7728 case ARM_FT_FIQ:
7729 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
7730 break;
7731
7732 case ARM_FT_EXCEPTION:
7733 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7734 break;
7735
7736 case ARM_FT_INTERWORKED:
7737 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
7738 break;
7739
7740 default:
7741 if (frame_pointer_needed)
7742 /* If we used the frame pointer then the return address
7743 will have been loaded off the stack directly into the
7744 PC, so there is no need to issue a MOV instruction
7745 here. */
7746 ;
7747 else if (current_function_pretend_args_size == 0
7748 && (saved_regs_mask & (1 << LR_REGNUM)))
7749 /* Similarly we may have been able to load LR into the PC
7750 even if we did not create a stack frame. */
7751 ;
7752 else if (TARGET_APCS_32)
7753 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7754 else
7755 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7756 break;
7757 }
7758
7759 return "";
7760 }
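
/* A worked example (illustrative): for a normal function built with a
   frame pointer and no other saved registers, the code above collapses
   the whole epilogue into the single instruction

       ldmea   fp, {fp, sp, pc}

   restoring the caller's frame pointer, unwinding the stack via the
   saved IP, and returning, all at once.  */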
7761
7762 static void
7763 arm_output_function_epilogue (file, frame_size)
7764 FILE *file ATTRIBUTE_UNUSED;
7765 HOST_WIDE_INT frame_size;
7766 {
7767 if (TARGET_THUMB)
7768 {
7769 /* ??? Probably not safe to set this here, since it assumes that a
7770 function will be emitted as assembly immediately after we generate
7771 RTL for it. This does not happen for inline functions. */
7772 return_used_this_function = 0;
7773 }
7774 else
7775 {
7776 if (use_return_insn (FALSE)
7777 && return_used_this_function
7778 && (frame_size + current_function_outgoing_args_size) != 0
7779 && !frame_pointer_needed)
7780 abort ();
7781
7782 /* Reset the ARM-specific per-function variables. */
7783 after_arm_reorg = 0;
7784 }
7785 }
7786
7787 /* Generate and emit an insn that we will recognize as a push_multi.
7788 Unfortunately, since this insn does not reflect very well the actual
7789 semantics of the operation, we need to annotate the insn for the benefit
7790 of DWARF2 frame unwind information. */
7791
7792 static rtx
7793 emit_multi_reg_push (mask)
7794 int mask;
7795 {
7796 int num_regs = 0;
7797 int num_dwarf_regs;
7798 int i, j;
7799 rtx par;
7800 rtx dwarf;
7801 int dwarf_par_index;
7802 rtx tmp, reg;
7803
7804 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7805 if (mask & (1 << i))
7806 num_regs++;
7807
7808 if (num_regs == 0 || num_regs > 16)
7809 abort ();
7810
7811 /* We don't record the PC in the dwarf frame information. */
7812 num_dwarf_regs = num_regs;
7813 if (mask & (1 << PC_REGNUM))
7814 num_dwarf_regs--;
7815
7816 /* For the body of the insn we are going to generate an UNSPEC in
7817 parallel with several USEs. This allows the insn to be recognised
7818 by the push_multi pattern in the arm.md file. The insn looks
7819 something like this:
7820
7821 (parallel [
7822 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
7823 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
7824 (use (reg:SI 11 fp))
7825 (use (reg:SI 12 ip))
7826 (use (reg:SI 14 lr))
7827 (use (reg:SI 15 pc))
7828 ])
7829
7830 For the frame note however, we try to be more explicit and actually
7831 show each register being stored into the stack frame, plus a (single)
7832 decrement of the stack pointer. We do it this way in order to be
7833 friendly to the stack unwinding code, which only wants to see a single
7834 stack decrement per instruction. The RTL we generate for the note looks
7835 something like this:
7836
7837 (sequence [
7838 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
7839 (set (mem:SI (reg:SI sp)) (reg:SI r4))
7840 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
7841 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
7842 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
7843 ])
7844
7845 This sequence is used both by the code to support stack unwinding for
7846 exceptions handlers and the code to generate dwarf2 frame debugging. */
7847
7848 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
7849 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
7850 dwarf_par_index = 1;
7851
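/* Structure note: the first loop below finds the lowest register set
   in MASK, makes it the operand of the UNSPEC_PUSH_MULT SET and then
   breaks out; the second loop adds a USE (plus a dwarf store, except
   for the PC) for each of the remaining registers. */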
7852 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7853 {
7854 if (mask & (1 << i))
7855 {
7856 reg = gen_rtx_REG (SImode, i);
7857
7858 XVECEXP (par, 0, 0)
7859 = gen_rtx_SET (VOIDmode,
7860 gen_rtx_MEM (BLKmode,
7861 gen_rtx_PRE_DEC (BLKmode,
7862 stack_pointer_rtx)),
7863 gen_rtx_UNSPEC (BLKmode,
7864 gen_rtvec (1, reg),
7865 UNSPEC_PUSH_MULT));
7866
7867 if (i != PC_REGNUM)
7868 {
7869 tmp = gen_rtx_SET (VOIDmode,
7870 gen_rtx_MEM (SImode, stack_pointer_rtx),
7871 reg);
7872 RTX_FRAME_RELATED_P (tmp) = 1;
7873 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
7874 dwarf_par_index++;
7875 }
7876
7877 break;
7878 }
7879 }
7880
7881 for (j = 1, i++; j < num_regs; i++)
7882 {
7883 if (mask & (1 << i))
7884 {
7885 reg = gen_rtx_REG (SImode, i);
7886
7887 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
7888
7889 if (i != PC_REGNUM)
7890 {
7891 tmp = gen_rtx_SET (VOIDmode,
7892 gen_rtx_MEM (SImode,
7893 plus_constant (stack_pointer_rtx,
7894 4 * j)),
7895 reg);
7896 RTX_FRAME_RELATED_P (tmp) = 1;
7897 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
7898 }
7899
7900 j++;
7901 }
7902 }
7903
7904 par = emit_insn (par);
7905
7906 tmp = gen_rtx_SET (SImode,
7907 stack_pointer_rtx,
7908 gen_rtx_PLUS (SImode,
7909 stack_pointer_rtx,
7910 GEN_INT (-4 * num_regs)));
7911 RTX_FRAME_RELATED_P (tmp) = 1;
7912 XVECEXP (dwarf, 0, 0) = tmp;
7913
7914 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7915 REG_NOTES (par));
7916 return par;
7917 }
7918
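/* Like emit_multi_reg_push above, but for the FPA SFM (store float
   multiple) instruction: push COUNT floating point registers starting
   at BASE_REG, again annotating the insn with a dwarf note that shows
   one XFmode (12 byte) store per register. */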
7919 static rtx
7920 emit_sfm (base_reg, count)
7921 int base_reg;
7922 int count;
7923 {
7924 rtx par;
7925 rtx dwarf;
7926 rtx tmp, reg;
7927 int i;
7928
7929 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7930 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7931 RTX_FRAME_RELATED_P (dwarf) = 1;
7932
7933 reg = gen_rtx_REG (XFmode, base_reg++);
7934
7935 XVECEXP (par, 0, 0)
7936 = gen_rtx_SET (VOIDmode,
7937 gen_rtx_MEM (BLKmode,
7938 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7939 gen_rtx_UNSPEC (BLKmode,
7940 gen_rtvec (1, reg),
7941 UNSPEC_PUSH_MULT));
7942 tmp
7943 = gen_rtx_SET (VOIDmode,
7944 gen_rtx_MEM (XFmode,
7945 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7946 reg);
7947 RTX_FRAME_RELATED_P (tmp) = 1;
7948 XVECEXP (dwarf, 0, count - 1) = tmp;
7949
7950 for (i = 1; i < count; i++)
7951 {
7952 reg = gen_rtx_REG (XFmode, base_reg++);
7953 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7954
7955 tmp = gen_rtx_SET (VOIDmode,
7956 gen_rtx_MEM (XFmode,
7957 gen_rtx_PRE_DEC (BLKmode,
7958 stack_pointer_rtx)),
7959 reg);
7960 RTX_FRAME_RELATED_P (tmp) = 1;
7961 XVECEXP (dwarf, 0, count - i - 1) = tmp;
7962 }
7963
7964 par = emit_insn (par);
7965 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7966 REG_NOTES (par));
7967 return par;
7968 }
7969
7970 /* Compute the distance from register FROM to register TO.
7971 These can be the arg pointer (26), the soft frame pointer (25),
7972 the stack pointer (13) or the hard frame pointer (11).
7973 Typical stack layout looks like this:
7974
7975 old stack pointer -> | |
7976 ----
7977 | | \
7978 | | saved arguments for
7979 | | vararg functions
7980 | | /
7981 --
7982 hard FP & arg pointer -> | | \
7983 | | stack
7984 | | frame
7985 | | /
7986 --
7987 | | \
7988 | | call saved
7989 | | registers
7990 soft frame pointer -> | | /
7991 --
7992 | | \
7993 | | local
7994 | | variables
7995 | | /
7996 --
7997 | | \
7998 | | outgoing
7999 | | arguments
8000 current stack pointer -> | | /
8001 --
8002
8003 For a given function some or all of these stack components
8004 may not be needed, giving rise to the possibility of
8005 eliminating some of the registers.
8006
8007 The values returned by this function must reflect the behaviour
8008 of arm_expand_prologue() and arm_compute_save_reg_mask().
8009
8010 The sign of the number returned reflects the direction of stack
8011 growth, so the values are positive for all eliminations except
8012 from the soft frame pointer to the hard frame pointer. */
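/* As a worked example (figures invented for illustration): with two
   call-saved core registers (call_saved_registers == 8), a frame
   pointer (stack_frame == 16), 12 bytes of locals and 8 bytes of
   outgoing arguments, the ARG_POINTER_REGNUM to STACK_POINTER_REGNUM
   case below returns 8 + 16 + 12 + 8 - 4 == 40. */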
8013
8014 unsigned int
8015 arm_compute_initial_elimination_offset (from, to)
8016 unsigned int from;
8017 unsigned int to;
8018 {
8019 unsigned int local_vars = (get_frame_size () + 3) & ~3;
8020 unsigned int outgoing_args = current_function_outgoing_args_size;
8021 unsigned int stack_frame;
8022 unsigned int call_saved_registers;
8023 unsigned long func_type;
8024
8025 func_type = arm_current_func_type ();
8026
8027 /* Volatile functions never return, so there is
8028 no need to save call saved registers. */
8029 call_saved_registers = 0;
8030 if (! IS_VOLATILE (func_type))
8031 {
8032 unsigned int reg_mask;
8033 unsigned int reg;
8034
8035 /* Make sure that we compute which registers will be saved
8036 on the stack using the same algorithm that is used by
8037 arm_compute_save_reg_mask(). */
8038 reg_mask = arm_compute_save_reg0_reg12_mask ();
8039
8040 /* Now count the number of bits set in reg_mask.
8041 For each set bit we need 4 bytes of stack space. */
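/* Note that (reg_mask & - reg_mask) isolates the least significant
   set bit, so each iteration of the loop below clears exactly one
   bit; e.g. 0x16 -> 0x14 -> 0x10 -> 0, three iterations for the
   three bits set. */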
8042 while (reg_mask)
8043 {
8044 call_saved_registers += 4;
8045 reg_mask = reg_mask & ~ (reg_mask & - reg_mask);
8046 }
8047
8048 if (regs_ever_live[LR_REGNUM]
8049 /* If a stack frame is going to be created, the LR will
8050 be saved as part of that, so we do not need to allow
8051 for it here. */
8052 && ! frame_pointer_needed)
8053 call_saved_registers += 4;
8054
8055 /* If the hard floating point registers are going to be
8056 used then they must be saved on the stack as well.
8057 Each register occupies 12 bytes of stack space. */
8058 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg ++)
8059 if (regs_ever_live[reg] && ! call_used_regs[reg])
8060 call_saved_registers += 12;
8061 }
8062
8063 /* The stack frame contains 4 registers - the old frame pointer,
8064 the old stack pointer, the return address and PC of the start
8065 of the function. */
8066 stack_frame = frame_pointer_needed ? 16 : 0;
8067
8068 /* OK, now we have enough information to compute the distances.
8069 There must be an entry in these switch tables for each pair
8070 of registers in ELIMINABLE_REGS, even if some of the entries
8071 seem to be redundant or useless. */
8072 switch (from)
8073 {
8074 case ARG_POINTER_REGNUM:
8075 switch (to)
8076 {
8077 case THUMB_HARD_FRAME_POINTER_REGNUM:
8078 return 0;
8079
8080 case FRAME_POINTER_REGNUM:
8081 /* This is the reverse of the soft frame pointer
8082 to hard frame pointer elimination below. */
8083 if (call_saved_registers == 0 && stack_frame == 0)
8084 return 0;
8085 return (call_saved_registers + stack_frame - 4);
8086
8087 case ARM_HARD_FRAME_POINTER_REGNUM:
8088 /* If there is no stack frame then the hard
8089 frame pointer and the arg pointer coincide. */
8090 if (stack_frame == 0 && call_saved_registers != 0)
8091 return 0;
8092 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
8093 return (frame_pointer_needed
8094 && current_function_needs_context
8095 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
8096
8097 case STACK_POINTER_REGNUM:
8098 /* If nothing has been pushed on the stack at all
8099 then this will return -4. This *is* correct! */
8100 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
8101
8102 default:
8103 abort ();
8104 }
8105 break;
8106
8107 case FRAME_POINTER_REGNUM:
8108 switch (to)
8109 {
8110 case THUMB_HARD_FRAME_POINTER_REGNUM:
8111 return 0;
8112
8113 case ARM_HARD_FRAME_POINTER_REGNUM:
8114 /* The hard frame pointer points to the top entry in the
8115 stack frame. The soft frame pointer to the bottom entry
8116 in the stack frame. If there is no stack frame at all,
8117 then they are identical. */
8118 if (call_saved_registers == 0 && stack_frame == 0)
8119 return 0;
8120 return - (call_saved_registers + stack_frame - 4);
8121
8122 case STACK_POINTER_REGNUM:
8123 return local_vars + outgoing_args;
8124
8125 default:
8126 abort ();
8127 }
8128 break;
8129
8130 default:
8131 /* You cannot eliminate from the stack pointer.
8132 In theory you could eliminate from the hard frame
8133 pointer to the stack pointer, but this will never
8134 happen, since if a stack frame is not needed the
8135 hard frame pointer will never be used. */
8136 abort ();
8137 }
8138 }
8139
8140 /* Generate the prologue instructions for entry into an ARM function. */
8141
8142 void
8143 arm_expand_prologue ()
8144 {
8145 int reg;
8146 rtx amount;
8147 rtx insn;
8148 rtx ip_rtx;
8149 unsigned long live_regs_mask;
8150 unsigned long func_type;
8151 int fp_offset = 0;
8152 int saved_pretend_args = 0;
8153 unsigned int args_to_push;
8154
8155 func_type = arm_current_func_type ();
8156
8157 /* Naked functions don't have prologues. */
8158 if (IS_NAKED (func_type))
8159 return;
8160
8161 /* Make a copy of current_function_pretend_args_size as we may need to modify it locally. */
8162 args_to_push = current_function_pretend_args_size;
8163
8164 /* Compute which registers we will have to save onto the stack. */
8165 live_regs_mask = arm_compute_save_reg_mask ();
8166
8167 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
8168
8169 if (frame_pointer_needed)
8170 {
8171 if (IS_INTERRUPT (func_type))
8172 {
8173 /* Interrupt functions must not corrupt any registers.
8174 Creating a frame pointer however, corrupts the IP
8175 register, so we must push it first. */
8176 insn = emit_multi_reg_push (1 << IP_REGNUM);
8177
8178 /* Do not set RTX_FRAME_RELATED_P on this insn.
8179 The dwarf stack unwinding code only wants to see one
8180 stack decrement per function, and this is not it. If
8181 this instruction is labeled as being part of the frame
8182 creation sequence then dwarf2out_frame_debug_expr will
8183 abort when it encounters the assignment of IP to FP
8184 later on, since the use of SP here establishes SP as
8185 the CFA register and not IP.
8186
8187 Anyway this instruction is not really part of the stack
8188 frame creation although it is part of the prologue. */
8189 }
8190 else if (IS_NESTED (func_type))
8191 {
8192 /* The static chain register is the same as the IP register, which is
8193 used as a scratch register during stack frame creation.
8194 To get around this we need to find somewhere to store IP
8195 whilst the frame is being created. We try the following
8196 places in order:
8197
8198 1. The last argument register.
8199 2. A slot on the stack above the frame. (This only
8200 works if the function is not a varargs function).
8201 3. Register r3, after pushing the argument registers
8202 onto the stack.
8203
8204 Note - we only need to tell the dwarf2 backend about the SP
8205 adjustment in the second variant; the static chain register
8206 doesn't need to be unwound, as it doesn't contain a value
8207 inherited from the caller. */
8208
8209 if (regs_ever_live[3] == 0)
8210 {
8211 insn = gen_rtx_REG (SImode, 3);
8212 insn = gen_rtx_SET (SImode, insn, ip_rtx);
8213 insn = emit_insn (insn);
8214 }
8215 else if (args_to_push == 0)
8216 {
8217 rtx dwarf;
8218 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
8219 insn = gen_rtx_MEM (SImode, insn);
8220 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
8221 insn = emit_insn (insn);
8222
8223 fp_offset = 4;
8224
8225 /* Just tell the dwarf backend that we adjusted SP. */
8226 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8227 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8228 GEN_INT (-fp_offset)));
8229 RTX_FRAME_RELATED_P (insn) = 1;
8230 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8231 dwarf, REG_NOTES (insn));
8232 }
8233 else
8234 {
8235 /* Store the args on the stack. */
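/* The expression (0xf0 >> (args_to_push / 4)) & 0xf computes the
   mask of argument registers to push; e.g. args_to_push == 8
   gives (0xf0 >> 2) & 0xf == 0xc, i.e. {r2, r3}. */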
8236 if (cfun->machine->uses_anonymous_args)
8237 insn = emit_multi_reg_push
8238 ((0xf0 >> (args_to_push / 4)) & 0xf);
8239 else
8240 insn = emit_insn
8241 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8242 GEN_INT (- args_to_push)));
8243
8244 RTX_FRAME_RELATED_P (insn) = 1;
8245
8246 saved_pretend_args = 1;
8247 fp_offset = args_to_push;
8248 args_to_push = 0;
8249
8250 /* Now reuse r3 to preserve IP. */
8251 insn = gen_rtx_REG (SImode, 3);
8252 insn = gen_rtx_SET (SImode, insn, ip_rtx);
8253 (void) emit_insn (insn);
8254 }
8255 }
8256
8257 if (fp_offset)
8258 {
8259 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
8260 insn = gen_rtx_SET (SImode, ip_rtx, insn);
8261 }
8262 else
8263 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
8264
8265 insn = emit_insn (insn);
8266 RTX_FRAME_RELATED_P (insn) = 1;
8267 }
8268
8269 if (args_to_push)
8270 {
8271 /* Push the argument registers, or reserve space for them. */
8272 if (cfun->machine->uses_anonymous_args)
8273 insn = emit_multi_reg_push
8274 ((0xf0 >> (args_to_push / 4)) & 0xf);
8275 else
8276 insn = emit_insn
8277 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8278 GEN_INT (- args_to_push)));
8279 RTX_FRAME_RELATED_P (insn) = 1;
8280 }
8281
8282 /* If this is an interrupt service routine, and the link register is
8283 going to be pushed, subtracting four now will mean that the
8284 function return can be done with a single instruction. */
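/* The pushed copy of LR will then already include the -4
   return-address adjustment, so (presumably) the epilogue can pop
   it straight into the PC instead of needing a separate
   "subs pc, lr, #4". */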
8285 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
8286 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8287 {
8288 emit_insn (gen_rtx_SET (SImode,
8289 gen_rtx_REG (SImode, LR_REGNUM),
8290 gen_rtx_PLUS (SImode,
8291 gen_rtx_REG (SImode, LR_REGNUM),
8292 GEN_INT (-4))));
8293 }
8294
8295 if (live_regs_mask)
8296 {
8297 insn = emit_multi_reg_push (live_regs_mask);
8298 RTX_FRAME_RELATED_P (insn) = 1;
8299 }
8300
8301 if (! IS_VOLATILE (func_type))
8302 {
8303 /* Save any floating point call-saved registers used by this function. */
8304 if (arm_fpu_arch == FP_SOFT2)
8305 {
8306 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
8307 if (regs_ever_live[reg] && !call_used_regs[reg])
8308 {
8309 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
8310 insn = gen_rtx_MEM (XFmode, insn);
8311 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
8312 gen_rtx_REG (XFmode, reg)));
8313 RTX_FRAME_RELATED_P (insn) = 1;
8314 }
8315 }
8316 else
8317 {
8318 int start_reg = LAST_ARM_FP_REGNUM;
8319
8320 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
8321 {
8322 if (regs_ever_live[reg] && !call_used_regs[reg])
8323 {
8324 if (start_reg - reg == 3)
8325 {
8326 insn = emit_sfm (reg, 4);
8327 RTX_FRAME_RELATED_P (insn) = 1;
8328 start_reg = reg - 1;
8329 }
8330 }
8331 else
8332 {
8333 if (start_reg != reg)
8334 {
8335 insn = emit_sfm (reg + 1, start_reg - reg);
8336 RTX_FRAME_RELATED_P (insn) = 1;
8337 }
8338 start_reg = reg - 1;
8339 }
8340 }
8341
8342 if (start_reg != reg)
8343 {
8344 insn = emit_sfm (reg + 1, start_reg - reg);
8345 RTX_FRAME_RELATED_P (insn) = 1;
8346 }
8347 }
8348 }
8349
8350 if (frame_pointer_needed)
8351 {
8352 /* Create the new frame pointer. */
8353 insn = GEN_INT (-(4 + args_to_push + fp_offset));
8354 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
8355 RTX_FRAME_RELATED_P (insn) = 1;
8356
8357 if (IS_NESTED (func_type))
8358 {
8359 /* Recover the static chain register. */
8360 if (regs_ever_live [3] == 0
8361 || saved_pretend_args)
8362 insn = gen_rtx_REG (SImode, 3);
8363 else /* if (current_function_pretend_args_size == 0) */
8364 {
8365 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx, GEN_INT (4));
8366 insn = gen_rtx_MEM (SImode, insn);
8367 }
8368
8369 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
8370 /* Add a USE to stop propagate_one_insn() from barfing. */
8371 emit_insn (gen_prologue_use (ip_rtx));
8372 }
8373 }
8374
8375 amount = GEN_INT (-(get_frame_size ()
8376 + current_function_outgoing_args_size));
8377
8378 if (amount != const0_rtx)
8379 {
8380 /* This add can produce multiple insns for a large constant, so we
8381 need to get tricky. */
8382 rtx last = get_last_insn ();
8383 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8384 amount));
8385 do
8386 {
8387 last = last ? NEXT_INSN (last) : get_insns ();
8388 RTX_FRAME_RELATED_P (last) = 1;
8389 }
8390 while (last != insn);
8391
8392 /* If the frame pointer is needed, emit a special barrier that
8393 will prevent the scheduler from moving stores to the frame
8394 before the stack adjustment. */
8395 if (frame_pointer_needed)
8396 {
8397 rtx unspec = gen_rtx_UNSPEC (SImode,
8398 gen_rtvec (2, stack_pointer_rtx,
8399 hard_frame_pointer_rtx),
8400 UNSPEC_PRLG_STK);
8401
8402 insn = emit_insn (gen_rtx_CLOBBER (VOIDmode,
8403 gen_rtx_MEM (BLKmode, unspec)));
8404 }
8405 }
8406
8407 /* If we are profiling, make sure no instructions are scheduled before
8408 the call to mcount. Similarly if the user has requested no
8409 scheduling in the prolog. */
8410 if (current_function_profile || TARGET_NO_SCHED_PRO)
8411 emit_insn (gen_blockage ());
8412
8413 /* If the link register is being kept alive, with the return address in it,
8414 then make sure that it does not get reused by the ce2 pass. */
8415 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
8416 {
8417 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
8418 cfun->machine->lr_save_eliminated = 1;
8419 }
8420 }
8421 \f
8422 /* If CODE is 'd', then the X is a condition operand and the instruction
8423 should only be executed if the condition is true.
8424 if CODE is 'D', then the X is a condition operand and the instruction
8425 should only be executed if the condition is false: however, if the mode
8426 of the comparison is CCFPEmode, then always execute the instruction -- we
8427 do this because in these circumstances !GE does not necessarily imply LT;
8428 in these cases the instruction pattern will take care to make sure that
8429 an instruction containing %d will follow, thereby undoing the effects of
8430 doing this instruction unconditionally.
8431 If CODE is 'N' then X is a floating point operand that must be negated
8432 before output.
8433 If CODE is 'B' then output a bitwise inverted value of X (a const int).
8434 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
8435
8436 void
8437 arm_print_operand (stream, x, code)
8438 FILE * stream;
8439 rtx x;
8440 int code;
8441 {
8442 switch (code)
8443 {
8444 case '@':
8445 fputs (ASM_COMMENT_START, stream);
8446 return;
8447
8448 case '_':
8449 fputs (user_label_prefix, stream);
8450 return;
8451
8452 case '|':
8453 fputs (REGISTER_PREFIX, stream);
8454 return;
8455
8456 case '?':
8457 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
8458 {
8459 if (TARGET_THUMB || current_insn_predicate != NULL)
8460 abort ();
8461
8462 fputs (arm_condition_codes[arm_current_cc], stream);
8463 }
8464 else if (current_insn_predicate)
8465 {
8466 enum arm_cond_code code;
8467
8468 if (TARGET_THUMB)
8469 abort ();
8470
8471 code = get_arm_condition_code (current_insn_predicate);
8472 fputs (arm_condition_codes[code], stream);
8473 }
8474 return;
8475
8476 case 'N':
8477 {
8478 REAL_VALUE_TYPE r;
8479 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8480 r = REAL_VALUE_NEGATE (r);
8481 fprintf (stream, "%s", fp_const_from_val (&r));
8482 }
8483 return;
8484
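/* For example, %B applied to (const_int 5) prints -6, the
   sign-extended bitwise inverse of 5. */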
8485 case 'B':
8486 if (GET_CODE (x) == CONST_INT)
8487 {
8488 HOST_WIDE_INT val;
8489 val = ARM_SIGN_EXTEND (~INTVAL (x));
8490 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
8491 }
8492 else
8493 {
8494 putc ('~', stream);
8495 output_addr_const (stream, x);
8496 }
8497 return;
8498
8499 case 'i':
8500 fprintf (stream, "%s", arithmetic_instr (x, 1));
8501 return;
8502
8503 case 'I':
8504 fprintf (stream, "%s", arithmetic_instr (x, 0));
8505 return;
8506
8507 case 'S':
8508 {
8509 HOST_WIDE_INT val;
8510 const char * shift = shift_op (x, &val);
8511
8512 if (shift)
8513 {
8514 fprintf (stream, ", %s ", shift_op (x, &val));
8515 if (val == -1)
8516 arm_print_operand (stream, XEXP (x, 1), 0);
8517 else
8518 {
8519 fputc ('#', stream);
8520 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
8521 }
8522 }
8523 }
8524 return;
8525
8526 /* An explanation of the 'Q', 'R' and 'H' register operands:
8527
8528 In a pair of registers containing a DI or DF value the 'Q'
8529 operand returns the register number of the register containing
8530 the least significant part of the value. The 'R' operand returns
8531 the register number of the register containing the most
8532 significant part of the value.
8533
8534 The 'H' operand returns the higher of the two register numbers.
8535 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
8536 same as the 'Q' operand, since the most significant part of the
8537 value is held in the lower number register. The reverse is true
8538 on systems where WORDS_BIG_ENDIAN is false.
8539
8540 The purpose of these operands is to distinguish between cases
8541 where the endian-ness of the values is important (for example
8542 when they are added together), and cases where the endian-ness
8543 is irrelevant, but the order of register operations is important.
8544 For example when loading a value from memory into a register
8545 pair, the endian-ness does not matter. Provided that the value
8546 from the lower memory address is put into the lower numbered
8547 register, and the value from the higher address is put into the
8548 higher numbered register, the load will work regardless of whether
8549 the value being loaded is big-wordian or little-wordian. The
8550 order of the two register loads can matter however, if the address
8551 of the memory location is actually held in one of the registers
8552 being overwritten by the load. */
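/* So, to illustrate: for a DImode value held in {r0, r1} with
   WORDS_BIG_ENDIAN false, %Q prints r0 while %R and %H both print
   r1; with WORDS_BIG_ENDIAN true, %Q and %H print r1 and %R prints
   r0. (The register choice is illustrative only.) */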
8553 case 'Q':
8554 if (REGNO (x) > LAST_ARM_REGNUM)
8555 abort ();
8556 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
8557 return;
8558
8559 case 'R':
8560 if (REGNO (x) > LAST_ARM_REGNUM)
8561 abort ();
8562 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
8563 return;
8564
8565 case 'H':
8566 if (REGNO (x) > LAST_ARM_REGNUM)
8567 abort ();
8568 asm_fprintf (stream, "%r", REGNO (x) + 1);
8569 return;
8570
8571 case 'm':
8572 asm_fprintf (stream, "%r",
8573 GET_CODE (XEXP (x, 0)) == REG
8574 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
8575 return;
8576
8577 case 'M':
8578 asm_fprintf (stream, "{%r-%r}",
8579 REGNO (x),
8580 REGNO (x) + NUM_REGS (GET_MODE (x)) - 1);
8581 return;
8582
8583 case 'd':
8584 /* CONST_TRUE_RTX means always -- that's the default. */
8585 if (x == const_true_rtx)
8586 return;
8587
8588 if (TARGET_ARM)
8589 fputs (arm_condition_codes[get_arm_condition_code (x)],
8590 stream);
8591 else
8592 fputs (thumb_condition_code (x, 0), stream);
8593 return;
8594
8595 case 'D':
8596 /* CONST_TRUE_RTX means not always -- ie never. We shouldn't ever
8597 want to do that. */
8598 if (x == const_true_rtx)
8599 abort ();
8600
8601 if (TARGET_ARM)
8602 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
8603 (get_arm_condition_code (x))],
8604 stream);
8605 else
8606 fputs (thumb_condition_code (x, 1), stream);
8607 return;
8608
8609 default:
8610 if (x == 0)
8611 abort ();
8612
8613 if (GET_CODE (x) == REG)
8614 asm_fprintf (stream, "%r", REGNO (x));
8615 else if (GET_CODE (x) == MEM)
8616 {
8617 output_memory_reference_mode = GET_MODE (x);
8618 output_address (XEXP (x, 0));
8619 }
8620 else if (GET_CODE (x) == CONST_DOUBLE)
8621 fprintf (stream, "#%s", fp_immediate_constant (x));
8622 else if (GET_CODE (x) == NEG)
8623 abort (); /* This should never happen now. */
8624 else
8625 {
8626 fputc ('#', stream);
8627 output_addr_const (stream, x);
8628 }
8629 }
8630 }
8631 \f
8632 #ifndef AOF_ASSEMBLER
8633 /* Target hook for assembling integer objects. The ARM version needs to
8634 handle word-sized values specially. */
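/* For word-sized, aligned values this emits a ".word" directive,
   appending "(GOT)" or "(GOTOFF)" to symbolic PIC references made
   from the constant table; e.g. ".word foo(GOT)" (the symbol name
   here is illustrative). */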
8635
8636 static bool
8637 arm_assemble_integer (x, size, aligned_p)
8638 rtx x;
8639 unsigned int size;
8640 int aligned_p;
8641 {
8642 if (size == UNITS_PER_WORD && aligned_p)
8643 {
8644 fputs ("\t.word\t", asm_out_file);
8645 output_addr_const (asm_out_file, x);
8646
8647 /* Mark symbols as position independent. We only do this in the
8648 .text segment, not in the .data segment. */
8649 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
8650 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
8651 {
8652 if (GET_CODE (x) == SYMBOL_REF
8653 && (CONSTANT_POOL_ADDRESS_P (x)
8654 || ENCODED_SHORT_CALL_ATTR_P (XSTR (x, 0))))
8655 fputs ("(GOTOFF)", asm_out_file);
8656 else if (GET_CODE (x) == LABEL_REF)
8657 fputs ("(GOTOFF)", asm_out_file);
8658 else
8659 fputs ("(GOT)", asm_out_file);
8660 }
8661 fputc ('\n', asm_out_file);
8662 return true;
8663 }
8664
8665 return default_assemble_integer (x, size, aligned_p);
8666 }
8667 #endif
8668 \f
8669 /* A finite state machine takes care of noticing whether or not instructions
8670 can be conditionally executed, and thus decrease execution time and code
8671 size by deleting branch instructions. The fsm is controlled by
8672 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
8673
8674 /* The state of the fsm controlling condition codes are:
8675 0: normal, do nothing special
8676 1: make ASM_OUTPUT_OPCODE not output this instruction
8677 2: make ASM_OUTPUT_OPCODE not output this instruction
8678 3: make instructions conditional
8679 4: make instructions conditional
8680
8681 State transitions (state->state by whom under condition):
8682 0 -> 1 final_prescan_insn if the `target' is a label
8683 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
8684 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
8685 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
8686 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
8687 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
8688 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
8689 (the target insn is arm_target_insn).
8690
8691 If the jump clobbers the conditions then we use states 2 and 4.
8692
8693 A similar thing can be done with conditional return insns.
8694
8695 XXX In case the `target' is an unconditional branch, this conditionalising
8696 of the instructions always reduces code size, but not always execution
8697 time. But then, I want to reduce the code size to somewhere near what
8698 /bin/cc produces. */
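/* As an illustrative (invented) example of states 1 and 3, a
   sequence such as:

	cmp	r0, #0
	beq	.L1
	mov	r1, #1
   .L1:

   can drop the branch and output the skipped insn as "movne r1, #1",
   provided nothing in between clobbers the condition codes. */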
8699
8700 /* Returns the index of the ARM condition code string in
8701 `arm_condition_codes'. COMPARISON should be an rtx like
8702 `(eq (...) (...))'. */
8703
8704 static enum arm_cond_code
8705 get_arm_condition_code (comparison)
8706 rtx comparison;
8707 {
8708 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
8709 int code;
8710 enum rtx_code comp_code = GET_CODE (comparison);
8711
8712 if (GET_MODE_CLASS (mode) != MODE_CC)
8713 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
8714 XEXP (comparison, 1));
8715
8716 switch (mode)
8717 {
8718 case CC_DNEmode: code = ARM_NE; goto dominance;
8719 case CC_DEQmode: code = ARM_EQ; goto dominance;
8720 case CC_DGEmode: code = ARM_GE; goto dominance;
8721 case CC_DGTmode: code = ARM_GT; goto dominance;
8722 case CC_DLEmode: code = ARM_LE; goto dominance;
8723 case CC_DLTmode: code = ARM_LT; goto dominance;
8724 case CC_DGEUmode: code = ARM_CS; goto dominance;
8725 case CC_DGTUmode: code = ARM_HI; goto dominance;
8726 case CC_DLEUmode: code = ARM_LS; goto dominance;
8727 case CC_DLTUmode: code = ARM_CC;
8728
8729 dominance:
8730 if (comp_code != EQ && comp_code != NE)
8731 abort ();
8732
8733 if (comp_code == EQ)
8734 return ARM_INVERSE_CONDITION_CODE (code);
8735 return code;
8736
8737 case CC_NOOVmode:
8738 switch (comp_code)
8739 {
8740 case NE: return ARM_NE;
8741 case EQ: return ARM_EQ;
8742 case GE: return ARM_PL;
8743 case LT: return ARM_MI;
8744 default: abort ();
8745 }
8746
8747 case CC_Zmode:
8748 switch (comp_code)
8749 {
8750 case NE: return ARM_NE;
8751 case EQ: return ARM_EQ;
8752 default: abort ();
8753 }
8754
8755 case CCFPEmode:
8756 case CCFPmode:
8757 /* These encodings assume that AC=1 in the FPA system control
8758 byte. This allows us to handle all cases except UNEQ and
8759 LTGT. */
8760 switch (comp_code)
8761 {
8762 case GE: return ARM_GE;
8763 case GT: return ARM_GT;
8764 case LE: return ARM_LS;
8765 case LT: return ARM_MI;
8766 case NE: return ARM_NE;
8767 case EQ: return ARM_EQ;
8768 case ORDERED: return ARM_VC;
8769 case UNORDERED: return ARM_VS;
8770 case UNLT: return ARM_LT;
8771 case UNLE: return ARM_LE;
8772 case UNGT: return ARM_HI;
8773 case UNGE: return ARM_PL;
8774 /* UNEQ and LTGT do not have a representation. */
8775 case UNEQ: /* Fall through. */
8776 case LTGT: /* Fall through. */
8777 default: abort ();
8778 }
8779
8780 case CC_SWPmode:
8781 switch (comp_code)
8782 {
8783 case NE: return ARM_NE;
8784 case EQ: return ARM_EQ;
8785 case GE: return ARM_LE;
8786 case GT: return ARM_LT;
8787 case LE: return ARM_GE;
8788 case LT: return ARM_GT;
8789 case GEU: return ARM_LS;
8790 case GTU: return ARM_CC;
8791 case LEU: return ARM_CS;
8792 case LTU: return ARM_HI;
8793 default: abort ();
8794 }
8795
8796 case CC_Cmode:
8797 switch (comp_code)
8798 {
8799 case LTU: return ARM_CS;
8800 case GEU: return ARM_CC;
8801 default: abort ();
8802 }
8803
8804 case CCmode:
8805 switch (comp_code)
8806 {
8807 case NE: return ARM_NE;
8808 case EQ: return ARM_EQ;
8809 case GE: return ARM_GE;
8810 case GT: return ARM_GT;
8811 case LE: return ARM_LE;
8812 case LT: return ARM_LT;
8813 case GEU: return ARM_CS;
8814 case GTU: return ARM_HI;
8815 case LEU: return ARM_LS;
8816 case LTU: return ARM_CC;
8817 default: abort ();
8818 }
8819
8820 default: abort ();
8821 }
8822
8823 abort ();
8824 }
8825
8826
8827 void
8828 arm_final_prescan_insn (insn)
8829 rtx insn;
8830 {
8831 /* BODY will hold the body of INSN. */
8832 rtx body = PATTERN (insn);
8833
8834 /* This will be 1 if trying to repeat the trick, and things need to be
8835 reversed if it appears to fail. */
8836 int reverse = 0;
8837
8838 /* JUMP_CLOBBERS non-zero means that the conditions will be clobbered
8839 if a branch is taken, even if the rtl suggests otherwise. It also
8840 means that we have to grub around within the jump expression to find
8841 out what the conditions are when the jump isn't taken. */
8842 int jump_clobbers = 0;
8843
8844 /* If we start with a return insn, we only succeed if we find another one. */
8845 int seeking_return = 0;
8846
8847 /* START_INSN will hold the insn from where we start looking. This is the
8848 first insn after the following code_label if REVERSE is true. */
8849 rtx start_insn = insn;
8850
8851 /* If in state 4, check if the target branch is reached, in order to
8852 change back to state 0. */
8853 if (arm_ccfsm_state == 4)
8854 {
8855 if (insn == arm_target_insn)
8856 {
8857 arm_target_insn = NULL;
8858 arm_ccfsm_state = 0;
8859 }
8860 return;
8861 }
8862
8863 /* If in state 3, it is possible to repeat the trick, if this insn is an
8864 unconditional branch to a label, and immediately following this branch
8865 is the previous target label which is only used once, and the label this
8866 branch jumps to is not too far off. */
8867 if (arm_ccfsm_state == 3)
8868 {
8869 if (simplejump_p (insn))
8870 {
8871 start_insn = next_nonnote_insn (start_insn);
8872 if (GET_CODE (start_insn) == BARRIER)
8873 {
8874 /* XXX Isn't this always a barrier? */
8875 start_insn = next_nonnote_insn (start_insn);
8876 }
8877 if (GET_CODE (start_insn) == CODE_LABEL
8878 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
8879 && LABEL_NUSES (start_insn) == 1)
8880 reverse = TRUE;
8881 else
8882 return;
8883 }
8884 else if (GET_CODE (body) == RETURN)
8885 {
8886 start_insn = next_nonnote_insn (start_insn);
8887 if (GET_CODE (start_insn) == BARRIER)
8888 start_insn = next_nonnote_insn (start_insn);
8889 if (GET_CODE (start_insn) == CODE_LABEL
8890 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
8891 && LABEL_NUSES (start_insn) == 1)
8892 {
8893 reverse = TRUE;
8894 seeking_return = 1;
8895 }
8896 else
8897 return;
8898 }
8899 else
8900 return;
8901 }
8902
8903 if (arm_ccfsm_state != 0 && !reverse)
8904 abort ();
8905 if (GET_CODE (insn) != JUMP_INSN)
8906 return;
8907
8908 /* This jump might be paralleled with a clobber of the condition codes;
8909 the jump should always come first. */
8910 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
8911 body = XVECEXP (body, 0, 0);
8912
8913 #if 0
8914 /* If this is a conditional return then we don't want to know */
8915 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
8916 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
8917 && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
8918 || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
8919 return;
8920 #endif
8921
8922 if (reverse
8923 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
8924 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
8925 {
8926 int insns_skipped;
8927 int fail = FALSE, succeed = FALSE;
8928 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
8929 int then_not_else = TRUE;
8930 rtx this_insn = start_insn, label = 0;
8931
8932 /* If the jump cannot be done with one instruction, we cannot
8933 conditionally execute the instruction in the inverse case. */
8934 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
8935 {
8936 jump_clobbers = 1;
8937 return;
8938 }
8939
8940 /* Register the insn jumped to. */
8941 if (reverse)
8942 {
8943 if (!seeking_return)
8944 label = XEXP (SET_SRC (body), 0);
8945 }
8946 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
8947 label = XEXP (XEXP (SET_SRC (body), 1), 0);
8948 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
8949 {
8950 label = XEXP (XEXP (SET_SRC (body), 2), 0);
8951 then_not_else = FALSE;
8952 }
8953 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
8954 seeking_return = 1;
8955 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
8956 {
8957 seeking_return = 1;
8958 then_not_else = FALSE;
8959 }
8960 else
8961 abort ();
8962
8963 /* See how many insns this branch skips, and what kind of insns. If all
8964 insns are okay, and the label or unconditional branch to the same
8965 label is not too far away, succeed. */
8966 for (insns_skipped = 0;
8967 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
8968 {
8969 rtx scanbody;
8970
8971 this_insn = next_nonnote_insn (this_insn);
8972 if (!this_insn)
8973 break;
8974
8975 switch (GET_CODE (this_insn))
8976 {
8977 case CODE_LABEL:
8978 /* Succeed if it is the target label, otherwise fail since
8979 control falls in from somewhere else. */
8980 if (this_insn == label)
8981 {
8982 if (jump_clobbers)
8983 {
8984 arm_ccfsm_state = 2;
8985 this_insn = next_nonnote_insn (this_insn);
8986 }
8987 else
8988 arm_ccfsm_state = 1;
8989 succeed = TRUE;
8990 }
8991 else
8992 fail = TRUE;
8993 break;
8994
8995 case BARRIER:
8996 /* Succeed if the following insn is the target label.
8997 Otherwise fail.
8998 If return insns are used then the last insn in a function
8999 will be a barrier. */
9000 this_insn = next_nonnote_insn (this_insn);
9001 if (this_insn && this_insn == label)
9002 {
9003 if (jump_clobbers)
9004 {
9005 arm_ccfsm_state = 2;
9006 this_insn = next_nonnote_insn (this_insn);
9007 }
9008 else
9009 arm_ccfsm_state = 1;
9010 succeed = TRUE;
9011 }
9012 else
9013 fail = TRUE;
9014 break;
9015
9016 case CALL_INSN:
9017 /* If using 32-bit addresses the cc is not preserved over
9018 calls. */
9019 if (TARGET_APCS_32)
9020 {
9021 /* Succeed if the following insn is the target label,
9022 or if the following two insns are a barrier and
9023 the target label. */
9024 this_insn = next_nonnote_insn (this_insn);
9025 if (this_insn && GET_CODE (this_insn) == BARRIER)
9026 this_insn = next_nonnote_insn (this_insn);
9027
9028 if (this_insn && this_insn == label
9029 && insns_skipped < max_insns_skipped)
9030 {
9031 if (jump_clobbers)
9032 {
9033 arm_ccfsm_state = 2;
9034 this_insn = next_nonnote_insn (this_insn);
9035 }
9036 else
9037 arm_ccfsm_state = 1;
9038 succeed = TRUE;
9039 }
9040 else
9041 fail = TRUE;
9042 }
9043 break;
9044
9045 case JUMP_INSN:
9046 /* If this is an unconditional branch to the same label, succeed.
9047 If it is to another label, do nothing. If it is conditional,
9048 fail. */
9049 /* XXX Probably, the tests for SET and the PC are unnecessary. */
9050
9051 scanbody = PATTERN (this_insn);
9052 if (GET_CODE (scanbody) == SET
9053 && GET_CODE (SET_DEST (scanbody)) == PC)
9054 {
9055 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
9056 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
9057 {
9058 arm_ccfsm_state = 2;
9059 succeed = TRUE;
9060 }
9061 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
9062 fail = TRUE;
9063 }
9064 /* Fail if a conditional return is undesirable (eg on a
9065 StrongARM), but still allow this if optimizing for size. */
9066 else if (GET_CODE (scanbody) == RETURN
9067 && !use_return_insn (TRUE)
9068 && !optimize_size)
9069 fail = TRUE;
9070 else if (GET_CODE (scanbody) == RETURN
9071 && seeking_return)
9072 {
9073 arm_ccfsm_state = 2;
9074 succeed = TRUE;
9075 }
9076 else if (GET_CODE (scanbody) == PARALLEL)
9077 {
9078 switch (get_attr_conds (this_insn))
9079 {
9080 case CONDS_NOCOND:
9081 break;
9082 default:
9083 fail = TRUE;
9084 break;
9085 }
9086 }
9087 else
9088 fail = TRUE; /* Unrecognized jump (eg epilogue). */
9089
9090 break;
9091
9092 case INSN:
9093 /* Instructions using or affecting the condition codes make it
9094 fail. */
9095 scanbody = PATTERN (this_insn);
9096 if (!(GET_CODE (scanbody) == SET
9097 || GET_CODE (scanbody) == PARALLEL)
9098 || get_attr_conds (this_insn) != CONDS_NOCOND)
9099 fail = TRUE;
9100 break;
9101
9102 default:
9103 break;
9104 }
9105 }
9106 if (succeed)
9107 {
9108 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
9109 arm_target_label = CODE_LABEL_NUMBER (label);
9110 else if (seeking_return || arm_ccfsm_state == 2)
9111 {
9112 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
9113 {
9114 this_insn = next_nonnote_insn (this_insn);
9115 if (this_insn && (GET_CODE (this_insn) == BARRIER
9116 || GET_CODE (this_insn) == CODE_LABEL))
9117 abort ();
9118 }
9119 if (!this_insn)
9120 {
9121 /* Oh dear! We ran off the end... give up. */
9122 recog (PATTERN (insn), insn, NULL);
9123 arm_ccfsm_state = 0;
9124 arm_target_insn = NULL;
9125 return;
9126 }
9127 arm_target_insn = this_insn;
9128 }
9129 else
9130 abort ();
9131 if (jump_clobbers)
9132 {
9133 if (reverse)
9134 abort ();
9135 arm_current_cc =
9136 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
9137 0), 0), 1));
9138 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
9139 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9140 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
9141 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9142 }
9143 else
9144 {
9145 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
9146 what it was. */
9147 if (!reverse)
9148 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
9149 0));
9150 }
9151
9152 if (reverse || then_not_else)
9153 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9154 }
9155
9156 /* Restore recog_data (getting the attributes of other insns can
9157 destroy this array, but final.c assumes that it remains intact
9158 across this call; since the insn has been recognized already we
9159 call recog directly). */
9160 recog (PATTERN (insn), insn, NULL);
9161 }
9162 }
9163
9164 /* Returns true if REGNO is a valid register
9165 for holding a quantity of type MODE. */
9166
9167 int
9168 arm_hard_regno_mode_ok (regno, mode)
9169 unsigned int regno;
9170 enum machine_mode mode;
9171 {
9172 if (GET_MODE_CLASS (mode) == MODE_CC)
9173 return regno == CC_REGNUM;
9174
9175 if (TARGET_THUMB)
9176 /* For the Thumb we only allow values bigger than SImode in
9177 registers 0 - 6, so that there is always a second low
9178 register available to hold the upper part of the value.
9179 We probably ought to ensure that the register is the
9180 start of an even numbered register pair. */
9181 return (NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
9182
9183 if (regno <= LAST_ARM_REGNUM)
9184 /* We allow any value to be stored in the general registers. */
9185 return 1;
9186
9187 if ( regno == FRAME_POINTER_REGNUM
9188 || regno == ARG_POINTER_REGNUM)
9189 /* We only allow integers in the fake hard registers. */
9190 return GET_MODE_CLASS (mode) == MODE_INT;
9191
9192 /* The only registers left are the FPU registers
9193 which we only allow to hold FP values. */
9194 return GET_MODE_CLASS (mode) == MODE_FLOAT
9195 && regno >= FIRST_ARM_FP_REGNUM
9196 && regno <= LAST_ARM_FP_REGNUM;
9197 }
9198
9199 int
9200 arm_regno_class (regno)
9201 int regno;
9202 {
9203 if (TARGET_THUMB)
9204 {
9205 if (regno == STACK_POINTER_REGNUM)
9206 return STACK_REG;
9207 if (regno == CC_REGNUM)
9208 return CC_REG;
9209 if (regno < 8)
9210 return LO_REGS;
9211 return HI_REGS;
9212 }
9213
9214 if ( regno <= LAST_ARM_REGNUM
9215 || regno == FRAME_POINTER_REGNUM
9216 || regno == ARG_POINTER_REGNUM)
9217 return GENERAL_REGS;
9218
9219 if (regno == CC_REGNUM)
9220 return NO_REGS;
9221
9222 return FPU_REGS;
9223 }
9224
9225 /* Handle a special case when computing the offset
9226 of an argument from the frame pointer. */
9227
9228 int
9229 arm_debugger_arg_offset (value, addr)
9230 int value;
9231 rtx addr;
9232 {
9233 rtx insn;
9234
9235 /* We are only interested if dbxout_parms() failed to compute the offset. */
9236 if (value != 0)
9237 return 0;
9238
9239 /* We can only cope with the case where the address is held in a register. */
9240 if (GET_CODE (addr) != REG)
9241 return 0;
9242
9243 /* If we are using the frame pointer to point at the argument, then
9244 an offset of 0 is correct. */
9245 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
9246 return 0;
9247
9248 /* If we are using the stack pointer to point at the
9249 argument, then an offset of 0 is correct. */
9250 if ((TARGET_THUMB || !frame_pointer_needed)
9251 && REGNO (addr) == SP_REGNUM)
9252 return 0;
9253
9254 /* Oh dear. The argument is pointed to by a register rather
9255 than being held in a register, or being stored at a known
9256 offset from the frame pointer. Since GDB only understands
9257 those two kinds of argument we must translate the address
9258 held in the register into an offset from the frame pointer.
9259 We do this by searching through the insns for the function
9260 looking to see where this register gets its value. If the
9261 register is initialised from the frame pointer plus an offset
9262 then we are in luck and we can continue, otherwise we give up.
9263
9264 This code is exercised by producing debugging information
9265 for a function with arguments like this:
9266
9267 double func (double a, double b, int c, double d) {return d;}
9268
9269 Without this code the stab for parameter 'd' will be set to
9270 an offset of 0 from the frame pointer, rather than 8. */
9271
9272 /* The if() statement says:
9273
9274 If the insn is a normal instruction
9275 and if the insn is setting the value in a register
9276 and if the register being set is the register holding the address of the argument
9277 and if the address is computed by an addition
9278 that involves adding to a register
9279 which is the frame pointer
9280 a constant integer
9281
9282 then... */
9283
9284 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9285 {
9286 if ( GET_CODE (insn) == INSN
9287 && GET_CODE (PATTERN (insn)) == SET
9288 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
9289 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
9290 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
9291 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
9292 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
9293 )
9294 {
9295 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
9296
9297 break;
9298 }
9299 }
9300
9301 if (value == 0)
9302 {
9303 debug_rtx (addr);
9304 warning ("unable to compute real location of stacked parameter");
9305 value = 8; /* XXX magic hack */
9306 }
9307
9308 return value;
9309 }
9310
9311 #define def_builtin(NAME, TYPE, CODE) \
9312 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL)
9313
9314 void
9315 arm_init_builtins ()
9316 {
9317 tree endlink = void_list_node;
9318 tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
9319 tree pchar_type_node = build_pointer_type (char_type_node);
9320
9321 tree int_ftype_int, void_ftype_pchar;
9322
9323 /* void func (void *) */
9324 void_ftype_pchar
9325 = build_function_type (void_type_node,
9326 tree_cons (NULL_TREE, pchar_type_node, endlink));
9327
9328 /* int func (int) */
9329 int_ftype_int
9330 = build_function_type (integer_type_node, int_endlink);
9331
9332 /* Initialize arm V5 builtins. */
9333 if (arm_arch5)
9334 def_builtin ("__builtin_clz", int_ftype_int, ARM_BUILTIN_CLZ);
9335 }
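/* So, on a target where arm_arch5 is set, a (hypothetical) call such
   as __builtin_clz (x) is expanded through ARM_BUILTIN_CLZ and
   CODE_FOR_clz -- see arm_expand_builtin below -- into a single clz
   instruction. */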
9336
9337 /* Expand an expression EXP that calls a built-in function,
9338 with result going to TARGET if that's convenient
9339 (and in mode MODE if that's convenient).
9340 SUBTARGET may be used as the target for computing one of EXP's operands.
9341 IGNORE is nonzero if the value is to be ignored. */
9342
9343 rtx
9344 arm_expand_builtin (exp, target, subtarget, mode, ignore)
9345 tree exp;
9346 rtx target;
9347 rtx subtarget ATTRIBUTE_UNUSED;
9348 enum machine_mode mode ATTRIBUTE_UNUSED;
9349 int ignore ATTRIBUTE_UNUSED;
9350 {
9351 enum insn_code icode;
9352 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9353 tree arglist = TREE_OPERAND (exp, 1);
9354 tree arg0;
9355 rtx op0, pat;
9356 enum machine_mode tmode, mode0;
9357 int fcode = DECL_FUNCTION_CODE (fndecl);
9358
9359 switch (fcode)
9360 {
9361 default:
9362 break;
9363
9364 case ARM_BUILTIN_CLZ:
9365 icode = CODE_FOR_clz;
9366 arg0 = TREE_VALUE (arglist);
9367 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
9368 tmode = insn_data[icode].operand[0].mode;
9369 mode0 = insn_data[icode].operand[1].mode;
9370
9371 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9372 op0 = copy_to_mode_reg (mode0, op0);
9373 if (target == 0
9374 || GET_MODE (target) != tmode
9375 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9376 target = gen_reg_rtx (tmode);
9377 pat = GEN_FCN (icode) (target, op0);
9378 if (! pat)
9379 return 0;
9380 emit_insn (pat);
9381 return target;
9382 }
9383
9384 /* @@@ Should really do something sensible here. */
9385 return NULL_RTX;
9386 }
9387 \f
9388 /* Recursively search through all of the blocks in a function
9389 checking to see if any of the variables created in that
9390 function match the RTX called 'orig'. If they do then
9391 replace them with the RTX called 'new'. */
9392
9393 static void
9394 replace_symbols_in_block (block, orig, new)
9395 tree block;
9396 rtx orig;
9397 rtx new;
9398 {
9399 for (; block; block = BLOCK_CHAIN (block))
9400 {
9401 tree sym;
9402
9403 if (!TREE_USED (block))
9404 continue;
9405
9406 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
9407 {
9408 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
9409 || DECL_IGNORED_P (sym)
9410 || TREE_CODE (sym) != VAR_DECL
9411 || DECL_EXTERNAL (sym)
9412 || !rtx_equal_p (DECL_RTL (sym), orig)
9413 )
9414 continue;
9415
9416 SET_DECL_RTL (sym, new);
9417 }
9418
9419 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
9420 }
9421 }
9422
9423 /* Return the number (counting from 0) of
9424 the least significant set bit in MASK. */
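/* For example, number_of_first_bit_set (0x28) returns 3, since bit 3
   is the lowest bit set in binary 101000. MASK must be non-zero or
   the loop will never terminate. */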
9425
9426 #ifdef __GNUC__
9427 inline
9428 #endif
9429 static int
9430 number_of_first_bit_set (mask)
9431 int mask;
9432 {
9433 int bit;
9434
9435 for (bit = 0;
9436 (mask & (1 << bit)) == 0;
9437 ++bit)
9438 continue;
9439
9440 return bit;
9441 }
9442
9443 /* Generate code to return from a thumb function.
9444 If 'reg_containing_return_addr' is -1, then the return address is
9445 actually on the stack, at the stack pointer. */
9446 static void
9447 thumb_exit (f, reg_containing_return_addr, eh_ofs)
9448 FILE * f;
9449 int reg_containing_return_addr;
9450 rtx eh_ofs;
9451 {
9452 unsigned regs_available_for_popping;
9453 unsigned regs_to_pop;
9454 int pops_needed;
9455 unsigned available;
9456 unsigned required;
9457 int mode;
9458 int size;
9459 int restore_a4 = FALSE;
9460
9461 /* Compute the registers we need to pop. */
9462 regs_to_pop = 0;
9463 pops_needed = 0;
9464
9465 /* There is an assumption here, that if eh_ofs is not NULL, the
9466 normal return address will have been pushed. */
9467 if (reg_containing_return_addr == -1 || eh_ofs)
9468 {
9469 /* When we are generating a return for __builtin_eh_return,
9470 reg_containing_return_addr must specify the return regno. */
9471 if (eh_ofs && reg_containing_return_addr == -1)
9472 abort ();
9473
9474 regs_to_pop |= 1 << LR_REGNUM;
9475 ++pops_needed;
9476 }
9477
9478 if (TARGET_BACKTRACE)
9479 {
9480 /* Restore the (ARM) frame pointer and stack pointer. */
9481 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
9482 pops_needed += 2;
9483 }
9484
9485 /* If there is nothing to pop then just emit the BX instruction and
9486 return. */
9487 if (pops_needed == 0)
9488 {
9489 if (eh_ofs)
9490 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9491
9492 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9493 return;
9494 }
9495 /* Otherwise if we are not supporting interworking and we have not created
9496 a backtrace structure and the function was not entered in ARM mode then
9497 just pop the return address straight into the PC. */
9498 else if (!TARGET_INTERWORK
9499 && !TARGET_BACKTRACE
9500 && !is_called_in_ARM_mode (current_function_decl))
9501 {
9502 if (eh_ofs)
9503 {
9504 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
9505 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9506 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9507 }
9508 else
9509 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
9510
9511 return;
9512 }
9513
9514 /* Find out how many of the (return) argument registers we can corrupt. */
9515 regs_available_for_popping = 0;
9516
9517 /* If returning via __builtin_eh_return, the bottom three registers
9518 all contain information needed for the return. */
9519 if (eh_ofs)
9520 size = 12;
9521 else
9522 {
9523 #ifdef RTX_CODE
9524 /* We can deduce the registers used from the function's
9525 return value. This is more reliable than examining
9526 regs_ever_live[] because that will be set if the register is
9527 ever used in the function, not just if the register is used
9528 to hold a return value. */
9529
9530 if (current_function_return_rtx != 0)
9531 mode = GET_MODE (current_function_return_rtx);
9532 else
9533 #endif
9534 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9535
9536 size = GET_MODE_SIZE (mode);
9537
9538 if (size == 0)
9539 {
9540 /* In a void function we can use any argument register.
9541 In a function that returns a structure on the stack
9542 we can use the second and third argument registers. */
9543 if (mode == VOIDmode)
9544 regs_available_for_popping =
9545 (1 << ARG_REGISTER (1))
9546 | (1 << ARG_REGISTER (2))
9547 | (1 << ARG_REGISTER (3));
9548 else
9549 regs_available_for_popping =
9550 (1 << ARG_REGISTER (2))
9551 | (1 << ARG_REGISTER (3));
9552 }
9553 else if (size <= 4)
9554 regs_available_for_popping =
9555 (1 << ARG_REGISTER (2))
9556 | (1 << ARG_REGISTER (3));
9557 else if (size <= 8)
9558 regs_available_for_popping =
9559 (1 << ARG_REGISTER (3));
9560 }
9561
9562 /* Match registers to be popped with registers into which we pop them. */
9563 for (available = regs_available_for_popping,
9564 required = regs_to_pop;
9565 required != 0 && available != 0;
9566 available &= ~(available & - available),
9567 required &= ~(required & - required))
9568 -- pops_needed;
9569
9570 /* If we have any popping registers left over, remove them. */
9571 if (available > 0)
9572 regs_available_for_popping &= ~available;
9573
9574 /* Otherwise if we need another popping register we can use
9575 the fourth argument register. */
9576 else if (pops_needed)
9577 {
9578 /* If we have not found any free argument registers and
9579 reg a4 contains the return address, we must move it. */
9580 if (regs_available_for_popping == 0
9581 && reg_containing_return_addr == LAST_ARG_REGNUM)
9582 {
9583 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
9584 reg_containing_return_addr = LR_REGNUM;
9585 }
9586 else if (size > 12)
9587 {
9588 /* Register a4 is being used to hold part of the return value,
9589 but we have dire need of a free, low register. */
9590 restore_a4 = TRUE;
9591
9592 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
9593 }
9594
9595 if (reg_containing_return_addr != LAST_ARG_REGNUM)
9596 {
9597 /* The fourth argument register is available. */
9598 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
9599
9600 --pops_needed;
9601 }
9602 }
9603
9604 /* Pop as many registers as we can. */
9605 thumb_pushpop (f, regs_available_for_popping, FALSE);
9606
9607 /* Process the registers we popped. */
9608 if (reg_containing_return_addr == -1)
9609 {
9610 /* The return address was popped into the lowest numbered register. */
9611 regs_to_pop &= ~(1 << LR_REGNUM);
9612
9613 reg_containing_return_addr =
9614 number_of_first_bit_set (regs_available_for_popping);
9615
9616 /* Remove this register from the mask of available registers, so that
9617 the return address will not be corrupted by further pops. */
9618 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
9619 }
9620
9621 /* If we popped other registers then handle them here. */
9622 if (regs_available_for_popping)
9623 {
9624 int frame_pointer;
9625
9626 /* Work out which register currently contains the frame pointer. */
9627 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
9628
9629 /* Move it into the correct place. */
9630 asm_fprintf (f, "\tmov\t%r, %r\n",
9631 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
9632
9633 /* (Temporarily) remove it from the mask of popped registers. */
9634 regs_available_for_popping &= ~(1 << frame_pointer);
9635 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
9636
9637 if (regs_available_for_popping)
9638 {
9639 int stack_pointer;
9640
9641 /* We popped the stack pointer as well,
9642 find the register that contains it. */
9643 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
9644
9645 /* Move it into the stack register. */
9646 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
9647
9648 /* At this point we have popped all necessary registers, so
9649 do not worry about restoring regs_available_for_popping
9650 to its correct value:
9651
9652 assert (pops_needed == 0)
9653 assert (regs_available_for_popping == (1 << frame_pointer))
9654 assert (regs_to_pop == (1 << STACK_POINTER)) */
9655 }
9656 else
9657 {
9658 /* Since we have just moved the popped value into the frame
9659 pointer, the popping register is available for reuse, and
9660 we know that we still have the stack pointer left to pop. */
9661 regs_available_for_popping |= (1 << frame_pointer);
9662 }
9663 }
9664
9665 /* If we still have registers left on the stack, but we no longer have
9666 any registers into which we can pop them, then we must move the return
9667 address into the link register and make available the register that
9668 contained it. */
9669 if (regs_available_for_popping == 0 && pops_needed > 0)
9670 {
9671 regs_available_for_popping |= 1 << reg_containing_return_addr;
9672
9673 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
9674 reg_containing_return_addr);
9675
9676 reg_containing_return_addr = LR_REGNUM;
9677 }
9678
9679 /* If we have registers left on the stack then pop some more.
9680 We know that at most we will want to pop FP and SP. */
9681 if (pops_needed > 0)
9682 {
9683 int popped_into;
9684 int move_to;
9685
9686 thumb_pushpop (f, regs_available_for_popping, FALSE);
9687
9688 /* We have popped either FP or SP.
9689 Move whichever one it is into the correct register. */
9690 popped_into = number_of_first_bit_set (regs_available_for_popping);
9691 move_to = number_of_first_bit_set (regs_to_pop);
9692
9693 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
9694
9695 regs_to_pop &= ~(1 << move_to);
9696
9697 --pops_needed;
9698 }
9699
9700 /* If we still have not popped everything then we must have only
9701 had one register available to us and we are now popping the SP. */
9702 if (pops_needed > 0)
9703 {
9704 int popped_into;
9705
9706 thumb_pushpop (f, regs_available_for_popping, FALSE);
9707
9708 popped_into = number_of_first_bit_set (regs_available_for_popping);
9709
9710 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
9711 /*
9712 assert (regs_to_pop == (1 << STACK_POINTER))
9713 assert (pops_needed == 1)
9714 */
9715 }
9716
9717 /* If necessary restore the a4 register. */
9718 if (restore_a4)
9719 {
9720 if (reg_containing_return_addr != LR_REGNUM)
9721 {
9722 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
9723 reg_containing_return_addr = LR_REGNUM;
9724 }
9725
9726 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
9727 }
9728
9729 if (eh_ofs)
9730 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9731
9732 /* Return to caller. */
9733 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9734 }
9735
9736 /* Emit code to push or pop registers to or from the stack. */
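/* For example (register choice illustrative): a MASK of
   (1 << 4) | (1 << 5) | (1 << LR_REGNUM) with PUSH set emits

        push    {r4, r5, lr}

   while the same low-register mask plus (1 << PC_REGNUM) with PUSH
   clear emits "pop {r4, r5, pc}", unless interworking or backtrace
   support forces the return to go through thumb_exit instead.  */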
9737
9738 static void
9739 thumb_pushpop (f, mask, push)
9740 FILE * f;
9741 int mask;
9742 int push;
9743 {
9744 int regno;
9745 int lo_mask = mask & 0xFF;
9746
9747 if (lo_mask == 0 && !push && (mask & (1 << 15)))
9748 {
9749 /* Special case. Do not generate a POP PC statement here; do it in
9750 thumb_exit (). */
9751 thumb_exit (f, -1, NULL_RTX);
9752 return;
9753 }
9754
9755 fprintf (f, "\t%s\t{", push ? "push" : "pop");
9756
9757 /* Look at the low registers first. */
9758 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
9759 {
9760 if (lo_mask & 1)
9761 {
9762 asm_fprintf (f, "%r", regno);
9763
9764 if ((lo_mask & ~1) != 0)
9765 fprintf (f, ", ");
9766 }
9767 }
9768
9769 if (push && (mask & (1 << LR_REGNUM)))
9770 {
9771 /* Catch pushing the LR. */
9772 if (mask & 0xFF)
9773 fprintf (f, ", ");
9774
9775 asm_fprintf (f, "%r", LR_REGNUM);
9776 }
9777 else if (!push && (mask & (1 << PC_REGNUM)))
9778 {
9779 /* Catch popping the PC. */
9780 if (TARGET_INTERWORK || TARGET_BACKTRACE)
9781 {
9782 /* The PC is never popped directly; instead
9783 it is popped into r3 and then BX is used. */
9784 fprintf (f, "}\n");
9785
9786 thumb_exit (f, -1, NULL_RTX);
9787
9788 return;
9789 }
9790 else
9791 {
9792 if (mask & 0xFF)
9793 fprintf (f, ", ");
9794
9795 asm_fprintf (f, "%r", PC_REGNUM);
9796 }
9797 }
9798
9799 fprintf (f, "}\n");
9800 }
9801 \f
9802 void
9803 thumb_final_prescan_insn (insn)
9804 rtx insn;
9805 {
9806 if (flag_print_asm_name)
9807 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
9808 INSN_ADDRESSES (INSN_UID (insn)));
9809 }
9810
9811 int
9812 thumb_shiftable_const (val)
9813 unsigned HOST_WIDE_INT val;
9814 {
9815 unsigned HOST_WIDE_INT mask = 0xff;
9816 int i;
9817
9818 if (val == 0) /* XXX */
9819 return 0;
9820
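  /* The test below accepts any 8-bit value shifted left by 0..24
     bits.  For example, 0x00ff0000 (0xff << 16) is accepted, while
     0x00100100 spans more than eight contiguous bits and is
     rejected.  */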
9821 for (i = 0; i < 25; i++)
9822 if ((val & (mask << i)) == val)
9823 return 1;
9824
9825 return 0;
9826 }
9827
9828 /* Returns non-zero if the current function contains,
9829 or might contain, a far jump. */
9830
9831 int
9832 thumb_far_jump_used_p (int in_prologue)
9833 {
9834 rtx insn;
9835
9836 /* This test is only important for leaf functions. */
9837 /* assert (!leaf_function_p ()); */
9838
9839 /* If we have already decided that far jumps may be used,
9840 do not bother checking again, and always return true even if
9841 it turns out that they are not being used. Once we have made
9842 the decision that far jumps are present (and that hence the link
9843 register will be pushed onto the stack) we cannot go back on it. */
9844 if (cfun->machine->far_jump_used)
9845 return 1;
9846
9847 /* If this function is not being called from the prologue/epilogue
9848 generation code then it must be being called from the
9849 INITIAL_ELIMINATION_OFFSET macro. */
9850 if (!in_prologue)
9851 {
9852 /* In this case we know that we are being asked about the elimination
9853 of the arg pointer register. If that register is not being used,
9854 then there are no arguments on the stack, and we do not have to
9855 worry that a far jump might force the prologue to push the link
9856 register, changing the stack offsets. In this case we can just
9857 return false, since the presence of far jumps in the function will
9858 not affect stack offsets.
9859
9860 If the arg pointer is live (or if it was live, but has now been
9861 eliminated and so set to dead) then we do have to test to see if
9862 the function might contain a far jump. This test can lead to some
9863 false positives, since before reload is completed the length of
9864 branch instructions is not known, so gcc defaults to returning their
9865 longest length, which in turn sets the far jump attribute to true.
9866
9867 A false positive will not result in bad code being generated, but it
9868 will result in a needless push and pop of the link register. We
9869 hope that this does not occur too often. */
9870 if (regs_ever_live [ARG_POINTER_REGNUM])
9871 cfun->machine->arg_pointer_live = 1;
9872 else if (!cfun->machine->arg_pointer_live)
9873 return 0;
9874 }
9875
9876 /* Check to see if the function contains a branch
9877 insn with the far jump attribute set. */
9878 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9879 {
9880 if (GET_CODE (insn) == JUMP_INSN
9881 /* Ignore tablejump patterns. */
9882 && GET_CODE (PATTERN (insn)) != ADDR_VEC
9883 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
9884 && get_attr_far_jump (insn) == FAR_JUMP_YES
9885 )
9886 {
9887 /* Record the fact that we have decided that
9888 the function does use far jumps. */
9889 cfun->machine->far_jump_used = 1;
9890 return 1;
9891 }
9892 }
9893
9894 return 0;
9895 }
9896
9897 /* Return non-zero if FUNC must be entered in ARM mode. */
9898
9899 int
9900 is_called_in_ARM_mode (func)
9901 tree func;
9902 {
9903 if (TREE_CODE (func) != FUNCTION_DECL)
9904 abort ();
9905
9906 /* Ignore the problem of functions whose address is taken. */
9907 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
9908 return TRUE;
9909
9910 #ifdef ARM_PE
9911 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
9912 #else
9913 return FALSE;
9914 #endif
9915 }
9916
9917 /* The bits which aren't usefully expanded as rtl. */
9918
9919 const char *
9920 thumb_unexpanded_epilogue ()
9921 {
9922 int regno;
9923 int live_regs_mask = 0;
9924 int high_regs_pushed = 0;
9925 int leaf_function = leaf_function_p ();
9926 int had_to_push_lr;
9927 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9928
9929 if (return_used_this_function)
9930 return "";
9931
9932 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
9933 if (regs_ever_live[regno] && !call_used_regs[regno]
9934 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
9935 live_regs_mask |= 1 << regno;
9936
9937 for (regno = 8; regno < 13; regno++)
9938 {
9939 if (regs_ever_live[regno] && !call_used_regs[regno]
9940 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
9941 high_regs_pushed++;
9942 }
9943
9944 /* The prologue may have pushed some high registers to use as
9945 work registers; e.g. the testsuite file:
9946 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
9947 compiles to produce:
9948 push {r4, r5, r6, r7, lr}
9949 mov r7, r9
9950 mov r6, r8
9951 push {r6, r7}
9952 as part of the prologue. We have to undo that pushing here. */
9953
9954 if (high_regs_pushed)
9955 {
9956 int mask = live_regs_mask;
9957 int next_hi_reg;
9958 int size;
9959 int mode;
9960
9961 #ifdef RTX_CODE
9962 /* Where possible, deduce the registers used from the function's return
9963 value. This is more reliable than examining regs_ever_live[] because that
9964 will be set if the register is ever used in the function, not just if
9965 the register is used to hold a return value. */
9966
9967 if (current_function_return_rtx != 0)
9968 mode = GET_MODE (current_function_return_rtx);
9969 else
9970 #endif
9971 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9972
9973 size = GET_MODE_SIZE (mode);
9974
9975 /* Unless we are returning a type of size > 12 register r3 is
9976 available. */
9977 if (size < 13)
9978 mask |= 1 << 3;
9979
9980 if (mask == 0)
9981 /* Oh dear! We have no low registers into which we can pop
9982 high registers! */
9983 internal_error
9984 ("no low registers available for popping high registers");
9985
9986 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
9987 if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]
9988 && !(TARGET_SINGLE_PIC_BASE && (next_hi_reg == arm_pic_register)))
9989 break;
9990
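      /* For example (illustrative): if r8 and r9 were pushed and the
         low registers r1 and r2 are free in MASK, the loop below
         emits

                pop     {r1, r2}
                mov     r8, r1
                mov     r9, r2

         since a Thumb pop cannot target r8-r12 directly.  */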
9991 while (high_regs_pushed)
9992 {
9993 /* Find lo register(s) into which the high register(s) can
9994 be popped. */
9995 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
9996 {
9997 if (mask & (1 << regno))
9998 high_regs_pushed--;
9999 if (high_regs_pushed == 0)
10000 break;
10001 }
10002
10003 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
10004
10005 /* Pop the values into the low register(s). */
10006 thumb_pushpop (asm_out_file, mask, 0);
10007
10008 /* Move the value(s) into the high registers. */
10009 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10010 {
10011 if (mask & (1 << regno))
10012 {
10013 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
10014 regno);
10015
10016 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
10017 if (regs_ever_live[next_hi_reg]
10018 && !call_used_regs[next_hi_reg]
10019 && !(TARGET_SINGLE_PIC_BASE
10020 && (next_hi_reg == arm_pic_register)))
10021 break;
10022 }
10023 }
10024 }
10025 }
10026
10027 had_to_push_lr = (live_regs_mask || !leaf_function
10028 || thumb_far_jump_used_p (1));
10029
10030 if (TARGET_BACKTRACE
10031 && ((live_regs_mask & 0xFF) == 0)
10032 && regs_ever_live [LAST_ARG_REGNUM] != 0)
10033 {
10034 /* The stack backtrace structure creation code had to
10035 push R7 in order to get a work register, so we pop
10036 it now. */
10037 live_regs_mask |= (1 << LAST_LO_REGNUM);
10038 }
10039
10040 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
10041 {
10042 if (had_to_push_lr
10043 && !is_called_in_ARM_mode (current_function_decl)
10044 && !eh_ofs)
10045 live_regs_mask |= 1 << PC_REGNUM;
10046
10047 /* Either no argument registers were pushed or a backtrace
10048 structure was created which includes an adjusted stack
10049 pointer, so just pop everything. */
10050 if (live_regs_mask)
10051 thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
10052
10053 if (eh_ofs)
10054 thumb_exit (asm_out_file, 2, eh_ofs);
10055 /* We have either just popped the return address into the
10056 PC, or it was kept in LR for the entire function, or
10057 it is still on the stack because we do not want to
10058 return by doing a pop {pc}. */
10059 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
10060 thumb_exit (asm_out_file,
10061 (had_to_push_lr
10062 && is_called_in_ARM_mode (current_function_decl)) ?
10063 -1 : LR_REGNUM, NULL_RTX);
10064 }
10065 else
10066 {
10067 /* Pop everything but the return address. */
10068 live_regs_mask &= ~(1 << PC_REGNUM);
10069
10070 if (live_regs_mask)
10071 thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
10072
10073 if (had_to_push_lr)
10074 /* Get the return address into a temporary register. */
10075 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0);
10076
10077 /* Remove the argument registers that were pushed onto the stack. */
10078 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
10079 SP_REGNUM, SP_REGNUM,
10080 current_function_pretend_args_size);
10081
10082 if (eh_ofs)
10083 thumb_exit (asm_out_file, 2, eh_ofs);
10084 else
10085 thumb_exit (asm_out_file,
10086 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
10087 }
10088
10089 return "";
10090 }
10091
10092 /* Functions to save and restore machine-specific function data. */
10093
10094 static void
10095 arm_mark_machine_status (p)
10096 struct function * p;
10097 {
10098 machine_function *machine = p->machine;
10099
10100 if (machine)
10101 ggc_mark_rtx (machine->eh_epilogue_sp_ofs);
10102 }
10103
10104 static void
10105 arm_init_machine_status (p)
10106 struct function * p;
10107 {
10108 p->machine =
10109 (machine_function *) xcalloc (1, sizeof (machine_function));
10110
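  /* xcalloc has already zeroed the structure, so the explicit
     initialization is only needed when ARM_FT_UNKNOWN is non-zero.  */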
10111 #if ARM_FT_UNKNOWN != 0
10112 ((machine_function *) p->machine)->func_type = ARM_FT_UNKNOWN;
10113 #endif
10114 }
10115
10116 static void
10117 arm_free_machine_status (p)
10118 struct function * p;
10119 {
10120 if (p->machine)
10121 {
10122 free (p->machine);
10123 p->machine = NULL;
10124 }
10125 }
10126
10127 /* Return an RTX indicating where the return address to the
10128 calling function can be found. */
10129
10130 rtx
10131 arm_return_addr (count, frame)
10132 int count;
10133 rtx frame ATTRIBUTE_UNUSED;
10134 {
10135 if (count != 0)
10136 return NULL_RTX;
10137
10138 if (TARGET_APCS_32)
10139 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
10140 else
10141 {
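      /* In 26-bit mode the PSR flags share r14 with the return
         address, so they must be masked off before the value can be
         used.  */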
10142 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
10143 GEN_INT (RETURN_ADDR_MASK26));
10144 return get_func_hard_reg_initial_val (cfun, lr);
10145 }
10146 }
10147
10148 /* Do anything needed before RTL is emitted for each function. */
10149
10150 void
10151 arm_init_expanders ()
10152 {
10153 /* Arrange to initialize and mark the machine per-function status. */
10154 init_machine_status = arm_init_machine_status;
10155 mark_machine_status = arm_mark_machine_status;
10156 free_machine_status = arm_free_machine_status;
10157 }
10158
10159 /* Generate the rest of a function's prologue. */
10160
10161 void
10162 thumb_expand_prologue ()
10163 {
10164 HOST_WIDE_INT amount = (get_frame_size ()
10165 + current_function_outgoing_args_size);
10166 unsigned long func_type;
10167
10168 func_type = arm_current_func_type ();
10169
10170 /* Naked functions don't have prologues. */
10171 if (IS_NAKED (func_type))
10172 return;
10173
10174 if (IS_INTERRUPT (func_type))
10175 {
10176 error ("interrupt service routines cannot be coded in Thumb mode");
10177 return;
10178 }
10179
10180 if (frame_pointer_needed)
10181 emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
10182
10183 if (amount)
10184 {
10185 amount = ROUND_UP (amount);
10186
10187 if (amount < 512)
10188 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10189 GEN_INT (- amount)));
10190 else
10191 {
10192 int regno;
10193 rtx reg;
10194
10195 /* The stack decrement is too big for an immediate value in a single
10196 insn. In theory we could issue multiple subtracts, but after
10197 three of them it becomes more space efficient to place the full
10198 value in the constant pool and load into a register. (Also the
10199 ARM debugger really likes to see only one stack decrement per
10200 function). So instead we look for a scratch register into which
10201 we can load the decrement, and then we subtract this from the
10202 stack pointer. Unfortunately, on the Thumb the only available
10203 scratch registers are the argument registers, and we cannot use
10204 these as they may hold arguments to the function. Instead we
10205 attempt to locate a call preserved register which is used by this
10206 function. If we can find one, then we know that it will have
10207 been pushed at the start of the prologue and so we can corrupt
10208 it now. */
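          /* For example (illustrative): for a 4096-byte frame, and
             assuming the search below finds r4 (a call-preserved
             register already saved by the prologue), the code emitted
             is

                ldr     r4, .Lpool      @ .Lpool holds -4096
                add     sp, r4

             where .Lpool is a minipool entry.  */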
10209 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
10210 if (regs_ever_live[regno]
10211 && !call_used_regs[regno] /* Paranoia */
10212 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register))
10213 && !(frame_pointer_needed
10214 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
10215 break;
10216
10217 if (regno > LAST_LO_REGNUM) /* Very unlikely */
10218 {
10219 rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
10220
10221 /* Choose an arbitrary, non-argument low register. */
10222 reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
10223
10224 /* Save it by copying it into a high, scratch register. */
10225 emit_insn (gen_movsi (spare, reg));
10226 /* Add a USE to stop propagate_one_insn() from barfing. */
10227 emit_insn (gen_prologue_use (spare));
10228
10229 /* Decrement the stack. */
10230 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
10231 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10232 reg));
10233
10234 /* Restore the low register's original value. */
10235 emit_insn (gen_movsi (reg, spare));
10236
10237 /* Emit a USE of the restored scratch register, so that flow
10238 analysis will not consider the restore redundant. The
10239 register won't be used again in this function and isn't
10240 restored by the epilogue. */
10241 emit_insn (gen_prologue_use (reg));
10242 }
10243 else
10244 {
10245 reg = gen_rtx (REG, SImode, regno);
10246
10247 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
10248 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10249 reg));
10250 }
10251 }
10252 }
10253
10254 if (current_function_profile || TARGET_NO_SCHED_PRO)
10255 emit_insn (gen_blockage ());
10256 }
10257
10258 void
10259 thumb_expand_epilogue ()
10260 {
10261 HOST_WIDE_INT amount = (get_frame_size ()
10262 + current_function_outgoing_args_size);
10263
10264 /* Naked functions don't have epilogues. */
10265 if (IS_NAKED (arm_current_func_type ()))
10266 return;
10267
10268 if (frame_pointer_needed)
10269 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
10270 else if (amount)
10271 {
10272 amount = ROUND_UP (amount);
10273
10274 if (amount < 512)
10275 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10276 GEN_INT (amount)));
10277 else
10278 {
10279 /* r3 is always free in the epilogue. */
10280 rtx reg = gen_rtx (REG, SImode, LAST_ARG_REGNUM);
10281
10282 emit_insn (gen_movsi (reg, GEN_INT (amount)));
10283 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
10284 }
10285 }
10286
10287 /* Emit a USE (stack_pointer_rtx), so that
10288 the stack adjustment will not be deleted. */
10289 emit_insn (gen_prologue_use (stack_pointer_rtx));
10290
10291 if (current_function_profile || TARGET_NO_SCHED_PRO)
10292 emit_insn (gen_blockage ());
10293 }
10294
10295 static void
10296 thumb_output_function_prologue (f, size)
10297 FILE * f;
10298 HOST_WIDE_INT size ATTRIBUTE_UNUSED;
10299 {
10300 int live_regs_mask = 0;
10301 int high_regs_pushed = 0;
10302 int regno;
10303
10304 if (IS_NAKED (arm_current_func_type ()))
10305 return;
10306
10307 if (is_called_in_ARM_mode (current_function_decl))
10308 {
10309 const char * name;
10310
10311 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
10312 abort ();
10313 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
10314 abort ();
10315 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
10316
10317 /* Generate code sequence to switch us into Thumb mode. */
10318 /* The .code 32 directive has already been emitted by
10319 ASM_DECLARE_FUNCTION_NAME. */
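      /* In ARM state the pc reads as the address of the current
         instruction plus 8, which here is the first Thumb instruction
         after the bx; setting bit zero makes the bx switch into Thumb
         state.  */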
10320 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
10321 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
10322
10323 /* Generate a label, so that the debugger will notice the
10324 change in instruction sets. This label is also used by
10325 the assembler to bypass the ARM code when this function
10326 is called from a Thumb encoded function elsewhere in the
10327 same file. Hence the definition of STUB_NAME here must
10328 agree with the definition in gas/config/tc-arm.c */
10329
10330 #define STUB_NAME ".real_start_of"
10331
10332 asm_fprintf (f, "\t.code\t16\n");
10333 #ifdef ARM_PE
10334 if (arm_dllexport_name_p (name))
10335 name = arm_strip_name_encoding (name);
10336 #endif
10337 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
10338 asm_fprintf (f, "\t.thumb_func\n");
10339 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
10340 }
10341
10342 if (current_function_pretend_args_size)
10343 {
10344 if (cfun->machine->uses_anonymous_args)
10345 {
10346 int num_pushes;
10347
10348 asm_fprintf (f, "\tpush\t{");
10349
10350 num_pushes = NUM_INTS (current_function_pretend_args_size);
10351
10352 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
10353 regno <= LAST_ARG_REGNUM;
10354 regno++)
10355 asm_fprintf (f, "%r%s", regno,
10356 regno == LAST_ARG_REGNUM ? "" : ", ");
10357
10358 asm_fprintf (f, "}\n");
10359 }
10360 else
10361 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
10362 SP_REGNUM, SP_REGNUM,
10363 current_function_pretend_args_size);
10364 }
10365
10366 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10367 if (regs_ever_live[regno] && !call_used_regs[regno]
10368 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
10369 live_regs_mask |= 1 << regno;
10370
10371 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
10372 live_regs_mask |= 1 << LR_REGNUM;
10373
10374 if (TARGET_BACKTRACE)
10375 {
10376 int offset;
10377 int work_register = 0;
10378 int wr;
10379
10380 /* We have been asked to create a stack backtrace structure.
10381 The code looks like this:
10382
10383 0 .align 2
10384 0 func:
10385 0 sub SP, #16 Reserve space for 4 registers.
10386 2 push {R7} Get a work register.
10387 4 add R7, SP, #20 Get the stack pointer before the push.
10388 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
10389 8 mov R7, PC Get hold of the start of this code plus 12.
10390 10 str R7, [SP, #16] Store it.
10391 12 mov R7, FP Get hold of the current frame pointer.
10392 14 str R7, [SP, #4] Store it.
10393 16 mov R7, LR Get hold of the current return address.
10394 18 str R7, [SP, #12] Store it.
10395 20 add R7, SP, #16 Point at the start of the backtrace structure.
10396 22 mov FP, R7 Put this value into the frame pointer. */
10397
10398 if ((live_regs_mask & 0xFF) == 0)
10399 {
10400 /* See if the a4 register is free. */
10401
10402 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
10403 work_register = LAST_ARG_REGNUM;
10404 else /* We must push a register of our own */
10405 live_regs_mask |= (1 << LAST_LO_REGNUM);
10406 }
10407
10408 if (work_register == 0)
10409 {
10410 /* Select a register from the list that will be pushed to
10411 use as our work register. */
10412 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
10413 if ((1 << work_register) & live_regs_mask)
10414 break;
10415 }
10416
10417 asm_fprintf
10418 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
10419 SP_REGNUM, SP_REGNUM);
10420
10421 if (live_regs_mask)
10422 thumb_pushpop (f, live_regs_mask, 1);
10423
10424 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
10425 if (wr & live_regs_mask)
10426 offset += 4;
10427
10428 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
10429 offset + 16 + current_function_pretend_args_size);
10430
10431 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10432 offset + 4);
10433
10434 /* Make sure that the instruction fetching the PC is in the right place
10435 to calculate "start of backtrace creation code + 12". */
10436 if (live_regs_mask)
10437 {
10438 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
10439 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10440 offset + 12);
10441 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
10442 ARM_HARD_FRAME_POINTER_REGNUM);
10443 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10444 offset);
10445 }
10446 else
10447 {
10448 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
10449 ARM_HARD_FRAME_POINTER_REGNUM);
10450 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10451 offset);
10452 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
10453 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10454 offset + 12);
10455 }
10456
10457 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
10458 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10459 offset + 8);
10460 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
10461 offset + 12);
10462 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
10463 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
10464 }
10465 else if (live_regs_mask)
10466 thumb_pushpop (f, live_regs_mask, 1);
10467
10468 for (regno = 8; regno < 13; regno++)
10469 {
10470 if (regs_ever_live[regno] && !call_used_regs[regno]
10471 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
10472 high_regs_pushed++;
10473 }
10474
10475 if (high_regs_pushed)
10476 {
10477 int pushable_regs = 0;
10478 int mask = live_regs_mask & 0xff;
10479 int next_hi_reg;
10480
10481 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
10482 {
10483 if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]
10484 && !(TARGET_SINGLE_PIC_BASE
10485 && (next_hi_reg == arm_pic_register)))
10486 break;
10487 }
10488
10489 pushable_regs = mask;
10490
10491 if (pushable_regs == 0)
10492 {
10493 /* Desperation time -- this probably will never happen. */
10494 if (regs_ever_live[LAST_ARG_REGNUM]
10495 || !call_used_regs[LAST_ARG_REGNUM])
10496 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
10497 mask = 1 << LAST_ARG_REGNUM;
10498 }
10499
10500 while (high_regs_pushed > 0)
10501 {
10502 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
10503 {
10504 if (mask & (1 << regno))
10505 {
10506 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
10507
10508 high_regs_pushed--;
10509
10510 if (high_regs_pushed)
10511 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
10512 next_hi_reg--)
10513 {
10514 if (regs_ever_live[next_hi_reg]
10515 && !call_used_regs[next_hi_reg]
10516 && !(TARGET_SINGLE_PIC_BASE
10517 && (next_hi_reg == arm_pic_register)))
10518 break;
10519 }
10520 else
10521 {
10522 mask &= ~((1 << regno) - 1);
10523 break;
10524 }
10525 }
10526 }
10527
10528 thumb_pushpop (f, mask, 1);
10529 }
10530
10531 if (pushable_regs == 0
10532 && (regs_ever_live[LAST_ARG_REGNUM]
10533 || !call_used_regs[LAST_ARG_REGNUM]))
10534 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
10535 }
10536 }
10537
10538 /* Handle the case of a double word load into a low register from
10539 a computed memory address. The computed address may involve a
10540 register which is overwritten by the load. */
10541
10542 const char *
10543 thumb_load_double_from_address (operands)
10544 rtx *operands;
10545 {
10546 rtx addr;
10547 rtx base;
10548 rtx offset;
10549 rtx arg1;
10550 rtx arg2;
10551
10552 if (GET_CODE (operands[0]) != REG)
10553 abort ();
10554
10555 if (GET_CODE (operands[1]) != MEM)
10556 abort ();
10557
10558 /* Get the memory address. */
10559 addr = XEXP (operands[1], 0);
10560
10561 /* Work out how the memory address is computed. */
10562 switch (GET_CODE (addr))
10563 {
10564 case REG:
10565 operands[2] = gen_rtx (MEM, SImode,
10566 plus_constant (XEXP (operands[1], 0), 4));
10567
10568 if (REGNO (operands[0]) == REGNO (addr))
10569 {
10570 output_asm_insn ("ldr\t%H0, %2", operands);
10571 output_asm_insn ("ldr\t%0, %1", operands);
10572 }
10573 else
10574 {
10575 output_asm_insn ("ldr\t%0, %1", operands);
10576 output_asm_insn ("ldr\t%H0, %2", operands);
10577 }
10578 break;
10579
10580 case CONST:
10581 /* Compute <address> + 4 for the high order load. */
10582 operands[2] = gen_rtx (MEM, SImode,
10583 plus_constant (XEXP (operands[1], 0), 4));
10584
10585 output_asm_insn ("ldr\t%0, %1", operands);
10586 output_asm_insn ("ldr\t%H0, %2", operands);
10587 break;
10588
10589 case PLUS:
10590 arg1 = XEXP (addr, 0);
10591 arg2 = XEXP (addr, 1);
10592
10593 if (CONSTANT_P (arg1))
10594 base = arg2, offset = arg1;
10595 else
10596 base = arg1, offset = arg2;
10597
10598 if (GET_CODE (base) != REG)
10599 abort ();
10600
10601 /* Catch the case of <address> = <reg> + <reg>. */
10602 if (GET_CODE (offset) == REG)
10603 {
10604 int reg_offset = REGNO (offset);
10605 int reg_base = REGNO (base);
10606 int reg_dest = REGNO (operands[0]);
10607
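          /* For example (illustrative): with destination r0/r1, base
             r2 and offset r3, the three instructions emitted are

                add     r1, r2, r3
                ldr     r0, [r1, #0]
                ldr     r1, [r1, #4]

             computing the address in the high half of the destination
             so that the base and offset registers are not clobbered.  */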
10608 /* Add the base and offset registers together into the
10609 higher destination register. */
10610 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
10611 reg_dest + 1, reg_base, reg_offset);
10612
10613 /* Load the lower destination register from the address in
10614 the higher destination register. */
10615 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
10616 reg_dest, reg_dest + 1);
10617
10618 /* Load the higher destination register from its own address
10619 plus 4. */
10620 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
10621 reg_dest + 1, reg_dest + 1);
10622 }
10623 else
10624 {
10625 /* Compute <address> + 4 for the high order load. */
10626 operands[2] = gen_rtx (MEM, SImode,
10627 plus_constant (XEXP (operands[1], 0), 4));
10628
10629 /* If the computed address is held in the low order register
10630 then load the high order register first, otherwise always
10631 load the low order register first. */
10632 if (REGNO (operands[0]) == REGNO (base))
10633 {
10634 output_asm_insn ("ldr\t%H0, %2", operands);
10635 output_asm_insn ("ldr\t%0, %1", operands);
10636 }
10637 else
10638 {
10639 output_asm_insn ("ldr\t%0, %1", operands);
10640 output_asm_insn ("ldr\t%H0, %2", operands);
10641 }
10642 }
10643 break;
10644
10645 case LABEL_REF:
10646 /* With no registers to worry about we can just load the value
10647 directly. */
10648 operands[2] = gen_rtx (MEM, SImode,
10649 plus_constant (XEXP (operands[1], 0), 4));
10650
10651 output_asm_insn ("ldr\t%H0, %2", operands);
10652 output_asm_insn ("ldr\t%0, %1", operands);
10653 break;
10654
10655 default:
10656 abort ();
10657 break;
10658 }
10659
10660 return "";
10661 }
10662
10663
10664 const char *
10665 thumb_output_move_mem_multiple (n, operands)
10666 int n;
10667 rtx * operands;
10668 {
10669 rtx tmp;
10670
10671 switch (n)
10672 {
10673 case 2:
10674 if (REGNO (operands[4]) > REGNO (operands[5]))
10675 {
10676 tmp = operands[4];
10677 operands[4] = operands[5];
10678 operands[5] = tmp;
10679 }
10680 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
10681 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
10682 break;
10683
10684 case 3:
10685 if (REGNO (operands[4]) > REGNO (operands[5]))
10686 {
10687 tmp = operands[4];
10688 operands[4] = operands[5];
10689 operands[5] = tmp;
10690 }
10691 if (REGNO (operands[5]) > REGNO (operands[6]))
10692 {
10693 tmp = operands[5];
10694 operands[5] = operands[6];
10695 operands[6] = tmp;
10696 }
10697 if (REGNO (operands[4]) > REGNO (operands[5]))
10698 {
10699 tmp = operands[4];
10700 operands[4] = operands[5];
10701 operands[5] = tmp;
10702 }
10703
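      /* The three exchanges above sort the registers into ascending
         order, as the register lists of ldmia and stmia require;
         e.g. (r5, r3, r4) becomes (r3, r4, r5).  */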
10704 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
10705 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
10706 break;
10707
10708 default:
10709 abort ();
10710 }
10711
10712 return "";
10713 }
10714
10715 /* Routines for generating rtl. */
10716
10717 void
10718 thumb_expand_movstrqi (operands)
10719 rtx * operands;
10720 {
10721 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
10722 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
10723 HOST_WIDE_INT len = INTVAL (operands[2]);
10724 HOST_WIDE_INT offset = 0;
10725
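  /* The block is copied in ever-smaller chunks: 12- and 8-byte pieces
     via load/store multiples, then a word, a halfword and a byte as
     needed.  For example, a 23-byte copy is emitted as 12 + 8 + 2 + 1
     bytes.  */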
10726 while (len >= 12)
10727 {
10728 emit_insn (gen_movmem12b (out, in, out, in));
10729 len -= 12;
10730 }
10731
10732 if (len >= 8)
10733 {
10734 emit_insn (gen_movmem8b (out, in, out, in));
10735 len -= 8;
10736 }
10737
10738 if (len >= 4)
10739 {
10740 rtx reg = gen_reg_rtx (SImode);
10741 emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
10742 emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
10743 len -= 4;
10744 offset += 4;
10745 }
10746
10747 if (len >= 2)
10748 {
10749 rtx reg = gen_reg_rtx (HImode);
10750 emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
10751 plus_constant (in, offset))));
10752 emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
10753 reg));
10754 len -= 2;
10755 offset += 2;
10756 }
10757
10758 if (len)
10759 {
10760 rtx reg = gen_reg_rtx (QImode);
10761 emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
10762 plus_constant (in, offset))));
10763 emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
10764 reg));
10765 }
10766 }
10767
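/* Return non-zero if OP is valid as the second operand of a Thumb
   compare: either a register, or a constant in the range 0-255, which
   fits the 8-bit immediate field of CMP.  */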
10768 int
10769 thumb_cmp_operand (op, mode)
10770 rtx op;
10771 enum machine_mode mode;
10772 {
10773 return ((GET_CODE (op) == CONST_INT
10774 && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
10775 || register_operand (op, mode));
10776 }
10777
10778 static const char *
10779 thumb_condition_code (x, invert)
10780 rtx x;
10781 int invert;
10782 {
10783 static const char * const conds[] =
10784 {
10785 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
10786 "hi", "ls", "ge", "lt", "gt", "le"
10787 };
10788 int val;
10789
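  /* The table pairs each condition with its inverse in adjacent
     slots, so XOR-ing the index with INVERT selects the opposite
     condition; e.g. GEU maps to "cs", or to "cc" when inverted.  */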
10790 switch (GET_CODE (x))
10791 {
10792 case EQ: val = 0; break;
10793 case NE: val = 1; break;
10794 case GEU: val = 2; break;
10795 case LTU: val = 3; break;
10796 case GTU: val = 8; break;
10797 case LEU: val = 9; break;
10798 case GE: val = 10; break;
10799 case LT: val = 11; break;
10800 case GT: val = 12; break;
10801 case LE: val = 13; break;
10802 default:
10803 abort ();
10804 }
10805
10806 return conds[val ^ invert];
10807 }
10808
10809 /* Handle storing a half-word to memory during reload. */
10810
10811 void
10812 thumb_reload_out_hi (operands)
10813 rtx * operands;
10814 {
10815 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
10816 }
10817
10818 /* Handle storing a half-word to memory during reload. */
10819
10820 void
10821 thumb_reload_in_hi (operands)
10822 rtx * operands ATTRIBUTE_UNUSED;
10823 {
10824 abort ();
10825 }
10826
10827 /* Return the length of a function name prefix
10828 that starts with the character 'c'. */
10829
10830 static int
10831 arm_get_strip_length (char c)
10832 {
10833 switch (c)
10834 {
10835 ARM_NAME_ENCODING_LENGTHS
10836 default: return 0;
10837 }
10838 }
10839
10840 /* Return a pointer to a function's name with any
10841 and all prefix encodings stripped from it. */
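/* For example (illustrative): if '*' is listed in
   ARM_NAME_ENCODING_LENGTHS with a length of 1, a NAME of "*foo" is
   returned as "foo".  */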
10842
10843 const char *
10844 arm_strip_name_encoding (const char * name)
10845 {
10846 int skip;
10847
10848 while ((skip = arm_get_strip_length (* name)))
10849 name += skip;
10850
10851 return name;
10852 }
10853
10854 #ifdef AOF_ASSEMBLER
10855 /* Special functions only needed when producing AOF syntax assembler. */
10856
10857 rtx aof_pic_label = NULL_RTX;
10858 struct pic_chain
10859 {
10860 struct pic_chain * next;
10861 const char * symname;
10862 };
10863
10864 static struct pic_chain * aof_pic_chain = NULL;
10865
10866 rtx
10867 aof_pic_entry (x)
10868 rtx x;
10869 {
10870 struct pic_chain ** chainp;
10871 int offset;
10872
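  /* Each distinct symbol is assigned the next 4-byte slot in the
     x$adcons table: the first symbol yields aof_pic_label + 0, the
     second aof_pic_label + 4, and a repeated reference reuses its
     original slot.  */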
10873 if (aof_pic_label == NULL_RTX)
10874 {
10875 /* We mark this here and not in arm_add_gc_roots() to avoid
10876 polluting even more code with ifdefs, and because it never
10877 contains anything useful until we assign to it here. */
10878 ggc_add_rtx_root (&aof_pic_label, 1);
10879 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
10880 }
10881
10882 for (offset = 0, chainp = &aof_pic_chain; *chainp;
10883 offset += 4, chainp = &(*chainp)->next)
10884 if ((*chainp)->symname == XSTR (x, 0))
10885 return plus_constant (aof_pic_label, offset);
10886
10887 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
10888 (*chainp)->next = NULL;
10889 (*chainp)->symname = XSTR (x, 0);
10890 return plus_constant (aof_pic_label, offset);
10891 }
10892
10893 void
10894 aof_dump_pic_table (f)
10895 FILE * f;
10896 {
10897 struct pic_chain * chain;
10898
10899 if (aof_pic_chain == NULL)
10900 return;
10901
10902 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
10903 PIC_OFFSET_TABLE_REGNUM,
10904 PIC_OFFSET_TABLE_REGNUM);
10905 fputs ("|x$adcons|\n", f);
10906
10907 for (chain = aof_pic_chain; chain; chain = chain->next)
10908 {
10909 fputs ("\tDCD\t", f);
10910 assemble_name (f, chain->symname);
10911 fputs ("\n", f);
10912 }
10913 }
10914
10915 int arm_text_section_count = 1;
10916
10917 char *
10918 aof_text_section ()
10919 {
10920 static char buf[100];
10921 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
10922 arm_text_section_count++);
10923 if (flag_pic)
10924 strcat (buf, ", PIC, REENTRANT");
10925 return buf;
10926 }
10927
10928 static int arm_data_section_count = 1;
10929
10930 char *
10931 aof_data_section ()
10932 {
10933 static char buf[100];
10934 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
10935 return buf;
10936 }
10937
10938 /* The AOF assembler is religiously strict about declarations of
10939 imported and exported symbols, so that it is impossible to declare
10940 a function as imported near the beginning of the file, and then to
10941 export it later on. It is, however, possible to delay the decision
10942 until all the functions in the file have been compiled. To get
10943 around this, we maintain a list of the imports and exports, and
10944 delete from it any that are subsequently defined. At the end of
10945 compilation we spit the remainder of the list out before the END
10946 directive. */
10947
10948 struct import
10949 {
10950 struct import * next;
10951 const char * name;
10952 };
10953
10954 static struct import * imports_list = NULL;
10955
10956 void
10957 aof_add_import (name)
10958 const char * name;
10959 {
10960 struct import * new;
10961
10962 for (new = imports_list; new; new = new->next)
10963 if (new->name == name)
10964 return;
10965
10966 new = (struct import *) xmalloc (sizeof (struct import));
10967 new->next = imports_list;
10968 imports_list = new;
10969 new->name = name;
10970 }
10971
10972 void
10973 aof_delete_import (name)
10974 const char * name;
10975 {
10976 struct import ** old;
10977
10978 for (old = &imports_list; *old; old = & (*old)->next)
10979 {
10980 if ((*old)->name == name)
10981 {
10982 *old = (*old)->next;
10983 return;
10984 }
10985 }
10986 }
10987
10988 int arm_main_function = 0;
10989
10990 void
10991 aof_dump_imports (f)
10992 FILE * f;
10993 {
10994 /* The AOF assembler needs this to cause the startup code to be extracted
10995 from the library. Bringing in __main causes the whole thing to work
10996 automagically. */
10997 if (arm_main_function)
10998 {
10999 text_section ();
11000 fputs ("\tIMPORT __main\n", f);
11001 fputs ("\tDCD __main\n", f);
11002 }
11003
11004 /* Now dump the remaining imports. */
11005 while (imports_list)
11006 {
11007 fprintf (f, "\tIMPORT\t");
11008 assemble_name (f, imports_list->name);
11009 fputc ('\n', f);
11010 imports_list = imports_list->next;
11011 }
11012 }
11013 #endif /* AOF_ASSEMBLER */
11014
11015 #ifdef OBJECT_FORMAT_ELF
11016 /* Switch to an arbitrary section NAME with attributes as specified
11017 by FLAGS. ALIGN specifies any known alignment requirements for
11018 the section; 0 if the default should be used.
11019
11020 Differs from the default elf version only in the prefix character
11021 used before the section type. */
11022
11023 static void
11024 arm_elf_asm_named_section (name, flags)
11025 const char *name;
11026 unsigned int flags;
11027 {
11028 char flagchars[8], *f = flagchars;
11029 const char *type;
11030
11031 if (!(flags & SECTION_DEBUG))
11032 *f++ = 'a';
11033 if (flags & SECTION_WRITE)
11034 *f++ = 'w';
11035 if (flags & SECTION_CODE)
11036 *f++ = 'x';
11037 if (flags & SECTION_SMALL)
11038 *f++ = 's';
11039 if (flags & SECTION_MERGE)
11040 *f++ = 'M';
11041 if (flags & SECTION_STRINGS)
11042 *f++ = 'S';
11043 *f = '\0';
11044
11045 if (flags & SECTION_BSS)
11046 type = "nobits";
11047 else
11048 type = "progbits";
11049
11050 if (flags & SECTION_ENTSIZE)
11051 fprintf (asm_out_file, "\t.section\t%s,\"%s\",%%%s,%d\n",
11052 name, flagchars, type, flags & SECTION_ENTSIZE);
11053 else
11054 fprintf (asm_out_file, "\t.section\t%s,\"%s\",%%%s\n",
11055 name, flagchars, type);
11056 }
11057 #endif
11058
11059 #ifndef ARM_PE
11060 /* Symbols in the text segment can be accessed without indirecting via the
11061 constant pool; it may take an extra binary operation, but this is still
11062 faster than indirecting via memory. Don't do this when not optimizing,
11063 since we won't be calculating all of the offsets necessary to do this
11064 simplification. */
11065
11066 static void
11067 arm_encode_section_info (decl, first)
11068 tree decl;
11069 int first;
11070 {
11071 /* This doesn't work with AOF syntax, since the string table may be in
11072 a different AREA. */
11073 #ifndef AOF_ASSEMBLER
11074 if (optimize > 0 && TREE_CONSTANT (decl)
11075 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
11076 {
11077 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
11078 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
11079 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
11080 }
11081 #endif
11082
11083 /* If we are referencing a function that is weak then encode a long call
11084 flag in the function name; otherwise, if the function is static or
11085 known to be defined in this file, then encode a short call flag. */
11086 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
11087 {
11088 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
11089 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
11090 else if (! TREE_PUBLIC (decl))
11091 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
11092 }
11093 }
11094 #endif /* !ARM_PE */