1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002
3 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7
8 This file is part of GNU CC.
9
10 GNU CC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 GNU CC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "obstack.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "reload.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "toplev.h"
43 #include "recog.h"
44 #include "ggc.h"
45 #include "except.h"
46 #include "c-pragma.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51
52 /* Forward definitions of types. */
53 typedef struct minipool_node Mnode;
54 typedef struct minipool_fixup Mfix;
55
56 /* In order to improve the layout of the prototypes below
57 some short type abbreviations are defined here. */
58 #define Hint HOST_WIDE_INT
59 #define Mmode enum machine_mode
60 #define Ulong unsigned long
61 #define Ccstar const char *
62
63 const struct attribute_spec arm_attribute_table[];
64
65 /* Forward function declarations. */
66 static void arm_add_gc_roots PARAMS ((void));
67 static int arm_gen_constant PARAMS ((enum rtx_code, Mmode, Hint, rtx, rtx, int, int));
68 static Ulong bit_count PARAMS ((signed int));
69 static int const_ok_for_op PARAMS ((Hint, enum rtx_code));
70 static int eliminate_lr2ip PARAMS ((rtx *));
71 static rtx emit_multi_reg_push PARAMS ((int));
72 static rtx emit_sfm PARAMS ((int, int));
73 #ifndef AOF_ASSEMBLER
74 static bool arm_assemble_integer PARAMS ((rtx, unsigned int, int));
75 #endif
76 static Ccstar fp_const_from_val PARAMS ((REAL_VALUE_TYPE *));
77 static arm_cc get_arm_condition_code PARAMS ((rtx));
78 static void init_fpa_table PARAMS ((void));
79 static Hint int_log2 PARAMS ((Hint));
80 static rtx is_jump_table PARAMS ((rtx));
81 static Ccstar output_multi_immediate PARAMS ((rtx *, Ccstar, Ccstar, int, Hint));
82 static void print_multi_reg PARAMS ((FILE *, Ccstar, int, int));
83 static Mmode select_dominance_cc_mode PARAMS ((rtx, rtx, Hint));
84 static Ccstar shift_op PARAMS ((rtx, Hint *));
85 static struct machine_function * arm_init_machine_status PARAMS ((void));
86 static int number_of_first_bit_set PARAMS ((int));
87 static void replace_symbols_in_block PARAMS ((tree, rtx, rtx));
88 static void thumb_exit PARAMS ((FILE *, int, rtx));
89 static void thumb_pushpop PARAMS ((FILE *, int, int));
90 static Ccstar thumb_condition_code PARAMS ((rtx, int));
92 static Hint get_jump_table_size PARAMS ((rtx));
93 static Mnode * move_minipool_fix_forward_ref PARAMS ((Mnode *, Mnode *, Hint));
94 static Mnode * add_minipool_forward_ref PARAMS ((Mfix *));
95 static Mnode * move_minipool_fix_backward_ref PARAMS ((Mnode *, Mnode *, Hint));
96 static Mnode * add_minipool_backward_ref PARAMS ((Mfix *));
97 static void assign_minipool_offsets PARAMS ((Mfix *));
98 static void arm_print_value PARAMS ((FILE *, rtx));
99 static void dump_minipool PARAMS ((rtx));
100 static int arm_barrier_cost PARAMS ((rtx));
101 static Mfix * create_fix_barrier PARAMS ((Mfix *, Hint));
102 static void push_minipool_barrier PARAMS ((rtx, Hint));
103 static void push_minipool_fix PARAMS ((rtx, Hint, rtx *, Mmode, rtx));
104 static void note_invalid_constants PARAMS ((rtx, Hint));
105 static int current_file_function_operand PARAMS ((rtx));
106 static Ulong arm_compute_save_reg0_reg12_mask PARAMS ((void));
107 static Ulong arm_compute_save_reg_mask PARAMS ((void));
108 static Ulong arm_isr_value PARAMS ((tree));
109 static Ulong arm_compute_func_type PARAMS ((void));
110 static tree arm_handle_fndecl_attribute PARAMS ((tree *, tree, tree, int, bool *));
111 static tree arm_handle_isr_attribute PARAMS ((tree *, tree, tree, int, bool *));
112 static void arm_output_function_epilogue PARAMS ((FILE *, Hint));
113 static void arm_output_function_prologue PARAMS ((FILE *, Hint));
114 static void thumb_output_function_prologue PARAMS ((FILE *, Hint));
115 static int arm_comp_type_attributes PARAMS ((tree, tree));
116 static void arm_set_default_type_attributes PARAMS ((tree));
117 static int arm_adjust_cost PARAMS ((rtx, rtx, rtx, int));
118 #ifdef OBJECT_FORMAT_ELF
119 static void arm_elf_asm_named_section PARAMS ((const char *, unsigned int));
120 #endif
121 #ifndef ARM_PE
122 static void arm_encode_section_info PARAMS ((tree, int));
123 #endif
124
125 #undef Hint
126 #undef Mmode
127 #undef Ulong
128 #undef Ccstar
129 \f
130 /* Initialize the GCC target structure. */
131 #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
132 #undef TARGET_MERGE_DECL_ATTRIBUTES
133 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
134 #endif
135
136 #undef TARGET_ATTRIBUTE_TABLE
137 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
138
139 #ifdef AOF_ASSEMBLER
140 #undef TARGET_ASM_BYTE_OP
141 #define TARGET_ASM_BYTE_OP "\tDCB\t"
142 #undef TARGET_ASM_ALIGNED_HI_OP
143 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
144 #undef TARGET_ASM_ALIGNED_SI_OP
145 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
146 #else
147 #undef TARGET_ASM_ALIGNED_SI_OP
148 #define TARGET_ASM_ALIGNED_SI_OP NULL
149 #undef TARGET_ASM_INTEGER
150 #define TARGET_ASM_INTEGER arm_assemble_integer
151 #endif
152
153 #undef TARGET_ASM_FUNCTION_PROLOGUE
154 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
155
156 #undef TARGET_ASM_FUNCTION_EPILOGUE
157 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
158
159 #undef TARGET_COMP_TYPE_ATTRIBUTES
160 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
161
162 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
163 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
164
165 #undef TARGET_INIT_BUILTINS
166 #define TARGET_INIT_BUILTINS arm_init_builtins
167
168 #undef TARGET_EXPAND_BUILTIN
169 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
170
171 #undef TARGET_SCHED_ADJUST_COST
172 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
173
174 #undef TARGET_ENCODE_SECTION_INFO
175 #ifdef ARM_PE
176 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
177 #else
178 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
179 #endif
180
181 #undef TARGET_STRIP_NAME_ENCODING
182 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
183
184 struct gcc_target targetm = TARGET_INITIALIZER;
185 \f
186 /* Obstack for minipool constant handling. */
187 static struct obstack minipool_obstack;
188 static char * minipool_startobj;
189
190 #define obstack_chunk_alloc xmalloc
191 #define obstack_chunk_free free
192
193 /* The maximum number of insns skipped which
194 will be conditionalised if possible. */
195 static int max_insns_skipped = 5;
196
197 extern FILE * asm_out_file;
198
199 /* True if we are currently building a constant table. */
200 int making_const_table;
201
202 /* Define the information needed to generate branch insns. This is
203 stored from the compare operation. */
204 rtx arm_compare_op0, arm_compare_op1;
205
206 /* What type of floating point are we tuning for? */
207 enum floating_point_type arm_fpu;
208
209 /* What type of floating point instructions are available? */
210 enum floating_point_type arm_fpu_arch;
211
212 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode. */
213 enum prog_mode_type arm_prgmode;
214
215 /* Set by the -mfp=... option. */
216 const char * target_fp_name = NULL;
217
218 /* Used to parse -mstructure_size_boundary command line option. */
219 const char * structure_size_string = NULL;
220 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
221
222 /* Bit values used to identify processor capabilities. */
223 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
224 #define FL_FAST_MULT (1 << 1) /* Fast multiply */
225 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
226 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
227 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
228 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
229 #define FL_THUMB (1 << 6) /* Thumb aware */
230 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
231 #define FL_STRONG (1 << 8) /* StrongARM */
232 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
233 #define FL_XSCALE (1 << 10) /* XScale */
234
235 /* The bits in this mask specify which
236 instructions we are allowed to generate. */
237 static int insn_flags = 0;
238
239 /* The bits in this mask specify which instruction scheduling options should
240 be used. Note - there is an overlap with the FL_FAST_MULT flag. For some
241 hardware we want to be able to generate the multiply instructions, but to
242 tune as if they were not present in the architecture. */
243 static int tune_flags = 0;
244
245 /* The following are used in the arm.md file as equivalents to bits
246 in the above two flag variables. */
247
248 /* Nonzero if this is an "M" variant of the processor. */
249 int arm_fast_multiply = 0;
250
251 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
252 int arm_arch4 = 0;
253
254 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
255 int arm_arch5 = 0;
256
257 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
258 int arm_arch5e = 0;
259
260 /* Nonzero if this chip can benefit from load scheduling. */
261 int arm_ld_sched = 0;
262
263 /* Nonzero if this chip is a StrongARM. */
264 int arm_is_strong = 0;
265
266 /* Nonzero if this chip is an XScale. */
267 int arm_is_xscale = 0;
268
269 /* Nonzero if this chip is an ARM6 or an ARM7. */
270 int arm_is_6_or_7 = 0;
271
272 /* Nonzero if generating Thumb instructions. */
273 int thumb_code = 0;
274
275 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
276 must report the mode of the memory reference from PRINT_OPERAND to
277 PRINT_OPERAND_ADDRESS. */
278 enum machine_mode output_memory_reference_mode;
279
280 /* The register number to be used for the PIC offset register. */
281 const char * arm_pic_register_string = NULL;
282 int arm_pic_register = INVALID_REGNUM;
283
284 /* Set to 1 when a return insn is output; this means that the epilogue
285 is not needed. */
286 int return_used_this_function;
287
288 /* Set to 1 after arm_reorg has started. Reset to zero at the start of
289 the next function. */
290 static int after_arm_reorg = 0;
291
292 /* The maximum number of insns to be used when loading a constant. */
293 static int arm_constant_limit = 3;
294
295 /* For an explanation of these variables, see final_prescan_insn below. */
296 int arm_ccfsm_state;
297 enum arm_cond_code arm_current_cc;
298 rtx arm_target_insn;
299 int arm_target_label;
300
301 /* The condition codes of the ARM, and the inverse function. */
302 static const char * const arm_condition_codes[] =
303 {
304 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
305 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
306 };
307
308 #define streq(string1, string2) (strcmp (string1, string2) == 0)
309 \f
310 /* Initialization code. */
311
312 struct processors
313 {
314 const char *const name;
315 const unsigned int flags;
316 };
317
318 /* Not all of these give usefully different compilation alternatives,
319 but there is no simple way of generalizing them. */
320 static const struct processors all_cores[] =
321 {
322 /* ARM Cores */
323
324 {"arm2", FL_CO_PROC | FL_MODE26 },
325 {"arm250", FL_CO_PROC | FL_MODE26 },
326 {"arm3", FL_CO_PROC | FL_MODE26 },
327 {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
328 {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
329 {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
330 {"arm610", FL_MODE26 | FL_MODE32 },
331 {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
332 {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
333 /* arm7m doesn't exist on its own, only in combination with D (and I),
334 but those don't alter the code, so arm7m is sometimes used. */
335 {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
336 {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
337 {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
338 {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
339 {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
340 {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
341 {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
342 {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
343 {"arm710", FL_MODE26 | FL_MODE32 },
344 {"arm710t", FL_MODE26 | FL_MODE32 | FL_THUMB },
345 {"arm720", FL_MODE26 | FL_MODE32 },
346 {"arm720t", FL_MODE26 | FL_MODE32 | FL_THUMB },
347 {"arm740t", FL_MODE26 | FL_MODE32 | FL_THUMB },
348 {"arm710c", FL_MODE26 | FL_MODE32 },
349 {"arm7100", FL_MODE26 | FL_MODE32 },
350 {"arm7500", FL_MODE26 | FL_MODE32 },
351 /* Doesn't have an external co-proc, but does have embedded fpu. */
352 {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
353 {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
354 {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
355 {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
356 {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
357 {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
358 {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
359 {"arm940t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
360 {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
361 {"arm9e", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
362 {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
363 {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
364 {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
365 {"strongarm1110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
366 {"arm10tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
367 {"arm1020t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
368 {"xscale", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE },
369
370 {NULL, 0}
371 };
372
373 static const struct processors all_architectures[] =
374 {
375 /* ARM Architectures */
376
377 { "armv2", FL_CO_PROC | FL_MODE26 },
378 { "armv2a", FL_CO_PROC | FL_MODE26 },
379 { "armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
380 { "armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
381 { "armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
382 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
383 implementations that support it, so we will leave it out for now. */
384 { "armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
385 { "armv5", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
386 { "armv5t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
387 { "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
388 { NULL, 0 }
389 };
390
391 /* This is a magic structure. The 'string' field is magically filled in
392 with a pointer to the value specified by the user on the command line
393 assuming that the user has specified such a value. */
394
395 struct arm_cpu_select arm_select[] =
396 {
397 /* string name processors */
398 { NULL, "-mcpu=", all_cores },
399 { NULL, "-march=", all_architectures },
400 { NULL, "-mtune=", all_cores }
401 };
402
403 /* Return the number of bits set in `value'. */
404 static unsigned long
405 bit_count (value)
406 signed int value;
407 {
408 unsigned long count = 0;
409
410 while (value)
411 {
412 value &= ~(value & -value);
413 ++count;
414 }
415
416 return count;
417 }
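/* A worked example (illustrative value): for value == 0x15
   (binary 10101), the loop above clears the least significant set
   bit on each iteration -- 0x15 -> 0x14 -> 0x10 -> 0 -- and so
   returns 3.  */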
418
419 /* Fix up any incompatible options that the user has specified.
420 This has now turned into a maze. */
421 void
422 arm_override_options ()
423 {
424 unsigned i;
425
426 /* Set up the flags based on the cpu/architecture selected by the user. */
427 for (i = ARRAY_SIZE (arm_select); i--;)
428 {
429 struct arm_cpu_select * ptr = arm_select + i;
430
431 if (ptr->string != NULL && ptr->string[0] != '\0')
432 {
433 const struct processors * sel;
434
435 for (sel = ptr->processors; sel->name != NULL; sel++)
436 if (streq (ptr->string, sel->name))
437 {
438 if (i == 2)
439 tune_flags = sel->flags;
440 else
441 {
442 /* If we have been given an architecture and a processor
443 make sure that they are compatible. We only generate
444 a warning though, and we prefer the CPU over the
445 architecture. */
446 if (insn_flags != 0 && (insn_flags ^ sel->flags))
447 warning ("switch -mcpu=%s conflicts with -march= switch",
448 ptr->string);
449
450 insn_flags = sel->flags;
451 }
452
453 break;
454 }
455
456 if (sel->name == NULL)
457 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
458 }
459 }
460
461 /* If the user did not specify a processor, choose one for them. */
462 if (insn_flags == 0)
463 {
464 const struct processors * sel;
465 unsigned int sought;
466 static const struct cpu_default
467 {
468 const int cpu;
469 const char *const name;
470 }
471 cpu_defaults[] =
472 {
473 { TARGET_CPU_arm2, "arm2" },
474 { TARGET_CPU_arm6, "arm6" },
475 { TARGET_CPU_arm610, "arm610" },
476 { TARGET_CPU_arm710, "arm710" },
477 { TARGET_CPU_arm7m, "arm7m" },
478 { TARGET_CPU_arm7500fe, "arm7500fe" },
479 { TARGET_CPU_arm7tdmi, "arm7tdmi" },
480 { TARGET_CPU_arm8, "arm8" },
481 { TARGET_CPU_arm810, "arm810" },
482 { TARGET_CPU_arm9, "arm9" },
483 { TARGET_CPU_strongarm, "strongarm" },
484 { TARGET_CPU_xscale, "xscale" },
485 { TARGET_CPU_generic, "arm" },
486 { 0, 0 }
487 };
488 const struct cpu_default * def;
489
490 /* Find the default. */
491 for (def = cpu_defaults; def->name; def++)
492 if (def->cpu == TARGET_CPU_DEFAULT)
493 break;
494
495 /* Make sure we found the default CPU. */
496 if (def->name == NULL)
497 abort ();
498
499 /* Find the default CPU's flags. */
500 for (sel = all_cores; sel->name != NULL; sel++)
501 if (streq (def->name, sel->name))
502 break;
503
504 if (sel->name == NULL)
505 abort ();
506
507 insn_flags = sel->flags;
508
509 /* Now check to see if the user has specified some command line
510 switches that require certain abilities from the cpu. */
511 sought = 0;
512
513 if (TARGET_INTERWORK || TARGET_THUMB)
514 {
515 sought |= (FL_THUMB | FL_MODE32);
516
517 /* Force apcs-32 to be used for interworking. */
518 target_flags |= ARM_FLAG_APCS_32;
519
520 /* There are no ARM processors that support both APCS-26 and
521 interworking. Therefore we force FL_MODE26 to be removed
522 from insn_flags here (if it was set), so that the search
523 below will always be able to find a compatible processor. */
524 insn_flags &= ~FL_MODE26;
525 }
526 else if (!TARGET_APCS_32)
527 sought |= FL_MODE26;
528
529 if (sought != 0 && ((sought & insn_flags) != sought))
530 {
531 /* Try to locate a CPU type that supports all of the abilities
532 of the default CPU, plus the extra abilities requested by
533 the user. */
534 for (sel = all_cores; sel->name != NULL; sel++)
535 if ((sel->flags & (sought | insn_flags)) == (sought | insn_flags))
536 break;
537
538 if (sel->name == NULL)
539 {
540 unsigned int current_bit_count = 0;
541 const struct processors * best_fit = NULL;
542
543 /* Ideally we would like to issue an error message here
544 saying that it was not possible to find a CPU compatible
545 with the default CPU, but which also supports the command
546 line options specified by the programmer, and so they
547 ought to use the -mcpu=<name> command line option to
548 override the default CPU type.
549
550 Unfortunately this does not work with multilibing. We
551 need to be able to support multilibs for -mapcs-26 and for
552 -mthumb-interwork and there is no CPU that can support both
553 options. Instead if we cannot find a cpu that has both the
554 characteristics of the default cpu and the given command line
555 options we scan the array again looking for a best match. */
556 for (sel = all_cores; sel->name != NULL; sel++)
557 if ((sel->flags & sought) == sought)
558 {
559 unsigned int count;
560
561 count = bit_count (sel->flags & insn_flags);
562
563 if (count >= current_bit_count)
564 {
565 best_fit = sel;
566 current_bit_count = count;
567 }
568 }
569
570 if (best_fit == NULL)
571 abort ();
572 else
573 sel = best_fit;
574 }
575
576 insn_flags = sel->flags;
577 }
578 }
579
580 /* If tuning has not been specified, tune for whichever processor or
581 architecture has been selected. */
582 if (tune_flags == 0)
583 tune_flags = insn_flags;
584
585 /* Make sure that the processor choice does not conflict with any of the
586 other command line choices. */
587 if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
588 {
589 /* If APCS-32 was not the default then it must have been set by the
590 user, so issue a warning message. If the user has specified
591 "-mapcs-32 -mcpu=arm2" then we loose here. */
592 if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
593 warning ("target CPU does not support APCS-32" );
594 target_flags &= ~ARM_FLAG_APCS_32;
595 }
596 else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
597 {
598 warning ("target CPU does not support APCS-26" );
599 target_flags |= ARM_FLAG_APCS_32;
600 }
601
602 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
603 {
604 warning ("target CPU does not support interworking" );
605 target_flags &= ~ARM_FLAG_INTERWORK;
606 }
607
608 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
609 {
610 warning ("target CPU does not support THUMB instructions");
611 target_flags &= ~ARM_FLAG_THUMB;
612 }
613
614 if (TARGET_APCS_FRAME && TARGET_THUMB)
615 {
616 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
617 target_flags &= ~ARM_FLAG_APCS_FRAME;
618 }
619
620 /* TARGET_BACKTRACE calls leaf_function_p, which would crash if called
621 from here, where no function is currently being compiled. */
622 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
623 && TARGET_ARM)
624 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
625
626 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
627 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
628
629 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
630 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
631
632 /* If interworking is enabled then APCS-32 must be selected as well. */
633 if (TARGET_INTERWORK)
634 {
635 if (!TARGET_APCS_32)
636 warning ("interworking forces APCS-32 to be used" );
637 target_flags |= ARM_FLAG_APCS_32;
638 }
639
640 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
641 {
642 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
643 target_flags |= ARM_FLAG_APCS_FRAME;
644 }
645
646 if (TARGET_POKE_FUNCTION_NAME)
647 target_flags |= ARM_FLAG_APCS_FRAME;
648
649 if (TARGET_APCS_REENT && flag_pic)
650 error ("-fpic and -mapcs-reent are incompatible");
651
652 if (TARGET_APCS_REENT)
653 warning ("APCS reentrant code not supported. Ignored");
654
655 /* If this target is normally configured to use APCS frames, warn if they
656 are turned off and debugging is turned on. */
657 if (TARGET_ARM
658 && write_symbols != NO_DEBUG
659 && !TARGET_APCS_FRAME
660 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
661 warning ("-g with -mno-apcs-frame may not give sensible debugging");
662
663 /* If stack checking is disabled, we can use r10 as the PIC register,
664 which keeps r9 available. */
665 if (flag_pic)
666 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
667
668 if (TARGET_APCS_FLOAT)
669 warning ("passing floating point arguments in fp regs not yet supported");
670
671 /* Initialise boolean versions of the flags, for use in the arm.md file. */
672 arm_fast_multiply = (insn_flags & FL_FAST_MULT) != 0;
673 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
674 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
675 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
676 arm_is_xscale = (insn_flags & FL_XSCALE) != 0;
677
678 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
679 arm_is_strong = (tune_flags & FL_STRONG) != 0;
680 thumb_code = (TARGET_ARM == 0);
681 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
682 && !(tune_flags & FL_ARCH4))) != 0;
683
684 /* Default value for floating point code... if no co-processor
685 bus, then schedule for emulated floating point. Otherwise,
686 assume the user has an FPA.
687 Note: this does not prevent use of floating point instructions,
688 -msoft-float does that. */
689 arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;
690
691 if (target_fp_name)
692 {
693 if (streq (target_fp_name, "2"))
694 arm_fpu_arch = FP_SOFT2;
695 else if (streq (target_fp_name, "3"))
696 arm_fpu_arch = FP_SOFT3;
697 else
698 error ("invalid floating point emulation option: -mfpe-%s",
699 target_fp_name);
700 }
701 else
702 arm_fpu_arch = FP_DEFAULT;
703
704 if (TARGET_FPE && arm_fpu != FP_HARD)
705 arm_fpu = FP_SOFT2;
706
707 /* For arm2/3 there is no need to do any scheduling if there is only
708 a floating point emulator, or we are doing software floating-point. */
709 if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD)
710 && (tune_flags & FL_MODE32) == 0)
711 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
712
713 arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
714
715 if (structure_size_string != NULL)
716 {
717 int size = strtol (structure_size_string, NULL, 0);
718
719 if (size == 8 || size == 32)
720 arm_structure_size_boundary = size;
721 else
722 warning ("structure size boundary can only be set to 8 or 32");
723 }
724
725 if (arm_pic_register_string != NULL)
726 {
727 int pic_register = decode_reg_name (arm_pic_register_string);
728
729 if (!flag_pic)
730 warning ("-mpic-register= is useless without -fpic");
731
732 /* Prevent the user from choosing an obviously stupid PIC register. */
733 else if (pic_register < 0 || call_used_regs[pic_register]
734 || pic_register == HARD_FRAME_POINTER_REGNUM
735 || pic_register == STACK_POINTER_REGNUM
736 || pic_register >= PC_REGNUM)
737 error ("unable to use '%s' for PIC register", arm_pic_register_string);
738 else
739 arm_pic_register = pic_register;
740 }
741
742 if (TARGET_THUMB && flag_schedule_insns)
743 {
744 /* Don't warn since it's on by default in -O2. */
745 flag_schedule_insns = 0;
746 }
747
748 /* If optimizing for space, don't synthesize constants.
749 For processors with load scheduling, it never costs more than 2 cycles
750 to load a constant, and the load scheduler may well reduce that to 1. */
751 if (optimize_size || (tune_flags & FL_LDSCHED))
752 arm_constant_limit = 1;
753
754 if (arm_is_xscale)
755 arm_constant_limit = 2;
756
757 /* If optimizing for size, bump the number of instructions that we
758 are prepared to conditionally execute (even on a StrongARM).
759 Otherwise for the StrongARM, which has early execution of branches,
760 a sequence that is worth skipping is shorter. */
761 if (optimize_size)
762 max_insns_skipped = 6;
763 else if (arm_is_strong)
764 max_insns_skipped = 3;
765
766 /* Register global variables with the garbage collector. */
767 arm_add_gc_roots ();
768 }
769
770 static void
771 arm_add_gc_roots ()
772 {
773 gcc_obstack_init(&minipool_obstack);
774 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
775 }
776 \f
777 /* A table of known ARM exception types.
778 For use with the interrupt function attribute. */
779
780 typedef struct
781 {
782 const char *const arg;
783 const unsigned long return_value;
784 }
785 isr_attribute_arg;
786
787 static const isr_attribute_arg isr_attribute_args [] =
788 {
789 { "IRQ", ARM_FT_ISR },
790 { "irq", ARM_FT_ISR },
791 { "FIQ", ARM_FT_FIQ },
792 { "fiq", ARM_FT_FIQ },
793 { "ABORT", ARM_FT_ISR },
794 { "abort", ARM_FT_ISR },
795 { "ABORT", ARM_FT_ISR },
796 { "abort", ARM_FT_ISR },
797 { "UNDEF", ARM_FT_EXCEPTION },
798 { "undef", ARM_FT_EXCEPTION },
799 { "SWI", ARM_FT_EXCEPTION },
800 { "swi", ARM_FT_EXCEPTION },
801 { NULL, ARM_FT_NORMAL }
802 };
803
804 /* Returns the (interrupt) function type of the current
805 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
806
807 static unsigned long
808 arm_isr_value (argument)
809 tree argument;
810 {
811 const isr_attribute_arg * ptr;
812 const char * arg;
813
814 /* No argument - default to IRQ. */
815 if (argument == NULL_TREE)
816 return ARM_FT_ISR;
817
818 /* Get the value of the argument. */
819 if (TREE_VALUE (argument) == NULL_TREE
820 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
821 return ARM_FT_UNKNOWN;
822
823 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
824
825 /* Check it against the list of known arguments. */
826 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr ++)
827 if (streq (arg, ptr->arg))
828 return ptr->return_value;
829
830 /* An unrecognised interrupt type. */
831 return ARM_FT_UNKNOWN;
832 }
833
834 /* Computes the type of the current function. */
835
836 static unsigned long
837 arm_compute_func_type ()
838 {
839 unsigned long type = ARM_FT_UNKNOWN;
840 tree a;
841 tree attr;
842
843 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
844 abort ();
845
846 /* Decide if the current function is volatile. Such functions
847 never return, and many memory cycles can be saved by not storing
848 register values that will never be needed again. This optimization
849 was added to speed up context switching in a kernel application. */
850 if (optimize > 0
851 && current_function_nothrow
852 && TREE_THIS_VOLATILE (current_function_decl))
853 type |= ARM_FT_VOLATILE;
854
855 if (current_function_needs_context)
856 type |= ARM_FT_NESTED;
857
858 attr = DECL_ATTRIBUTES (current_function_decl);
859
860 a = lookup_attribute ("naked", attr);
861 if (a != NULL_TREE)
862 type |= ARM_FT_NAKED;
863
864 if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
865 type |= ARM_FT_EXCEPTION_HANDLER;
866 else
867 {
868 a = lookup_attribute ("isr", attr);
869 if (a == NULL_TREE)
870 a = lookup_attribute ("interrupt", attr);
871
872 if (a == NULL_TREE)
873 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
874 else
875 type |= arm_isr_value (TREE_VALUE (a));
876 }
877
878 return type;
879 }
880
881 /* Returns the type of the current function. */
882
883 unsigned long
884 arm_current_func_type ()
885 {
886 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
887 cfun->machine->func_type = arm_compute_func_type ();
888
889 return cfun->machine->func_type;
890 }
891 \f
892 /* Return 1 if it is possible to return using a single instruction. */
893
894 int
895 use_return_insn (iscond)
896 int iscond;
897 {
898 int regno;
899 unsigned int func_type;
900
901 /* Never use a return instruction before reload has run. */
902 if (!reload_completed)
903 return 0;
904
905 func_type = arm_current_func_type ();
906
907 /* Naked functions and volatile functions need special
908 consideration. */
909 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
910 return 0;
911
912 /* As do variadic functions. */
913 if (current_function_pretend_args_size
914 || cfun->machine->uses_anonymous_args
915 /* Or if the function calls __builtin_eh_return (). */
916 || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
917 /* Or if there is no frame pointer and there is a stack adjustment. */
918 || ((get_frame_size () + current_function_outgoing_args_size != 0)
919 && !frame_pointer_needed))
920 return 0;
921
922 /* Can't be done if interworking with Thumb, and any registers have been
923 stacked. Similarly, on StrongARM, conditional returns are expensive
924 if they aren't taken and registers have been stacked. */
925 if (iscond && arm_is_strong && frame_pointer_needed)
926 return 0;
927
928 if ((iscond && arm_is_strong)
929 || TARGET_INTERWORK)
930 {
931 for (regno = 0; regno <= LAST_ARM_REGNUM; regno++)
932 if (regs_ever_live[regno] && !call_used_regs[regno])
933 return 0;
934
935 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
936 return 0;
937 }
938
939 /* Can't be done if any of the FPU regs are pushed,
940 since this also requires an insn. */
941 if (TARGET_HARD_FLOAT)
942 for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
943 if (regs_ever_live[regno] && !call_used_regs[regno])
944 return 0;
945
946 return 1;
947 }
948
949 /* Return TRUE if int I is a valid immediate ARM constant. */
950
951 int
952 const_ok_for_arm (i)
953 HOST_WIDE_INT i;
954 {
955 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
956
957 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
958 be all zero, or all one. */
959 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
960 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
961 != ((~(unsigned HOST_WIDE_INT) 0)
962 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
963 return FALSE;
964
965 /* Fast return for 0 and powers of 2 */
966 if ((i & (i - 1)) == 0)
967 return TRUE;
968
969 do
970 {
971 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
972 return TRUE;
973 mask =
974 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
975 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
976 }
977 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
978
979 return FALSE;
980 }
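/* The rotating `mask' test above encodes the ARM rule that a
   data-processing immediate is an 8-bit value rotated right by an
   even amount.  Some illustrative values (not taken from the
   sources):

      0x000000ff   valid   (0xff with no rotation)
      0xff000000   valid   (0xff rotated right by 8)
      0xf000000f   valid   (0xff rotated right by 4)
      0x00000101   invalid (the set bits span 9 bits, which cannot
                            fit in one rotated 8-bit field)  */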
981
982 /* Return true if I is a valid constant for the operation CODE. */
983 static int
984 const_ok_for_op (i, code)
985 HOST_WIDE_INT i;
986 enum rtx_code code;
987 {
988 if (const_ok_for_arm (i))
989 return 1;
990
991 switch (code)
992 {
993 case PLUS:
994 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
995
996 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
997 case XOR:
998 case IOR:
999 return 0;
1000
1001 case AND:
1002 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1003
1004 default:
1005 abort ();
1006 }
1007 }
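/* For instance (illustrative values): with CODE == PLUS and I == -1,
   const_ok_for_arm (-1) fails, since 0xffffffff is not a rotated
   8-bit value, but the negated constant 1 is valid, so the addition
   can be emitted as a subtract of 1.  Similarly an AND with
   0xffffff00 can be emitted as a BIC with the inverted constant
   0xff.  */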
1008
1009 /* Emit a sequence of insns to handle a large constant.
1010 CODE is the code of the operation required, it can be any of SET, PLUS,
1011 IOR, AND, XOR, MINUS;
1012 MODE is the mode in which the operation is being performed;
1013 VAL is the integer to operate on;
1014 SOURCE is the other operand (a register, or a null-pointer for SET);
1015 SUBTARGETS means it is safe to create scratch registers if that will
1016 either produce a simpler sequence, or we will want to cse the values.
1017 Return value is the number of insns emitted. */
1018
1019 int
1020 arm_split_constant (code, mode, val, target, source, subtargets)
1021 enum rtx_code code;
1022 enum machine_mode mode;
1023 HOST_WIDE_INT val;
1024 rtx target;
1025 rtx source;
1026 int subtargets;
1027 {
1028 if (subtargets || code == SET
1029 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1030 && REGNO (target) != REGNO (source)))
1031 {
1032 /* After arm_reorg has been called, we can't fix up expensive
1033 constants by pushing them into memory so we must synthesise
1034 them in-line, regardless of the cost. This is only likely to
1035 be more costly on chips that have load delay slots and we are
1036 compiling without running the scheduler (so no splitting
1037 occurred before the final instruction emission).
1038
1039 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1040 */
1041 if (!after_arm_reorg
1042 && (arm_gen_constant (code, mode, val, target, source, 1, 0)
1043 > arm_constant_limit + (code != SET)))
1044 {
1045 if (code == SET)
1046 {
1047 /* Currently SET is the only monadic value for CODE, all
1048 the rest are dyadic. */
1049 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1050 return 1;
1051 }
1052 else
1053 {
1054 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1055
1056 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1057 /* For MINUS, the value is subtracted from, since we never
1058 have subtraction of a constant. */
1059 if (code == MINUS)
1060 emit_insn (gen_rtx_SET (VOIDmode, target,
1061 gen_rtx_MINUS (mode, temp, source)));
1062 else
1063 emit_insn (gen_rtx_SET (VOIDmode, target,
1064 gen_rtx (code, mode, source, temp)));
1065 return 2;
1066 }
1067 }
1068 }
1069
1070 return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
1071 }
1072
1073 static int
1074 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1075 {
1076 HOST_WIDE_INT temp1;
1077 int num_insns = 0;
1078 do
1079 {
1080 int end;
1081
1082 if (i <= 0)
1083 i += 32;
1084 if (remainder & (3 << (i - 2)))
1085 {
1086 end = i - 8;
1087 if (end < 0)
1088 end += 32;
1089 temp1 = remainder & ((0x0ff << end)
1090 | ((i < end) ? (0xff >> (32 - end)) : 0));
1091 remainder &= ~temp1;
1092 num_insns++;
1093 i -= 6;
1094 }
1095 i -= 2;
1096 } while (remainder);
1097 return num_insns;
1098 }
1099
1100 /* As above, but extra parameter GENERATE which, if clear, suppresses
1101 RTL generation. */
1102
1103 static int
1104 arm_gen_constant (code, mode, val, target, source, subtargets, generate)
1105 enum rtx_code code;
1106 enum machine_mode mode;
1107 HOST_WIDE_INT val;
1108 rtx target;
1109 rtx source;
1110 int subtargets;
1111 int generate;
1112 {
1113 int can_invert = 0;
1114 int can_negate = 0;
1115 int can_negate_initial = 0;
1116 int can_shift = 0;
1117 int i;
1118 int num_bits_set = 0;
1119 int set_sign_bit_copies = 0;
1120 int clear_sign_bit_copies = 0;
1121 int clear_zero_bit_copies = 0;
1122 int set_zero_bit_copies = 0;
1123 int insns = 0;
1124 unsigned HOST_WIDE_INT temp1, temp2;
1125 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1126
1127 /* Find out which operations are safe for a given CODE. Also do a quick
1128 check for degenerate cases; these can occur when DImode operations
1129 are split. */
1130 switch (code)
1131 {
1132 case SET:
1133 can_invert = 1;
1134 can_shift = 1;
1135 can_negate = 1;
1136 break;
1137
1138 case PLUS:
1139 can_negate = 1;
1140 can_negate_initial = 1;
1141 break;
1142
1143 case IOR:
1144 if (remainder == 0xffffffff)
1145 {
1146 if (generate)
1147 emit_insn (gen_rtx_SET (VOIDmode, target,
1148 GEN_INT (ARM_SIGN_EXTEND (val))));
1149 return 1;
1150 }
1151 if (remainder == 0)
1152 {
1153 if (reload_completed && rtx_equal_p (target, source))
1154 return 0;
1155 if (generate)
1156 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1157 return 1;
1158 }
1159 break;
1160
1161 case AND:
1162 if (remainder == 0)
1163 {
1164 if (generate)
1165 emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
1166 return 1;
1167 }
1168 if (remainder == 0xffffffff)
1169 {
1170 if (reload_completed && rtx_equal_p (target, source))
1171 return 0;
1172 if (generate)
1173 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1174 return 1;
1175 }
1176 can_invert = 1;
1177 break;
1178
1179 case XOR:
1180 if (remainder == 0)
1181 {
1182 if (reload_completed && rtx_equal_p (target, source))
1183 return 0;
1184 if (generate)
1185 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1186 return 1;
1187 }
1188 if (remainder == 0xffffffff)
1189 {
1190 if (generate)
1191 emit_insn (gen_rtx_SET (VOIDmode, target,
1192 gen_rtx_NOT (mode, source)));
1193 return 1;
1194 }
1195
1196 /* We don't yet know how to handle this case below. */
1197 abort ();
1198
1199 case MINUS:
1200 /* We treat MINUS as (val - source), since (source - val) is always
1201 passed as (source + (-val)). */
1202 if (remainder == 0)
1203 {
1204 if (generate)
1205 emit_insn (gen_rtx_SET (VOIDmode, target,
1206 gen_rtx_NEG (mode, source)));
1207 return 1;
1208 }
1209 if (const_ok_for_arm (val))
1210 {
1211 if (generate)
1212 emit_insn (gen_rtx_SET (VOIDmode, target,
1213 gen_rtx_MINUS (mode, GEN_INT (val),
1214 source)));
1215 return 1;
1216 }
1217 can_negate = 1;
1218
1219 break;
1220
1221 default:
1222 abort ();
1223 }
1224
1225 /* If we can do it in one insn get out quickly. */
1226 if (const_ok_for_arm (val)
1227 || (can_negate_initial && const_ok_for_arm (-val))
1228 || (can_invert && const_ok_for_arm (~val)))
1229 {
1230 if (generate)
1231 emit_insn (gen_rtx_SET (VOIDmode, target,
1232 (source ? gen_rtx (code, mode, source,
1233 GEN_INT (val))
1234 : GEN_INT (val))));
1235 return 1;
1236 }
1237
1238 /* Calculate a few attributes that may be useful for specific
1239 optimizations. */
1240 for (i = 31; i >= 0; i--)
1241 {
1242 if ((remainder & (1 << i)) == 0)
1243 clear_sign_bit_copies++;
1244 else
1245 break;
1246 }
1247
1248 for (i = 31; i >= 0; i--)
1249 {
1250 if ((remainder & (1 << i)) != 0)
1251 set_sign_bit_copies++;
1252 else
1253 break;
1254 }
1255
1256 for (i = 0; i <= 31; i++)
1257 {
1258 if ((remainder & (1 << i)) == 0)
1259 clear_zero_bit_copies++;
1260 else
1261 break;
1262 }
1263
1264 for (i = 0; i <= 31; i++)
1265 {
1266 if ((remainder & (1 << i)) != 0)
1267 set_zero_bit_copies++;
1268 else
1269 break;
1270 }
1271
1272 switch (code)
1273 {
1274 case SET:
1275 /* See if we can do this by sign_extending a constant that is known
1276 to be negative. This is a good way of doing it, since the shift
1277 may well merge into a subsequent insn. */
1278 if (set_sign_bit_copies > 1)
1279 {
1280 if (const_ok_for_arm
1281 (temp1 = ARM_SIGN_EXTEND (remainder
1282 << (set_sign_bit_copies - 1))))
1283 {
1284 if (generate)
1285 {
1286 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1287 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1288 GEN_INT (temp1)));
1289 emit_insn (gen_ashrsi3 (target, new_src,
1290 GEN_INT (set_sign_bit_copies - 1)));
1291 }
1292 return 2;
1293 }
1294 /* For an inverted constant, we will need to set the low bits,
1295 as these will be shifted out of harm's way. */
1296 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1297 if (const_ok_for_arm (~temp1))
1298 {
1299 if (generate)
1300 {
1301 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1302 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1303 GEN_INT (temp1)));
1304 emit_insn (gen_ashrsi3 (target, new_src,
1305 GEN_INT (set_sign_bit_copies - 1)));
1306 }
1307 return 2;
1308 }
1309 }
1310
1311 /* See if we can generate this by setting the bottom (or the top)
1312 16 bits, and then shifting these into the other half of the
1313 word. We only look for the simplest cases, to do more would cost
1314 too much. Be careful, however, not to generate this when the
1315 alternative would take fewer insns. */
1316 if (val & 0xffff0000)
1317 {
1318 temp1 = remainder & 0xffff0000;
1319 temp2 = remainder & 0x0000ffff;
1320
1321 /* Overlaps outside this range are best done using other methods. */
1322 for (i = 9; i < 24; i++)
1323 {
1324 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1325 && !const_ok_for_arm (temp2))
1326 {
1327 rtx new_src = (subtargets
1328 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1329 : target);
1330 insns = arm_gen_constant (code, mode, temp2, new_src,
1331 source, subtargets, generate);
1332 source = new_src;
1333 if (generate)
1334 emit_insn (gen_rtx_SET
1335 (VOIDmode, target,
1336 gen_rtx_IOR (mode,
1337 gen_rtx_ASHIFT (mode, source,
1338 GEN_INT (i)),
1339 source)));
1340 return insns + 1;
1341 }
1342 }
1343
1344 /* Don't duplicate cases already considered. */
1345 for (i = 17; i < 24; i++)
1346 {
1347 if (((temp1 | (temp1 >> i)) == remainder)
1348 && !const_ok_for_arm (temp1))
1349 {
1350 rtx new_src = (subtargets
1351 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1352 : target);
1353 insns = arm_gen_constant (code, mode, temp1, new_src,
1354 source, subtargets, generate);
1355 source = new_src;
1356 if (generate)
1357 emit_insn
1358 (gen_rtx_SET (VOIDmode, target,
1359 gen_rtx_IOR
1360 (mode,
1361 gen_rtx_LSHIFTRT (mode, source,
1362 GEN_INT (i)),
1363 source)));
1364 return insns + 1;
1365 }
1366 }
1367 }
1368 break;
1369
1370 case IOR:
1371 case XOR:
1372 /* If we have IOR or XOR, and the constant can be loaded in a
1373 single instruction, and we can find a temporary to put it in,
1374 then this can be done in two instructions instead of 3-4. */
1375 if (subtargets
1376 /* TARGET can't be NULL if SUBTARGETS is 0 */
1377 || (reload_completed && !reg_mentioned_p (target, source)))
1378 {
1379 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1380 {
1381 if (generate)
1382 {
1383 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1384
1385 emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
1386 emit_insn (gen_rtx_SET (VOIDmode, target,
1387 gen_rtx (code, mode, source, sub)));
1388 }
1389 return 2;
1390 }
1391 }
1392
1393 if (code == XOR)
1394 break;
1395
1396 if (set_sign_bit_copies > 8
1397 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1398 {
1399 if (generate)
1400 {
1401 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1402 rtx shift = GEN_INT (set_sign_bit_copies);
1403
1404 emit_insn (gen_rtx_SET (VOIDmode, sub,
1405 gen_rtx_NOT (mode,
1406 gen_rtx_ASHIFT (mode,
1407 source,
1408 shift))));
1409 emit_insn (gen_rtx_SET (VOIDmode, target,
1410 gen_rtx_NOT (mode,
1411 gen_rtx_LSHIFTRT (mode, sub,
1412 shift))));
1413 }
1414 return 2;
1415 }
1416
1417 if (set_zero_bit_copies > 8
1418 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1419 {
1420 if (generate)
1421 {
1422 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1423 rtx shift = GEN_INT (set_zero_bit_copies);
1424
1425 emit_insn (gen_rtx_SET (VOIDmode, sub,
1426 gen_rtx_NOT (mode,
1427 gen_rtx_LSHIFTRT (mode,
1428 source,
1429 shift))));
1430 emit_insn (gen_rtx_SET (VOIDmode, target,
1431 gen_rtx_NOT (mode,
1432 gen_rtx_ASHIFT (mode, sub,
1433 shift))));
1434 }
1435 return 2;
1436 }
1437
1438 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1439 {
1440 if (generate)
1441 {
1442 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1443 emit_insn (gen_rtx_SET (VOIDmode, sub,
1444 gen_rtx_NOT (mode, source)));
1445 source = sub;
1446 if (subtargets)
1447 sub = gen_reg_rtx (mode);
1448 emit_insn (gen_rtx_SET (VOIDmode, sub,
1449 gen_rtx_AND (mode, source,
1450 GEN_INT (temp1))));
1451 emit_insn (gen_rtx_SET (VOIDmode, target,
1452 gen_rtx_NOT (mode, sub)));
1453 }
1454 return 3;
1455 }
1456 break;
1457
1458 case AND:
1459 /* See if two shifts will do 2 or more insns' worth of work. */
1460 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1461 {
1462 HOST_WIDE_INT shift_mask = ((0xffffffff
1463 << (32 - clear_sign_bit_copies))
1464 & 0xffffffff);
1465
1466 if ((remainder | shift_mask) != 0xffffffff)
1467 {
1468 if (generate)
1469 {
1470 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1471 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1472 new_src, source, subtargets, 1);
1473 source = new_src;
1474 }
1475 else
1476 {
1477 rtx targ = subtargets ? NULL_RTX : target;
1478 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1479 targ, source, subtargets, 0);
1480 }
1481 }
1482
1483 if (generate)
1484 {
1485 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1486 rtx shift = GEN_INT (clear_sign_bit_copies);
1487
1488 emit_insn (gen_ashlsi3 (new_src, source, shift));
1489 emit_insn (gen_lshrsi3 (target, new_src, shift));
1490 }
1491
1492 return insns + 2;
1493 }
1494
1495 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1496 {
1497 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1498
1499 if ((remainder | shift_mask) != 0xffffffff)
1500 {
1501 if (generate)
1502 {
1503 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1504
1505 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1506 new_src, source, subtargets, 1);
1507 source = new_src;
1508 }
1509 else
1510 {
1511 rtx targ = subtargets ? NULL_RTX : target;
1512
1513 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1514 targ, source, subtargets, 0);
1515 }
1516 }
1517
1518 if (generate)
1519 {
1520 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1521 rtx shift = GEN_INT (clear_zero_bit_copies);
1522
1523 emit_insn (gen_lshrsi3 (new_src, source, shift));
1524 emit_insn (gen_ashlsi3 (target, new_src, shift));
1525 }
1526
1527 return insns + 2;
1528 }
1529
1530 break;
1531
1532 default:
1533 break;
1534 }
1535
1536 for (i = 0; i < 32; i++)
1537 if (remainder & (1 << i))
1538 num_bits_set++;
1539
1540 if (code == AND || (can_invert && num_bits_set > 16))
1541 remainder = (~remainder) & 0xffffffff;
1542 else if (code == PLUS && num_bits_set > 16)
1543 remainder = (-remainder) & 0xffffffff;
1544 else
1545 {
1546 can_invert = 0;
1547 can_negate = 0;
1548 }
1549
1550 /* Now try and find a way of doing the job in either two or three
1551 instructions.
1552 We start by looking for the largest block of zeros that are aligned on
1553 a 2-bit boundary; we then fill up the temps, wrapping around to the
1554 top of the word when we drop off the bottom.
1555 In the worst case this code should produce no more than four insns. */
1556 {
1557 int best_start = 0;
1558 int best_consecutive_zeros = 0;
1559
1560 for (i = 0; i < 32; i += 2)
1561 {
1562 int consecutive_zeros = 0;
1563
1564 if (!(remainder & (3 << i)))
1565 {
1566 while ((i < 32) && !(remainder & (3 << i)))
1567 {
1568 consecutive_zeros += 2;
1569 i += 2;
1570 }
1571 if (consecutive_zeros > best_consecutive_zeros)
1572 {
1573 best_consecutive_zeros = consecutive_zeros;
1574 best_start = i - consecutive_zeros;
1575 }
1576 i -= 2;
1577 }
1578 }
1579
1580 /* So long as it won't require any more insns to do so, it's
1581 desirable to emit a small constant (in bits 0...9) in the last
1582 insn. This way there is more chance that it can be combined with
1583 a later addressing insn to form a pre-indexed load or store
1584 operation. Consider:
1585
1586 *((volatile int *)0xe0000100) = 1;
1587 *((volatile int *)0xe0000110) = 2;
1588
1589 We want this to wind up as:
1590
1591 mov rA, #0xe0000000
1592 mov rB, #1
1593 str rB, [rA, #0x100]
1594 mov rB, #2
1595 str rB, [rA, #0x110]
1596
1597 rather than having to synthesize both large constants from scratch.
1598
1599 Therefore, we calculate how many insns would be required to emit
1600 the constant starting from `best_start', and also starting from
1601 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
1602 yield a shorter sequence, we may as well use zero. */
1603 if (best_start != 0
1604 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
1605 && (count_insns_for_constant (remainder, 0) <=
1606 count_insns_for_constant (remainder, best_start)))
1607 best_start = 0;
1608
1609 /* Now start emitting the insns. */
1610 i = best_start;
1611 do
1612 {
1613 int end;
1614
1615 if (i <= 0)
1616 i += 32;
1617 if (remainder & (3 << (i - 2)))
1618 {
1619 end = i - 8;
1620 if (end < 0)
1621 end += 32;
1622 temp1 = remainder & ((0x0ff << end)
1623 | ((i < end) ? (0xff >> (32 - end)) : 0));
1624 remainder &= ~temp1;
1625
1626 if (generate)
1627 {
1628 rtx new_src, temp1_rtx;
1629
1630 if (code == SET || code == MINUS)
1631 {
1632 new_src = (subtargets ? gen_reg_rtx (mode) : target);
1633 if (can_invert && code != MINUS)
1634 temp1 = ~temp1;
1635 }
1636 else
1637 {
1638 if (remainder && subtargets)
1639 new_src = gen_reg_rtx (mode);
1640 else
1641 new_src = target;
1642 if (can_invert)
1643 temp1 = ~temp1;
1644 else if (can_negate)
1645 temp1 = -temp1;
1646 }
1647
1648 temp1 = trunc_int_for_mode (temp1, mode);
1649 temp1_rtx = GEN_INT (temp1);
1650
1651 if (code == SET)
1652 ;
1653 else if (code == MINUS)
1654 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
1655 else
1656 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
1657
1658 emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
1659 source = new_src;
1660 }
1661
1662 if (code == SET)
1663 {
1664 can_invert = 0;
1665 code = PLUS;
1666 }
1667 else if (code == MINUS)
1668 code = PLUS;
1669
1670 insns++;
1671 i -= 6;
1672 }
1673 i -= 2;
1674 }
1675 while (remainder);
1676 }
1677
1678 return insns;
1679 }
1680
1681 /* Canonicalize a comparison so that we are more likely to recognize it.
1682 This can be done for a few constant compares, where we can make the
1683 immediate value easier to load. */
1684
1685 enum rtx_code
1686 arm_canonicalize_comparison (code, op1)
1687 enum rtx_code code;
1688 rtx * op1;
1689 {
1690 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1691
1692 switch (code)
1693 {
1694 case EQ:
1695 case NE:
1696 return code;
1697
1698 case GT:
1699 case LE:
1700 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1701 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1702 {
1703 *op1 = GEN_INT (i + 1);
1704 return code == GT ? GE : LT;
1705 }
1706 break;
1707
1708 case GE:
1709 case LT:
1710 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1711 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1712 {
1713 *op1 = GEN_INT (i - 1);
1714 return code == GE ? GT : LE;
1715 }
1716 break;
1717
1718 case GTU:
1719 case LEU:
1720 if (i != ~((unsigned HOST_WIDE_INT) 0)
1721 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1722 {
1723 *op1 = GEN_INT (i + 1);
1724 return code == GTU ? GEU : LTU;
1725 }
1726 break;
1727
1728 case GEU:
1729 case LTU:
1730 if (i != 0
1731 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1732 {
1733 *op1 = GEN_INT (i - 1);
1734 return code == GEU ? GTU : LEU;
1735 }
1736 break;
1737
1738 default:
1739 abort ();
1740 }
1741
1742 return code;
1743 }
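/* A sketch of the payoff (illustrative values): 0xffff is not a
   valid immediate, since its set bits span 16 bits, but 0x10000 is,
   being a single bit.  The code above therefore rewrites
   (x > 0xffff) as (x >= 0x10000), so that the comparison can still
   be done with a one-instruction CMP.  */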
1744
1745 /* Decide whether a type should be returned in memory (true)
1746 or in a register (false). This is called by the macro
1747 RETURN_IN_MEMORY. */
1748
1749 int
1750 arm_return_in_memory (type)
1751 tree type;
1752 {
1753 if (!AGGREGATE_TYPE_P (type))
1754 /* All simple types are returned in registers. */
1755 return 0;
1756
1757 /* For the arm-wince targets we choose to be compatible with Microsoft's
1758 ARM and Thumb compilers, which always return aggregates in memory. */
1759 #ifndef ARM_WINCE
1760 /* All structures/unions bigger than one word are returned in memory.
1761 Also catch the case where int_size_in_bytes returns -1. In this case
1762 the aggregate is either huge or of variable size, and in either case
1763 we will want to return it via memory and not in a register. */
1764 if (((unsigned int) int_size_in_bytes (type)) > UNITS_PER_WORD)
1765 return 1;
1766
1767 if (TREE_CODE (type) == RECORD_TYPE)
1768 {
1769 tree field;
1770
1771 /* For a struct the APCS says that we only return in a register
1772 if the type is 'integer like' and every addressable element
1773 has an offset of zero. For practical purposes this means
1774 that the structure can have at most one non bit-field element
1775 and that this element must be the first one in the structure. */
1776
1777 /* Find the first field, ignoring non FIELD_DECL things which will
1778 have been created by C++. */
1779 for (field = TYPE_FIELDS (type);
1780 field && TREE_CODE (field) != FIELD_DECL;
1781 field = TREE_CHAIN (field))
1782 continue;
1783
1784 if (field == NULL)
1785 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1786
1787 /* Check that the first field is valid for returning in a register. */
1788
1789 /* ... Floats are not allowed */
1790 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1791 return 1;
1792
1793 /* ... Aggregates that are not themselves valid for returning in
1794 a register are not allowed. */
1795 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1796 return 1;
1797
1798 /* Now check the remaining fields, if any. Only bitfields are allowed,
1799 since they are not addressable. */
1800 for (field = TREE_CHAIN (field);
1801 field;
1802 field = TREE_CHAIN (field))
1803 {
1804 if (TREE_CODE (field) != FIELD_DECL)
1805 continue;
1806
1807 if (!DECL_BIT_FIELD_TYPE (field))
1808 return 1;
1809 }
1810
1811 return 0;
1812 }
1813
1814 if (TREE_CODE (type) == UNION_TYPE)
1815 {
1816 tree field;
1817
1818 /* Unions can be returned in registers if every element is
1819 integral, or can be returned in an integer register. */
1820 for (field = TYPE_FIELDS (type);
1821 field;
1822 field = TREE_CHAIN (field))
1823 {
1824 if (TREE_CODE (field) != FIELD_DECL)
1825 continue;
1826
1827 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1828 return 1;
1829
1830 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1831 return 1;
1832 }
1833
1834 return 0;
1835 }
1836 #endif /* not ARM_WINCE */
1837
1838 /* Return all other types in memory. */
1839 return 1;
1840 }
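
/* Illustrative examples of the rules above (a sketch, assuming the
   default non-WinCE ABI with a 4 byte word):

     struct a { int i; };             returned in a register (integer like)
     struct b { int i; int j; };      returned in memory (wider than a word)
     struct c { float f; };           returned in memory (first field a float)
     union u { int i; unsigned j; };  returned in a register (all integral)
*/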
1841
1842 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1843 for a call to a function whose data type is FNTYPE.
1844 For a library call, FNTYPE is NULL. */
1845 void
1846 arm_init_cumulative_args (pcum, fntype, libname, indirect)
1847 CUMULATIVE_ARGS * pcum;
1848 tree fntype;
1849 rtx libname ATTRIBUTE_UNUSED;
1850 int indirect ATTRIBUTE_UNUSED;
1851 {
1852 /* On the ARM, the offset starts at 0. */
1853 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype))) ? 1 : 0);
1854
1855 pcum->call_cookie = CALL_NORMAL;
1856
1857 if (TARGET_LONG_CALLS)
1858 pcum->call_cookie = CALL_LONG;
1859
1860 /* Check for long call/short call attributes. The attributes
1861 override any command line option. */
1862 if (fntype)
1863 {
1864 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
1865 pcum->call_cookie = CALL_SHORT;
1866 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
1867 pcum->call_cookie = CALL_LONG;
1868 }
1869 }
1870
1871 /* Determine where to put an argument to a function.
1872 Value is zero to push the argument on the stack,
1873 or a hard register in which to store the argument.
1874
1875 MODE is the argument's machine mode.
1876 TYPE is the data type of the argument (as a tree).
1877 This is null for libcalls where that information may
1878 not be available.
1879 CUM is a variable of type CUMULATIVE_ARGS which gives info about
1880 the preceding args and about the function being called.
1881 NAMED is nonzero if this argument is a named parameter
1882 (otherwise it is an extra parameter matching an ellipsis). */
1883
1884 rtx
1885 arm_function_arg (pcum, mode, type, named)
1886 CUMULATIVE_ARGS * pcum;
1887 enum machine_mode mode;
1888 tree type ATTRIBUTE_UNUSED;
1889 int named;
1890 {
1891 if (mode == VOIDmode)
1892 /* Compute operand 2 of the call insn. */
1893 return GEN_INT (pcum->call_cookie);
1894
1895 if (!named || pcum->nregs >= NUM_ARG_REGS)
1896 return NULL_RTX;
1897
1898 return gen_rtx_REG (mode, pcum->nregs);
1899 }
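
/* For example (a sketch, assuming the APCS value of NUM_ARG_REGS, 4):
   given a call f (a, b) with int arguments, the first use of
   arm_function_arg yields (reg:SI 0); after FUNCTION_ARG_ADVANCE bumps
   pcum->nregs, the second yields (reg:SI 1).  Once all four argument
   registers are used, NULL_RTX is returned and the remaining arguments
   are passed on the stack.  */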
1900 \f
1901 /* Encode the current state of the #pragma [no_]long_calls. */
1902 typedef enum
1903 {
1904 OFF, /* No #pragma [no_]long_calls is in effect. */
1905 LONG, /* #pragma long_calls is in effect. */
1906 SHORT /* #pragma no_long_calls is in effect. */
1907 } arm_pragma_enum;
1908
1909 static arm_pragma_enum arm_pragma_long_calls = OFF;
1910
1911 void
1912 arm_pr_long_calls (pfile)
1913 cpp_reader * pfile ATTRIBUTE_UNUSED;
1914 {
1915 arm_pragma_long_calls = LONG;
1916 }
1917
1918 void
1919 arm_pr_no_long_calls (pfile)
1920 cpp_reader * pfile ATTRIBUTE_UNUSED;
1921 {
1922 arm_pragma_long_calls = SHORT;
1923 }
1924
1925 void
1926 arm_pr_long_calls_off (pfile)
1927 cpp_reader * pfile ATTRIBUTE_UNUSED;
1928 {
1929 arm_pragma_long_calls = OFF;
1930 }
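
/* Usage sketch (illustrative only; the attributes are attached by
   arm_set_default_type_attributes below):

     #pragma long_calls
     extern void far_away (void);    (gets the long_call attribute)
     #pragma no_long_calls
     extern void nearby (void);      (gets the short_call attribute)
     #pragma long_calls_off
     extern void whatever (void);    (back to the command line default)
*/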
1931 \f
1932 /* Table of machine attributes. */
1933 const struct attribute_spec arm_attribute_table[] =
1934 {
1935 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1936 /* Function calls made to this symbol must be done indirectly, because
1937 it may lie outside of the 26 bit addressing range of a normal function
1938 call. */
1939 { "long_call", 0, 0, false, true, true, NULL },
1940 /* Whereas these functions are always known to reside within the 26 bit
1941 addressing range. */
1942 { "short_call", 0, 0, false, true, true, NULL },
1943 /* Interrupt Service Routines have special prologue and epilogue requirements. */
1944 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
1945 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
1946 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
1947 #ifdef ARM_PE
1948 /* ARM/PE has three new attributes:
1949 interfacearm - ?
1950 dllexport - for exporting a function/variable that will live in a dll
1951 dllimport - for importing a function/variable from a dll
1952
1953 Microsoft allows multiple declspecs in one __declspec, separating
1954 them with spaces. We do NOT support this. Instead, use __declspec
1955 multiple times.
1956 */
1957 { "dllimport", 0, 0, true, false, false, NULL },
1958 { "dllexport", 0, 0, true, false, false, NULL },
1959 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
1960 #endif
1961 { NULL, 0, 0, false, false, false, NULL }
1962 };
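
/* Usage sketch (illustrative only; "IRQ" is one of the argument
   strings accepted by arm_isr_value):

     void f (void) __attribute__ ((long_call));
     void g (void) __attribute__ ((short_call));
     void handler (void) __attribute__ ((interrupt ("IRQ")));
     void raw (void) __attribute__ ((naked));
*/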
1963
1964 /* Handle an attribute requiring a FUNCTION_DECL;
1965 arguments as in struct attribute_spec.handler. */
1966
1967 static tree
1968 arm_handle_fndecl_attribute (node, name, args, flags, no_add_attrs)
1969 tree * node;
1970 tree name;
1971 tree args ATTRIBUTE_UNUSED;
1972 int flags ATTRIBUTE_UNUSED;
1973 bool * no_add_attrs;
1974 {
1975 if (TREE_CODE (*node) != FUNCTION_DECL)
1976 {
1977 warning ("`%s' attribute only applies to functions",
1978 IDENTIFIER_POINTER (name));
1979 *no_add_attrs = true;
1980 }
1981
1982 return NULL_TREE;
1983 }
1984
1985 /* Handle an "interrupt" or "isr" attribute;
1986 arguments as in struct attribute_spec.handler. */
1987
1988 static tree
1989 arm_handle_isr_attribute (node, name, args, flags, no_add_attrs)
1990 tree * node;
1991 tree name;
1992 tree args;
1993 int flags;
1994 bool * no_add_attrs;
1995 {
1996 if (DECL_P (*node))
1997 {
1998 if (TREE_CODE (*node) != FUNCTION_DECL)
1999 {
2000 warning ("`%s' attribute only applies to functions",
2001 IDENTIFIER_POINTER (name));
2002 *no_add_attrs = true;
2003 }
2004 /* FIXME: the argument if any is checked for type attributes;
2005 should it be checked for decl ones? */
2006 }
2007 else
2008 {
2009 if (TREE_CODE (*node) == FUNCTION_TYPE
2010 || TREE_CODE (*node) == METHOD_TYPE)
2011 {
2012 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2013 {
2014 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2015 *no_add_attrs = true;
2016 }
2017 }
2018 else if (TREE_CODE (*node) == POINTER_TYPE
2019 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2020 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2021 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2022 {
2023 *node = build_type_copy (*node);
2024 TREE_TYPE (*node) = build_type_attribute_variant
2025 (TREE_TYPE (*node),
2026 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2027 *no_add_attrs = true;
2028 }
2029 else
2030 {
2031 /* Possibly pass this attribute on from the type to a decl. */
2032 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2033 | (int) ATTR_FLAG_FUNCTION_NEXT
2034 | (int) ATTR_FLAG_ARRAY_NEXT))
2035 {
2036 *no_add_attrs = true;
2037 return tree_cons (name, args, NULL_TREE);
2038 }
2039 else
2040 {
2041 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2042 }
2043 }
2044 }
2045
2046 return NULL_TREE;
2047 }
2048
2049 /* Return 0 if the attributes for two types are incompatible, 1 if they
2050 are compatible, and 2 if they are nearly compatible (which causes a
2051 warning to be generated). */
2052
2053 static int
2054 arm_comp_type_attributes (type1, type2)
2055 tree type1;
2056 tree type2;
2057 {
2058 int l1, l2, s1, s2;
2059
2060 /* Check for mismatch of non-default calling convention. */
2061 if (TREE_CODE (type1) != FUNCTION_TYPE)
2062 return 1;
2063
2064 /* Check for mismatched call attributes. */
2065 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2066 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2067 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2068 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2069
2070 /* Only bother to check if an attribute is defined. */
2071 if (l1 | l2 | s1 | s2)
2072 {
2073 /* If one type has an attribute, the other must have the same attribute. */
2074 if ((l1 != l2) || (s1 != s2))
2075 return 0;
2076
2077 /* Disallow mixed attributes. */
2078 if ((l1 & s2) || (l2 & s1))
2079 return 0;
2080 }
2081
2082 /* Check for mismatched ISR attribute. */
2083 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2084 if (! l1)
2085 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2086 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2087 if (! l2)
2088 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2089 if (l1 != l2)
2090 return 0;
2091
2092 return 1;
2093 }
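
/* For example (illustrative): the declarations

     void f (void) __attribute__ ((long_call));
     void f (void);

   are reported as incompatible, since only the first carries a call
   attribute, so this function returns 0 for the pair.  */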
2094
2095 /* Encode long_call or short_call attribute by prefixing
2096 symbol name in DECL with a special character FLAG. */
2097
2098 void
2099 arm_encode_call_attribute (decl, flag)
2100 tree decl;
2101 int flag;
2102 {
2103 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2104 int len = strlen (str);
2105 char * newstr;
2106
2107 /* Do not allow weak functions to be treated as short call. */
2108 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2109 return;
2110
2111 newstr = alloca (len + 2);
2112 newstr[0] = flag;
2113 strcpy (newstr + 1, str);
2114
2115 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2116 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2117 }
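
/* For example (a sketch; the real flag characters are defined in
   arm.h): if the short call flag character were '^', a function
   named "foo" would have its assembler name rewritten to "^foo",
   which ENCODED_SHORT_CALL_ATTR_P later recognizes by its prefix.  */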
2118
2119 /* Assigns default attributes to a newly defined type. This is used to
2120 set short_call/long_call attributes for function types of
2121 functions defined inside corresponding #pragma scopes. */
2122
2123 static void
2124 arm_set_default_type_attributes (type)
2125 tree type;
2126 {
2127 /* Add __attribute__ ((long_call)) to all functions, when
2128 inside #pragma long_calls or __attribute__ ((short_call)),
2129 when inside #pragma no_long_calls. */
2130 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2131 {
2132 tree type_attr_list, attr_name;
2133 type_attr_list = TYPE_ATTRIBUTES (type);
2134
2135 if (arm_pragma_long_calls == LONG)
2136 attr_name = get_identifier ("long_call");
2137 else if (arm_pragma_long_calls == SHORT)
2138 attr_name = get_identifier ("short_call");
2139 else
2140 return;
2141
2142 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2143 TYPE_ATTRIBUTES (type) = type_attr_list;
2144 }
2145 }
2146 \f
2147 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2148 defined within the current compilation unit. If this cannot be
2149 determined, then 0 is returned. */
2150
2151 static int
2152 current_file_function_operand (sym_ref)
2153 rtx sym_ref;
2154 {
2155 /* This is a bit of a fib. A function will have a short call flag
2156 applied to its name if it has the short call attribute, or it has
2157 already been defined within the current compilation unit. */
2158 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2159 return 1;
2160
2161 /* The current function is always defined within the current compilation
2162 unit. If it is a weak definition, however, then this may not be the real
2163 definition of the function, and so we have to say no. */
2164 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2165 && !DECL_WEAK (current_function_decl))
2166 return 1;
2167
2168 /* We cannot make the determination - default to returning 0. */
2169 return 0;
2170 }
2171
2172 /* Return non-zero if a 32 bit "long_call" should be generated for
2173 this call. We generate a long_call if the function:
2174
2175 a. has an __attribute__ ((long_call))
2176 or b. is within the scope of a #pragma long_calls
2177 or c. the -mlong-calls command line switch has been specified
2178
2179 However we do not generate a long call if the function:
2180
2181 d. has an __attribute__ ((short_call))
2182 or e. is inside the scope of a #pragma no_long_calls
2183 or f. has an __attribute__ ((section))
2184 or g. is defined within the current compilation unit.
2185
2186 This function will be called by C fragments contained in the machine
2187 description file. CALL_REF and CALL_COOKIE correspond to the matched
2188 rtl operands. CALL_SYMBOL is used to distinguish between
2189 two different callers of the function. It is set to 1 in the
2190 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2191 and "call_value" patterns. This is because of the difference in the
2192 SYM_REFs passed by these patterns. */
2193
2194 int
2195 arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
2196 rtx sym_ref;
2197 int call_cookie;
2198 int call_symbol;
2199 {
2200 if (!call_symbol)
2201 {
2202 if (GET_CODE (sym_ref) != MEM)
2203 return 0;
2204
2205 sym_ref = XEXP (sym_ref, 0);
2206 }
2207
2208 if (GET_CODE (sym_ref) != SYMBOL_REF)
2209 return 0;
2210
2211 if (call_cookie & CALL_SHORT)
2212 return 0;
2213
2214 if (TARGET_LONG_CALLS && flag_function_sections)
2215 return 1;
2216
2217 if (current_file_function_operand (sym_ref))
2218 return 0;
2219
2220 return (call_cookie & CALL_LONG)
2221 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2222 || TARGET_LONG_CALLS;
2223 }
2224
2225 /* Return non-zero if it is ok to make a tail-call to DECL. */
2226
2227 int
2228 arm_function_ok_for_sibcall (decl)
2229 tree decl;
2230 {
2231 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2232
2233 /* Never tailcall something for which we have no decl, or if we
2234 are in Thumb mode. */
2235 if (decl == NULL || TARGET_THUMB)
2236 return 0;
2237
2238 /* Get the calling method. */
2239 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2240 call_type = CALL_SHORT;
2241 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2242 call_type = CALL_LONG;
2243
2244 /* Cannot tail-call to long calls, since these are out of range of
2245 a branch instruction. However, if not compiling PIC, we know
2246 we can reach the symbol if it is in this compilation unit. */
2247 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2248 return 0;
2249
2250 /* If we are interworking and the function is not declared static
2251 then we can't tail-call it unless we know that it exists in this
2252 compilation unit (since it might be a Thumb routine). */
2253 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2254 return 0;
2255
2256 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2257 if (IS_INTERRUPT (arm_current_func_type ()))
2258 return 0;
2259
2260 /* Everything else is ok. */
2261 return 1;
2262 }
2263
2264 \f
2265 int
2266 legitimate_pic_operand_p (x)
2267 rtx x;
2268 {
2269 if (CONSTANT_P (x)
2270 && flag_pic
2271 && (GET_CODE (x) == SYMBOL_REF
2272 || (GET_CODE (x) == CONST
2273 && GET_CODE (XEXP (x, 0)) == PLUS
2274 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2275 return 0;
2276
2277 return 1;
2278 }
2279
2280 rtx
2281 legitimize_pic_address (orig, mode, reg)
2282 rtx orig;
2283 enum machine_mode mode;
2284 rtx reg;
2285 {
2286 if (GET_CODE (orig) == SYMBOL_REF
2287 || GET_CODE (orig) == LABEL_REF)
2288 {
2289 #ifndef AOF_ASSEMBLER
2290 rtx pic_ref, address;
2291 #endif
2292 rtx insn;
2293 int subregs = 0;
2294
2295 if (reg == 0)
2296 {
2297 if (no_new_pseudos)
2298 abort ();
2299 else
2300 reg = gen_reg_rtx (Pmode);
2301
2302 subregs = 1;
2303 }
2304
2305 #ifdef AOF_ASSEMBLER
2306 /* The AOF assembler can generate relocations for these directly, and
2307 understands that the PIC register has to be added into the offset. */
2308 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2309 #else
2310 if (subregs)
2311 address = gen_reg_rtx (Pmode);
2312 else
2313 address = reg;
2314
2315 if (TARGET_ARM)
2316 emit_insn (gen_pic_load_addr_arm (address, orig));
2317 else
2318 emit_insn (gen_pic_load_addr_thumb (address, orig));
2319
2320 if ((GET_CODE (orig) == LABEL_REF
2321 || (GET_CODE (orig) == SYMBOL_REF &&
2322 ENCODED_SHORT_CALL_ATTR_P (XSTR (orig, 0))))
2323 && NEED_GOT_RELOC)
2324 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2325 else
2326 {
2327 pic_ref = gen_rtx_MEM (Pmode,
2328 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2329 address));
2330 RTX_UNCHANGING_P (pic_ref) = 1;
2331 }
2332
2333 insn = emit_move_insn (reg, pic_ref);
2334 #endif
2335 current_function_uses_pic_offset_table = 1;
2336 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2337 by loop. */
2338 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2339 REG_NOTES (insn));
2340 return reg;
2341 }
2342 else if (GET_CODE (orig) == CONST)
2343 {
2344 rtx base, offset;
2345
2346 if (GET_CODE (XEXP (orig, 0)) == PLUS
2347 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2348 return orig;
2349
2350 if (reg == 0)
2351 {
2352 if (no_new_pseudos)
2353 abort ();
2354 else
2355 reg = gen_reg_rtx (Pmode);
2356 }
2357
2358 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2359 {
2360 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2361 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2362 base == reg ? 0 : reg);
2363 }
2364 else
2365 abort ();
2366
2367 if (GET_CODE (offset) == CONST_INT)
2368 {
2369 /* The base register doesn't really matter, we only want to
2370 test the index for the appropriate mode. */
2371 ARM_GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
2372
2373 if (!no_new_pseudos)
2374 offset = force_reg (Pmode, offset);
2375 else
2376 abort ();
2377
2378 win:
2379 if (GET_CODE (offset) == CONST_INT)
2380 return plus_constant (base, INTVAL (offset));
2381 }
2382
2383 if (GET_MODE_SIZE (mode) > 4
2384 && (GET_MODE_CLASS (mode) == MODE_INT
2385 || TARGET_SOFT_FLOAT))
2386 {
2387 emit_insn (gen_addsi3 (reg, base, offset));
2388 return reg;
2389 }
2390
2391 return gen_rtx_PLUS (Pmode, base, offset);
2392 }
2393
2394 return orig;
2395 }
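
/* For example (an illustrative sketch; register and label names
   invented), with -fpic a reference to a global `x' in ARM state
   becomes roughly:

     ldr rN, Ln          @ GOT relative offset of x, from the pool
     ldr rN, [rPIC, rN]  @ rPIC is the PIC base register

   after which rN holds the address of x.  */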
2396
2397 /* Generate code to load the PIC register. PROLOGUE is true if
2398 called from arm_expand_prologue (in which case we want the
2399 generated insns at the start of the function); false if called
2400 by an exception receiver that needs the PIC register reloaded
2401 (in which case the insns are just dumped at the current location). */
2402
2403 void
2404 arm_finalize_pic (prologue)
2405 int prologue ATTRIBUTE_UNUSED;
2406 {
2407 #ifndef AOF_ASSEMBLER
2408 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2409 rtx global_offset_table;
2410
2411 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2412 return;
2413
2414 if (!flag_pic)
2415 abort ();
2416
2417 start_sequence ();
2418 l1 = gen_label_rtx ();
2419
2420 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2421 /* On the ARM the PC register contains 'dot + 8' at the time of the
2422 addition, on the Thumb it is 'dot + 4'. */
2423 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2424 if (GOT_PCREL)
2425 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2426 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2427 else
2428 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2429
2430 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2431
2432 if (TARGET_ARM)
2433 {
2434 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2435 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2436 }
2437 else
2438 {
2439 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2440 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2441 }
2442
2443 seq = gen_sequence ();
2444 end_sequence ();
2445 if (prologue)
2446 emit_insn_after (seq, get_insns ());
2447 else
2448 emit_insn (seq);
2449
2450 /* Need to emit this whether or not we obey regdecls,
2451 since setjmp/longjmp can cause life info to screw up. */
2452 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2453 #endif /* AOF_ASSEMBLER */
2454 }
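
/* In ARM state the sequence emitted above corresponds to something
   like (an illustrative sketch; label and register names invented):

     ldr rPIC, Ln           @ _GLOBAL_OFFSET_TABLE_ - (L1 + 8)
   L1: add rPIC, pc, rPIC   @ pc reads as L1 + 8 at this point

   leaving the address of the GOT in the PIC register.  */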
2455
2456 #define REG_OR_SUBREG_REG(X) \
2457 (GET_CODE (X) == REG \
2458 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
2459
2460 #define REG_OR_SUBREG_RTX(X) \
2461 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
2462
2463 #ifndef COSTS_N_INSNS
2464 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
2465 #endif
2466
2467 int
2468 arm_rtx_costs (x, code, outer)
2469 rtx x;
2470 enum rtx_code code;
2471 enum rtx_code outer;
2472 {
2473 enum machine_mode mode = GET_MODE (x);
2474 enum rtx_code subcode;
2475 int extra_cost;
2476
2477 if (TARGET_THUMB)
2478 {
2479 switch (code)
2480 {
2481 case ASHIFT:
2482 case ASHIFTRT:
2483 case LSHIFTRT:
2484 case ROTATERT:
2485 case PLUS:
2486 case MINUS:
2487 case COMPARE:
2488 case NEG:
2489 case NOT:
2490 return COSTS_N_INSNS (1);
2491
2492 case MULT:
2493 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2494 {
2495 int cycles = 0;
2496 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
2497
2498 while (i)
2499 {
2500 i >>= 2;
2501 cycles++;
2502 }
2503 return COSTS_N_INSNS (2) + cycles;
2504 }
2505 return COSTS_N_INSNS (1) + 16;
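
/* The loop above charges one cycle for every two bits of the
   constant: e.g. (illustrative) a 16 bit constant such as 0xffff
   needs 8 iterations, for a cost of COSTS_N_INSNS (2) + 8.  */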
2506
2507 case SET:
2508 return (COSTS_N_INSNS (1)
2509 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
2510 + (GET_CODE (SET_DEST (x)) == MEM)));
2511
2512 case CONST_INT:
2513 if (outer == SET)
2514 {
2515 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
2516 return 0;
2517 if (thumb_shiftable_const (INTVAL (x)))
2518 return COSTS_N_INSNS (2);
2519 return COSTS_N_INSNS (3);
2520 }
2521 else if (outer == PLUS
2522 && INTVAL (x) < 256 && INTVAL (x) > -256)
2523 return 0;
2524 else if (outer == COMPARE
2525 && (unsigned HOST_WIDE_INT) INTVAL (x) < 256)
2526 return 0;
2527 else if (outer == ASHIFT || outer == ASHIFTRT
2528 || outer == LSHIFTRT)
2529 return 0;
2530 return COSTS_N_INSNS (2);
2531
2532 case CONST:
2533 case CONST_DOUBLE:
2534 case LABEL_REF:
2535 case SYMBOL_REF:
2536 return COSTS_N_INSNS (3);
2537
2538 case UDIV:
2539 case UMOD:
2540 case DIV:
2541 case MOD:
2542 return 100;
2543
2544 case TRUNCATE:
2545 return 99;
2546
2547 case AND:
2548 case XOR:
2549 case IOR:
2550 /* XXX guess. */
2551 return 8;
2552
2553 case ADDRESSOF:
2554 case MEM:
2555 /* XXX another guess. */
2556 /* Memory costs quite a lot for the first word, but subsequent words
2557 load at the equivalent of a single insn each. */
2558 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
2559 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
2560 ? 4 : 0));
2561
2562 case IF_THEN_ELSE:
2563 /* XXX a guess. */
2564 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
2565 return 14;
2566 return 2;
2567
2568 case ZERO_EXTEND:
2569 /* XXX still guessing. */
2570 switch (GET_MODE (XEXP (x, 0)))
2571 {
2572 case QImode:
2573 return (1 + (mode == DImode ? 4 : 0)
2574 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2575
2576 case HImode:
2577 return (4 + (mode == DImode ? 4 : 0)
2578 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2579
2580 case SImode:
2581 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2582
2583 default:
2584 return 99;
2585 }
2586
2587 default:
2588 return 99;
2589 #if 0
2590 case FFS:
2591 case FLOAT:
2592 case FIX:
2593 case UNSIGNED_FIX:
2594 /* XXX guess */
2595 fprintf (stderr, "unexpected code for thumb in rtx_costs: %s\n",
2596 rtx_name[code]);
2597 abort ();
2598 #endif
2599 }
2600 }
2601
2602 switch (code)
2603 {
2604 case MEM:
2605 /* Memory costs quite a lot for the first word, but subsequent words
2606 load at the equivalent of a single insn each. */
2607 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
2608 + (GET_CODE (x) == SYMBOL_REF
2609 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
2610
2611 case DIV:
2612 case MOD:
2613 return 100;
2614
2615 case ROTATE:
2616 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
2617 return 4;
2618 /* Fall through */
2619 case ROTATERT:
2620 if (mode != SImode)
2621 return 8;
2622 /* Fall through */
2623 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
2624 if (mode == DImode)
2625 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
2626 + ((GET_CODE (XEXP (x, 0)) == REG
2627 || (GET_CODE (XEXP (x, 0)) == SUBREG
2628 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
2629 ? 0 : 8));
2630 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
2631 || (GET_CODE (XEXP (x, 0)) == SUBREG
2632 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
2633 ? 0 : 4)
2634 + ((GET_CODE (XEXP (x, 1)) == REG
2635 || (GET_CODE (XEXP (x, 1)) == SUBREG
2636 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
2637 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
2638 ? 0 : 4));
2639
2640 case MINUS:
2641 if (mode == DImode)
2642 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
2643 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
2644 || (GET_CODE (XEXP (x, 0)) == CONST_INT
2645 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
2646 ? 0 : 8));
2647
2648 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2649 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2650 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2651 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
2652 ? 0 : 8)
2653 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
2654 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
2655 && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
2656 ? 0 : 8));
2657
2658 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
2659 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
2660 && REG_OR_SUBREG_REG (XEXP (x, 1))))
2661 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
2662 || subcode == ASHIFTRT || subcode == LSHIFTRT
2663 || subcode == ROTATE || subcode == ROTATERT
2664 || (subcode == MULT
2665 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
2666 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
2667 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
2668 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
2669 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
2670 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
2671 && REG_OR_SUBREG_REG (XEXP (x, 0))))
2672 return 1;
2673 /* Fall through */
2674
2675 case PLUS:
2676 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2677 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
2678 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2679 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2680 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
2681 ? 0 : 8));
2682
2683 /* Fall through */
2684 case AND: case XOR: case IOR:
2685 extra_cost = 0;
2686
2687 /* Normally the frame registers will be split into reg+const during
2688 reload, so it is a bad idea to combine them with other instructions,
2689 since then they might not be moved outside of loops. As a compromise
2690 we allow integration with ops that have a constant as their second
2691 operand. */
2692 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
2693 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
2694 && GET_CODE (XEXP (x, 1)) != CONST_INT)
2695 || (REG_OR_SUBREG_REG (XEXP (x, 1))
2696 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
2697 extra_cost = 4;
2698
2699 if (mode == DImode)
2700 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
2701 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2702 || (GET_CODE (XEXP (x, 1)) == CONST_INT
2703 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
2704 ? 0 : 8));
2705
2706 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
2707 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
2708 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
2709 || (GET_CODE (XEXP (x, 1)) == CONST_INT
2710 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
2711 ? 0 : 4));
2712
2713 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
2714 return (1 + extra_cost
2715 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
2716 || subcode == LSHIFTRT || subcode == ASHIFTRT
2717 || subcode == ROTATE || subcode == ROTATERT
2718 || (subcode == MULT
2719 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2720 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
2721 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
2722 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
2723 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
2724 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
2725 ? 0 : 4));
2726
2727 return 8;
2728
2729 case MULT:
2730 /* There is no point basing this on the tuning, since it is always the
2731 fast variant if it exists at all. */
2732 if (arm_fast_multiply && mode == DImode
2733 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
2734 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2735 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
2736 return 8;
2737
2738 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2739 || mode == DImode)
2740 return 30;
2741
2742 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2743 {
2744 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
2745 & (unsigned HOST_WIDE_INT) 0xffffffff);
2746 int add_cost = const_ok_for_arm (i) ? 4 : 8;
2747 int j;
2748
2749 /* Tune as appropriate. */
2750 int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
2751
2752 for (j = 0; i && j < 32; j += booth_unit_size)
2753 {
2754 i >>= booth_unit_size;
2755 add_cost += 2;
2756 }
2757
2758 return add_cost;
2759 }
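
/* Worked example (illustrative): multiplying by 0x55 on a core with
   FL_FAST_MULT uses an 8 bit Booth step, so the loop above runs
   exactly once (0x55 >> 8 == 0); since 0x55 is a valid ARM
   immediate, the total cost is 4 + 2 = 6.  */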
2760
2761 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
2762 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
2763 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
2764
2765 case TRUNCATE:
2766 if (arm_fast_multiply && mode == SImode
2767 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
2768 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
2769 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
2770 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
2771 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
2772 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
2773 return 8;
2774 return 99;
2775
2776 case NEG:
2777 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2778 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
2779 /* Fall through */
2780 case NOT:
2781 if (mode == DImode)
2782 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
2783
2784 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
2785
2786 case IF_THEN_ELSE:
2787 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
2788 return 14;
2789 return 2;
2790
2791 case COMPARE:
2792 return 1;
2793
2794 case ABS:
2795 return 4 + (mode == DImode ? 4 : 0);
2796
2797 case SIGN_EXTEND:
2798 if (GET_MODE (XEXP (x, 0)) == QImode)
2799 return (4 + (mode == DImode ? 4 : 0)
2800 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2801 /* Fall through */
2802 case ZERO_EXTEND:
2803 switch (GET_MODE (XEXP (x, 0)))
2804 {
2805 case QImode:
2806 return (1 + (mode == DImode ? 4 : 0)
2807 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2808
2809 case HImode:
2810 return (4 + (mode == DImode ? 4 : 0)
2811 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2812
2813 case SImode:
2814 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
2815
2816 default:
2817 break;
2818 }
2819 abort ();
2820
2821 case CONST_INT:
2822 if (const_ok_for_arm (INTVAL (x)))
2823 return outer == SET ? 2 : -1;
2824 else if (outer == AND
2825 && const_ok_for_arm (~INTVAL (x)))
2826 return -1;
2827 else if ((outer == COMPARE
2828 || outer == PLUS || outer == MINUS)
2829 && const_ok_for_arm (-INTVAL (x)))
2830 return -1;
2831 else
2832 return 5;
2833
2834 case CONST:
2835 case LABEL_REF:
2836 case SYMBOL_REF:
2837 return 6;
2838
2839 case CONST_DOUBLE:
2840 if (const_double_rtx_ok_for_fpu (x))
2841 return outer == SET ? 2 : -1;
2842 else if ((outer == COMPARE || outer == PLUS)
2843 && neg_const_double_rtx_ok_for_fpu (x))
2844 return -1;
2845 return 7;
2846
2847 default:
2848 return 99;
2849 }
2850 }
2851
2852 static int
2853 arm_adjust_cost (insn, link, dep, cost)
2854 rtx insn;
2855 rtx link;
2856 rtx dep;
2857 int cost;
2858 {
2859 rtx i_pat, d_pat;
2860
2861 /* Some true dependencies can have a higher cost depending
2862 on precisely how certain input operands are used. */
2863 if (arm_is_xscale
2864 && REG_NOTE_KIND (link) == 0
2865 && recog_memoized (insn) >= 0
2866 && recog_memoized (dep) >= 0)
2867 {
2868 int shift_opnum = get_attr_shift (insn);
2869 enum attr_type attr_type = get_attr_type (dep);
2870
2871 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
2872 operand for INSN. If we have a shifted input operand and the
2873 instruction we depend on is another ALU instruction, then we may
2874 have to account for an additional stall. */
2875 if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
2876 {
2877 rtx shifted_operand;
2878 int opno;
2879
2880 /* Get the shifted operand. */
2881 extract_insn (insn);
2882 shifted_operand = recog_data.operand[shift_opnum];
2883
2884 /* Iterate over all the operands in DEP. If we write an operand
2885 that overlaps with SHIFTED_OPERAND, then we have to increase the
2886 cost of this dependency. */
2887 extract_insn (dep);
2888 preprocess_constraints ();
2889 for (opno = 0; opno < recog_data.n_operands; opno++)
2890 {
2891 /* We can ignore strict inputs. */
2892 if (recog_data.operand_type[opno] == OP_IN)
2893 continue;
2894
2895 if (reg_overlap_mentioned_p (recog_data.operand[opno],
2896 shifted_operand))
2897 return 2;
2898 }
2899 }
2900 }
2901
2902 /* XXX This is not strictly true for the FPA. */
2903 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
2904 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
2905 return 0;
2906
2907 /* Call insns don't incur a stall, even if they follow a load. */
2908 if (REG_NOTE_KIND (link) == 0
2909 && GET_CODE (insn) == CALL_INSN)
2910 return 1;
2911
2912 if ((i_pat = single_set (insn)) != NULL
2913 && GET_CODE (SET_SRC (i_pat)) == MEM
2914 && (d_pat = single_set (dep)) != NULL
2915 && GET_CODE (SET_DEST (d_pat)) == MEM)
2916 {
2917 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
2918 /* This is a load after a store; there is no conflict if the load reads
2919 from a cached area. Assume that loads from the stack, and from the
2920 constant pool are cached, and that others will miss. This is a
2921 hack. */
2922
2923 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
2924 || reg_mentioned_p (stack_pointer_rtx, src_mem)
2925 || reg_mentioned_p (frame_pointer_rtx, src_mem)
2926 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
2927 return 1;
2928 }
2929
2930 return cost;
2931 }
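
/* For example (an illustrative sketch), on XScale the pair

     add r1, r1, r2          @ DEP: ALU instruction writing r1
     orr r0, r3, r1, lsl #2  @ INSN: r1 used as the shifted operand

   triggers the extra stall accounted for above, so the cost of the
   dependency is raised to 2.  */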
2932
2933 /* This code has been fixed for cross compilation. */
2934
2935 static int fpa_consts_inited = 0;
2936
2937 static const char * const strings_fpa[8] =
2938 {
2939 "0", "1", "2", "3",
2940 "4", "5", "0.5", "10"
2941 };
2942
2943 static REAL_VALUE_TYPE values_fpa[8];
2944
2945 static void
2946 init_fpa_table ()
2947 {
2948 int i;
2949 REAL_VALUE_TYPE r;
2950
2951 for (i = 0; i < 8; i++)
2952 {
2953 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
2954 values_fpa[i] = r;
2955 }
2956
2957 fpa_consts_inited = 1;
2958 }
2959
2960 /* Return TRUE if rtx X is a valid immediate FPU constant. */
2961
2962 int
2963 const_double_rtx_ok_for_fpu (x)
2964 rtx x;
2965 {
2966 REAL_VALUE_TYPE r;
2967 int i;
2968
2969 if (!fpa_consts_inited)
2970 init_fpa_table ();
2971
2972 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
2973 if (REAL_VALUE_MINUS_ZERO (r))
2974 return 0;
2975
2976 for (i = 0; i < 8; i++)
2977 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
2978 return 1;
2979
2980 return 0;
2981 }
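
/* For example (illustrative): 2.0, 0.5 and 10.0 are valid FPA
   immediates, since they appear in strings_fpa above; 0.3 is not,
   and -0.0 is explicitly rejected.  */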
2982
2983 /* Return TRUE if rtx X is a valid immediate FPU constant when negated. */
2984
2985 int
2986 neg_const_double_rtx_ok_for_fpu (x)
2987 rtx x;
2988 {
2989 REAL_VALUE_TYPE r;
2990 int i;
2991
2992 if (!fpa_consts_inited)
2993 init_fpa_table ();
2994
2995 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
2996 r = REAL_VALUE_NEGATE (r);
2997 if (REAL_VALUE_MINUS_ZERO (r))
2998 return 0;
2999
3000 for (i = 0; i < 8; i++)
3001 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3002 return 1;
3003
3004 return 0;
3005 }
3006 \f
3007 /* Predicates for `match_operand' and `match_operator'. */
3008
3009 /* s_register_operand is the same as register_operand, but it doesn't accept
3010 (SUBREG (MEM)...).
3011
3012 This function exists because at the time it was put in it led to better
3013 code. SUBREG(MEM) always needs a reload in the places where
3014 s_register_operand is used, and this seemed to lead to excessive
3015 reloading. */
3016
3017 int
3018 s_register_operand (op, mode)
3019 rtx op;
3020 enum machine_mode mode;
3021 {
3022 if (GET_MODE (op) != mode && mode != VOIDmode)
3023 return 0;
3024
3025 if (GET_CODE (op) == SUBREG)
3026 op = SUBREG_REG (op);
3027
3028 /* We don't consider registers whose class is NO_REGS
3029 to be a register operand. */
3030 /* XXX might have to check for lo regs only for thumb ??? */
3031 return (GET_CODE (op) == REG
3032 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3033 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3034 }
3035
3036 /* A hard register operand (even before reload). */
3037
3038 int
3039 arm_hard_register_operand (op, mode)
3040 rtx op;
3041 enum machine_mode mode;
3042 {
3043 if (GET_MODE (op) != mode && mode != VOIDmode)
3044 return 0;
3045
3046 return (GET_CODE (op) == REG
3047 && REGNO (op) < FIRST_PSEUDO_REGISTER);
3048 }
3049
3050 /* Only accept reg, subreg(reg), const_int. */
3051
3052 int
3053 reg_or_int_operand (op, mode)
3054 rtx op;
3055 enum machine_mode mode;
3056 {
3057 if (GET_CODE (op) == CONST_INT)
3058 return 1;
3059
3060 if (GET_MODE (op) != mode && mode != VOIDmode)
3061 return 0;
3062
3063 if (GET_CODE (op) == SUBREG)
3064 op = SUBREG_REG (op);
3065
3066 /* We don't consider registers whose class is NO_REGS
3067 to be a register operand. */
3068 return (GET_CODE (op) == REG
3069 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3070 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3071 }
3072
3073 /* Return 1 if OP is an item in memory, given that we are in reload. */
3074
3075 int
3076 arm_reload_memory_operand (op, mode)
3077 rtx op;
3078 enum machine_mode mode ATTRIBUTE_UNUSED;
3079 {
3080 int regno = true_regnum (op);
3081
3082 return (!CONSTANT_P (op)
3083 && (regno == -1
3084 || (GET_CODE (op) == REG
3085 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
3086 }
3087
3088 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
3089 memory access (architecture V4).
3090 MODE is QImode if called when computing constraints, or VOIDmode when
3091 emitting patterns. In this latter case we cannot use memory_operand()
3092 because it will fail on badly formed MEMs, which is precisely what we are
3093 trying to catch. */
3094
3095 int
3096 bad_signed_byte_operand (op, mode)
3097 rtx op;
3098 enum machine_mode mode ATTRIBUTE_UNUSED;
3099 {
3100 #if 0
3101 if ((mode == QImode && !memory_operand (op, mode)) || GET_CODE (op) != MEM)
3102 return 0;
3103 #endif
3104 if (GET_CODE (op) != MEM)
3105 return 0;
3106
3107 op = XEXP (op, 0);
3108
3109 /* A sum of anything more complex than reg + reg or reg + const is bad. */
3110 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3111 && (!s_register_operand (XEXP (op, 0), VOIDmode)
3112 || (!s_register_operand (XEXP (op, 1), VOIDmode)
3113 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
3114 return 1;
3115
3116 /* Big constants are also bad. */
3117 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
3118 && (INTVAL (XEXP (op, 1)) > 0xff
3119 || -INTVAL (XEXP (op, 1)) > 0xff))
3120 return 1;
3121
3122 /* Everything else is good, or will automatically be made so. */
3123 return 0;
3124 }
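
/* For example (illustrative): the addresses [r0, r1] and [r0, #200]
   are fine for a signed byte load, but [r0, r1, lsl #2] and
   [r0, #300] are bad, since the V4 sign extending loads accept only
   a register or an 8 bit immediate offset.  */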
3125
3126 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
3127
3128 int
3129 arm_rhs_operand (op, mode)
3130 rtx op;
3131 enum machine_mode mode;
3132 {
3133 return (s_register_operand (op, mode)
3134 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
3135 }
3136
3137 /* Return TRUE for valid operands for the
3138 rhs of an ARM instruction, or a load. */
3139
3140 int
3141 arm_rhsm_operand (op, mode)
3142 rtx op;
3143 enum machine_mode mode;
3144 {
3145 return (s_register_operand (op, mode)
3146 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
3147 || memory_operand (op, mode));
3148 }
3149
3150 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
3151 constant that is valid when negated. */
3152
3153 int
3154 arm_add_operand (op, mode)
3155 rtx op;
3156 enum machine_mode mode;
3157 {
3158 if (TARGET_THUMB)
3159 return thumb_cmp_operand (op, mode);
3160
3161 return (s_register_operand (op, mode)
3162 || (GET_CODE (op) == CONST_INT
3163 && (const_ok_for_arm (INTVAL (op))
3164 || const_ok_for_arm (-INTVAL (op)))));
3165 }
3166
3167 int
3168 arm_not_operand (op, mode)
3169 rtx op;
3170 enum machine_mode mode;
3171 {
3172 return (s_register_operand (op, mode)
3173 || (GET_CODE (op) == CONST_INT
3174 && (const_ok_for_arm (INTVAL (op))
3175 || const_ok_for_arm (~INTVAL (op)))));
3176 }
3177
3178 /* Return TRUE if the operand is a memory reference which contains an
3179 offsettable address. */
3180
3181 int
3182 offsettable_memory_operand (op, mode)
3183 rtx op;
3184 enum machine_mode mode;
3185 {
3186 if (mode == VOIDmode)
3187 mode = GET_MODE (op);
3188
3189 return (mode == GET_MODE (op)
3190 && GET_CODE (op) == MEM
3191 && offsettable_address_p (reload_completed | reload_in_progress,
3192 mode, XEXP (op, 0)));
3193 }
3194
3195 /* Return TRUE if the operand is a memory reference which is, or can be
3196 made word aligned by adjusting the offset. */
3197
3198 int
3199 alignable_memory_operand (op, mode)
3200 rtx op;
3201 enum machine_mode mode;
3202 {
3203 rtx reg;
3204
3205 if (mode == VOIDmode)
3206 mode = GET_MODE (op);
3207
3208 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
3209 return 0;
3210
3211 op = XEXP (op, 0);
3212
3213 return ((GET_CODE (reg = op) == REG
3214 || (GET_CODE (op) == SUBREG
3215 && GET_CODE (reg = SUBREG_REG (op)) == REG)
3216 || (GET_CODE (op) == PLUS
3217 && GET_CODE (XEXP (op, 1)) == CONST_INT
3218 && (GET_CODE (reg = XEXP (op, 0)) == REG
3219 || (GET_CODE (XEXP (op, 0)) == SUBREG
3220 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
3221 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
3222 }
3223
3224 /* Similar to s_register_operand, but does not allow hard integer
3225 registers. */
3226
3227 int
3228 f_register_operand (op, mode)
3229 rtx op;
3230 enum machine_mode mode;
3231 {
3232 if (GET_MODE (op) != mode && mode != VOIDmode)
3233 return 0;
3234
3235 if (GET_CODE (op) == SUBREG)
3236 op = SUBREG_REG (op);
3237
3238 /* We don't consider registers whose class is NO_REGS
3239 to be a register operand. */
3240 return (GET_CODE (op) == REG
3241 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3242 || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
3243 }
3244
3245 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
3246
3247 int
3248 fpu_rhs_operand (op, mode)
3249 rtx op;
3250 enum machine_mode mode;
3251 {
3252 if (s_register_operand (op, mode))
3253 return TRUE;
3254
3255 if (GET_MODE (op) != mode && mode != VOIDmode)
3256 return FALSE;
3257
3258 if (GET_CODE (op) == CONST_DOUBLE)
3259 return const_double_rtx_ok_for_fpu (op);
3260
3261 return FALSE;
3262 }
3263
3264 int
3265 fpu_add_operand (op, mode)
3266 rtx op;
3267 enum machine_mode mode;
3268 {
3269 if (s_register_operand (op, mode))
3270 return TRUE;
3271
3272 if (GET_MODE (op) != mode && mode != VOIDmode)
3273 return FALSE;
3274
3275 if (GET_CODE (op) == CONST_DOUBLE)
3276 return (const_double_rtx_ok_for_fpu (op)
3277 || neg_const_double_rtx_ok_for_fpu (op));
3278
3279 return FALSE;
3280 }
3281
3282 /* Return nonzero if OP is a constant power of two. */
3283
3284 int
3285 power_of_two_operand (op, mode)
3286 rtx op;
3287 enum machine_mode mode ATTRIBUTE_UNUSED;
3288 {
3289 if (GET_CODE (op) == CONST_INT)
3290 {
3291 HOST_WIDE_INT value = INTVAL (op);
3292
3293 return value != 0 && (value & (value - 1)) == 0;
3294 }
3295
3296 return FALSE;
3297 }
3298
3299 /* Return TRUE for a valid operand of a DImode operation.
3300 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
3301 Note that this disallows MEM(REG+REG), but allows
3302 MEM(PRE/POST_INC/DEC(REG)). */
3303
3304 int
3305 di_operand (op, mode)
3306 rtx op;
3307 enum machine_mode mode;
3308 {
3309 if (s_register_operand (op, mode))
3310 return TRUE;
3311
3312 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
3313 return FALSE;
3314
3315 if (GET_CODE (op) == SUBREG)
3316 op = SUBREG_REG (op);
3317
3318 switch (GET_CODE (op))
3319 {
3320 case CONST_DOUBLE:
3321 case CONST_INT:
3322 return TRUE;
3323
3324 case MEM:
3325 return memory_address_p (DImode, XEXP (op, 0));
3326
3327 default:
3328 return FALSE;
3329 }
3330 }
3331
3332 /* Like di_operand, but don't accept constants. */
3333
3334 int
3335 nonimmediate_di_operand (op, mode)
3336 rtx op;
3337 enum machine_mode mode;
3338 {
3339 if (s_register_operand (op, mode))
3340 return TRUE;
3341
3342 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
3343 return FALSE;
3344
3345 if (GET_CODE (op) == SUBREG)
3346 op = SUBREG_REG (op);
3347
3348 if (GET_CODE (op) == MEM)
3349 return memory_address_p (DImode, XEXP (op, 0));
3350
3351 return FALSE;
3352 }
3353
3354 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
3355 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
3356 Note that this disallows MEM(REG+REG), but allows
3357 MEM(PRE/POST_INC/DEC(REG)). */
3358
3359 int
3360 soft_df_operand (op, mode)
3361 rtx op;
3362 enum machine_mode mode;
3363 {
3364 if (s_register_operand (op, mode))
3365 return TRUE;
3366
3367 if (mode != VOIDmode && GET_MODE (op) != mode)
3368 return FALSE;
3369
3370 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
3371 return FALSE;
3372
3373 if (GET_CODE (op) == SUBREG)
3374 op = SUBREG_REG (op);
3375
3376 switch (GET_CODE (op))
3377 {
3378 case CONST_DOUBLE:
3379 return TRUE;
3380
3381 case MEM:
3382 return memory_address_p (DFmode, XEXP (op, 0));
3383
3384 default:
3385 return FALSE;
3386 }
3387 }
3388
3389 /* Like soft_df_operand, but don't accept constants. */
3390
3391 int
3392 nonimmediate_soft_df_operand (op, mode)
3393 rtx op;
3394 enum machine_mode mode;
3395 {
3396 if (s_register_operand (op, mode))
3397 return TRUE;
3398
3399 if (mode != VOIDmode && GET_MODE (op) != mode)
3400 return FALSE;
3401
3402 if (GET_CODE (op) == SUBREG)
3403 op = SUBREG_REG (op);
3404
3405 if (GET_CODE (op) == MEM)
3406 return memory_address_p (DFmode, XEXP (op, 0));
3407 return FALSE;
3408 }
3409
3410 /* Return TRUE for valid index operands. */
3411
3412 int
3413 index_operand (op, mode)
3414 rtx op;
3415 enum machine_mode mode;
3416 {
3417 return (s_register_operand (op, mode)
3418 || (immediate_operand (op, mode)
3419 && (GET_CODE (op) != CONST_INT
3420 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
3421 }
3422
3423 /* Return TRUE for valid shifts by a constant. This also accepts any
3424 power of two on the (somewhat overly relaxed) assumption that the
3425 shift operator in this case was a mult. */
3426
3427 int
3428 const_shift_operand (op, mode)
3429 rtx op;
3430 enum machine_mode mode;
3431 {
3432 return (power_of_two_operand (op, mode)
3433 || (immediate_operand (op, mode)
3434 && (GET_CODE (op) != CONST_INT
3435 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
3436 }
3437
3438 /* Return TRUE for arithmetic operators which can be combined with a multiply
3439 (shift). */
3440
3441 int
3442 shiftable_operator (x, mode)
3443 rtx x;
3444 enum machine_mode mode;
3445 {
3446 enum rtx_code code;
3447
3448 if (GET_MODE (x) != mode)
3449 return FALSE;
3450
3451 code = GET_CODE (x);
3452
3453 return (code == PLUS || code == MINUS
3454 || code == IOR || code == XOR || code == AND);
3455 }
3456
3457 /* Return TRUE for binary logical operators. */
3458
3459 int
3460 logical_binary_operator (x, mode)
3461 rtx x;
3462 enum machine_mode mode;
3463 {
3464 enum rtx_code code;
3465
3466 if (GET_MODE (x) != mode)
3467 return FALSE;
3468
3469 code = GET_CODE (x);
3470
3471 return (code == IOR || code == XOR || code == AND);
3472 }
3473
3474 /* Return TRUE for shift operators. */
3475
3476 int
3477 shift_operator (x, mode)
3478 rtx x;
3479 enum machine_mode mode;
3480 {
3481 enum rtx_code code;
3482
3483 if (GET_MODE (x) != mode)
3484 return FALSE;
3485
3486 code = GET_CODE (x);
3487
3488 if (code == MULT)
3489 return power_of_two_operand (XEXP (x, 1), mode);
3490
3491 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
3492 || code == ROTATERT);
3493 }
3494
3495 /* Return TRUE if x is EQ or NE. */
3496
3497 int
3498 equality_operator (x, mode)
3499 rtx x;
3500 enum machine_mode mode ATTRIBUTE_UNUSED;
3501 {
3502 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
3503 }
3504
3505 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
3506
3507 int
3508 arm_comparison_operator (x, mode)
3509 rtx x;
3510 enum machine_mode mode;
3511 {
3512 return (comparison_operator (x, mode)
3513 && GET_CODE (x) != LTGT
3514 && GET_CODE (x) != UNEQ);
3515 }
3516
3517 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
3518
3519 int
3520 minmax_operator (x, mode)
3521 rtx x;
3522 enum machine_mode mode;
3523 {
3524 enum rtx_code code = GET_CODE (x);
3525
3526 if (GET_MODE (x) != mode)
3527 return FALSE;
3528
3529 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
3530 }
3531
3532 /* Return TRUE if this is the condition code register; if we aren't given
3533 a mode, accept any class CCmode register. */
3534
3535 int
3536 cc_register (x, mode)
3537 rtx x;
3538 enum machine_mode mode;
3539 {
3540 if (mode == VOIDmode)
3541 {
3542 mode = GET_MODE (x);
3543
3544 if (GET_MODE_CLASS (mode) != MODE_CC)
3545 return FALSE;
3546 }
3547
3548 if ( GET_MODE (x) == mode
3549 && GET_CODE (x) == REG
3550 && REGNO (x) == CC_REGNUM)
3551 return TRUE;
3552
3553 return FALSE;
3554 }
3555
3556 /* Return TRUE if this is the condition code register; if we aren't given
3557 a mode, accept any class CCmode register which indicates a dominance
3558 expression. */
3559
3560 int
3561 dominant_cc_register (x, mode)
3562 rtx x;
3563 enum machine_mode mode;
3564 {
3565 if (mode == VOIDmode)
3566 {
3567 mode = GET_MODE (x);
3568
3569 if (GET_MODE_CLASS (mode) != MODE_CC)
3570 return FALSE;
3571 }
3572
3573 if ( mode != CC_DNEmode && mode != CC_DEQmode
3574 && mode != CC_DLEmode && mode != CC_DLTmode
3575 && mode != CC_DGEmode && mode != CC_DGTmode
3576 && mode != CC_DLEUmode && mode != CC_DLTUmode
3577 && mode != CC_DGEUmode && mode != CC_DGTUmode)
3578 return FALSE;
3579
3580 return cc_register (x, mode);
3581 }
3582
3583 /* Return TRUE if X references a SYMBOL_REF. */
3584
3585 int
3586 symbol_mentioned_p (x)
3587 rtx x;
3588 {
3589 const char * fmt;
3590 int i;
3591
3592 if (GET_CODE (x) == SYMBOL_REF)
3593 return 1;
3594
3595 fmt = GET_RTX_FORMAT (GET_CODE (x));
3596
3597 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3598 {
3599 if (fmt[i] == 'E')
3600 {
3601 int j;
3602
3603 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3604 if (symbol_mentioned_p (XVECEXP (x, i, j)))
3605 return 1;
3606 }
3607 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
3608 return 1;
3609 }
3610
3611 return 0;
3612 }
3613
3614 /* Return TRUE if X references a LABEL_REF. */
3615
3616 int
3617 label_mentioned_p (x)
3618 rtx x;
3619 {
3620 const char * fmt;
3621 int i;
3622
3623 if (GET_CODE (x) == LABEL_REF)
3624 return 1;
3625
3626 fmt = GET_RTX_FORMAT (GET_CODE (x));
3627 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3628 {
3629 if (fmt[i] == 'E')
3630 {
3631 int j;
3632
3633 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3634 if (label_mentioned_p (XVECEXP (x, i, j)))
3635 return 1;
3636 }
3637 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
3638 return 1;
3639 }
3640
3641 return 0;
3642 }
3643
3644 enum rtx_code
3645 minmax_code (x)
3646 rtx x;
3647 {
3648 enum rtx_code code = GET_CODE (x);
3649
3650 if (code == SMAX)
3651 return GE;
3652 else if (code == SMIN)
3653 return LE;
3654 else if (code == UMIN)
3655 return LEU;
3656 else if (code == UMAX)
3657 return GEU;
3658
3659 abort ();
3660 }
3661
3662 /* Return 1 if memory locations are adjacent. */
3663
3664 int
3665 adjacent_mem_locations (a, b)
3666 rtx a, b;
3667 {
3668 if ((GET_CODE (XEXP (a, 0)) == REG
3669 || (GET_CODE (XEXP (a, 0)) == PLUS
3670 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
3671 && (GET_CODE (XEXP (b, 0)) == REG
3672 || (GET_CODE (XEXP (b, 0)) == PLUS
3673 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
3674 {
3675 int val0 = 0, val1 = 0;
3676 int reg0, reg1;
3677
3678 if (GET_CODE (XEXP (a, 0)) == PLUS)
3679 {
3680 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
3681 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
3682 }
3683 else
3684 reg0 = REGNO (XEXP (a, 0));
3685
3686 if (GET_CODE (XEXP (b, 0)) == PLUS)
3687 {
3688 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
3689 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
3690 }
3691 else
3692 reg1 = REGNO (XEXP (b, 0));
3693
3694 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
3695 }
3696 return 0;
3697 }
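
/* For example (illustrative): (mem (reg r3)) and
   (mem (plus (reg r3) (const_int 4))) are adjacent, in either order,
   while (mem (reg r3)) and (mem (plus (reg r3) (const_int 8))) are
   not, the offsets differing by more than one word.  */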
3698
3699 /* Return 1 if OP is a load multiple operation. It is known to be
3700 parallel and the first section will be tested. */
3701
3702 int
3703 load_multiple_operation (op, mode)
3704 rtx op;
3705 enum machine_mode mode ATTRIBUTE_UNUSED;
3706 {
3707 HOST_WIDE_INT count = XVECLEN (op, 0);
3708 int dest_regno;
3709 rtx src_addr;
3710 HOST_WIDE_INT i = 1, base = 0;
3711 rtx elt;
3712
3713 if (count <= 1
3714 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
3715 return 0;
3716
3717 /* Check to see if this might be a write-back. */
3718 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
3719 {
3720 i++;
3721 base = 1;
3722
3723 /* Now check it more carefully. */
3724 if (GET_CODE (SET_DEST (elt)) != REG
3725 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
3726 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
3727 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
3728 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
3729 return 0;
3730 }
3731
3732 /* Perform a quick check so we don't blow up below. */
3733 if (count <= i
3734 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
3735 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
3736 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
3737 return 0;
3738
3739 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
3740 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
3741
3742 for (; i < count; i++)
3743 {
3744 elt = XVECEXP (op, 0, i);
3745
3746 if (GET_CODE (elt) != SET
3747 || GET_CODE (SET_DEST (elt)) != REG
3748 || GET_MODE (SET_DEST (elt)) != SImode
3749 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
3750 || GET_CODE (SET_SRC (elt)) != MEM
3751 || GET_MODE (SET_SRC (elt)) != SImode
3752 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
3753 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
3754 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
3755 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
3756 return 0;
3757 }
3758
3759 return 1;
3760 }
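
/* For example (an illustrative sketch), an ldmia of r4 and r5 from
   the address in r0 is represented as:

     (parallel [(set (reg:SI 4) (mem:SI (reg:SI 0)))
                (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0)
                                                 (const_int 4))))])

   for which this function returns 1.  */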
3761
3762 /* Return 1 if OP is a store multiple operation. It is known to be
3763 parallel and the first section will be tested. */
3764
3765 int
3766 store_multiple_operation (op, mode)
3767 rtx op;
3768 enum machine_mode mode ATTRIBUTE_UNUSED;
3769 {
3770 HOST_WIDE_INT count = XVECLEN (op, 0);
3771 int src_regno;
3772 rtx dest_addr;
3773 HOST_WIDE_INT i = 1, base = 0;
3774 rtx elt;
3775
3776 if (count <= 1
3777 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
3778 return 0;
3779
3780 /* Check to see if this might be a write-back. */
3781 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
3782 {
3783 i++;
3784 base = 1;
3785
3786 /* Now check it more carefully. */
3787 if (GET_CODE (SET_DEST (elt)) != REG
3788 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
3789 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
3790 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
3791 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
3792 return 0;
3793 }
3794
3795 /* Perform a quick check so we don't blow up below. */
3796 if (count <= i
3797 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
3798 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
3799 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
3800 return 0;
3801
3802 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
3803 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
3804
3805 for (; i < count; i++)
3806 {
3807 elt = XVECEXP (op, 0, i);
3808
3809 if (GET_CODE (elt) != SET
3810 || GET_CODE (SET_SRC (elt)) != REG
3811 || GET_MODE (SET_SRC (elt)) != SImode
3812 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
3813 || GET_CODE (SET_DEST (elt)) != MEM
3814 || GET_MODE (SET_DEST (elt)) != SImode
3815 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
3816 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
3817 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
3818 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
3819 return 0;
3820 }
3821
3822 return 1;
3823 }
3824
3825 int
3826 load_multiple_sequence (operands, nops, regs, base, load_offset)
3827 rtx * operands;
3828 int nops;
3829 int * regs;
3830 int * base;
3831 HOST_WIDE_INT * load_offset;
3832 {
3833 int unsorted_regs[4];
3834 HOST_WIDE_INT unsorted_offsets[4];
3835 int order[4];
3836 int base_reg = -1;
3837 int i;
3838
3839 /* Can only handle 2, 3, or 4 insns at present,
3840 though could be easily extended if required. */
3841 if (nops < 2 || nops > 4)
3842 abort ();
3843
3844 /* Loop over the operands and check that the memory references are
3845 suitable (ie immediate offsets from the same base register). At
3846 the same time, extract the target register, and the memory
3847 offsets. */
3848 for (i = 0; i < nops; i++)
3849 {
3850 rtx reg;
3851 rtx offset;
3852
3853 /* Convert a subreg of a mem into the mem itself. */
3854 if (GET_CODE (operands[nops + i]) == SUBREG)
3855 operands[nops + i] = alter_subreg (operands + (nops + i));
3856
3857 if (GET_CODE (operands[nops + i]) != MEM)
3858 abort ();
3859
3860 /* Don't reorder volatile memory references; it doesn't seem worth
3861 looking for the case where the order is ok anyway. */
3862 if (MEM_VOLATILE_P (operands[nops + i]))
3863 return 0;
3864
3865 offset = const0_rtx;
3866
3867 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
3868 || (GET_CODE (reg) == SUBREG
3869 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
3870 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
3871 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
3872 == REG)
3873 || (GET_CODE (reg) == SUBREG
3874 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
3875 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
3876 == CONST_INT)))
3877 {
3878 if (i == 0)
3879 {
3880 base_reg = REGNO (reg);
3881 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
3882 ? REGNO (operands[i])
3883 : REGNO (SUBREG_REG (operands[i])));
3884 order[0] = 0;
3885 }
3886 else
3887 {
3888 if (base_reg != (int) REGNO (reg))
3889 /* Not addressed from the same base register. */
3890 return 0;
3891
3892 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
3893 ? REGNO (operands[i])
3894 : REGNO (SUBREG_REG (operands[i])));
3895 if (unsorted_regs[i] < unsorted_regs[order[0]])
3896 order[0] = i;
3897 }
3898
3899 /* If it isn't an integer register, or if it overwrites the
3900 base register but isn't the last insn in the list, then
3901 we can't do this. */
3902 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
3903 || (i != nops - 1 && unsorted_regs[i] == base_reg))
3904 return 0;
3905
3906 unsorted_offsets[i] = INTVAL (offset);
3907 }
3908 else
3909 /* Not a suitable memory address. */
3910 return 0;
3911 }
3912
3913 /* All the useful information has now been extracted from the
3914 operands into unsorted_regs and unsorted_offsets; additionally,
3915 order[0] has been set to the lowest numbered register in the
3916 list. Sort the registers into order, and check that the memory
3917 offsets are ascending and adjacent. */
3918
3919 for (i = 1; i < nops; i++)
3920 {
3921 int j;
3922
3923 order[i] = order[i - 1];
3924 for (j = 0; j < nops; j++)
3925 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
3926 && (order[i] == order[i - 1]
3927 || unsorted_regs[j] < unsorted_regs[order[i]]))
3928 order[i] = j;
3929
3930 /* Have we found a suitable register? If not, one must be used more
3931 than once. */
3932 if (order[i] == order[i - 1])
3933 return 0;
3934
3935 /* Is the memory address adjacent and ascending? */
3936 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
3937 return 0;
3938 }
3939
3940 if (base)
3941 {
3942 *base = base_reg;
3943
3944 for (i = 0; i < nops; i++)
3945 regs[i] = unsorted_regs[order[i]];
3946
3947 *load_offset = unsorted_offsets[order[0]];
3948 }
3949
3950 if (unsorted_offsets[order[0]] == 0)
3951 return 1; /* ldmia */
3952
3953 if (unsorted_offsets[order[0]] == 4)
3954 return 2; /* ldmib */
3955
3956 if (unsorted_offsets[order[nops - 1]] == 0)
3957 return 3; /* ldmda */
3958
3959 if (unsorted_offsets[order[nops - 1]] == -4)
3960 return 4; /* ldmdb */
3961
3962 /* For the ARM8, ARM9 and StrongARM, 2 ldr instructions are faster than
3963 an ldm if the offset isn't small enough. The reason 2 ldrs are faster
3964 is that these ARMs are able to do more than one cache access
3965 in a single cycle. The ARM9 and StrongARM have Harvard caches,
3966 whilst the ARM8 has a double bandwidth cache. This means that
3967 these cores can do both an instruction fetch and a data fetch in
3968 a single cycle, so the trick of calculating the address into a
3969 scratch register (one of the result regs) and then doing a load
3970 multiple actually becomes slower (and no smaller in code size).
3971 That is the transformation
3972
3973 ldr rd1, [rbase + offset]
3974 ldr rd2, [rbase + offset + 4]
3975
3976 to
3977
3978 add rd1, rbase, offset
3979 ldmia rd1, {rd1, rd2}
3980
3981 produces worse code -- '3 cycles + any stalls on rd2' instead of
3982 '2 cycles + any stalls on rd2'. On ARMs with only one cache
3983 access per cycle, the first sequence could never complete in less
3984 than 6 cycles, whereas the ldm sequence would only take 5 and
3985 would make better use of sequential accesses if not hitting the
3986 cache.
3987
3988 We cheat here and test 'arm_ld_sched' which we currently know to
3989 only be true for the ARM8, ARM9 and StrongARM. If this ever
3990 changes, then the test below needs to be reworked. */
3991 if (nops == 2 && arm_ld_sched)
3992 return 0;
3993
3994 /* Can't do it without setting up the offset, only do this if it takes
3995 no more than one insn. */
3996 return (const_ok_for_arm (unsorted_offsets[order[0]])
3997 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
3998 }
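
/* A worked example (illustrative, not from the original sources): given
   the operand pair

     ldr r0, [r4, #4]
     ldr r1, [r4, #8]

   the registers are ascending, the offsets are adjacent, and the lowest
   offset is 4, so load_multiple_sequence returns 2 and the pair can be
   rewritten as "ldmib r4, {r0, r1}".  Had the lowest offset been 0, the
   return value would have been 1 (ldmia).  */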
3999
4000 const char *
4001 emit_ldm_seq (operands, nops)
4002 rtx * operands;
4003 int nops;
4004 {
4005 int regs[4];
4006 int base_reg;
4007 HOST_WIDE_INT offset;
4008 char buf[100];
4009 int i;
4010
4011 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4012 {
4013 case 1:
4014 strcpy (buf, "ldm%?ia\t");
4015 break;
4016
4017 case 2:
4018 strcpy (buf, "ldm%?ib\t");
4019 break;
4020
4021 case 3:
4022 strcpy (buf, "ldm%?da\t");
4023 break;
4024
4025 case 4:
4026 strcpy (buf, "ldm%?db\t");
4027 break;
4028
4029 case 5:
4030 if (offset >= 0)
4031 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4032 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4033 (long) offset);
4034 else
4035 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4036 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4037 (long) -offset);
4038 output_asm_insn (buf, operands);
4039 base_reg = regs[0];
4040 strcpy (buf, "ldm%?ia\t");
4041 break;
4042
4043 default:
4044 abort ();
4045 }
4046
4047 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4048 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4049
4050 for (i = 1; i < nops; i++)
4051 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4052 reg_names[regs[i]]);
4053
4054 strcat (buf, "}\t%@ phole ldm");
4055
4056 output_asm_insn (buf, operands);
4057 return "";
4058 }
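
/* An illustrative example of the case-5 path above (assumed typical
   output, not from the original sources): with regs = {r0, r1}, base_reg
   r4 and offset 256, emit_ldm_seq emits roughly

     add   r0, r4, #256
     ldmia r0, {r0, r1}   @ phole ldm

   reusing the first destination register to hold the computed base, so
   no extra scratch register is needed.  */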
4059
4060 int
4061 store_multiple_sequence (operands, nops, regs, base, load_offset)
4062 rtx * operands;
4063 int nops;
4064 int * regs;
4065 int * base;
4066 HOST_WIDE_INT * load_offset;
4067 {
4068 int unsorted_regs[4];
4069 HOST_WIDE_INT unsorted_offsets[4];
4070 int order[4];
4071 int base_reg = -1;
4072 int i;
4073
4074 /* Can only handle 2, 3, or 4 insns at present, though could be easily
4075 extended if required. */
4076 if (nops < 2 || nops > 4)
4077 abort ();
4078
4079 /* Loop over the operands and check that the memory references are
4080 suitable (i.e. immediate offsets from the same base register). At
4081 the same time, extract the target register and the memory
4082 offsets. */
4083 for (i = 0; i < nops; i++)
4084 {
4085 rtx reg;
4086 rtx offset;
4087
4088 /* Convert a subreg of a mem into the mem itself. */
4089 if (GET_CODE (operands[nops + i]) == SUBREG)
4090 operands[nops + i] = alter_subreg (operands + (nops + i));
4091
4092 if (GET_CODE (operands[nops + i]) != MEM)
4093 abort ();
4094
4095 /* Don't reorder volatile memory references; it doesn't seem worth
4096 looking for the case where the order is ok anyway. */
4097 if (MEM_VOLATILE_P (operands[nops + i]))
4098 return 0;
4099
4100 offset = const0_rtx;
4101
4102 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4103 || (GET_CODE (reg) == SUBREG
4104 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4105 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4106 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4107 == REG)
4108 || (GET_CODE (reg) == SUBREG
4109 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4110 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4111 == CONST_INT)))
4112 {
4113 if (i == 0)
4114 {
4115 base_reg = REGNO (reg);
4116 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4117 ? REGNO (operands[i])
4118 : REGNO (SUBREG_REG (operands[i])));
4119 order[0] = 0;
4120 }
4121 else
4122 {
4123 if (base_reg != (int) REGNO (reg))
4124 /* Not addressed from the same base register. */
4125 return 0;
4126
4127 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
4128 ? REGNO (operands[i])
4129 : REGNO (SUBREG_REG (operands[i])));
4130 if (unsorted_regs[i] < unsorted_regs[order[0]])
4131 order[0] = i;
4132 }
4133
4134 /* If it isn't an integer register, then we can't do this. */
4135 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
4136 return 0;
4137
4138 unsorted_offsets[i] = INTVAL (offset);
4139 }
4140 else
4141 /* Not a suitable memory address. */
4142 return 0;
4143 }
4144
4145 /* All the useful information has now been extracted from the
4146 operands into unsorted_regs and unsorted_offsets; additionally,
4147 order[0] has been set to the lowest numbered register in the
4148 list. Sort the registers into order, and check that the memory
4149 offsets are ascending and adjacent. */
4150
4151 for (i = 1; i < nops; i++)
4152 {
4153 int j;
4154
4155 order[i] = order[i - 1];
4156 for (j = 0; j < nops; j++)
4157 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4158 && (order[i] == order[i - 1]
4159 || unsorted_regs[j] < unsorted_regs[order[i]]))
4160 order[i] = j;
4161
4162 /* Have we found a suitable register? If not, one must be used more
4163 than once. */
4164 if (order[i] == order[i - 1])
4165 return 0;
4166
4167 /* Is the memory address adjacent and ascending? */
4168 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4169 return 0;
4170 }
4171
4172 if (base)
4173 {
4174 *base = base_reg;
4175
4176 for (i = 0; i < nops; i++)
4177 regs[i] = unsorted_regs[order[i]];
4178
4179 *load_offset = unsorted_offsets[order[0]];
4180 }
4181
4182 if (unsorted_offsets[order[0]] == 0)
4183 return 1; /* stmia */
4184
4185 if (unsorted_offsets[order[0]] == 4)
4186 return 2; /* stmib */
4187
4188 if (unsorted_offsets[order[nops - 1]] == 0)
4189 return 3; /* stmda */
4190
4191 if (unsorted_offsets[order[nops - 1]] == -4)
4192 return 4; /* stmdb */
4193
4194 return 0;
4195 }
4196
4197 const char *
4198 emit_stm_seq (operands, nops)
4199 rtx * operands;
4200 int nops;
4201 {
4202 int regs[4];
4203 int base_reg;
4204 HOST_WIDE_INT offset;
4205 char buf[100];
4206 int i;
4207
4208 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4209 {
4210 case 1:
4211 strcpy (buf, "stm%?ia\t");
4212 break;
4213
4214 case 2:
4215 strcpy (buf, "stm%?ib\t");
4216 break;
4217
4218 case 3:
4219 strcpy (buf, "stm%?da\t");
4220 break;
4221
4222 case 4:
4223 strcpy (buf, "stm%?db\t");
4224 break;
4225
4226 default:
4227 abort ();
4228 }
4229
4230 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4231 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4232
4233 for (i = 1; i < nops; i++)
4234 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4235 reg_names[regs[i]]);
4236
4237 strcat (buf, "}\t%@ phole stm");
4238
4239 output_asm_insn (buf, operands);
4240 return "";
4241 }
4242
4243 int
4244 multi_register_push (op, mode)
4245 rtx op;
4246 enum machine_mode mode ATTRIBUTE_UNUSED;
4247 {
4248 if (GET_CODE (op) != PARALLEL
4249 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
4250 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
4251 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
4252 return 0;
4253
4254 return 1;
4255 }
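
/* For illustration (an assumed sketch, not from the original sources):
   the push_multi pattern recognized by this predicate wraps its first
   store in an UNSPEC, so a push of r4 and r5 looks roughly like

     (parallel [(set (mem ...) (unspec [(reg r4)] UNSPEC_PUSH_MULT))
                (set (mem ...) (reg r5))])

   which is why only the first element needs to be inspected here.  */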
4256 \f
4257 /* Routines for use in generating RTL. */
4258
4259 rtx
4260 arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
4261 in_struct_p, scalar_p)
4262 int base_regno;
4263 int count;
4264 rtx from;
4265 int up;
4266 int write_back;
4267 int unchanging_p;
4268 int in_struct_p;
4269 int scalar_p;
4270 {
4271 int i = 0, j;
4272 rtx result;
4273 int sign = up ? 1 : -1;
4274 rtx mem;
4275
4276 /* XScale has load-store double instructions, but they have stricter
4277 alignment requirements than load-store multiple, so we cannot
4278 use them.
4279
4280 For XScale ldm requires 2 + NREGS cycles to complete and blocks
4281 the pipeline until completion.
4282
4283 NREGS CYCLES
4284 1 3
4285 2 4
4286 3 5
4287 4 6
4288
4289 An ldr instruction takes 1-3 cycles, but does not block the
4290 pipeline.
4291
4292 NREGS CYCLES
4293 1 1-3
4294 2 2-6
4295 3 3-9
4296 4 4-12
4297
4298 Best case ldr will always win. However, the more ldr instructions
4299 we issue, the less likely we are to be able to schedule them well.
4300 Using ldr instructions also increases code size.
4301
4302 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
4303 for counts of 3 or 4 regs. */
4304 if (arm_is_xscale && count <= 2 && ! optimize_size)
4305 {
4306 rtx seq;
4307
4308 start_sequence ();
4309
4310 for (i = 0; i < count; i++)
4311 {
4312 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
4313 RTX_UNCHANGING_P (mem) = unchanging_p;
4314 MEM_IN_STRUCT_P (mem) = in_struct_p;
4315 MEM_SCALAR_P (mem) = scalar_p;
4316 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
4317 }
4318
4319 if (write_back)
4320 emit_move_insn (from, plus_constant (from, count * 4 * sign));
4321
4322 seq = gen_sequence ();
4323 end_sequence ();
4324
4325 return seq;
4326 }
4327
4328 result = gen_rtx_PARALLEL (VOIDmode,
4329 rtvec_alloc (count + (write_back ? 1 : 0)));
4330 if (write_back)
4331 {
4332 XVECEXP (result, 0, 0)
4333 = gen_rtx_SET (GET_MODE (from), from,
4334 plus_constant (from, count * 4 * sign));
4335 i = 1;
4336 count++;
4337 }
4338
4339 for (j = 0; i < count; i++, j++)
4340 {
4341 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
4342 RTX_UNCHANGING_P (mem) = unchanging_p;
4343 MEM_IN_STRUCT_P (mem) = in_struct_p;
4344 MEM_SCALAR_P (mem) = scalar_p;
4345 XVECEXP (result, 0, i)
4346 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
4347 }
4348
4349 return result;
4350 }
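
/* Illustrative example (not from the original sources): a call with
   base_regno 0, count 3, FROM in r4 and write_back set builds a PARALLEL
   equivalent to "ldmia r4!, {r0, r1, r2}":

     (parallel [(set (reg r4) (plus (reg r4) (const_int 12)))
                (set (reg r0) (mem (reg r4)))
                (set (reg r1) (mem (plus (reg r4) (const_int 4))))
                (set (reg r2) (mem (plus (reg r4) (const_int 8))))])

   On XScale the same call with count <= 2 (and not optimizing for size)
   would instead emit discrete ldr insns, as discussed above.  */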
4351
4352 rtx
4353 arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
4354 in_struct_p, scalar_p)
4355 int base_regno;
4356 int count;
4357 rtx to;
4358 int up;
4359 int write_back;
4360 int unchanging_p;
4361 int in_struct_p;
4362 int scalar_p;
4363 {
4364 int i = 0, j;
4365 rtx result;
4366 int sign = up ? 1 : -1;
4367 rtx mem;
4368
4369 /* See arm_gen_load_multiple for discussion of
4370 the pros/cons of ldm/stm usage for XScale. */
4371 if (arm_is_xscale && count <= 2 && ! optimize_size)
4372 {
4373 rtx seq;
4374
4375 start_sequence ();
4376
4377 for (i = 0; i < count; i++)
4378 {
4379 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
4380 RTX_UNCHANGING_P (mem) = unchanging_p;
4381 MEM_IN_STRUCT_P (mem) = in_struct_p;
4382 MEM_SCALAR_P (mem) = scalar_p;
4383 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
4384 }
4385
4386 if (write_back)
4387 emit_move_insn (to, plus_constant (to, count * 4 * sign));
4388
4389 seq = gen_sequence ();
4390 end_sequence ();
4391
4392 return seq;
4393 }
4394
4395 result = gen_rtx_PARALLEL (VOIDmode,
4396 rtvec_alloc (count + (write_back ? 1 : 0)));
4397 if (write_back)
4398 {
4399 XVECEXP (result, 0, 0)
4400 = gen_rtx_SET (GET_MODE (to), to,
4401 plus_constant (to, count * 4 * sign));
4402 i = 1;
4403 count++;
4404 }
4405
4406 for (j = 0; i < count; i++, j++)
4407 {
4408 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
4409 RTX_UNCHANGING_P (mem) = unchanging_p;
4410 MEM_IN_STRUCT_P (mem) = in_struct_p;
4411 MEM_SCALAR_P (mem) = scalar_p;
4412
4413 XVECEXP (result, 0, i)
4414 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
4415 }
4416
4417 return result;
4418 }
4419
4420 int
4421 arm_gen_movstrqi (operands)
4422 rtx * operands;
4423 {
4424 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
4425 int i;
4426 rtx src, dst;
4427 rtx st_src, st_dst, fin_src, fin_dst;
4428 rtx part_bytes_reg = NULL;
4429 rtx mem;
4430 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
4431 int dst_scalar_p, src_scalar_p;
4432
4433 if (GET_CODE (operands[2]) != CONST_INT
4434 || GET_CODE (operands[3]) != CONST_INT
4435 || INTVAL (operands[2]) > 64
4436 || INTVAL (operands[3]) & 3)
4437 return 0;
4438
4439 st_dst = XEXP (operands[0], 0);
4440 st_src = XEXP (operands[1], 0);
4441
4442 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
4443 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
4444 dst_scalar_p = MEM_SCALAR_P (operands[0]);
4445 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
4446 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
4447 src_scalar_p = MEM_SCALAR_P (operands[1]);
4448
4449 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
4450 fin_src = src = copy_to_mode_reg (SImode, st_src);
4451
4452 in_words_to_go = NUM_INTS (INTVAL (operands[2]));
4453 out_words_to_go = INTVAL (operands[2]) / 4;
4454 last_bytes = INTVAL (operands[2]) & 3;
4455
4456 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
4457 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
4458
4459 for (i = 0; in_words_to_go >= 2; i+=4)
4460 {
4461 if (in_words_to_go > 4)
4462 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
4463 src_unchanging_p,
4464 src_in_struct_p,
4465 src_scalar_p));
4466 else
4467 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
4468 FALSE, src_unchanging_p,
4469 src_in_struct_p, src_scalar_p));
4470
4471 if (out_words_to_go)
4472 {
4473 if (out_words_to_go > 4)
4474 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
4475 dst_unchanging_p,
4476 dst_in_struct_p,
4477 dst_scalar_p));
4478 else if (out_words_to_go != 1)
4479 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
4480 dst, TRUE,
4481 (last_bytes == 0
4482 ? FALSE : TRUE),
4483 dst_unchanging_p,
4484 dst_in_struct_p,
4485 dst_scalar_p));
4486 else
4487 {
4488 mem = gen_rtx_MEM (SImode, dst);
4489 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4490 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4491 MEM_SCALAR_P (mem) = dst_scalar_p;
4492 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
4493 if (last_bytes != 0)
4494 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
4495 }
4496 }
4497
4498 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
4499 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
4500 }
4501
4502 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
4503 if (out_words_to_go)
4504 {
4505 rtx sreg;
4506
4507 mem = gen_rtx_MEM (SImode, src);
4508 RTX_UNCHANGING_P (mem) = src_unchanging_p;
4509 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
4510 MEM_SCALAR_P (mem) = src_scalar_p;
4511 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
4512 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
4513
4514 mem = gen_rtx_MEM (SImode, dst);
4515 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4516 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4517 MEM_SCALAR_P (mem) = dst_scalar_p;
4518 emit_move_insn (mem, sreg);
4519 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
4520 in_words_to_go--;
4521
4522 if (in_words_to_go) /* Sanity check */
4523 abort ();
4524 }
4525
4526 if (in_words_to_go)
4527 {
4528 if (in_words_to_go < 0)
4529 abort ();
4530
4531 mem = gen_rtx_MEM (SImode, src);
4532 RTX_UNCHANGING_P (mem) = src_unchanging_p;
4533 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
4534 MEM_SCALAR_P (mem) = src_scalar_p;
4535 part_bytes_reg = copy_to_mode_reg (SImode, mem);
4536 }
4537
4538 if (last_bytes && part_bytes_reg == NULL)
4539 abort ();
4540
4541 if (BYTES_BIG_ENDIAN && last_bytes)
4542 {
4543 rtx tmp = gen_reg_rtx (SImode);
4544
4545 /* The bytes we want are in the top end of the word. */
4546 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
4547 GEN_INT (8 * (4 - last_bytes))));
4548 part_bytes_reg = tmp;
4549
4550 while (last_bytes)
4551 {
4552 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
4553 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4554 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4555 MEM_SCALAR_P (mem) = dst_scalar_p;
4556 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
4557
4558 if (--last_bytes)
4559 {
4560 tmp = gen_reg_rtx (SImode);
4561 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
4562 part_bytes_reg = tmp;
4563 }
4564 }
4565
4566 }
4567 else
4568 {
4569 if (last_bytes > 1)
4570 {
4571 mem = gen_rtx_MEM (HImode, dst);
4572 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4573 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4574 MEM_SCALAR_P (mem) = dst_scalar_p;
4575 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
4576 last_bytes -= 2;
4577 if (last_bytes)
4578 {
4579 rtx tmp = gen_reg_rtx (SImode);
4580
4581 emit_insn (gen_addsi3 (dst, dst, GEN_INT (2)));
4582 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
4583 part_bytes_reg = tmp;
4584 }
4585 }
4586
4587 if (last_bytes)
4588 {
4589 mem = gen_rtx_MEM (QImode, dst);
4590 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
4591 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
4592 MEM_SCALAR_P (mem) = dst_scalar_p;
4593 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
4594 }
4595 }
4596
4597 return 1;
4598 }
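
/* A worked example (illustrative, not from the original sources): for a
   7-byte copy, INTVAL (operands[2]) == 7, so in_words_to_go = 2,
   out_words_to_go = 1 and last_bytes = 3.  Two words are loaded, the
   first is stored whole, and the second (part_bytes_reg) supplies the
   3-byte tail, which on a little-endian target is written out as one
   strh followed by one strb.  */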
4599
4600 /* Generate a memory reference for a half word, such that it will be loaded
4601 into the top 16 bits of the word. We can assume that the address is
4602 known to be alignable and of the form reg, or plus (reg, const). */
4603
4604 rtx
4605 arm_gen_rotated_half_load (memref)
4606 rtx memref;
4607 {
4608 HOST_WIDE_INT offset = 0;
4609 rtx base = XEXP (memref, 0);
4610
4611 if (GET_CODE (base) == PLUS)
4612 {
4613 offset = INTVAL (XEXP (base, 1));
4614 base = XEXP (base, 0);
4615 }
4616
4617 /* If we aren't allowed to generate unaligned addresses, then fail. */
4618 if (TARGET_MMU_TRAPS
4619 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
4620 return NULL;
4621
4622 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
4623
4624 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
4625 return base;
4626
4627 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
4628 }
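
/* Illustrative example (not from the original sources): on a
   little-endian target a halfword at byte offset 2 already occupies
   bits 16-31 of the containing word, so the SImode MEM is returned
   as-is; a halfword at offset 0 sits in the low 16 bits, so the MEM
   is wrapped in (rotate ... 16) to move it to the top of the word.  */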
4629
4630 /* Select a dominance comparison mode if possible. We support three forms:
4631 COND_OR == 0 => (X && Y)
4632 COND_OR == 1 => ((!X) || Y)
4633 COND_OR == 2 => (X || Y)
4634 If we are unable to support a dominance comparison we return CCmode.
4635 This will then fail to match for the RTL expressions that generate this
4636 call. */
4637
4638 static enum machine_mode
4639 select_dominance_cc_mode (x, y, cond_or)
4640 rtx x;
4641 rtx y;
4642 HOST_WIDE_INT cond_or;
4643 {
4644 enum rtx_code cond1, cond2;
4645 int swapped = 0;
4646
4647 /* Currently we will probably get the wrong result if the individual
4648 comparisons are not simple. This also ensures that it is safe to
4649 reverse a comparison if necessary. */
4650 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
4651 != CCmode)
4652 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
4653 != CCmode))
4654 return CCmode;
4655
4656 /* The if_then_else variant of this tests the second condition if the
4657 first passes, but is true if the first fails. Reverse the first
4658 condition to get a true "inclusive-or" expression. */
4659 if (cond_or == 1)
4660 cond1 = reverse_condition (cond1);
4661
4662 /* If the comparisons are not equal, and one doesn't dominate the other,
4663 then we can't do this. */
4664 if (cond1 != cond2
4665 && !comparison_dominates_p (cond1, cond2)
4666 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
4667 return CCmode;
4668
4669 if (swapped)
4670 {
4671 enum rtx_code temp = cond1;
4672 cond1 = cond2;
4673 cond2 = temp;
4674 }
4675
4676 switch (cond1)
4677 {
4678 case EQ:
4679 if (cond2 == EQ || !cond_or)
4680 return CC_DEQmode;
4681
4682 switch (cond2)
4683 {
4684 case LE: return CC_DLEmode;
4685 case LEU: return CC_DLEUmode;
4686 case GE: return CC_DGEmode;
4687 case GEU: return CC_DGEUmode;
4688 default: break;
4689 }
4690
4691 break;
4692
4693 case LT:
4694 if (cond2 == LT || !cond_or)
4695 return CC_DLTmode;
4696 if (cond2 == LE)
4697 return CC_DLEmode;
4698 if (cond2 == NE)
4699 return CC_DNEmode;
4700 break;
4701
4702 case GT:
4703 if (cond2 == GT || !cond_or)
4704 return CC_DGTmode;
4705 if (cond2 == GE)
4706 return CC_DGEmode;
4707 if (cond2 == NE)
4708 return CC_DNEmode;
4709 break;
4710
4711 case LTU:
4712 if (cond2 == LTU || !cond_or)
4713 return CC_DLTUmode;
4714 if (cond2 == LEU)
4715 return CC_DLEUmode;
4716 if (cond2 == NE)
4717 return CC_DNEmode;
4718 break;
4719
4720 case GTU:
4721 if (cond2 == GTU || !cond_or)
4722 return CC_DGTUmode;
4723 if (cond2 == GEU)
4724 return CC_DGEUmode;
4725 if (cond2 == NE)
4726 return CC_DNEmode;
4727 break;
4728
4729 /* The remaining cases only occur when both comparisons are the
4730 same. */
4731 case NE:
4732 return CC_DNEmode;
4733
4734 case LE:
4735 return CC_DLEmode;
4736
4737 case GE:
4738 return CC_DGEmode;
4739
4740 case LEU:
4741 return CC_DLEUmode;
4742
4743 case GEU:
4744 return CC_DGEUmode;
4745
4746 default:
4747 break;
4748 }
4749
4750 abort ();
4751 }
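
/* A worked example (illustrative, not from the original sources): for
   (a <= b) || (a == b) we have cond1 = LE, cond2 = EQ and COND_OR == 2.
   EQ dominates LE (a == b implies a <= b), so the conditions are swapped
   and the EQ case returns CC_DLEmode: a single compare of a against b
   can then deliver both conditions.  */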
4752
4753 enum machine_mode
4754 arm_select_cc_mode (op, x, y)
4755 enum rtx_code op;
4756 rtx x;
4757 rtx y;
4758 {
4759 /* All floating point compares return CCFP if it is an equality
4760 comparison, and CCFPE otherwise. */
4761 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
4762 {
4763 switch (op)
4764 {
4765 case EQ:
4766 case NE:
4767 case UNORDERED:
4768 case ORDERED:
4769 case UNLT:
4770 case UNLE:
4771 case UNGT:
4772 case UNGE:
4773 case UNEQ:
4774 case LTGT:
4775 return CCFPmode;
4776
4777 case LT:
4778 case LE:
4779 case GT:
4780 case GE:
4781 return CCFPEmode;
4782
4783 default:
4784 abort ();
4785 }
4786 }
4787
4788 /* A compare with a shifted operand. Because of canonicalization, the
4789 comparison will have to be swapped when we emit the assembler. */
4790 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
4791 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4792 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
4793 || GET_CODE (x) == ROTATERT))
4794 return CC_SWPmode;
4795
4796 /* This is a special case that is used by combine to allow a
4797 comparison of a shifted byte load to be split into a zero-extend
4798 followed by a comparison of the shifted integer (only valid for
4799 equalities and unsigned inequalities). */
4800 if (GET_MODE (x) == SImode
4801 && GET_CODE (x) == ASHIFT
4802 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
4803 && GET_CODE (XEXP (x, 0)) == SUBREG
4804 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
4805 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
4806 && (op == EQ || op == NE
4807 || op == GEU || op == GTU || op == LTU || op == LEU)
4808 && GET_CODE (y) == CONST_INT)
4809 return CC_Zmode;
4810
4811 /* A construct for a conditional compare: if the false arm contains
4812 0, then both conditions must be true; otherwise either condition
4813 must be true. Not all conditions are possible, so CCmode is
4814 returned if it can't be done. */
4815 if (GET_CODE (x) == IF_THEN_ELSE
4816 && (XEXP (x, 2) == const0_rtx
4817 || XEXP (x, 2) == const1_rtx)
4818 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4819 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4820 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
4821 INTVAL (XEXP (x, 2)));
4822
4823 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
4824 if (GET_CODE (x) == AND
4825 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4826 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4827 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1), 0);
4828
4829 if (GET_CODE (x) == IOR
4830 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
4831 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
4832 return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1), 2);
4833
4834 /* For an operation that sets the condition codes as a side-effect, the
4835 V flag is not set correctly, so we can only use comparisons where
4836 this doesn't matter. (For LT and GE we can use "mi" and "pl"
4837 instead.) */
4838 if (GET_MODE (x) == SImode
4839 && y == const0_rtx
4840 && (op == EQ || op == NE || op == LT || op == GE)
4841 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
4842 || GET_CODE (x) == AND || GET_CODE (x) == IOR
4843 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
4844 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
4845 || GET_CODE (x) == LSHIFTRT
4846 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4847 || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
4848 return CC_NOOVmode;
4849
4850 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
4851 return CC_Zmode;
4852
4853 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
4854 && GET_CODE (x) == PLUS
4855 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
4856 return CC_Cmode;
4857
4858 return CCmode;
4859 }
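
/* Illustrative example of the CC_NOOVmode case (not from the original
   sources): for a test such as (a & b) < 0 the AND can set the flags
   itself, e.g.

     ands r0, r1, r2
     bmi  .Lnegative

   The V flag is meaningless after "ands", which is why only EQ, NE, LT
   and GE (the latter two via "mi" and "pl") are allowed there.  */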
4860
4861 /* X and Y are two things to compare using CODE. Emit the compare insn and
4862 return the rtx for register 0 in the proper mode. FP means this is a
4863 floating point compare; I don't think that it is needed on the ARM. */
4864
4865 rtx
4866 arm_gen_compare_reg (code, x, y)
4867 enum rtx_code code;
4868 rtx x, y;
4869 {
4870 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
4871 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
4872
4873 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
4874 gen_rtx_COMPARE (mode, x, y)));
4875
4876 return cc_reg;
4877 }
4878
4879 void
4880 arm_reload_in_hi (operands)
4881 rtx * operands;
4882 {
4883 rtx ref = operands[1];
4884 rtx base, scratch;
4885 HOST_WIDE_INT offset = 0;
4886
4887 if (GET_CODE (ref) == SUBREG)
4888 {
4889 offset = SUBREG_BYTE (ref);
4890 ref = SUBREG_REG (ref);
4891 }
4892
4893 if (GET_CODE (ref) == REG)
4894 {
4895 /* We have a pseudo which has been spilt onto the stack; there
4896 are two cases here: the first where there is a simple
4897 stack-slot replacement and a second where the stack-slot is
4898 out of range, or is used as a subreg. */
4899 if (reg_equiv_mem[REGNO (ref)])
4900 {
4901 ref = reg_equiv_mem[REGNO (ref)];
4902 base = find_replacement (&XEXP (ref, 0));
4903 }
4904 else
4905 /* The slot is out of range, or was dressed up in a SUBREG. */
4906 base = reg_equiv_address[REGNO (ref)];
4907 }
4908 else
4909 base = find_replacement (&XEXP (ref, 0));
4910
4911 /* Handle the case where the address is too complex to be offset by 1. */
4912 if (GET_CODE (base) == MINUS
4913 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
4914 {
4915 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
4916
4917 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
4918 base = base_plus;
4919 }
4920 else if (GET_CODE (base) == PLUS)
4921 {
4922 /* The addend must be CONST_INT, or we would have dealt with it above. */
4923 HOST_WIDE_INT hi, lo;
4924
4925 offset += INTVAL (XEXP (base, 1));
4926 base = XEXP (base, 0);
4927
4928 /* Rework the address into a legal sequence of insns. */
4929 /* Valid range for lo is -4095 -> 4095 */
4930 lo = (offset >= 0
4931 ? (offset & 0xfff)
4932 : -((-offset) & 0xfff));
4933
4934 /* Corner case: if lo is the max offset then we would be out of range
4935 once we have added the additional 1 below, so bump the msb into the
4936 pre-loading insn(s). */
4937 if (lo == 4095)
4938 lo &= 0x7ff;
4939
4940 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
4941 ^ (HOST_WIDE_INT) 0x80000000)
4942 - (HOST_WIDE_INT) 0x80000000);
4943
4944 if (hi + lo != offset)
4945 abort ();
4946
4947 if (hi != 0)
4948 {
4949 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
4950
4951 /* Get the base address; addsi3 knows how to handle constants
4952 that require more than one insn. */
4953 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
4954 base = base_plus;
4955 offset = lo;
4956 }
4957 }
4958
4959 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
4960 emit_insn (gen_zero_extendqisi2 (scratch,
4961 gen_rtx_MEM (QImode,
4962 plus_constant (base,
4963 offset))));
4964 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
4965 gen_rtx_MEM (QImode,
4966 plus_constant (base,
4967 offset + 1))));
4968 if (!BYTES_BIG_ENDIAN)
4969 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
4970 gen_rtx_IOR (SImode,
4971 gen_rtx_ASHIFT
4972 (SImode,
4973 gen_rtx_SUBREG (SImode, operands[0], 0),
4974 GEN_INT (8)),
4975 scratch)));
4976 else
4977 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
4978 gen_rtx_IOR (SImode,
4979 gen_rtx_ASHIFT (SImode, scratch,
4980 GEN_INT (8)),
4981 gen_rtx_SUBREG (SImode, operands[0],
4982 0))));
4983 }
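
/* Illustrative example (assumed typical output, not from the original
   sources): on a little-endian target the reload above is synthesised
   roughly as

     ldrb  scratch, [base, #offset]       @ low byte
     ldrb  out,     [base, #offset + 1]   @ high byte
     orr   out, scratch, out, asl #8

   so that no unaligned halfword access is ever issued.  */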
4984
4985 /* Handle storing a half-word to memory during reload by synthesising as two
4986 byte stores. Take care not to clobber the input values until after we
4987 have moved them somewhere safe. This code assumes that if the DImode
4988 scratch in operands[2] overlaps either the input value or output address
4989 in some way, then that value must die in this insn (we absolutely need
4990 two scratch registers for some corner cases). */
4991
4992 void
4993 arm_reload_out_hi (operands)
4994 rtx * operands;
4995 {
4996 rtx ref = operands[0];
4997 rtx outval = operands[1];
4998 rtx base, scratch;
4999 HOST_WIDE_INT offset = 0;
5000
5001 if (GET_CODE (ref) == SUBREG)
5002 {
5003 offset = SUBREG_BYTE (ref);
5004 ref = SUBREG_REG (ref);
5005 }
5006
5007 if (GET_CODE (ref) == REG)
5008 {
5009 /* We have a pseudo which has been spilt onto the stack; there
5010 are two cases here: the first where there is a simple
5011 stack-slot replacement and a second where the stack-slot is
5012 out of range, or is used as a subreg. */
5013 if (reg_equiv_mem[REGNO (ref)])
5014 {
5015 ref = reg_equiv_mem[REGNO (ref)];
5016 base = find_replacement (&XEXP (ref, 0));
5017 }
5018 else
5019 /* The slot is out of range, or was dressed up in a SUBREG. */
5020 base = reg_equiv_address[REGNO (ref)];
5021 }
5022 else
5023 base = find_replacement (&XEXP (ref, 0));
5024
5025 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5026
5027 /* Handle the case where the address is too complex to be offset by 1. */
5028 if (GET_CODE (base) == MINUS
5029 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5030 {
5031 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5032
5033 /* Be careful not to destroy OUTVAL. */
5034 if (reg_overlap_mentioned_p (base_plus, outval))
5035 {
5036 /* Updating base_plus might destroy outval, see if we can
5037 swap the scratch and base_plus. */
5038 if (!reg_overlap_mentioned_p (scratch, outval))
5039 {
5040 rtx tmp = scratch;
5041 scratch = base_plus;
5042 base_plus = tmp;
5043 }
5044 else
5045 {
5046 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5047
5048 /* Be conservative and copy OUTVAL into the scratch now,
5049 this should only be necessary if outval is a subreg
5050 of something larger than a word. */
5051 /* XXX Might this clobber base? I can't see how it can,
5052 since scratch is known to overlap with OUTVAL, and
5053 must be wider than a word. */
5054 emit_insn (gen_movhi (scratch_hi, outval));
5055 outval = scratch_hi;
5056 }
5057 }
5058
5059 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5060 base = base_plus;
5061 }
5062 else if (GET_CODE (base) == PLUS)
5063 {
5064 /* The addend must be CONST_INT, or we would have dealt with it above. */
5065 HOST_WIDE_INT hi, lo;
5066
5067 offset += INTVAL (XEXP (base, 1));
5068 base = XEXP (base, 0);
5069
5070 /* Rework the address into a legal sequence of insns. */
5071 /* Valid range for lo is -4095 -> 4095 */
5072 lo = (offset >= 0
5073 ? (offset & 0xfff)
5074 : -((-offset) & 0xfff));
5075
5076 /* Corner case: if lo is the max offset then we would be out of range
5077 once we have added the additional 1 below, so bump the msb into the
5078 pre-loading insn(s). */
5079 if (lo == 4095)
5080 lo &= 0x7ff;
5081
5082 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5083 ^ (HOST_WIDE_INT) 0x80000000)
5084 - (HOST_WIDE_INT) 0x80000000);
5085
5086 if (hi + lo != offset)
5087 abort ();
5088
5089 if (hi != 0)
5090 {
5091 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5092
5093 /* Be careful not to destroy OUTVAL. */
5094 if (reg_overlap_mentioned_p (base_plus, outval))
5095 {
5096 /* Updating base_plus might destroy outval, see if we
5097 can swap the scratch and base_plus. */
5098 if (!reg_overlap_mentioned_p (scratch, outval))
5099 {
5100 rtx tmp = scratch;
5101 scratch = base_plus;
5102 base_plus = tmp;
5103 }
5104 else
5105 {
5106 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5107
5108 /* Be conservative and copy outval into scratch now,
5109 this should only be necessary if outval is a
5110 subreg of something larger than a word. */
5111 /* XXX Might this clobber base? I can't see how it
5112 can, since scratch is known to overlap with
5113 outval. */
5114 emit_insn (gen_movhi (scratch_hi, outval));
5115 outval = scratch_hi;
5116 }
5117 }
5118
5119 /* Get the base address; addsi3 knows how to handle constants
5120 that require more than one insn. */
5121 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5122 base = base_plus;
5123 offset = lo;
5124 }
5125 }
5126
5127 if (BYTES_BIG_ENDIAN)
5128 {
5129 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
5130 plus_constant (base, offset + 1)),
5131 gen_lowpart (QImode, outval)));
5132 emit_insn (gen_lshrsi3 (scratch,
5133 gen_rtx_SUBREG (SImode, outval, 0),
5134 GEN_INT (8)));
5135 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
5136 gen_lowpart (QImode, scratch)));
5137 }
5138 else
5139 {
5140 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
5141 gen_lowpart (QImode, outval)));
5142 emit_insn (gen_lshrsi3 (scratch,
5143 gen_rtx_SUBREG (SImode, outval, 0),
5144 GEN_INT (8)));
5145 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
5146 plus_constant (base, offset + 1)),
5147 gen_lowpart (QImode, scratch)));
5148 }
5149 }
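
/* Illustrative example (assumed typical output, not from the original
   sources): the little-endian store of OUTVAL becomes roughly

     strb  outval,  [base, #offset]
     mov   scratch, outval, lsr #8
     strb  scratch, [base, #offset + 1]

   with the big-endian path writing the two bytes in the opposite
   order.  */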
5150 \f
5151 /* Print a symbolic form of X to the debug file, F. */
5152
5153 static void
5154 arm_print_value (f, x)
5155 FILE * f;
5156 rtx x;
5157 {
5158 switch (GET_CODE (x))
5159 {
5160 case CONST_INT:
5161 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
5162 return;
5163
5164 case CONST_DOUBLE:
5165 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
5166 return;
5167
5168 case CONST_STRING:
5169 fprintf (f, "\"%s\"", XSTR (x, 0));
5170 return;
5171
5172 case SYMBOL_REF:
5173 fprintf (f, "`%s'", XSTR (x, 0));
5174 return;
5175
5176 case LABEL_REF:
5177 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
5178 return;
5179
5180 case CONST:
5181 arm_print_value (f, XEXP (x, 0));
5182 return;
5183
5184 case PLUS:
5185 arm_print_value (f, XEXP (x, 0));
5186 fprintf (f, "+");
5187 arm_print_value (f, XEXP (x, 1));
5188 return;
5189
5190 case PC:
5191 fprintf (f, "pc");
5192 return;
5193
5194 default:
5195 fprintf (f, "????");
5196 return;
5197 }
5198 }
5199 \f
5200 /* Routines for manipulation of the constant pool. */
5201
5202 /* Arm instructions cannot load a large constant directly into a
5203 register; they have to come from a pc relative load. The constant
5204 must therefore be placed in the addressable range of the pc
5205 relative load. Depending on the precise pc relative load
5206 instruction the range is somewhere between 256 bytes and 4k. This
5207 means that we often have to dump a constant inside a function, and
5208 generate code to branch around it.
5209
5210 It is important to minimize this, since the branches will slow
5211 things down and make the code larger.
5212
5213 Normally we can hide the table after an existing unconditional
5214 branch so that there is no interruption of the flow, but in the
5215 worst case the code looks like this:
5216
5217 ldr rn, L1
5218 ...
5219 b L2
5220 align
5221 L1: .long value
5222 L2:
5223 ...
5224
5225 ldr rn, L3
5226 ...
5227 b L4
5228 align
5229 L3: .long value
5230 L4:
5231 ...
5232
5233 We fix this by performing a scan after scheduling, which notices
5234 which instructions need to have their operands fetched from the
5235 constant table and builds the table.
5236
5237 The algorithm starts by building a table of all the constants that
5238 need fixing up and all the natural barriers in the function (places
5239 where a constant table can be dropped without breaking the flow).
5240 For each fixup we note how far the pc-relative replacement will be
5241 able to reach and the offset of the instruction into the function.
5242
5243 Having built the table we then group the fixes together to form
5244 tables that are as large as possible (subject to addressing
5245 constraints) and emit each table of constants after the last
5246 barrier that is within range of all the instructions in the group.
5247 If a group does not contain a barrier, then we forcibly create one
5248 by inserting a jump instruction into the flow. Once the table has
5249 been inserted, the insns are then modified to reference the
5250 relevant entry in the pool.
5251
5252 Possible enhancements to the algorithm (not implemented) are:
5253
5254 1) For some processors and object formats, there may be benefit in
5255 aligning the pools to the start of cache lines; this alignment
5256 would need to be taken into account when calculating addressability
5257 of a pool. */
5258
5259 /* These typedefs are located at the start of this file, so that
5260 they can be used in the prototypes there. This comment is to
5261 remind readers of that fact so that the following structures
5262 can be understood more easily.
5263
5264 typedef struct minipool_node Mnode;
5265 typedef struct minipool_fixup Mfix; */
5266
5267 struct minipool_node
5268 {
5269 /* Doubly linked chain of entries. */
5270 Mnode * next;
5271 Mnode * prev;
5272 /* The maximum offset into the code at which this entry can be placed. While
5273 pushing fixes for forward references, all entries are sorted in order
5274 of increasing max_address. */
5275 HOST_WIDE_INT max_address;
5276 /* Similarly for an entry inserted for a backwards ref. */
5277 HOST_WIDE_INT min_address;
5278 /* The number of fixes referencing this entry. This can become zero
5279 if we "unpush" an entry. In this case we ignore the entry when we
5280 come to emit the code. */
5281 int refcount;
5282 /* The offset from the start of the minipool. */
5283 HOST_WIDE_INT offset;
5284 /* The value in the table. */
5285 rtx value;
5286 /* The mode of value. */
5287 enum machine_mode mode;
5288 int fix_size;
5289 };
5290
5291 struct minipool_fixup
5292 {
5293 Mfix * next;
5294 rtx insn;
5295 HOST_WIDE_INT address;
5296 rtx * loc;
5297 enum machine_mode mode;
5298 int fix_size;
5299 rtx value;
5300 Mnode * minipool;
5301 HOST_WIDE_INT forwards;
5302 HOST_WIDE_INT backwards;
5303 };
5304
5305 /* Fixes less than a word need padding out to a word boundary. */
5306 #define MINIPOOL_FIX_SIZE(mode) \
5307 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
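
/* For example (illustrative): a QImode or HImode fix still occupies 4
   bytes in the pool, while a DImode or DFmode fix occupies 8.  */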
5308
5309 static Mnode * minipool_vector_head;
5310 static Mnode * minipool_vector_tail;
5311 static rtx minipool_vector_label;
5312
5313 /* The linked list of all minipool fixes required for this function. */
5314 Mfix * minipool_fix_head;
5315 Mfix * minipool_fix_tail;
5316 /* The fix entry for the current minipool, once it has been placed. */
5317 Mfix * minipool_barrier;
5318
5319 /* Determines if INSN is the start of a jump table. Returns the end
5320 of the TABLE or NULL_RTX. */
5321
5322 static rtx
5323 is_jump_table (insn)
5324 rtx insn;
5325 {
5326 rtx table;
5327
5328 if (GET_CODE (insn) == JUMP_INSN
5329 && JUMP_LABEL (insn) != NULL
5330 && ((table = next_real_insn (JUMP_LABEL (insn)))
5331 == next_real_insn (insn))
5332 && table != NULL
5333 && GET_CODE (table) == JUMP_INSN
5334 && (GET_CODE (PATTERN (table)) == ADDR_VEC
5335 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
5336 return table;
5337
5338 return NULL_RTX;
5339 }
5340
5341 #ifndef JUMP_TABLES_IN_TEXT_SECTION
5342 #define JUMP_TABLES_IN_TEXT_SECTION 0
5343 #endif
5344
5345 static HOST_WIDE_INT
5346 get_jump_table_size (insn)
5347 rtx insn;
5348 {
5349 /* ADDR_VECs only take room if read-only data goes into the text
5350 section. */
5351 if (JUMP_TABLES_IN_TEXT_SECTION
5352 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
5353 || 1
5354 #endif
5355 )
5356 {
5357 rtx body = PATTERN (insn);
5358 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
5359
5360 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
5361 }
5362
5363 return 0;
5364 }
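
/* For example (illustrative): when jump tables do live in the text
   section, an ADDR_DIFF_VEC of 10 SImode entries contributes
   10 * 4 = 40 bytes that must be counted when deciding where a
   minipool can be placed.  */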
5365
5366 /* Move a minipool fix MP from its current location to before MAX_MP.
5367 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
5368 constraints may need updating. */
5369
5370 static Mnode *
5371 move_minipool_fix_forward_ref (mp, max_mp, max_address)
5372 Mnode * mp;
5373 Mnode * max_mp;
5374 HOST_WIDE_INT max_address;
5375 {
5376 /* This should never be true and the code below assumes these are
5377 different. */
5378 if (mp == max_mp)
5379 abort ();
5380
5381 if (max_mp == NULL)
5382 {
5383 if (max_address < mp->max_address)
5384 mp->max_address = max_address;
5385 }
5386 else
5387 {
5388 if (max_address > max_mp->max_address - mp->fix_size)
5389 mp->max_address = max_mp->max_address - mp->fix_size;
5390 else
5391 mp->max_address = max_address;
5392
5393 /* Unlink MP from its current position. Since max_mp is non-null,
5394 mp->prev must be non-null. */
5395 mp->prev->next = mp->next;
5396 if (mp->next != NULL)
5397 mp->next->prev = mp->prev;
5398 else
5399 minipool_vector_tail = mp->prev;
5400
5401 /* Re-insert it before MAX_MP. */
5402 mp->next = max_mp;
5403 mp->prev = max_mp->prev;
5404 max_mp->prev = mp;
5405
5406 if (mp->prev != NULL)
5407 mp->prev->next = mp;
5408 else
5409 minipool_vector_head = mp;
5410 }
5411
5412 /* Save the new entry. */
5413 max_mp = mp;
5414
5415 /* Scan over the preceding entries and adjust their addresses as
5416 required. */
5417 while (mp->prev != NULL
5418 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5419 {
5420 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5421 mp = mp->prev;
5422 }
5423
5424 return max_mp;
5425 }
5426
5427 /* Add a constant to the minipool for a forward reference. Returns the
5428 node added or NULL if the constant will not fit in this pool. */
5429
5430 static Mnode *
5431 add_minipool_forward_ref (fix)
5432 Mfix * fix;
5433 {
5434 /* If set, max_mp is the first pool_entry that has a lower
5435 constraint than the one we are trying to add. */
5436 Mnode * max_mp = NULL;
5437 HOST_WIDE_INT max_address = fix->address + fix->forwards;
5438 Mnode * mp;
5439
5440 /* If this fix's address is greater than the address of the first
5441 entry, then we can't put the fix in this pool. We subtract the
5442 size of the current fix to ensure that if the table is fully
5443 packed we still have enough room to insert this value by shuffling
5444 the other fixes forwards. */
5445 if (minipool_vector_head &&
5446 fix->address >= minipool_vector_head->max_address - fix->fix_size)
5447 return NULL;
5448
5449 /* Scan the pool to see if a constant with the same value has
5450 already been added. While we are doing this, also note the
5451 location where we must insert the constant if it doesn't already
5452 exist. */
5453 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5454 {
5455 if (GET_CODE (fix->value) == GET_CODE (mp->value)
5456 && fix->mode == mp->mode
5457 && (GET_CODE (fix->value) != CODE_LABEL
5458 || (CODE_LABEL_NUMBER (fix->value)
5459 == CODE_LABEL_NUMBER (mp->value)))
5460 && rtx_equal_p (fix->value, mp->value))
5461 {
5462 /* More than one fix references this entry. */
5463 mp->refcount++;
5464 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
5465 }
5466
5467 /* Note the insertion point if necessary. */
5468 if (max_mp == NULL
5469 && mp->max_address > max_address)
5470 max_mp = mp;
5471 }
5472
5473 /* The value is not currently in the minipool, so we need to create
5474 a new entry for it. If MAX_MP is NULL, the entry will be put on
5475 the end of the list since the placement is less constrained than
5476 any existing entry. Otherwise, we insert the new fix before
5477 MAX_MP and, if necessary, adjust the constraints on the other
5478 entries. */
5479 mp = xmalloc (sizeof (* mp));
5480 mp->fix_size = fix->fix_size;
5481 mp->mode = fix->mode;
5482 mp->value = fix->value;
5483 mp->refcount = 1;
5484 /* Not yet required for a backwards ref. */
5485 mp->min_address = -65536;
5486
5487 if (max_mp == NULL)
5488 {
5489 mp->max_address = max_address;
5490 mp->next = NULL;
5491 mp->prev = minipool_vector_tail;
5492
5493 if (mp->prev == NULL)
5494 {
5495 minipool_vector_head = mp;
5496 minipool_vector_label = gen_label_rtx ();
5497 }
5498 else
5499 mp->prev->next = mp;
5500
5501 minipool_vector_tail = mp;
5502 }
5503 else
5504 {
5505 if (max_address > max_mp->max_address - mp->fix_size)
5506 mp->max_address = max_mp->max_address - mp->fix_size;
5507 else
5508 mp->max_address = max_address;
5509
5510 mp->next = max_mp;
5511 mp->prev = max_mp->prev;
5512 max_mp->prev = mp;
5513 if (mp->prev != NULL)
5514 mp->prev->next = mp;
5515 else
5516 minipool_vector_head = mp;
5517 }
5518
5519 /* Save the new entry. */
5520 max_mp = mp;
5521
5522 /* Scan over the preceding entries and adjust their addresses as
5523 required. */
5524 while (mp->prev != NULL
5525 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5526 {
5527 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5528 mp = mp->prev;
5529 }
5530
5531 return max_mp;
5532 }
5533
5534 static Mnode *
5535 move_minipool_fix_backward_ref (mp, min_mp, min_address)
5536 Mnode * mp;
5537 Mnode * min_mp;
5538 HOST_WIDE_INT min_address;
5539 {
5540 HOST_WIDE_INT offset;
5541
5542 /* This should never be true, and the code below assumes these are
5543 different. */
5544 if (mp == min_mp)
5545 abort ();
5546
5547 if (min_mp == NULL)
5548 {
5549 if (min_address > mp->min_address)
5550 mp->min_address = min_address;
5551 }
5552 else
5553 {
5554 /* We will adjust this below if it is too loose. */
5555 mp->min_address = min_address;
5556
5557 /* Unlink MP from its current position. Since min_mp is non-null,
5558 mp->next must be non-null. */
5559 mp->next->prev = mp->prev;
5560 if (mp->prev != NULL)
5561 mp->prev->next = mp->next;
5562 else
5563 minipool_vector_head = mp->next;
5564
5565 /* Reinsert it after MIN_MP. */
5566 mp->prev = min_mp;
5567 mp->next = min_mp->next;
5568 min_mp->next = mp;
5569 if (mp->next != NULL)
5570 mp->next->prev = mp;
5571 else
5572 minipool_vector_tail = mp;
5573 }
5574
5575 min_mp = mp;
5576
5577 offset = 0;
5578 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5579 {
5580 mp->offset = offset;
5581 if (mp->refcount > 0)
5582 offset += mp->fix_size;
5583
5584 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
5585 mp->next->min_address = mp->min_address + mp->fix_size;
5586 }
5587
5588 return min_mp;
5589 }
5590
5591 /* Add a constant to the minipool for a backward reference. Returns the
5592 node added or NULL if the constant will not fit in this pool.
5593
5594 Note that the code for insertion for a backwards reference can be
5595 somewhat confusing because the calculated offsets for each fix do
5596 not take into account the size of the pool (which is still under
5597 construction). */
5598
5599 static Mnode *
5600 add_minipool_backward_ref (fix)
5601 Mfix * fix;
5602 {
5603 /* If set, min_mp is the last pool_entry that has a lower constraint
5604 than the one we are trying to add. */
5605 Mnode * min_mp = NULL;
5606 /* This can be negative, since it is only a constraint. */
5607 HOST_WIDE_INT min_address = fix->address - fix->backwards;
5608 Mnode * mp;
5609
5610 /* If we can't reach the current pool from this insn, or if we can't
5611 insert this entry at the end of the pool without pushing other
5612 fixes out of range, then we don't try. This ensures that we
5613 can't fail later on. */
5614 if (min_address >= minipool_barrier->address
5615 || (minipool_vector_tail->min_address + fix->fix_size
5616 >= minipool_barrier->address))
5617 return NULL;
5618
5619 /* Scan the pool to see if a constant with the same value has
5620 already been added. While we are doing this, also note the
5621 location where we must insert the constant if it doesn't already
5622 exist. */
5623 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
5624 {
5625 if (GET_CODE (fix->value) == GET_CODE (mp->value)
5626 && fix->mode == mp->mode
5627 && (GET_CODE (fix->value) != CODE_LABEL
5628 || (CODE_LABEL_NUMBER (fix->value)
5629 == CODE_LABEL_NUMBER (mp->value)))
5630 && rtx_equal_p (fix->value, mp->value)
5631 /* Check that there is enough slack to move this entry to the
5632 end of the table (this is conservative). */
5633 && (mp->max_address
5634 > (minipool_barrier->address
5635 + minipool_vector_tail->offset
5636 + minipool_vector_tail->fix_size)))
5637 {
5638 mp->refcount++;
5639 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
5640 }
5641
5642 if (min_mp != NULL)
5643 mp->min_address += fix->fix_size;
5644 else
5645 {
5646 /* Note the insertion point if necessary. */
5647 if (mp->min_address < min_address)
5648 min_mp = mp;
5649 else if (mp->max_address
5650 < minipool_barrier->address + mp->offset + fix->fix_size)
5651 {
5652 /* Inserting before this entry would push the fix beyond
5653 its maximum address (which can happen if we have
5654 re-located a forwards fix); force the new fix to come
5655 after it. */
5656 min_mp = mp;
5657 min_address = mp->min_address + fix->fix_size;
5658 }
5659 }
5660 }
5661
5662 /* We need to create a new entry. */
5663 mp = xmalloc (sizeof (* mp));
5664 mp->fix_size = fix->fix_size;
5665 mp->mode = fix->mode;
5666 mp->value = fix->value;
5667 mp->refcount = 1;
5668 mp->max_address = minipool_barrier->address + 65536;
5669
5670 mp->min_address = min_address;
5671
5672 if (min_mp == NULL)
5673 {
5674 mp->prev = NULL;
5675 mp->next = minipool_vector_head;
5676
5677 if (mp->next == NULL)
5678 {
5679 minipool_vector_tail = mp;
5680 minipool_vector_label = gen_label_rtx ();
5681 }
5682 else
5683 mp->next->prev = mp;
5684
5685 minipool_vector_head = mp;
5686 }
5687 else
5688 {
5689 mp->next = min_mp->next;
5690 mp->prev = min_mp;
5691 min_mp->next = mp;
5692
5693 if (mp->next != NULL)
5694 mp->next->prev = mp;
5695 else
5696 minipool_vector_tail = mp;
5697 }
5698
5699 /* Save the new entry. */
5700 min_mp = mp;
5701
5702 if (mp->prev)
5703 mp = mp->prev;
5704 else
5705 mp->offset = 0;
5706
5707 /* Scan over the following entries and adjust their offsets. */
5708 while (mp->next != NULL)
5709 {
5710 if (mp->next->min_address < mp->min_address + mp->fix_size)
5711 mp->next->min_address = mp->min_address + mp->fix_size;
5712
5713 if (mp->refcount)
5714 mp->next->offset = mp->offset + mp->fix_size;
5715 else
5716 mp->next->offset = mp->offset;
5717
5718 mp = mp->next;
5719 }
5720
5721 return min_mp;
5722 }
5723
5724 static void
5725 assign_minipool_offsets (barrier)
5726 Mfix * barrier;
5727 {
5728 HOST_WIDE_INT offset = 0;
5729 Mnode * mp;
5730
5731 minipool_barrier = barrier;
5732
5733 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5734 {
5735 mp->offset = offset;
5736
5737 if (mp->refcount > 0)
5738 offset += mp->fix_size;
5739 }
5740 }
5741
5742 /* Output the literal table. */
5743 static void
5744 dump_minipool (scan)
5745 rtx scan;
5746 {
5747 Mnode * mp;
5748 Mnode * nmp;
5749
5750 if (rtl_dump_file)
5751 fprintf (rtl_dump_file,
5752 ";; Emitting minipool after insn %u; address %ld\n",
5753 INSN_UID (scan), (unsigned long) minipool_barrier->address);
5754
5755 scan = emit_label_after (gen_label_rtx (), scan);
5756 scan = emit_insn_after (gen_align_4 (), scan);
5757 scan = emit_label_after (minipool_vector_label, scan);
5758
5759 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
5760 {
5761 if (mp->refcount > 0)
5762 {
5763 if (rtl_dump_file)
5764 {
5765 fprintf (rtl_dump_file,
5766 ";; Offset %u, min %ld, max %ld ",
5767 (unsigned) mp->offset, (unsigned long) mp->min_address,
5768 (unsigned long) mp->max_address);
5769 arm_print_value (rtl_dump_file, mp->value);
5770 fputc ('\n', rtl_dump_file);
5771 }
5772
5773 switch (mp->fix_size)
5774 {
5775 #ifdef HAVE_consttable_1
5776 case 1:
5777 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
5778 break;
5779
5780 #endif
5781 #ifdef HAVE_consttable_2
5782 case 2:
5783 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
5784 break;
5785
5786 #endif
5787 #ifdef HAVE_consttable_4
5788 case 4:
5789 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
5790 break;
5791
5792 #endif
5793 #ifdef HAVE_consttable_8
5794 case 8:
5795 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
5796 break;
5797
5798 #endif
5799 default:
5800 abort ();
5801 break;
5802 }
5803 }
5804
5805 nmp = mp->next;
5806 free (mp);
5807 }
5808
5809 minipool_vector_head = minipool_vector_tail = NULL;
5810 scan = emit_insn_after (gen_consttable_end (), scan);
5811 scan = emit_barrier_after (scan);
5812 }
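
/* The emitted pool has roughly this shape (illustrative; label names
   invented):

	.align	2
   .LPOOL:			@ minipool_vector_label
	.word	0x12345678	@ one consttable_<size> entry per
	...			@ node with refcount > 0

   followed by a consttable_end marker and a barrier.  */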
5813
5814 /* Return the cost of forcibly inserting a barrier after INSN. */
5815
5816 static int
5817 arm_barrier_cost (insn)
5818 rtx insn;
5819 {
5820 /* Basing the location of the pool on the loop depth is preferable,
5821 but at the moment, the basic block information seems to be
5822 corrupted by this stage of the compilation. */
5823 int base_cost = 50;
5824 rtx next = next_nonnote_insn (insn);
5825
5826 if (next != NULL && GET_CODE (next) == CODE_LABEL)
5827 base_cost -= 20;
5828
5829 switch (GET_CODE (insn))
5830 {
5831 case CODE_LABEL:
5832 /* It will always be better to place the table before the label, rather
5833 than after it. */
5834 return 50;
5835
5836 case INSN:
5837 case CALL_INSN:
5838 return base_cost;
5839
5840 case JUMP_INSN:
5841 return base_cost - 10;
5842
5843 default:
5844 return base_cost + 10;
5845 }
5846 }
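
/* Illustrative costs: a JUMP_INSN immediately followed by a
   CODE_LABEL scores 50 - 20 - 10 = 20, the cheapest split this
   heuristic can find, while splitting at a CODE_LABEL itself always
   scores 50.  */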
5847
5848 /* Find the best place in the insn stream in the range
5849 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
5850 Create the barrier by inserting a jump and add a new fix entry for
5851 it. */
5852
5853 static Mfix *
5854 create_fix_barrier (fix, max_address)
5855 Mfix * fix;
5856 HOST_WIDE_INT max_address;
5857 {
5858 HOST_WIDE_INT count = 0;
5859 rtx barrier;
5860 rtx from = fix->insn;
5861 rtx selected = from;
5862 int selected_cost;
5863 HOST_WIDE_INT selected_address;
5864 Mfix * new_fix;
5865 HOST_WIDE_INT max_count = max_address - fix->address;
5866 rtx label = gen_label_rtx ();
5867
5868 selected_cost = arm_barrier_cost (from);
5869 selected_address = fix->address;
5870
5871 while (from && count < max_count)
5872 {
5873 rtx tmp;
5874 int new_cost;
5875
5876 /* This code shouldn't have been called if there was a natural barrier
5877 within range. */
5878 if (GET_CODE (from) == BARRIER)
5879 abort ();
5880
5881 /* Count the length of this insn. */
5882 count += get_attr_length (from);
5883
5884 /* If there is a jump table, add its length. */
5885 tmp = is_jump_table (from);
5886 if (tmp != NULL)
5887 {
5888 count += get_jump_table_size (tmp);
5889
5890 /* Jump tables aren't in a basic block, so base the cost on
5891 the dispatch insn. If we select this location, we will
5892 still put the pool after the table. */
5893 new_cost = arm_barrier_cost (from);
5894
5895 if (count < max_count && new_cost <= selected_cost)
5896 {
5897 selected = tmp;
5898 selected_cost = new_cost;
5899 selected_address = fix->address + count;
5900 }
5901
5902 /* Continue after the dispatch table. */
5903 from = NEXT_INSN (tmp);
5904 continue;
5905 }
5906
5907 new_cost = arm_barrier_cost (from);
5908
5909 if (count < max_count && new_cost <= selected_cost)
5910 {
5911 selected = from;
5912 selected_cost = new_cost;
5913 selected_address = fix->address + count;
5914 }
5915
5916 from = NEXT_INSN (from);
5917 }
5918
5919 /* Create a new JUMP_INSN that branches around a barrier. */
5920 from = emit_jump_insn_after (gen_jump (label), selected);
5921 JUMP_LABEL (from) = label;
5922 barrier = emit_barrier_after (from);
5923 emit_label_after (label, barrier);
5924
5925 /* Create a minipool barrier entry for the new barrier. */
5926 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
5927 new_fix->insn = barrier;
5928 new_fix->address = selected_address;
5929 new_fix->next = fix->next;
5930 fix->next = new_fix;
5931
5932 return new_fix;
5933 }
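
/* Once dump_minipool has run, the inserted sequence looks roughly
   like this (illustrative):

	b	.LSKIP		@ the new jump around the barrier
	@ minipool contents are dumped here
   .LSKIP:

   so execution falls past the table without ever entering it.  */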
5934
5935 /* Record that there is a natural barrier in the insn stream at
5936 ADDRESS. */
5937 static void
5938 push_minipool_barrier (insn, address)
5939 rtx insn;
5940 HOST_WIDE_INT address;
5941 {
5942 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
5943
5944 fix->insn = insn;
5945 fix->address = address;
5946
5947 fix->next = NULL;
5948 if (minipool_fix_head != NULL)
5949 minipool_fix_tail->next = fix;
5950 else
5951 minipool_fix_head = fix;
5952
5953 minipool_fix_tail = fix;
5954 }
5955
5956 /* Record INSN, which will need fixing up to load a value from the
5957 minipool. ADDRESS is the offset of the insn since the start of the
5958 function; LOC is a pointer to the part of the insn which requires
5959 fixing; VALUE is the constant that must be loaded, which is of type
5960 MODE. */
5961 static void
5962 push_minipool_fix (insn, address, loc, mode, value)
5963 rtx insn;
5964 HOST_WIDE_INT address;
5965 rtx * loc;
5966 enum machine_mode mode;
5967 rtx value;
5968 {
5969 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
5970
5971 #ifdef AOF_ASSEMBLER
5972 /* PIC symbol references need to be converted into offsets into the
5973 based area. */
5974 /* XXX This shouldn't be done here. */
5975 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
5976 value = aof_pic_entry (value);
5977 #endif /* AOF_ASSEMBLER */
5978
5979 fix->insn = insn;
5980 fix->address = address;
5981 fix->loc = loc;
5982 fix->mode = mode;
5983 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
5984 fix->value = value;
5985 fix->forwards = get_attr_pool_range (insn);
5986 fix->backwards = get_attr_neg_pool_range (insn);
5987 fix->minipool = NULL;
5988
5989 /* If an insn doesn't have a range defined for it, then it isn't
5990 expecting to be reworked by this code. Better to abort now than
5991 to generate duff assembly code. */
5992 if (fix->forwards == 0 && fix->backwards == 0)
5993 abort ();
5994
5995 if (rtl_dump_file)
5996 {
5997 fprintf (rtl_dump_file,
5998 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
5999 GET_MODE_NAME (mode),
6000 INSN_UID (insn), (unsigned long) address,
6001 -1 * (long)fix->backwards, (long)fix->forwards);
6002 arm_print_value (rtl_dump_file, fix->value);
6003 fprintf (rtl_dump_file, "\n");
6004 }
6005
6006 /* Add it to the chain of fixes. */
6007 fix->next = NULL;
6008
6009 if (minipool_fix_head != NULL)
6010 minipool_fix_tail->next = fix;
6011 else
6012 minipool_fix_head = fix;
6013
6014 minipool_fix_tail = fix;
6015 }
6016
6017 /* Scan INSN and note any of its operands that need fixing. */
6018
6019 static void
6020 note_invalid_constants (insn, address)
6021 rtx insn;
6022 HOST_WIDE_INT address;
6023 {
6024 int opno;
6025
6026 extract_insn (insn);
6027
6028 if (!constrain_operands (1))
6029 fatal_insn_not_found (insn);
6030
6031 /* Fill in recog_op_alt with information about the constraints of this
6032 insn. */
6033 preprocess_constraints ();
6034
6035 for (opno = 0; opno < recog_data.n_operands; opno++)
6036 {
6037 /* Things we need to fix can only occur in inputs. */
6038 if (recog_data.operand_type[opno] != OP_IN)
6039 continue;
6040
6041 /* If this alternative is a memory reference, then any mention
6042 of constants in this alternative is really to fool reload
6043 into allowing us to accept one there. We need to fix them up
6044 now so that we output the right code. */
6045 if (recog_op_alt[opno][which_alternative].memory_ok)
6046 {
6047 rtx op = recog_data.operand[opno];
6048
6049 if (CONSTANT_P (op))
6050 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6051 recog_data.operand_mode[opno], op);
6052 #if 0
6053 /* RWE: Now that we look correctly at the operands for the insn,
6054 this shouldn't be needed any more. */
6055 #ifndef AOF_ASSEMBLER
6056 /* XXX Is this still needed? */
6057 else if (GET_CODE (op) == UNSPEC && XINT (op, 1) == UNSPEC_PIC_SYM)
6058 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6059 recog_data.operand_mode[opno],
6060 XVECEXP (op, 0, 0));
6061 #endif
6062 #endif
6063 else if (GET_CODE (op) == MEM
6064 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
6065 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
6066 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6067 recog_data.operand_mode[opno],
6068 get_pool_constant (XEXP (op, 0)));
6069 }
6070 }
6071 }
6072
6073 void
6074 arm_reorg (first)
6075 rtx first;
6076 {
6077 rtx insn;
6078 HOST_WIDE_INT address = 0;
6079 Mfix * fix;
6080
6081 minipool_fix_head = minipool_fix_tail = NULL;
6082
6083 /* The first insn must always be a note, or the code below won't
6084 scan it properly. */
6085 if (GET_CODE (first) != NOTE)
6086 abort ();
6087
6088 /* Scan all the insns and record the operands that will need fixing. */
6089 for (insn = next_nonnote_insn (first); insn; insn = next_nonnote_insn (insn))
6090 {
6091 if (GET_CODE (insn) == BARRIER)
6092 push_minipool_barrier (insn, address);
6093 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
6094 || GET_CODE (insn) == JUMP_INSN)
6095 {
6096 rtx table;
6097
6098 note_invalid_constants (insn, address);
6099 address += get_attr_length (insn);
6100
6101 /* If the insn is a vector jump, add the size of the table
6102 and skip the table. */
6103 if ((table = is_jump_table (insn)) != NULL)
6104 {
6105 address += get_jump_table_size (table);
6106 insn = table;
6107 }
6108 }
6109 }
6110
6111 fix = minipool_fix_head;
6112
6113 /* Now scan the fixups and perform the required changes. */
6114 while (fix)
6115 {
6116 Mfix * ftmp;
6117 Mfix * fdel;
6118 Mfix * last_added_fix;
6119 Mfix * last_barrier = NULL;
6120 Mfix * this_fix;
6121
6122 /* Skip any further barriers before the next fix. */
6123 while (fix && GET_CODE (fix->insn) == BARRIER)
6124 fix = fix->next;
6125
6126 /* No more fixes. */
6127 if (fix == NULL)
6128 break;
6129
6130 last_added_fix = NULL;
6131
6132 for (ftmp = fix; ftmp; ftmp = ftmp->next)
6133 {
6134 if (GET_CODE (ftmp->insn) == BARRIER)
6135 {
6136 if (ftmp->address >= minipool_vector_head->max_address)
6137 break;
6138
6139 last_barrier = ftmp;
6140 }
6141 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
6142 break;
6143
6144 last_added_fix = ftmp; /* Keep track of the last fix added. */
6145 }
6146
6147 /* If we found a barrier, drop back to that; any fixes that we
6148 could have reached but come after the barrier will now go in
6149 the next mini-pool. */
6150 if (last_barrier != NULL)
6151 {
6152 /* Reduce the refcount for those fixes that won't go into this
6153 pool after all. */
6154 for (fdel = last_barrier->next;
6155 fdel && fdel != ftmp;
6156 fdel = fdel->next)
6157 {
6158 fdel->minipool->refcount--;
6159 fdel->minipool = NULL;
6160 }
6161
6162 ftmp = last_barrier;
6163 }
6164 else
6165 {
6166 /* ftmp is the first fix that we can't fit into this pool and
6167 there are no natural barriers that we could use. Insert a
6168 new barrier in the code somewhere between the previous
6169 fix and this one, and arrange to jump around it. */
6170 HOST_WIDE_INT max_address;
6171
6172 /* The last item on the list of fixes must be a barrier, so
6173 we can never run off the end of the list of fixes without
6174 last_barrier being set. */
6175 if (ftmp == NULL)
6176 abort ();
6177
6178 max_address = minipool_vector_head->max_address;
6179 /* Check that there isn't another fix that is in range that
6180 we couldn't fit into this pool because the pool was
6181 already too large: we need to put the pool before such an
6182 instruction. */
6183 if (ftmp->address < max_address)
6184 max_address = ftmp->address;
6185
6186 last_barrier = create_fix_barrier (last_added_fix, max_address);
6187 }
6188
6189 assign_minipool_offsets (last_barrier);
6190
6191 while (ftmp)
6192 {
6193 if (GET_CODE (ftmp->insn) != BARRIER
6194 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
6195 == NULL))
6196 break;
6197
6198 ftmp = ftmp->next;
6199 }
6200
6201 /* Scan over the fixes we have identified for this pool, fixing them
6202 up and adding the constants to the pool itself. */
6203 for (this_fix = fix; this_fix && ftmp != this_fix;
6204 this_fix = this_fix->next)
6205 if (GET_CODE (this_fix->insn) != BARRIER)
6206 {
6207 rtx addr
6208 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
6209 minipool_vector_label),
6210 this_fix->minipool->offset);
6211 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
6212 }
6213
6214 dump_minipool (last_barrier->insn);
6215 fix = ftmp;
6216 }
6217
6218 /* From now on we must synthesize any constants that we can't handle
6219 directly. This can happen if the RTL gets split during final
6220 instruction generation. */
6221 after_arm_reorg = 1;
6222
6223 /* Free the minipool memory. */
6224 obstack_free (&minipool_obstack, minipool_startobj);
6225 }
6226 \f
6227 /* Routines to output assembly language. */
6228
6229 /* If the rtx is one of the valid FPA immediate constants, return the string of its value.
6230 In this way we can ensure that valid double constants are generated even
6231 when cross compiling. */
6232
6233 const char *
6234 fp_immediate_constant (x)
6235 rtx x;
6236 {
6237 REAL_VALUE_TYPE r;
6238 int i;
6239
6240 if (!fpa_consts_inited)
6241 init_fpa_table ();
6242
6243 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6244 for (i = 0; i < 8; i++)
6245 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
6246 return strings_fpa[i];
6247
6248 abort ();
6249 }
6250
6251 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
6252
6253 static const char *
6254 fp_const_from_val (r)
6255 REAL_VALUE_TYPE * r;
6256 {
6257 int i;
6258
6259 if (!fpa_consts_inited)
6260 init_fpa_table ();
6261
6262 for (i = 0; i < 8; i++)
6263 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
6264 return strings_fpa[i];
6265
6266 abort ();
6267 }
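
/* A note on the table searched above (assuming the standard FPA
   immediate set): values_fpa[]/strings_fpa[] conventionally hold 0,
   1, 2, 3, 4, 5, 0.5 and 10, the only constants that FPA
   instructions can encode directly.  */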
6268
6269 /* Output the operands of a LDM/STM instruction to STREAM.
6270 MASK is the ARM register set mask of which only bits 0-15 are important.
6271 REG is the base register, either the frame pointer or the stack pointer.
6272 INSTR is the possibly suffixed load or store instruction. */
6273
6274 static void
6275 print_multi_reg (stream, instr, reg, mask)
6276 FILE * stream;
6277 const char * instr;
6278 int reg;
6279 int mask;
6280 {
6281 int i;
6282 int not_first = FALSE;
6283
6284 fputc ('\t', stream);
6285 asm_fprintf (stream, instr, reg);
6286 fputs (", {", stream);
6287
6288 for (i = 0; i <= LAST_ARM_REGNUM; i++)
6289 if (mask & (1 << i))
6290 {
6291 if (not_first)
6292 fprintf (stream, ", ");
6293
6294 asm_fprintf (stream, "%r", i);
6295 not_first = TRUE;
6296 }
6297
6298 fprintf (stream, "}%s\n", TARGET_APCS_32 ? "" : "^");
6299 }
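
/* For example (illustrative): MASK = (1 << 11) | (1 << 14) with
   INSTR "ldmfd\t%r!" and REG = SP_REGNUM prints

	ldmfd	sp!, {fp, lr}

   with a trailing '^' appended when not compiling for APCS-32.  */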
6300
6301 /* Output a 'call' insn. */
6302
6303 const char *
6304 output_call (operands)
6305 rtx * operands;
6306 {
6307 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
6308
6309 if (REGNO (operands[0]) == LR_REGNUM)
6310 {
6311 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
6312 output_asm_insn ("mov%?\t%0, %|lr", operands);
6313 }
6314
6315 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6316
6317 if (TARGET_INTERWORK)
6318 output_asm_insn ("bx%?\t%0", operands);
6319 else
6320 output_asm_insn ("mov%?\t%|pc, %0", operands);
6321
6322 return "";
6323 }
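
/* So a call through r2 comes out roughly as (illustrative):

	mov	lr, pc
	bx	r2		@ "mov pc, r2" without interworking

   with the address first copied out of lr into ip ("mov ip, lr")
   when the call target is LR itself.  */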
6324
6325 static int
6326 eliminate_lr2ip (x)
6327 rtx * x;
6328 {
6329 int something_changed = 0;
6330 rtx x0 = * x;
6331 int code = GET_CODE (x0);
6332 int i, j;
6333 const char * fmt;
6334
6335 switch (code)
6336 {
6337 case REG:
6338 if (REGNO (x0) == LR_REGNUM)
6339 {
6340 *x = gen_rtx_REG (SImode, IP_REGNUM);
6341 return 1;
6342 }
6343 return 0;
6344 default:
6345 /* Scan through the sub-elements and change any references there. */
6346 fmt = GET_RTX_FORMAT (code);
6347
6348 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6349 if (fmt[i] == 'e')
6350 something_changed |= eliminate_lr2ip (&XEXP (x0, i));
6351 else if (fmt[i] == 'E')
6352 for (j = 0; j < XVECLEN (x0, i); j++)
6353 something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
6354
6355 return something_changed;
6356 }
6357 }
6358
6359 /* Output a 'call' insn that is a reference in memory. */
6360
6361 const char *
6362 output_call_mem (operands)
6363 rtx * operands;
6364 {
6365 operands[0] = copy_rtx (operands[0]); /* Be ultra careful. */
6366 /* Handle calls using lr by using ip (which may be clobbered in subr anyway). */
6367 if (eliminate_lr2ip (&operands[0]))
6368 output_asm_insn ("mov%?\t%|ip, %|lr", operands);
6369
6370 if (TARGET_INTERWORK)
6371 {
6372 output_asm_insn ("ldr%?\t%|ip, %0", operands);
6373 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6374 output_asm_insn ("bx%?\t%|ip", operands);
6375 }
6376 else
6377 {
6378 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
6379 output_asm_insn ("ldr%?\t%|pc, %0", operands);
6380 }
6381
6382 return "";
6383 }
6384
6385
6386 /* Output a move from arm registers to an fpu register.
6387 OPERANDS[0] is an fpu register.
6388 OPERANDS[1] is the first register of an arm register pair. */
6389
6390 const char *
6391 output_mov_long_double_fpu_from_arm (operands)
6392 rtx * operands;
6393 {
6394 int arm_reg0 = REGNO (operands[1]);
6395 rtx ops[3];
6396
6397 if (arm_reg0 == IP_REGNUM)
6398 abort ();
6399
6400 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6401 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6402 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
6403
6404 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
6405 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
6406
6407 return "";
6408 }
6409
6410 /* Output a move from an fpu register to arm registers.
6411 OPERANDS[0] is the first register of an arm register pair.
6412 OPERANDS[1] is an fpu register. */
6413
6414 const char *
6415 output_mov_long_double_arm_from_fpu (operands)
6416 rtx * operands;
6417 {
6418 int arm_reg0 = REGNO (operands[0]);
6419 rtx ops[3];
6420
6421 if (arm_reg0 == IP_REGNUM)
6422 abort ();
6423
6424 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6425 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6426 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
6427
6428 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
6429 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
6430 return "";
6431 }
6432
6433 /* Output a move from arm registers to arm registers of a long double.
6434 OPERANDS[0] is the destination.
6435 OPERANDS[1] is the source. */
6436
6437 const char *
6438 output_mov_long_double_arm_from_arm (operands)
6439 rtx * operands;
6440 {
6441 /* We have to be careful here because the two might overlap. */
6442 int dest_start = REGNO (operands[0]);
6443 int src_start = REGNO (operands[1]);
6444 rtx ops[2];
6445 int i;
6446
6447 if (dest_start < src_start)
6448 {
6449 for (i = 0; i < 3; i++)
6450 {
6451 ops[0] = gen_rtx_REG (SImode, dest_start + i);
6452 ops[1] = gen_rtx_REG (SImode, src_start + i);
6453 output_asm_insn ("mov%?\t%0, %1", ops);
6454 }
6455 }
6456 else
6457 {
6458 for (i = 2; i >= 0; i--)
6459 {
6460 ops[0] = gen_rtx_REG (SImode, dest_start + i);
6461 ops[1] = gen_rtx_REG (SImode, src_start + i);
6462 output_asm_insn ("mov%?\t%0, %1", ops);
6463 }
6464 }
6465
6466 return "";
6467 }
6468
6469
6470 /* Output a move from arm registers to an fpu register.
6471 OPERANDS[0] is an fpu register.
6472 OPERANDS[1] is the first register of an arm register pair. */
6473
6474 const char *
6475 output_mov_double_fpu_from_arm (operands)
6476 rtx * operands;
6477 {
6478 int arm_reg0 = REGNO (operands[1]);
6479 rtx ops[2];
6480
6481 if (arm_reg0 == IP_REGNUM)
6482 abort ();
6483
6484 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6485 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6486 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
6487 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
6488 return "";
6489 }
6490
6491 /* Output a move from an fpu register to arm registers.
6492 OPERANDS[0] is the first register of an arm register pair.
6493 OPERANDS[1] is an fpu register. */
6494
6495 const char *
6496 output_mov_double_arm_from_fpu (operands)
6497 rtx * operands;
6498 {
6499 int arm_reg0 = REGNO (operands[0]);
6500 rtx ops[2];
6501
6502 if (arm_reg0 == IP_REGNUM)
6503 abort ();
6504
6505 ops[0] = gen_rtx_REG (SImode, arm_reg0);
6506 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
6507 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
6508 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
6509 return "";
6510 }
6511
6512 /* Output a move between double words.
6513 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
6514 or MEM<-REG and all MEMs must be offsettable addresses. */
6515
6516 const char *
6517 output_move_double (operands)
6518 rtx * operands;
6519 {
6520 enum rtx_code code0 = GET_CODE (operands[0]);
6521 enum rtx_code code1 = GET_CODE (operands[1]);
6522 rtx otherops[3];
6523
6524 if (code0 == REG)
6525 {
6526 int reg0 = REGNO (operands[0]);
6527
6528 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
6529
6530 if (code1 == REG)
6531 {
6532 int reg1 = REGNO (operands[1]);
6533 if (reg1 == IP_REGNUM)
6534 abort ();
6535
6536 /* Ensure the second source is not overwritten. */
6537 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
6538 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
6539 else
6540 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
6541 }
6542 else if (code1 == CONST_DOUBLE)
6543 {
6544 if (GET_MODE (operands[1]) == DFmode)
6545 {
6546 REAL_VALUE_TYPE r;
6547 long l[2];
6548
6549 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
6550 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
6551 otherops[1] = GEN_INT (l[1]);
6552 operands[1] = GEN_INT (l[0]);
6553 }
6554 else if (GET_MODE (operands[1]) != VOIDmode)
6555 abort ();
6556 else if (WORDS_BIG_ENDIAN)
6557 {
6558 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
6559 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
6560 }
6561 else
6562 {
6563 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
6564 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
6565 }
6566
6567 output_mov_immediate (operands);
6568 output_mov_immediate (otherops);
6569 }
6570 else if (code1 == CONST_INT)
6571 {
6572 #if HOST_BITS_PER_WIDE_INT > 32
6573 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
6574 what the upper word is. */
6575 if (WORDS_BIG_ENDIAN)
6576 {
6577 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
6578 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
6579 }
6580 else
6581 {
6582 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
6583 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
6584 }
6585 #else
6586 /* Sign extend the intval into the high-order word. */
6587 if (WORDS_BIG_ENDIAN)
6588 {
6589 otherops[1] = operands[1];
6590 operands[1] = (INTVAL (operands[1]) < 0
6591 ? constm1_rtx : const0_rtx);
6592 }
6593 else
6594 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
6595 #endif
6596 output_mov_immediate (otherops);
6597 output_mov_immediate (operands);
6598 }
6599 else if (code1 == MEM)
6600 {
6601 switch (GET_CODE (XEXP (operands[1], 0)))
6602 {
6603 case REG:
6604 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
6605 break;
6606
6607 case PRE_INC:
6608 abort (); /* Should never happen now. */
6609 break;
6610
6611 case PRE_DEC:
6612 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
6613 break;
6614
6615 case POST_INC:
6616 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
6617 break;
6618
6619 case POST_DEC:
6620 abort (); /* Should never happen now. */
6621 break;
6622
6623 case LABEL_REF:
6624 case CONST:
6625 output_asm_insn ("adr%?\t%0, %1", operands);
6626 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
6627 break;
6628
6629 default:
6630 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
6631 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
6632 {
6633 otherops[0] = operands[0];
6634 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
6635 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
6636
6637 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
6638 {
6639 if (GET_CODE (otherops[2]) == CONST_INT)
6640 {
6641 switch (INTVAL (otherops[2]))
6642 {
6643 case -8:
6644 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
6645 return "";
6646 case -4:
6647 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
6648 return "";
6649 case 4:
6650 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
6651 return "";
6652 }
6653
6654 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
6655 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
6656 else
6657 output_asm_insn ("add%?\t%0, %1, %2", otherops);
6658 }
6659 else
6660 output_asm_insn ("add%?\t%0, %1, %2", otherops);
6661 }
6662 else
6663 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
6664
6665 return "ldm%?ia\t%0, %M0";
6666 }
6667 else
6668 {
6669 otherops[1] = adjust_address (operands[1], VOIDmode, 4);
6670 /* Take care of overlapping base/data reg. */
6671 if (reg_mentioned_p (operands[0], operands[1]))
6672 {
6673 output_asm_insn ("ldr%?\t%0, %1", otherops);
6674 output_asm_insn ("ldr%?\t%0, %1", operands);
6675 }
6676 else
6677 {
6678 output_asm_insn ("ldr%?\t%0, %1", operands);
6679 output_asm_insn ("ldr%?\t%0, %1", otherops);
6680 }
6681 }
6682 }
6683 }
6684 else
6685 abort (); /* Constraints should prevent this. */
6686 }
6687 else if (code0 == MEM && code1 == REG)
6688 {
6689 if (REGNO (operands[1]) == IP_REGNUM)
6690 abort ();
6691
6692 switch (GET_CODE (XEXP (operands[0], 0)))
6693 {
6694 case REG:
6695 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
6696 break;
6697
6698 case PRE_INC:
6699 abort (); /* Should never happen now. */
6700 break;
6701
6702 case PRE_DEC:
6703 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
6704 break;
6705
6706 case POST_INC:
6707 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
6708 break;
6709
6710 case POST_DEC:
6711 abort (); /* Should never happen now. */
6712 break;
6713
6714 case PLUS:
6715 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
6716 {
6717 switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
6718 {
6719 case -8:
6720 output_asm_insn ("stm%?db\t%m0, %M1", operands);
6721 return "";
6722
6723 case -4:
6724 output_asm_insn ("stm%?da\t%m0, %M1", operands);
6725 return "";
6726
6727 case 4:
6728 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
6729 return "";
6730 }
6731 }
6732 /* Fall through.  */
6733
6734 default:
6735 otherops[0] = adjust_address (operands[0], VOIDmode, 4);
6736 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
6737 output_asm_insn ("str%?\t%1, %0", operands);
6738 output_asm_insn ("str%?\t%1, %0", otherops);
6739 }
6740 }
6741 else
6742 /* Constraints should prevent this. */
6743 abort ();
6744
6745 return "";
6746 }
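
/* Illustrative examples of the REG<-MEM cases above: a plain
   register base gives "ldmia rb, {rlo, rhi}"; constant offsets of
   -8, -4 and 4 map directly onto the ldmdb, ldmda and ldmib
   addressing variants; any other offset is first folded into the
   destination register with an add/sub and then loaded with ldmia.  */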
6747
6748
6749 /* Output an arbitrary MOV reg, #n.
6750 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
6751
6752 const char *
6753 output_mov_immediate (operands)
6754 rtx * operands;
6755 {
6756 HOST_WIDE_INT n = INTVAL (operands[1]);
6757
6758 /* Try to use one MOV. */
6759 if (const_ok_for_arm (n))
6760 output_asm_insn ("mov%?\t%0, %1", operands);
6761
6762 /* Try to use one MVN. */
6763 else if (const_ok_for_arm (~n))
6764 {
6765 operands[1] = GEN_INT (~n);
6766 output_asm_insn ("mvn%?\t%0, %1", operands);
6767 }
6768 else
6769 {
6770 int n_ones = 0;
6771 int i;
6772
6773 /* If all else fails, make it out of ORRs or BICs as appropriate. */
6774 for (i = 0; i < 32; i ++)
6775 if (n & 1 << i)
6776 n_ones ++;
6777
6778 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
6779 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
6780 else
6781 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
6782 }
6783
6784 return "";
6785 }
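
/* Worked example (illustrative): for n = 0xffff00ff, n itself is not
   a valid immediate but ~n = 0xff00 is, so a single "mvn rD, #65280"
   suffices; something like 0x12345678 fails both tests and falls
   through to output_multi_immediate.  */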
6786
6787 /* Output an ADD r, s, #n where n may be too big for one instruction.
6788 If the addend is zero and the registers are the same, output nothing. */
6789
6790 const char *
6791 output_add_immediate (operands)
6792 rtx * operands;
6793 {
6794 HOST_WIDE_INT n = INTVAL (operands[2]);
6795
6796 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
6797 {
6798 if (n < 0)
6799 output_multi_immediate (operands,
6800 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
6801 -n);
6802 else
6803 output_multi_immediate (operands,
6804 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
6805 n);
6806 }
6807
6808 return "";
6809 }
6810
6811 /* Output a multiple immediate operation.
6812 OPERANDS is the vector of operands referred to in the output patterns.
6813 INSTR1 is the output pattern to use for the first constant.
6814 INSTR2 is the output pattern to use for subsequent constants.
6815 IMMED_OP is the index of the constant slot in OPERANDS.
6816 N is the constant value. */
6817
6818 static const char *
6819 output_multi_immediate (operands, instr1, instr2, immed_op, n)
6820 rtx * operands;
6821 const char * instr1;
6822 const char * instr2;
6823 int immed_op;
6824 HOST_WIDE_INT n;
6825 {
6826 #if HOST_BITS_PER_WIDE_INT > 32
6827 n &= 0xffffffff;
6828 #endif
6829
6830 if (n == 0)
6831 {
6832 /* Quick and easy output. */
6833 operands[immed_op] = const0_rtx;
6834 output_asm_insn (instr1, operands);
6835 }
6836 else
6837 {
6838 int i;
6839 const char * instr = instr1;
6840
6841 /* Note that n is never zero here (which would give no output). */
6842 for (i = 0; i < 32; i += 2)
6843 {
6844 if (n & (3 << i))
6845 {
6846 operands[immed_op] = GEN_INT (n & (255 << i));
6847 output_asm_insn (instr, operands);
6848 instr = instr2;
6849 i += 6;
6850 }
6851 }
6852 }
6853
6854 return "";
6855 }
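
/* Worked example (illustrative): for a MOV/ORR pair with N = 0xf00f
   the scan emits the chunk 0x000f at bit 0 and then 0xf000 at bit 12:

	mov	rD, #15
	orr	rD, rD, #61440

   Each chunk is an 8-bit value at an even bit position, which is
   exactly what the ARM immediate encoding can represent.  */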
6856
6857 /* Return the appropriate ARM instruction for the operation code.
6858 The returned result should not be overwritten. OP is the rtx of the
6859 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
6860 was shifted. */
6861
6862 const char *
6863 arithmetic_instr (op, shift_first_arg)
6864 rtx op;
6865 int shift_first_arg;
6866 {
6867 switch (GET_CODE (op))
6868 {
6869 case PLUS:
6870 return "add";
6871
6872 case MINUS:
6873 return shift_first_arg ? "rsb" : "sub";
6874
6875 case IOR:
6876 return "orr";
6877
6878 case XOR:
6879 return "eor";
6880
6881 case AND:
6882 return "and";
6883
6884 default:
6885 abort ();
6886 }
6887 }
6888
6889 /* Ensure valid constant shifts and return the appropriate shift mnemonic
6890 for the operation code. The returned result should not be overwritten.
6891 OP is the rtx of the shift.
6892 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
6893 constant amount of the shift otherwise. */
6894
6895 static const char *
6896 shift_op (op, amountp)
6897 rtx op;
6898 HOST_WIDE_INT *amountp;
6899 {
6900 const char * mnem;
6901 enum rtx_code code = GET_CODE (op);
6902
6903 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
6904 *amountp = -1;
6905 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
6906 *amountp = INTVAL (XEXP (op, 1));
6907 else
6908 abort ();
6909
6910 switch (code)
6911 {
6912 case ASHIFT:
6913 mnem = "asl";
6914 break;
6915
6916 case ASHIFTRT:
6917 mnem = "asr";
6918 break;
6919
6920 case LSHIFTRT:
6921 mnem = "lsr";
6922 break;
6923
6924 case ROTATERT:
6925 mnem = "ror";
6926 break;
6927
6928 case MULT:
6929 /* We never have to worry about the amount being other than a
6930 power of 2, since this case can never be reloaded from a reg. */
6931 if (*amountp != -1)
6932 *amountp = int_log2 (*amountp);
6933 else
6934 abort ();
6935 return "asl";
6936
6937 default:
6938 abort ();
6939 }
6940
6941 if (*amountp != -1)
6942 {
6943 /* This is not 100% correct, but follows from the desire to merge
6944 multiplication by a power of 2 with the recognizer for a
6945 shift. >=32 is not a valid shift for "asl", so we must try and
6946 output a shift that produces the correct arithmetical result.
6947 Using lsr #32 is identical except for the fact that the carry bit
6948 is not set correctly if we set the flags; but we never use the
6949 carry bit from such an operation, so we can ignore that. */
6950 if (code == ROTATERT)
6951 /* Rotate is just modulo 32. */
6952 *amountp &= 31;
6953 else if (*amountp != (*amountp & 31))
6954 {
6955 if (code == ASHIFT)
6956 mnem = "lsr";
6957 *amountp = 32;
6958 }
6959
6960 /* Shifts of 0 are no-ops. */
6961 if (*amountp == 0)
6962 return NULL;
6963 }
6964
6965 return mnem;
6966 }
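
/* Illustrative mappings: (mult x 8) comes out as "asl" with *AMOUNTP
   set to int_log2 (8) == 3, while (ashift x 33) cannot be expressed
   as an asl and so becomes "lsr" with *AMOUNTP forced to 32, which
   yields the same all-zero arithmetic result.  */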
6967
6968 /* Obtain the shift count from the power of two POWER. */
6969
6970 static HOST_WIDE_INT
6971 int_log2 (power)
6972 HOST_WIDE_INT power;
6973 {
6974 HOST_WIDE_INT shift = 0;
6975
6976 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
6977 {
6978 if (shift > 31)
6979 abort ();
6980 shift ++;
6981 }
6982
6983 return shift;
6984 }
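
/* E.g. int_log2 (8) == 3.  Strictly, this returns the index of the
   lowest set bit and aborts if nothing is set in the low 32 bits;
   callers only ever pass exact powers of two.  */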
6985
6986 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
6987 /bin/as is horribly restrictive. */
6988 #define MAX_ASCII_LEN 51
6989
6990 void
6991 output_ascii_pseudo_op (stream, p, len)
6992 FILE * stream;
6993 const unsigned char * p;
6994 int len;
6995 {
6996 int i;
6997 int len_so_far = 0;
6998
6999 fputs ("\t.ascii\t\"", stream);
7000
7001 for (i = 0; i < len; i++)
7002 {
7003 int c = p[i];
7004
7005 if (len_so_far >= MAX_ASCII_LEN)
7006 {
7007 fputs ("\"\n\t.ascii\t\"", stream);
7008 len_so_far = 0;
7009 }
7010
7011 switch (c)
7012 {
7013 case TARGET_TAB:
7014 fputs ("\\t", stream);
7015 len_so_far += 2;
7016 break;
7017
7018 case TARGET_FF:
7019 fputs ("\\f", stream);
7020 len_so_far += 2;
7021 break;
7022
7023 case TARGET_BS:
7024 fputs ("\\b", stream);
7025 len_so_far += 2;
7026 break;
7027
7028 case TARGET_CR:
7029 fputs ("\\r", stream);
7030 len_so_far += 2;
7031 break;
7032
7033 case TARGET_NEWLINE:
7034 fputs ("\\n", stream);
7035 c = (i + 1 < len) ? p [i + 1] : 0;	/* Don't read past LEN. */
7036 if ((c >= ' ' && c <= '~')
7037 || c == TARGET_TAB)
7038 /* This is a good place for a line break. */
7039 len_so_far = MAX_ASCII_LEN;
7040 else
7041 len_so_far += 2;
7042 break;
7043
7044 case '\"':
7045 case '\\':
7046 putc ('\\', stream);
7047 len_so_far++;
7048 /* drop through. */
7049
7050 default:
7051 if (c >= ' ' && c <= '~')
7052 {
7053 putc (c, stream);
7054 len_so_far++;
7055 }
7056 else
7057 {
7058 fprintf (stream, "\\%03o", c);
7059 len_so_far += 4;
7060 }
7061 break;
7062 }
7063 }
7064
7065 fputs ("\"\n", stream);
7066 }
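
/* For example (illustrative): the bytes 'O', 'K', '\t', '\n' are
   emitted as

	.ascii	"OK\t\n"

   and a newline followed by a printable character forces a split
   into a fresh .ascii directive, keeping each one under
   MAX_ASCII_LEN characters.  */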
7067 \f
7068 /* Compute the register save mask for registers 0 through 12
7069 inclusive. This code is used by both arm_compute_save_reg_mask
7070 and arm_compute_initial_elimination_offset. */
7071
7072 static unsigned long
7073 arm_compute_save_reg0_reg12_mask ()
7074 {
7075 unsigned long func_type = arm_current_func_type ();
7076 unsigned int save_reg_mask = 0;
7077 unsigned int reg;
7078
7079 if (IS_INTERRUPT (func_type))
7080 {
7081 unsigned int max_reg;
7082 /* Interrupt functions must not corrupt any registers,
7083 even call clobbered ones. If this is a leaf function
7084 we can just examine the registers used by the RTL, but
7085 otherwise we have to assume that whatever function is
7086 called might clobber anything, and so we have to save
7087 all the call-clobbered registers as well. */
7088 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
7089 /* FIQ handlers have registers r8 - r12 banked, so
7090 we only need to check r0 - r7.  Normal ISRs only
7091 bank r14 and r15, so we must check up to r12.
7092 r13 is the stack pointer which is always preserved,
7093 so we do not need to consider it here. */
7094 max_reg = 7;
7095 else
7096 max_reg = 12;
7097
7098 for (reg = 0; reg <= max_reg; reg++)
7099 if (regs_ever_live[reg]
7100 || (! current_function_is_leaf && call_used_regs [reg]))
7101 save_reg_mask |= (1 << reg);
7102 }
7103 else
7104 {
7105 /* In the normal case we only need to save those registers
7106 which are call saved and which are used by this function. */
7107 for (reg = 0; reg <= 10; reg++)
7108 if (regs_ever_live[reg] && ! call_used_regs [reg])
7109 save_reg_mask |= (1 << reg);
7110
7111 /* Handle the frame pointer as a special case. */
7112 if (! TARGET_APCS_FRAME
7113 && ! frame_pointer_needed
7114 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
7115 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
7116 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
7117
7118 /* If we aren't loading the PIC register,
7119 don't stack it even though it may be live. */
7120 if (flag_pic
7121 && ! TARGET_SINGLE_PIC_BASE
7122 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
7123 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
7124 }
7125
7126 return save_reg_mask;
7127 }
7128
7129 /* Compute a bit mask of which registers need to be
7130 saved on the stack for the current function. */
7131
7132 static unsigned long
7133 arm_compute_save_reg_mask ()
7134 {
7135 unsigned int save_reg_mask = 0;
7136 unsigned long func_type = arm_current_func_type ();
7137
7138 if (IS_NAKED (func_type))
7139 /* This should never really happen. */
7140 return 0;
7141
7142 /* If we are creating a stack frame, then we must save the frame pointer,
7143 IP (which will hold the old stack pointer), LR and the PC. */
7144 if (frame_pointer_needed)
7145 save_reg_mask |=
7146 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
7147 | (1 << IP_REGNUM)
7148 | (1 << LR_REGNUM)
7149 | (1 << PC_REGNUM);
7150
7151 /* Volatile functions do not return, so there
7152 is no need to save any other registers. */
7153 if (IS_VOLATILE (func_type))
7154 return save_reg_mask;
7155
7156 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
7157
7158 /* Decide if we need to save the link register.
7159 Interrupt routines have their own banked link register,
7160 so they never need to save it.
7161 Otherwise if we do not use the link register we do not need to save
7162 it. If we are pushing other registers onto the stack however, we
7163 can save an instruction in the epilogue by pushing the link register
7164 now and then popping it back into the PC. This incurs extra memory
7165 accesses though, so we only do it when optimising for size, and only
7166 if we know that we will not need a fancy return sequence. */
7167 if (regs_ever_live [LR_REGNUM]
7168 || (save_reg_mask
7169 && optimize_size
7170 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
7171 save_reg_mask |= 1 << LR_REGNUM;
7172
7173 if (cfun->machine->lr_save_eliminated)
7174 save_reg_mask &= ~ (1 << LR_REGNUM);
7175
7176 return save_reg_mask;
7177 }
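
/* For instance (illustrative): a normal function that needs a frame
   pointer starts from the mask (1 << 11) | (1 << 12) | (1 << 14)
   | (1 << 15) == 0xd800 (fp, ip, lr, pc) and then ORs in whatever
   call-saved registers in r0-r10 the function actually uses.  */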
7178
7179 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
7180 everything bar the final return instruction. */
7181
7182 const char *
7183 output_return_instruction (operand, really_return, reverse)
7184 rtx operand;
7185 int really_return;
7186 int reverse;
7187 {
7188 char conditional[10];
7189 char instr[100];
7190 int reg;
7191 unsigned long live_regs_mask;
7192 unsigned long func_type;
7193
7194 func_type = arm_current_func_type ();
7195
7196 if (IS_NAKED (func_type))
7197 return "";
7198
7199 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
7200 {
7201 /* If this function was declared non-returning, and we have found a tail
7202 call, then we have to trust that the called function won't return. */
7203 if (really_return)
7204 {
7205 rtx ops[2];
7206
7207 /* Otherwise, trap an attempted return by aborting. */
7208 ops[0] = operand;
7209 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
7210 : "abort");
7211 assemble_external_libcall (ops[1]);
7212 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
7213 }
7214
7215 return "";
7216 }
7217
7218 if (current_function_calls_alloca && !really_return)
7219 abort ();
7220
7221 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
7222
7223 return_used_this_function = 1;
7224
7225 live_regs_mask = arm_compute_save_reg_mask ();
7226
7227 if (live_regs_mask)
7228 {
7229 const char * return_reg;
7230
7231 /* If we do not have any special requirements for function exit
7232 (eg interworking, or ISR) then we can load the return address
7233 directly into the PC. Otherwise we must load it into LR. */
7234 if (really_return
7235 && ! TARGET_INTERWORK)
7236 return_reg = reg_names[PC_REGNUM];
7237 else
7238 return_reg = reg_names[LR_REGNUM];
7239
7240 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
7241 /* There are two possible reasons for the IP register being saved.
7242 Either a stack frame was created, in which case IP contains the
7243 old stack pointer, or an ISR routine corrupted it. If this is an
7244 ISR routine then just restore IP, otherwise restore IP into SP. */
7245 if (! IS_INTERRUPT (func_type))
7246 {
7247 live_regs_mask &= ~ (1 << IP_REGNUM);
7248 live_regs_mask |= (1 << SP_REGNUM);
7249 }
7250
7251 /* On some ARM architectures it is faster to use LDR rather than
7252 LDM to load a single register. On other architectures, the
7253 cost is the same. In 26 bit mode, or for exception handlers,
7254 we have to use LDM to load the PC so that the CPSR is also
7255 restored. */
7256 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
7257 {
7258 if (live_regs_mask == (unsigned int)(1 << reg))
7259 break;
7260 }
7261 if (reg <= LAST_ARM_REGNUM
7262 && (reg != LR_REGNUM
7263 || ! really_return
7264 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
7265 {
7266 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
7267 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
7268 }
7269 else
7270 {
7271 char *p;
7272 int first = 1;
7273
7274 /* Generate the load multiple instruction to restore the registers. */
7275 if (frame_pointer_needed)
7276 sprintf (instr, "ldm%sea\t%%|fp, {", conditional);
7277 else
7278 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
7279
7280 p = instr + strlen (instr);
7281
7282 for (reg = 0; reg <= SP_REGNUM; reg++)
7283 if (live_regs_mask & (1 << reg))
7284 {
7285 int l = strlen (reg_names[reg]);
7286
7287 if (first)
7288 first = 0;
7289 else
7290 {
7291 memcpy (p, ", ", 2);
7292 p += 2;
7293 }
7294
7295 memcpy (p, "%|", 2);
7296 memcpy (p + 2, reg_names[reg], l);
7297 p += l + 2;
7298 }
7299
7300 if (live_regs_mask & (1 << LR_REGNUM))
7301 {
7302 int l = strlen (return_reg);
7303
7304 if (! first)
7305 {
7306 memcpy (p, ", ", 2);
7307 p += 2;
7308 }
7309
7310 memcpy (p, "%|", 2);
7311 memcpy (p + 2, return_reg, l);
7312 strcpy (p + 2 + l, ((TARGET_APCS_32
7313 && !IS_INTERRUPT (func_type))
7314 || !really_return)
7315 ? "}" : "}^");
7316 }
7317 else
7318 strcpy (p, "}");
7319 }
7320
7321 output_asm_insn (instr, & operand);
7322
7323 /* See if we need to generate an extra instruction to
7324 perform the actual function return. */
7325 if (really_return
7326 && func_type != ARM_FT_INTERWORKED
7327 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
7328 {
7329 /* The return has already been handled
7330 by loading the LR into the PC. */
7331 really_return = 0;
7332 }
7333 }
7334
7335 if (really_return)
7336 {
7337 switch ((int) ARM_FUNC_TYPE (func_type))
7338 {
7339 case ARM_FT_ISR:
7340 case ARM_FT_FIQ:
7341 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
7342 break;
7343
7344 case ARM_FT_INTERWORKED:
7345 sprintf (instr, "bx%s\t%%|lr", conditional);
7346 break;
7347
7348 case ARM_FT_EXCEPTION:
7349 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
7350 break;
7351
7352 default:
7353 /* ARMv5 implementations always provide BX, so interworking
7354 is the default unless APCS-26 is in use. */
7355 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
7356 sprintf (instr, "bx%s\t%%|lr", conditional);
7357 else
7358 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
7359 conditional, TARGET_APCS_32 ? "" : "s");
7360 break;
7361 }
7362
7363 output_asm_insn (instr, & operand);
7364 }
7365
7366 return "";
7367 }
7368
7369 /* Write the function name into the code section, directly preceding
7370 the function prologue.
7371
7372 Code will be output similar to this:
7373 t0
7374 .ascii "arm_poke_function_name", 0
7375 .align
7376 t1
7377 .word 0xff000000 + (t1 - t0)
7378 arm_poke_function_name
7379 mov ip, sp
7380 stmfd sp!, {fp, ip, lr, pc}
7381 sub fp, ip, #4
7382
7383 When performing a stack backtrace, code can inspect the value
7384 of 'pc' stored at 'fp' + 0. If the trace function then looks
7385 at location pc - 12 and the top 8 bits are set, then we know
7386 that there is a function name embedded immediately preceding this
7387 location, whose length is ((pc[-3]) & ~0xff000000).
7388
7389 We assume that pc is declared as a pointer to an unsigned long.
7390
7391 It is of no benefit to output the function name if we are assembling
7392 a leaf function. These function types will not contain a stack
7393 backtrace structure, therefore it is not possible to determine the
7394 function name. */
7395
7396 void
7397 arm_poke_function_name (stream, name)
7398 FILE * stream;
7399 const char * name;
7400 {
7401 unsigned long alignlength;
7402 unsigned long length;
7403 rtx x;
7404
7405 length = strlen (name) + 1;
7406 alignlength = ROUND_UP (length);
7407
7408 ASM_OUTPUT_ASCII (stream, name, length);
7409 ASM_OUTPUT_ALIGN (stream, 2);
7410 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
7411 assemble_aligned_integer (UNITS_PER_WORD, x);
7412 }
7413
7414 /* Place some comments into the assembler stream
7415 describing the current function. */
7416
7417 static void
7418 arm_output_function_prologue (f, frame_size)
7419 FILE * f;
7420 HOST_WIDE_INT frame_size;
7421 {
7422 unsigned long func_type;
7423
7424 if (!TARGET_ARM)
7425 {
7426 thumb_output_function_prologue (f, frame_size);
7427 return;
7428 }
7429
7430 /* Sanity check. */
7431 if (arm_ccfsm_state || arm_target_insn)
7432 abort ();
7433
7434 func_type = arm_current_func_type ();
7435
7436 switch ((int) ARM_FUNC_TYPE (func_type))
7437 {
7438 default:
7439 case ARM_FT_NORMAL:
7440 break;
7441 case ARM_FT_INTERWORKED:
7442 asm_fprintf (f, "\t%@ Function supports interworking.\n");
7443 break;
7444 case ARM_FT_EXCEPTION_HANDLER:
7445 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
7446 break;
7447 case ARM_FT_ISR:
7448 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
7449 break;
7450 case ARM_FT_FIQ:
7451 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
7452 break;
7453 case ARM_FT_EXCEPTION:
7454 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
7455 break;
7456 }
7457
7458 if (IS_NAKED (func_type))
7459 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
7460
7461 if (IS_VOLATILE (func_type))
7462 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
7463
7464 if (IS_NESTED (func_type))
7465 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
7466
7467 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %d\n",
7468 current_function_args_size,
7469 current_function_pretend_args_size, frame_size);
7470
7471 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
7472 frame_pointer_needed,
7473 cfun->machine->uses_anonymous_args);
7474
7475 if (cfun->machine->lr_save_eliminated)
7476 asm_fprintf (f, "\t%@ link register save eliminated.\n");
7477
7478 #ifdef AOF_ASSEMBLER
7479 if (flag_pic)
7480 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
7481 #endif
7482
7483 return_used_this_function = 0;
7484 }
7485
7486 const char *
7487 arm_output_epilogue (really_return)
7488 int really_return;
7489 {
7490 int reg;
7491 unsigned long saved_regs_mask;
7492 unsigned long func_type;
7493 /* Floats_offset is the offset from the "virtual" frame. In an APCS
7494 frame that is $fp + 4 for a non-variadic function. */
7495 int floats_offset = 0;
7496 rtx operands[3];
7497 int frame_size = get_frame_size ();
7498 FILE * f = asm_out_file;
7499 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
7500
7501 /* If we have already generated the return instruction
7502 then it is futile to generate anything else. */
7503 if (use_return_insn (FALSE) && return_used_this_function)
7504 return "";
7505
7506 func_type = arm_current_func_type ();
7507
7508 if (IS_NAKED (func_type))
7509 /* Naked functions don't have epilogues. */
7510 return "";
7511
7512 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
7513 {
7514 rtx op;
7515
7516 /* A volatile function should never return. Call abort. */
7517 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
7518 assemble_external_libcall (op);
7519 output_asm_insn ("bl\t%a0", &op);
7520
7521 return "";
7522 }
7523
7524 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
7525 && ! really_return)
7526 /* If we are throwing an exception, then we really must
7527 be doing a return, so we can't tail-call. */
7528 abort ();
7529
7530 saved_regs_mask = arm_compute_save_reg_mask ();
7531
7532 /* XXX We should adjust floats_offset for any anonymous args, and then
7533 re-adjust vfp_offset below to compensate. */
7534
7535 /* Compute how far away the floats will be. */
7536 for (reg = 0; reg <= LAST_ARM_REGNUM; reg ++)
7537 if (saved_regs_mask & (1 << reg))
7538 floats_offset += 4;
7539
7540 if (frame_pointer_needed)
7541 {
7542 int vfp_offset = 4;
7543
7544 if (arm_fpu_arch == FP_SOFT2)
7545 {
7546 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
7547 if (regs_ever_live[reg] && !call_used_regs[reg])
7548 {
7549 floats_offset += 12;
7550 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
7551 reg, FP_REGNUM, floats_offset - vfp_offset);
7552 }
7553 }
7554 else
7555 {
7556 int start_reg = LAST_ARM_FP_REGNUM;
7557
7558 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
7559 {
7560 if (regs_ever_live[reg] && !call_used_regs[reg])
7561 {
7562 floats_offset += 12;
7563
7564 /* We can't unstack more than four registers at once. */
7565 if (start_reg - reg == 3)
7566 {
7567 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
7568 reg, FP_REGNUM, floats_offset - vfp_offset);
7569 start_reg = reg - 1;
7570 }
7571 }
7572 else
7573 {
7574 if (reg != start_reg)
7575 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
7576 reg + 1, start_reg - reg,
7577 FP_REGNUM, floats_offset - vfp_offset);
7578 start_reg = reg - 1;
7579 }
7580 }
7581
7582 /* Just in case the last register checked also needs unstacking. */
7583 if (reg != start_reg)
7584 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
7585 reg + 1, start_reg - reg,
7586 FP_REGNUM, floats_offset - vfp_offset);
7587 }
7588
7589 /* saved_regs_mask should contain the IP, which at the time of stack
7590 frame generation actually contains the old stack pointer. So a
7591 quick way to unwind the stack is just to pop the IP register directly
7592 into the stack pointer. */
7593 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
7594 abort ();
7595 saved_regs_mask &= ~ (1 << IP_REGNUM);
7596 saved_regs_mask |= (1 << SP_REGNUM);
7597
7598 /* There are two registers left in saved_regs_mask - LR and PC. We
7599 only need to restore the LR register (the return address), but to
7600 save time we can load it directly into the PC, unless we need a
7601 special function exit sequence, or we are not really returning. */
7602 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
7603 /* Delete the LR from the register mask, so that the LR on
7604 the stack is loaded into the PC in the register mask. */
7605 saved_regs_mask &= ~ (1 << LR_REGNUM);
7606 else
7607 saved_regs_mask &= ~ (1 << PC_REGNUM);
7608
7609 print_multi_reg (f, "ldmea\t%r", FP_REGNUM, saved_regs_mask);
7610
7611 if (IS_INTERRUPT (func_type))
7612 /* Interrupt handlers will have pushed the
7613 IP onto the stack, so restore it now. */
7614 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, 1 << IP_REGNUM);
7615 }
7616 else
7617 {
7618 /* Restore stack pointer if necessary. */
7619 if (frame_size + current_function_outgoing_args_size != 0)
7620 {
7621 operands[0] = operands[1] = stack_pointer_rtx;
7622 operands[2] = GEN_INT (frame_size
7623 + current_function_outgoing_args_size);
7624 output_add_immediate (operands);
7625 }
7626
7627 if (arm_fpu_arch == FP_SOFT2)
7628 {
7629 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
7630 if (regs_ever_live[reg] && !call_used_regs[reg])
7631 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
7632 reg, SP_REGNUM);
7633 }
7634 else
7635 {
7636 int start_reg = FIRST_ARM_FP_REGNUM;
7637
7638 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
7639 {
7640 if (regs_ever_live[reg] && !call_used_regs[reg])
7641 {
7642 if (reg - start_reg == 3)
7643 {
7644 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
7645 start_reg, SP_REGNUM);
7646 start_reg = reg + 1;
7647 }
7648 }
7649 else
7650 {
7651 if (reg != start_reg)
7652 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
7653 start_reg, reg - start_reg,
7654 SP_REGNUM);
7655
7656 start_reg = reg + 1;
7657 }
7658 }
7659
7660 /* Just in case the last register checked also needs unstacking. */
7661 if (reg != start_reg)
7662 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
7663 start_reg, reg - start_reg, SP_REGNUM);
7664 }
7665
7666 /* If we can, restore the LR into the PC. */
7667 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
7668 && really_return
7669 && current_function_pretend_args_size == 0
7670 && saved_regs_mask & (1 << LR_REGNUM))
7671 {
7672 saved_regs_mask &= ~ (1 << LR_REGNUM);
7673 saved_regs_mask |= (1 << PC_REGNUM);
7674 }
7675
7676 /* Load the registers off the stack. If we only have one register
7677 to load use the LDR instruction - it is faster. */
7678 if (saved_regs_mask == (1 << LR_REGNUM))
7679 {
7680 /* The exception handler ignores the LR, so we do
7681 not really need to load it off the stack. */
7682 if (eh_ofs)
7683 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
7684 else
7685 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
7686 }
7687 else if (saved_regs_mask)
7688 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
7689
7690 if (current_function_pretend_args_size)
7691 {
7692 /* Unwind the pre-pushed regs. */
7693 operands[0] = operands[1] = stack_pointer_rtx;
7694 operands[2] = GEN_INT (current_function_pretend_args_size);
7695 output_add_immediate (operands);
7696 }
7697 }
7698
7699 #if 0
7700 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER)
7701 /* Adjust the stack to remove the exception handler stuff. */
7702 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
7703 REGNO (eh_ofs));
7704 #endif
7705
7706 if (! really_return
7707 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
7708 && current_function_pretend_args_size == 0
7709 && saved_regs_mask & (1 << PC_REGNUM)))
7710 return "";
7711
7712 /* Generate the return instruction. */
7713 switch ((int) ARM_FUNC_TYPE (func_type))
7714 {
7715 case ARM_FT_EXCEPTION_HANDLER:
7716 /* Even in 26-bit mode we do a mov (rather than a movs)
7717 because we don't have the PSR bits set in the address. */
7718 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
7719 break;
7720
7721 case ARM_FT_ISR:
7722 case ARM_FT_FIQ:
7723 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
7724 break;
7725
7726 case ARM_FT_EXCEPTION:
7727 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7728 break;
7729
7730 case ARM_FT_INTERWORKED:
7731 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
7732 break;
7733
7734 default:
7735 if (frame_pointer_needed)
7736 /* If we used the frame pointer then the return address
7737 will have been loaded off the stack directly into the
7738 PC, so there is no need to issue a MOV instruction
7739 here. */
7740 ;
7741 else if (current_function_pretend_args_size == 0
7742 && (saved_regs_mask & (1 << LR_REGNUM)))
7743 /* Similarly we may have been able to load LR into the PC
7744 even if we did not create a stack frame. */
7745 ;
7746 else if (TARGET_APCS_32)
7747 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7748 else
7749 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
7750 break;
7751 }
7752
7753 return "";
7754 }
7755
7756 static void
7757 arm_output_function_epilogue (file, frame_size)
7758 FILE *file ATTRIBUTE_UNUSED;
7759 HOST_WIDE_INT frame_size;
7760 {
7761 if (TARGET_THUMB)
7762 {
7763 /* ??? Probably not safe to set this here, since it assumes that a
7764 function will be emitted as assembly immediately after we generate
7765 RTL for it. This does not happen for inline functions. */
7766 return_used_this_function = 0;
7767 }
7768 else
7769 {
7770 if (use_return_insn (FALSE)
7771 && return_used_this_function
7772 && (frame_size + current_function_outgoing_args_size) != 0
7773 && !frame_pointer_needed)
7774 abort ();
7775
7776 /* Reset the ARM-specific per-function variables. */
7777 after_arm_reorg = 0;
7778 }
7779 }
7780
7781 /* Generate and emit an insn that we will recognize as a push_multi.
7782 Unfortunately, since this insn does not reflect very well the actual
7783 semantics of the operation, we need to annotate the insn for the benefit
7784 of DWARF2 frame unwind information. */
7785
7786 static rtx
7787 emit_multi_reg_push (mask)
7788 int mask;
7789 {
7790 int num_regs = 0;
7791 int num_dwarf_regs;
7792 int i, j;
7793 rtx par;
7794 rtx dwarf;
7795 int dwarf_par_index;
7796 rtx tmp, reg;
7797
7798 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7799 if (mask & (1 << i))
7800 num_regs++;
7801
7802 if (num_regs == 0 || num_regs > 16)
7803 abort ();
7804
7805 /* We don't record the PC in the dwarf frame information. */
7806 num_dwarf_regs = num_regs;
7807 if (mask & (1 << PC_REGNUM))
7808 num_dwarf_regs--;
7809
7810 /* For the body of the insn we are going to generate an UNSPEC in
7811 parallel with several USEs. This allows the insn to be recognised
7812 by the push_multi pattern in the arm.md file. The insn looks
7813 something like this:
7814
7815 (parallel [
7816 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
7817 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
7818 (use (reg:SI 11 fp))
7819 (use (reg:SI 12 ip))
7820 (use (reg:SI 14 lr))
7821 (use (reg:SI 15 pc))
7822 ])
7823
7824 For the frame note however, we try to be more explicit and actually
7825 show each register being stored into the stack frame, plus a (single)
7826 decrement of the stack pointer. We do it this way in order to be
7827 friendly to the stack unwinding code, which only wants to see a single
7828 stack decrement per instruction. The RTL we generate for the note looks
7829 something like this:
7830
7831 (sequence [
7832 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
7833 (set (mem:SI (reg:SI sp)) (reg:SI r4))
7834 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
7835 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
7836 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
7837 ])
7838
7839 This sequence is used both by the code to support stack unwinding for
7840 exceptions handlers and the code to generate dwarf2 frame debugging. */
7841
7842 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
7843 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
7844 dwarf_par_index = 1;
7845
7846 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7847 {
7848 if (mask & (1 << i))
7849 {
7850 reg = gen_rtx_REG (SImode, i);
7851
7852 XVECEXP (par, 0, 0)
7853 = gen_rtx_SET (VOIDmode,
7854 gen_rtx_MEM (BLKmode,
7855 gen_rtx_PRE_DEC (BLKmode,
7856 stack_pointer_rtx)),
7857 gen_rtx_UNSPEC (BLKmode,
7858 gen_rtvec (1, reg),
7859 UNSPEC_PUSH_MULT));
7860
7861 if (i != PC_REGNUM)
7862 {
7863 tmp = gen_rtx_SET (VOIDmode,
7864 gen_rtx_MEM (SImode, stack_pointer_rtx),
7865 reg);
7866 RTX_FRAME_RELATED_P (tmp) = 1;
7867 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
7868 dwarf_par_index++;
7869 }
7870
7871 break;
7872 }
7873 }
7874
7875 for (j = 1, i++; j < num_regs; i++)
7876 {
7877 if (mask & (1 << i))
7878 {
7879 reg = gen_rtx_REG (SImode, i);
7880
7881 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
7882
7883 if (i != PC_REGNUM)
7884 {
7885 tmp = gen_rtx_SET (VOIDmode,
7886 gen_rtx_MEM (SImode,
7887 plus_constant (stack_pointer_rtx,
7888 4 * j)),
7889 reg);
7890 RTX_FRAME_RELATED_P (tmp) = 1;
7891 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
7892 }
7893
7894 j++;
7895 }
7896 }
7897
7898 par = emit_insn (par);
7899
7900 tmp = gen_rtx_SET (SImode,
7901 stack_pointer_rtx,
7902 gen_rtx_PLUS (SImode,
7903 stack_pointer_rtx,
7904 GEN_INT (-4 * num_regs)));
7905 RTX_FRAME_RELATED_P (tmp) = 1;
7906 XVECEXP (dwarf, 0, 0) = tmp;
7907
7908 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7909 REG_NOTES (par));
7910 return par;
7911 }
7912
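/* Emit an insn storing COUNT floating point registers, starting at
   BASE_REG, to the stack with a store-multiple (SFM) instruction,
   annotated with DWARF frame notes describing each individual store. */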
7913 static rtx
7914 emit_sfm (base_reg, count)
7915 int base_reg;
7916 int count;
7917 {
7918 rtx par;
7919 rtx dwarf;
7920 rtx tmp, reg;
7921 int i;
7922
7923 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7924 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7925 RTX_FRAME_RELATED_P (dwarf) = 1;
7926
7927 reg = gen_rtx_REG (XFmode, base_reg++);
7928
7929 XVECEXP (par, 0, 0)
7930 = gen_rtx_SET (VOIDmode,
7931 gen_rtx_MEM (BLKmode,
7932 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7933 gen_rtx_UNSPEC (BLKmode,
7934 gen_rtvec (1, reg),
7935 UNSPEC_PUSH_MULT));
7936 tmp
7937 = gen_rtx_SET (VOIDmode,
7938 gen_rtx_MEM (XFmode,
7939 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7940 reg);
7941 RTX_FRAME_RELATED_P (tmp) = 1;
7942 XVECEXP (dwarf, 0, count - 1) = tmp;
7943
7944 for (i = 1; i < count; i++)
7945 {
7946 reg = gen_rtx_REG (XFmode, base_reg++);
7947 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7948
7949 tmp = gen_rtx_SET (VOIDmode,
7950 gen_rtx_MEM (XFmode,
7951 gen_rtx_PRE_DEC (BLKmode,
7952 stack_pointer_rtx)),
7953 reg);
7954 RTX_FRAME_RELATED_P (tmp) = 1;
7955 XVECEXP (dwarf, 0, count - i - 1) = tmp;
7956 }
7957
7958 par = emit_insn (par);
7959 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7960 REG_NOTES (par));
7961 return par;
7962 }
7963
7964 /* Compute the distance from register FROM to register TO.
7965 These can be the arg pointer (26), the soft frame pointer (25),
7966 the stack pointer (13) or the hard frame pointer (11).
7967 Typical stack layout looks like this:
7968
7969 old stack pointer -> | |
7970 ----
7971 | | \
7972 | | saved arguments for
7973 | | vararg functions
7974 | | /
7975 --
7976 hard FP & arg pointer -> | | \
7977 | | stack
7978 | | frame
7979 | | /
7980 --
7981 | | \
7982 | | call saved
7983 | | registers
7984 soft frame pointer -> | | /
7985 --
7986 | | \
7987 | | local
7988 | | variables
7989 | | /
7990 --
7991 | | \
7992 | | outgoing
7993 | | arguments
7994 current stack pointer -> | | /
7995 --
7996
7997 For a given function some or all of these stack components
7998 may not be needed, giving rise to the possibility of
7999 eliminating some of the registers.
8000
8001 The values returned by this function must reflect the behaviour
8002 of arm_expand_prologue() and arm_compute_save_reg_mask().
8003
8004 The sign of the number returned reflects the direction of stack
8005 growth, so the values are positive for all eliminations except
8006 from the soft frame pointer to the hard frame pointer. */
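
/* As a worked example, with purely illustrative figures: given 20 bytes
   of call saved registers, a 16 byte stack frame, 8 bytes of local
   variables and no outgoing arguments, the cases below give

   ARG_POINTER   -> FRAME_POINTER: 20 + 16 - 4 = 32
   ARG_POINTER   -> STACK_POINTER: 20 + 16 + 8 + 0 - 4 = 40
   FRAME_POINTER -> STACK_POINTER: 8 + 0 = 8. */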
8007
8008 unsigned int
8009 arm_compute_initial_elimination_offset (from, to)
8010 unsigned int from;
8011 unsigned int to;
8012 {
8013 unsigned int local_vars = (get_frame_size () + 3) & ~3;
8014 unsigned int outgoing_args = current_function_outgoing_args_size;
8015 unsigned int stack_frame;
8016 unsigned int call_saved_registers;
8017 unsigned long func_type;
8018
8019 func_type = arm_current_func_type ();
8020
8021 /* Volatile functions never return, so there is
8022 no need to save call saved registers. */
8023 call_saved_registers = 0;
8024 if (! IS_VOLATILE (func_type))
8025 {
8026 unsigned int reg_mask;
8027 unsigned int reg;
8028
8029 /* Make sure that we compute which registers will be saved
8030 on the stack using the same algorithm that is used by
8031 arm_compute_save_reg_mask(). */
8032 reg_mask = arm_compute_save_reg0_reg12_mask ();
8033
8034 /* Now count the number of bits set in save_reg_mask.
8035 For each set bit we need 4 bytes of stack space. */
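/* Note that reg_mask & -reg_mask isolates the least significant
   set bit of reg_mask, so each iteration clears one bit. */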
8036 while (reg_mask)
8037 {
8038 call_saved_registers += 4;
8039 reg_mask = reg_mask & ~ (reg_mask & - reg_mask);
8040 }
8041
8042 if (regs_ever_live[LR_REGNUM]
8043 /* If a stack frame is going to be created, the LR will
8044 be saved as part of that, so we do not need to allow
8045 for it here. */
8046 && ! frame_pointer_needed)
8047 call_saved_registers += 4;
8048
8049 /* If the hard floating point registers are going to be
8050 used then they must be saved on the stack as well.
8051 Each register occupies 12 bytes of stack space. */
8052 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg ++)
8053 if (regs_ever_live[reg] && ! call_used_regs[reg])
8054 call_saved_registers += 12;
8055 }
8056
8057 /* The stack frame contains 4 registers - the old frame pointer,
8058 the old stack pointer, the return address and PC of the start
8059 of the function. */
8060 stack_frame = frame_pointer_needed ? 16 : 0;
8061
8062 /* OK, now we have enough information to compute the distances.
8063 There must be an entry in these switch tables for each pair
8064 of registers in ELIMINABLE_REGS, even if some of the entries
8065 seem to be redundant or useless. */
8066 switch (from)
8067 {
8068 case ARG_POINTER_REGNUM:
8069 switch (to)
8070 {
8071 case THUMB_HARD_FRAME_POINTER_REGNUM:
8072 return 0;
8073
8074 case FRAME_POINTER_REGNUM:
8075 /* This is the reverse of the soft frame pointer
8076 to hard frame pointer elimination below. */
8077 if (call_saved_registers == 0 && stack_frame == 0)
8078 return 0;
8079 return (call_saved_registers + stack_frame - 4);
8080
8081 case ARM_HARD_FRAME_POINTER_REGNUM:
8082 /* If there is no stack frame then the hard
8083 frame pointer and the arg pointer coincide. */
8084 if (stack_frame == 0 && call_saved_registers != 0)
8085 return 0;
8086 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
8087 return (frame_pointer_needed
8088 && current_function_needs_context
8089 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
8090
8091 case STACK_POINTER_REGNUM:
8092 /* If nothing has been pushed on the stack at all
8093 then this will return -4. This *is* correct! */
8094 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
8095
8096 default:
8097 abort ();
8098 }
8099 break;
8100
8101 case FRAME_POINTER_REGNUM:
8102 switch (to)
8103 {
8104 case THUMB_HARD_FRAME_POINTER_REGNUM:
8105 return 0;
8106
8107 case ARM_HARD_FRAME_POINTER_REGNUM:
8108 /* The hard frame pointer points to the top entry in the
8109 stack frame. The soft frame pointer to the bottom entry
8110 in the stack frame. If there is no stack frame at all,
8111 then they are identical. */
8112 if (call_saved_registers == 0 && stack_frame == 0)
8113 return 0;
8114 return - (call_saved_registers + stack_frame - 4);
8115
8116 case STACK_POINTER_REGNUM:
8117 return local_vars + outgoing_args;
8118
8119 default:
8120 abort ();
8121 }
8122 break;
8123
8124 default:
8125 /* You cannot eliminate from the stack pointer.
8126 In theory you could eliminate from the hard frame
8127 pointer to the stack pointer, but this will never
8128 happen, since if a stack frame is not needed the
8129 hard frame pointer will never be used. */
8130 abort ();
8131 }
8132 }
8133
8134 /* Generate the prologue instructions for entry into an ARM function. */
8135
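/* For a frame-pointer-using function under the 32-bit APCS, the code
   below typically emits a sequence along these lines (illustrative):

        mov     ip, sp
        stmfd   sp!, {fp, ip, lr, pc}
        sub     fp, ip, #4
        sub     sp, sp, #<locals>

   with variations for interrupt, nested and naked functions. */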
8136 void
8137 arm_expand_prologue ()
8138 {
8139 int reg;
8140 rtx amount;
8141 rtx insn;
8142 rtx ip_rtx;
8143 unsigned long live_regs_mask;
8144 unsigned long func_type;
8145 int fp_offset = 0;
8146 int saved_pretend_args = 0;
8147 unsigned int args_to_push;
8148
8149 func_type = arm_current_func_type ();
8150
8151 /* Naked functions don't have prologues. */
8152 if (IS_NAKED (func_type))
8153 return;
8154
8155 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
8156 args_to_push = current_function_pretend_args_size;
8157
8158 /* Compute which registers we will have to save onto the stack. */
8159 live_regs_mask = arm_compute_save_reg_mask ();
8160
8161 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
8162
8163 if (frame_pointer_needed)
8164 {
8165 if (IS_INTERRUPT (func_type))
8166 {
8167 /* Interrupt functions must not corrupt any registers.
8168 Creating a frame pointer however, corrupts the IP
8169 register, so we must push it first. */
8170 insn = emit_multi_reg_push (1 << IP_REGNUM);
8171
8172 /* Do not set RTX_FRAME_RELATED_P on this insn.
8173 The dwarf stack unwinding code only wants to see one
8174 stack decrement per function, and this is not it. If
8175 this instruction is labeled as being part of the frame
8176 creation sequence then dwarf2out_frame_debug_expr will
8177 abort when it encounters the assignment of IP to FP
8178 later on, since the use of SP here establishes SP as
8179 the CFA register and not IP.
8180
8181 Anyway this instruction is not really part of the stack
8182 frame creation although it is part of the prologue. */
8183 }
8184 else if (IS_NESTED (func_type))
8185 {
8186 /* The static chain register is the same as the IP register, which is
8187 used as a scratch register during stack frame creation.
8188 To get around this we need to find somewhere to store IP
8189 whilst the frame is being created. We try the following
8190 places in order:
8191
8192 1. The last argument register.
8193 2. A slot on the stack above the frame. (This only
8194 works if the function is not a varargs function).
8195 3. Register r3, after pushing the argument registers
8196 onto the stack.
8197
8198 Note - we only need to tell the dwarf2 backend about the SP
8199 adjustment in the second variant; the static chain register
8200 doesn't need to be unwound, as it doesn't contain a value
8201 inherited from the caller. */
8202
8203 if (regs_ever_live[3] == 0)
8204 {
8205 insn = gen_rtx_REG (SImode, 3);
8206 insn = gen_rtx_SET (SImode, insn, ip_rtx);
8207 insn = emit_insn (insn);
8208 }
8209 else if (args_to_push == 0)
8210 {
8211 rtx dwarf;
8212 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
8213 insn = gen_rtx_MEM (SImode, insn);
8214 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
8215 insn = emit_insn (insn);
8216
8217 fp_offset = 4;
8218
8219 /* Just tell the dwarf backend that we adjusted SP. */
8220 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8221 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8222 GEN_INT (-fp_offset)));
8223 RTX_FRAME_RELATED_P (insn) = 1;
8224 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8225 dwarf, REG_NOTES (insn));
8226 }
8227 else
8228 {
8229 /* Store the args on the stack. */
8230 if (cfun->machine->uses_anonymous_args)
8231 insn = emit_multi_reg_push
8232 ((0xf0 >> (args_to_push / 4)) & 0xf);
8233 else
8234 insn = emit_insn
8235 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8236 GEN_INT (- args_to_push)));
8237
8238 RTX_FRAME_RELATED_P (insn) = 1;
8239
8240 saved_pretend_args = 1;
8241 fp_offset = args_to_push;
8242 args_to_push = 0;
8243
8244 /* Now reuse r3 to preserve IP. */
8245 insn = gen_rtx_REG (SImode, 3);
8246 insn = gen_rtx_SET (SImode, insn, ip_rtx);
8247 (void) emit_insn (insn);
8248 }
8249 }
8250
8251 if (fp_offset)
8252 {
8253 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
8254 insn = gen_rtx_SET (SImode, ip_rtx, insn);
8255 }
8256 else
8257 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
8258
8259 insn = emit_insn (insn);
8260 RTX_FRAME_RELATED_P (insn) = 1;
8261 }
8262
8263 if (args_to_push)
8264 {
8265 /* Push the argument registers, or reserve space for them. */
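/* The expression (0xf0 >> (args_to_push / 4)) & 0xf computes the mask
   of the highest-numbered argument registers; e.g. for args_to_push
   of 8 (two registers) it yields 0xc, selecting r2 and r3. */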
8266 if (cfun->machine->uses_anonymous_args)
8267 insn = emit_multi_reg_push
8268 ((0xf0 >> (args_to_push / 4)) & 0xf);
8269 else
8270 insn = emit_insn
8271 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8272 GEN_INT (- args_to_push)));
8273 RTX_FRAME_RELATED_P (insn) = 1;
8274 }
8275
8276 /* If this is an interrupt service routine, and the link register is
8277 going to be pushed, subtracting four now will mean that the
8278 function return can be done with a single instruction. */
8279 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
8280 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8281 {
8282 emit_insn (gen_rtx_SET (SImode,
8283 gen_rtx_REG (SImode, LR_REGNUM),
8284 gen_rtx_PLUS (SImode,
8285 gen_rtx_REG (SImode, LR_REGNUM),
8286 GEN_INT (-4))));
8287 }
8288
8289 if (live_regs_mask)
8290 {
8291 insn = emit_multi_reg_push (live_regs_mask);
8292 RTX_FRAME_RELATED_P (insn) = 1;
8293 }
8294
8295 if (! IS_VOLATILE (func_type))
8296 {
8297 /* Save any floating point call-saved registers used by this function. */
8298 if (arm_fpu_arch == FP_SOFT2)
8299 {
8300 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
8301 if (regs_ever_live[reg] && !call_used_regs[reg])
8302 {
8303 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
8304 insn = gen_rtx_MEM (XFmode, insn);
8305 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
8306 gen_rtx_REG (XFmode, reg)));
8307 RTX_FRAME_RELATED_P (insn) = 1;
8308 }
8309 }
8310 else
8311 {
8312 int start_reg = LAST_ARM_FP_REGNUM;
8313
8314 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
8315 {
8316 if (regs_ever_live[reg] && !call_used_regs[reg])
8317 {
8318 if (start_reg - reg == 3)
8319 {
8320 insn = emit_sfm (reg, 4);
8321 RTX_FRAME_RELATED_P (insn) = 1;
8322 start_reg = reg - 1;
8323 }
8324 }
8325 else
8326 {
8327 if (start_reg != reg)
8328 {
8329 insn = emit_sfm (reg + 1, start_reg - reg);
8330 RTX_FRAME_RELATED_P (insn) = 1;
8331 }
8332 start_reg = reg - 1;
8333 }
8334 }
8335
8336 if (start_reg != reg)
8337 {
8338 insn = emit_sfm (reg + 1, start_reg - reg);
8339 RTX_FRAME_RELATED_P (insn) = 1;
8340 }
8341 }
8342 }
8343
8344 if (frame_pointer_needed)
8345 {
8346 /* Create the new frame pointer. */
8347 insn = GEN_INT (-(4 + args_to_push + fp_offset));
8348 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
8349 RTX_FRAME_RELATED_P (insn) = 1;
8350
8351 if (IS_NESTED (func_type))
8352 {
8353 /* Recover the static chain register. */
8354 if (regs_ever_live [3] == 0
8355 || saved_pretend_args)
8356 insn = gen_rtx_REG (SImode, 3);
8357 else /* if (current_function_pretend_args_size == 0) */
8358 {
8359 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx, GEN_INT (4));
8360 insn = gen_rtx_MEM (SImode, insn);
8361 }
8362
8363 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
8364 /* Add a USE to stop propagate_one_insn() from barfing. */
8365 emit_insn (gen_prologue_use (ip_rtx));
8366 }
8367 }
8368
8369 amount = GEN_INT (-(get_frame_size ()
8370 + current_function_outgoing_args_size));
8371
8372 if (amount != const0_rtx)
8373 {
8374 /* This add can produce multiple insns for a large constant, so we
8375 need to get tricky. */
8376 rtx last = get_last_insn ();
8377 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
8378 amount));
8379 do
8380 {
8381 last = last ? NEXT_INSN (last) : get_insns ();
8382 RTX_FRAME_RELATED_P (last) = 1;
8383 }
8384 while (last != insn);
8385
8386 /* If the frame pointer is needed, emit a special barrier that
8387 will prevent the scheduler from moving stores to the frame
8388 before the stack adjustment. */
8389 if (frame_pointer_needed)
8390 {
8391 rtx unspec = gen_rtx_UNSPEC (SImode,
8392 gen_rtvec (2, stack_pointer_rtx,
8393 hard_frame_pointer_rtx),
8394 UNSPEC_PRLG_STK);
8395
8396 insn = emit_insn (gen_rtx_CLOBBER (VOIDmode,
8397 gen_rtx_MEM (BLKmode, unspec)));
8398 }
8399 }
8400
8401 /* If we are profiling, make sure no instructions are scheduled before
8402 the call to mcount. Similarly if the user has requested no
8403 scheduling in the prologue. */
8404 if (current_function_profile || TARGET_NO_SCHED_PRO)
8405 emit_insn (gen_blockage ());
8406
8407 /* If the link register is being kept alive, with the return address in it,
8408 then make sure that it does not get reused by the ce2 pass. */
8409 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
8410 {
8411 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
8412 cfun->machine->lr_save_eliminated = 1;
8413 }
8414 }
8415 \f
8416 /* If CODE is 'd', then the X is a condition operand and the instruction
8417 should only be executed if the condition is true.
8418 If CODE is 'D', then the X is a condition operand and the instruction
8419 should only be executed if the condition is false: however, if the mode
8420 of the comparison is CCFPEmode, then always execute the instruction -- we
8421 do this because in these circumstances !GE does not necessarily imply LT;
8422 in these cases the instruction pattern will take care to make sure that
8423 an instruction containing %d will follow, thereby undoing the effects of
8424 doing this instruction unconditionally.
8425 If CODE is 'N' then X is a floating point operand that must be negated
8426 before output.
8427 If CODE is 'B' then output a bitwise inverted value of X (a const int).
8428 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
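/* For example, with CODE 'B' a CONST_INT of 5 is printed as -6,
   the sign extension of ~5. */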
8429
8430 void
8431 arm_print_operand (stream, x, code)
8432 FILE * stream;
8433 rtx x;
8434 int code;
8435 {
8436 switch (code)
8437 {
8438 case '@':
8439 fputs (ASM_COMMENT_START, stream);
8440 return;
8441
8442 case '_':
8443 fputs (user_label_prefix, stream);
8444 return;
8445
8446 case '|':
8447 fputs (REGISTER_PREFIX, stream);
8448 return;
8449
8450 case '?':
8451 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
8452 {
8453 if (TARGET_THUMB || current_insn_predicate != NULL)
8454 abort ();
8455
8456 fputs (arm_condition_codes[arm_current_cc], stream);
8457 }
8458 else if (current_insn_predicate)
8459 {
8460 enum arm_cond_code code;
8461
8462 if (TARGET_THUMB)
8463 abort ();
8464
8465 code = get_arm_condition_code (current_insn_predicate);
8466 fputs (arm_condition_codes[code], stream);
8467 }
8468 return;
8469
8470 case 'N':
8471 {
8472 REAL_VALUE_TYPE r;
8473 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8474 r = REAL_VALUE_NEGATE (r);
8475 fprintf (stream, "%s", fp_const_from_val (&r));
8476 }
8477 return;
8478
8479 case 'B':
8480 if (GET_CODE (x) == CONST_INT)
8481 {
8482 HOST_WIDE_INT val;
8483 val = ARM_SIGN_EXTEND (~INTVAL (x));
8484 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
8485 }
8486 else
8487 {
8488 putc ('~', stream);
8489 output_addr_const (stream, x);
8490 }
8491 return;
8492
8493 case 'i':
8494 fprintf (stream, "%s", arithmetic_instr (x, 1));
8495 return;
8496
8497 case 'I':
8498 fprintf (stream, "%s", arithmetic_instr (x, 0));
8499 return;
8500
8501 case 'S':
8502 {
8503 HOST_WIDE_INT val;
8504 const char * shift = shift_op (x, &val);
8505
8506 if (shift)
8507 {
8508 fprintf (stream, ", %s ", shift_op (x, &val));
8509 if (val == -1)
8510 arm_print_operand (stream, XEXP (x, 1), 0);
8511 else
8512 {
8513 fputc ('#', stream);
8514 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
8515 }
8516 }
8517 }
8518 return;
8519
8520 /* An explanation of the 'Q', 'R' and 'H' register operands:
8521
8522 In a pair of registers containing a DI or DF value the 'Q'
8523 operand returns the register number of the register containing
8524 the least significant part of the value. The 'R' operand returns
8525 the register number of the register containing the most
8526 significant part of the value.
8527
8528 The 'H' operand returns the higher of the two register numbers.
8529 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
8530 same as the 'Q' operand, since the most significant part of the
8531 value is held in the lower number register. The reverse is true
8532 on systems where WORDS_BIG_ENDIAN is false.
8533
8534 The purpose of these operands is to distinguish between cases
8535 where the endian-ness of the values is important (for example
8536 when they are added together), and cases where the endian-ness
8537 is irrelevant, but the order of register operations is important.
8538 For example when loading a value from memory into a register
8539 pair, the endian-ness does not matter. Provided that the value
8540 from the lower memory address is put into the lower numbered
8541 register, and the value from the higher address is put into the
8542 higher numbered register, the load will work regardless of whether
8543 the value being loaded is big-wordian or little-wordian. The
8544 order of the two register loads can matter however, if the address
8545 of the memory location is actually held in one of the registers
8546 being overwritten by the load. */
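/* For example, for a DI value held in the pair {r0, r1} on a
   little-endian target, %Q prints r0, %R prints r1 and %H prints r1. */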
8547 case 'Q':
8548 if (REGNO (x) > LAST_ARM_REGNUM)
8549 abort ();
8550 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
8551 return;
8552
8553 case 'R':
8554 if (REGNO (x) > LAST_ARM_REGNUM)
8555 abort ();
8556 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
8557 return;
8558
8559 case 'H':
8560 if (REGNO (x) > LAST_ARM_REGNUM)
8561 abort ();
8562 asm_fprintf (stream, "%r", REGNO (x) + 1);
8563 return;
8564
8565 case 'm':
8566 asm_fprintf (stream, "%r",
8567 GET_CODE (XEXP (x, 0)) == REG
8568 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
8569 return;
8570
8571 case 'M':
8572 asm_fprintf (stream, "{%r-%r}",
8573 REGNO (x),
8574 REGNO (x) + NUM_REGS (GET_MODE (x)) - 1);
8575 return;
8576
8577 case 'd':
8578 /* CONST_TRUE_RTX means always -- that's the default. */
8579 if (x == const_true_rtx)
8580 return;
8581
8582 if (TARGET_ARM)
8583 fputs (arm_condition_codes[get_arm_condition_code (x)],
8584 stream);
8585 else
8586 fputs (thumb_condition_code (x, 0), stream);
8587 return;
8588
8589 case 'D':
8590 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
8591 want to do that. */
8592 if (x == const_true_rtx)
8593 abort ();
8594
8595 if (TARGET_ARM)
8596 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
8597 (get_arm_condition_code (x))],
8598 stream);
8599 else
8600 fputs (thumb_condition_code (x, 1), stream);
8601 return;
8602
8603 default:
8604 if (x == 0)
8605 abort ();
8606
8607 if (GET_CODE (x) == REG)
8608 asm_fprintf (stream, "%r", REGNO (x));
8609 else if (GET_CODE (x) == MEM)
8610 {
8611 output_memory_reference_mode = GET_MODE (x);
8612 output_address (XEXP (x, 0));
8613 }
8614 else if (GET_CODE (x) == CONST_DOUBLE)
8615 fprintf (stream, "#%s", fp_immediate_constant (x));
8616 else if (GET_CODE (x) == NEG)
8617 abort (); /* This should never happen now. */
8618 else
8619 {
8620 fputc ('#', stream);
8621 output_addr_const (stream, x);
8622 }
8623 }
8624 }
8625 \f
8626 #ifndef AOF_ASSEMBLER
8627 /* Target hook for assembling integer objects. The ARM version needs to
8628 handle word-sized values specially. */
8629
8630 static bool
8631 arm_assemble_integer (x, size, aligned_p)
8632 rtx x;
8633 unsigned int size;
8634 int aligned_p;
8635 {
8636 if (size == UNITS_PER_WORD && aligned_p)
8637 {
8638 fputs ("\t.word\t", asm_out_file);
8639 output_addr_const (asm_out_file, x);
8640
8641 /* Mark symbols as position independent. We only do this in the
8642 .text segment, not in the .data segment. */
8643 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
8644 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
8645 {
8646 if (GET_CODE (x) == SYMBOL_REF
8647 && (CONSTANT_POOL_ADDRESS_P (x)
8648 || ENCODED_SHORT_CALL_ATTR_P (XSTR (x, 0))))
8649 fputs ("(GOTOFF)", asm_out_file);
8650 else if (GET_CODE (x) == LABEL_REF)
8651 fputs ("(GOTOFF)", asm_out_file);
8652 else
8653 fputs ("(GOT)", asm_out_file);
8654 }
8655 fputc ('\n', asm_out_file);
8656 return true;
8657 }
8658
8659 return default_assemble_integer (x, size, aligned_p);
8660 }
8661 #endif
8662 \f
8663 /* A finite state machine takes care of noticing whether or not instructions
8664 can be conditionally executed, and thus decrease execution time and code
8665 size by deleting branch instructions. The fsm is controlled by
8666 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
8667
8668 /* The state of the fsm controlling condition codes are:
8669 0: normal, do nothing special
8670 1: make ASM_OUTPUT_OPCODE not output this instruction
8671 2: make ASM_OUTPUT_OPCODE not output this instruction
8672 3: make instructions conditional
8673 4: make instructions conditional
8674
8675 State transitions (state->state by whom under condition):
8676 0 -> 1 final_prescan_insn if the `target' is a label
8677 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
8678 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
8679 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
8680 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
8681 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
8682 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
8683 (the target insn is arm_target_insn).
8684
8685 If the jump clobbers the conditions then we use states 2 and 4.
8686
8687 A similar thing can be done with conditional return insns.
8688
8689 XXX In case the `target' is an unconditional branch, this conditionalising
8690 of the instructions always reduces code size, but not always execution
8691 time. But then, I want to reduce the code size to somewhere near what
8692 /bin/cc produces. */
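
/* As a purely illustrative example, the FSM allows a sequence such as

        cmp     r0, #0
        beq     .L1
        add     r1, r1, #1
   .L1:

   to be output as

        cmp     r0, #0
        addne   r1, r1, #1

   with the branch and label deleted. */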
8693
8694 /* Returns the index of the ARM condition code string in
8695 `arm_condition_codes'. COMPARISON should be an rtx like
8696 `(eq (...) (...))'. */
8697
8698 static enum arm_cond_code
8699 get_arm_condition_code (comparison)
8700 rtx comparison;
8701 {
8702 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
8703 int code;
8704 enum rtx_code comp_code = GET_CODE (comparison);
8705
8706 if (GET_MODE_CLASS (mode) != MODE_CC)
8707 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
8708 XEXP (comparison, 1));
8709
8710 switch (mode)
8711 {
8712 case CC_DNEmode: code = ARM_NE; goto dominance;
8713 case CC_DEQmode: code = ARM_EQ; goto dominance;
8714 case CC_DGEmode: code = ARM_GE; goto dominance;
8715 case CC_DGTmode: code = ARM_GT; goto dominance;
8716 case CC_DLEmode: code = ARM_LE; goto dominance;
8717 case CC_DLTmode: code = ARM_LT; goto dominance;
8718 case CC_DGEUmode: code = ARM_CS; goto dominance;
8719 case CC_DGTUmode: code = ARM_HI; goto dominance;
8720 case CC_DLEUmode: code = ARM_LS; goto dominance;
8721 case CC_DLTUmode: code = ARM_CC;
8722
8723 dominance:
8724 if (comp_code != EQ && comp_code != NE)
8725 abort ();
8726
8727 if (comp_code == EQ)
8728 return ARM_INVERSE_CONDITION_CODE (code);
8729 return code;
8730
8731 case CC_NOOVmode:
8732 switch (comp_code)
8733 {
8734 case NE: return ARM_NE;
8735 case EQ: return ARM_EQ;
8736 case GE: return ARM_PL;
8737 case LT: return ARM_MI;
8738 default: abort ();
8739 }
8740
8741 case CC_Zmode:
8742 switch (comp_code)
8743 {
8744 case NE: return ARM_NE;
8745 case EQ: return ARM_EQ;
8746 default: abort ();
8747 }
8748
8749 case CCFPEmode:
8750 case CCFPmode:
8751 /* These encodings assume that AC=1 in the FPA system control
8752 byte. This allows us to handle all cases except UNEQ and
8753 LTGT. */
8754 switch (comp_code)
8755 {
8756 case GE: return ARM_GE;
8757 case GT: return ARM_GT;
8758 case LE: return ARM_LS;
8759 case LT: return ARM_MI;
8760 case NE: return ARM_NE;
8761 case EQ: return ARM_EQ;
8762 case ORDERED: return ARM_VC;
8763 case UNORDERED: return ARM_VS;
8764 case UNLT: return ARM_LT;
8765 case UNLE: return ARM_LE;
8766 case UNGT: return ARM_HI;
8767 case UNGE: return ARM_PL;
8768 /* UNEQ and LTGT do not have a representation. */
8769 case UNEQ: /* Fall through. */
8770 case LTGT: /* Fall through. */
8771 default: abort ();
8772 }
8773
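/* In CC_SWPmode the flags were set by a comparison with the operands
   swapped, so each code must be reversed; e.g. GE on the swapped
   operands corresponds to LE on the originals. */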
8774 case CC_SWPmode:
8775 switch (comp_code)
8776 {
8777 case NE: return ARM_NE;
8778 case EQ: return ARM_EQ;
8779 case GE: return ARM_LE;
8780 case GT: return ARM_LT;
8781 case LE: return ARM_GE;
8782 case LT: return ARM_GT;
8783 case GEU: return ARM_LS;
8784 case GTU: return ARM_CC;
8785 case LEU: return ARM_CS;
8786 case LTU: return ARM_HI;
8787 default: abort ();
8788 }
8789
8790 case CC_Cmode:
8791 switch (comp_code)
8792 {
8793 case LTU: return ARM_CS;
8794 case GEU: return ARM_CC;
8795 default: abort ();
8796 }
8797
8798 case CCmode:
8799 switch (comp_code)
8800 {
8801 case NE: return ARM_NE;
8802 case EQ: return ARM_EQ;
8803 case GE: return ARM_GE;
8804 case GT: return ARM_GT;
8805 case LE: return ARM_LE;
8806 case LT: return ARM_LT;
8807 case GEU: return ARM_CS;
8808 case GTU: return ARM_HI;
8809 case LEU: return ARM_LS;
8810 case LTU: return ARM_CC;
8811 default: abort ();
8812 }
8813
8814 default: abort ();
8815 }
8816
8817 abort ();
8818 }
8819
8820
8821 void
8822 arm_final_prescan_insn (insn)
8823 rtx insn;
8824 {
8825 /* BODY will hold the body of INSN. */
8826 rtx body = PATTERN (insn);
8827
8828 /* This will be 1 if trying to repeat the trick, and things need to be
8829 reversed if it appears to fail. */
8830 int reverse = 0;
8831
8832 /* A nonzero JUMP_CLOBBERS implies that the conditions are clobbered
8833 if a branch is taken, even if the rtl suggests otherwise. It also
8834 means that we have to grub around within the jump expression to find
8835 out what the conditions are when the jump isn't taken. */
8836 int jump_clobbers = 0;
8837
8838 /* If we start with a return insn, we only succeed if we find another one. */
8839 int seeking_return = 0;
8840
8841 /* START_INSN will hold the insn from where we start looking. This is the
8842 first insn after the following code_label if REVERSE is true. */
8843 rtx start_insn = insn;
8844
8845 /* If in state 4, check if the target branch is reached, in order to
8846 change back to state 0. */
8847 if (arm_ccfsm_state == 4)
8848 {
8849 if (insn == arm_target_insn)
8850 {
8851 arm_target_insn = NULL;
8852 arm_ccfsm_state = 0;
8853 }
8854 return;
8855 }
8856
8857 /* If in state 3, it is possible to repeat the trick, if this insn is an
8858 unconditional branch to a label, and immediately following this branch
8859 is the previous target label which is only used once, and the label this
8860 branch jumps to is not too far off. */
8861 if (arm_ccfsm_state == 3)
8862 {
8863 if (simplejump_p (insn))
8864 {
8865 start_insn = next_nonnote_insn (start_insn);
8866 if (GET_CODE (start_insn) == BARRIER)
8867 {
8868 /* XXX Isn't this always a barrier? */
8869 start_insn = next_nonnote_insn (start_insn);
8870 }
8871 if (GET_CODE (start_insn) == CODE_LABEL
8872 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
8873 && LABEL_NUSES (start_insn) == 1)
8874 reverse = TRUE;
8875 else
8876 return;
8877 }
8878 else if (GET_CODE (body) == RETURN)
8879 {
8880 start_insn = next_nonnote_insn (start_insn);
8881 if (GET_CODE (start_insn) == BARRIER)
8882 start_insn = next_nonnote_insn (start_insn);
8883 if (GET_CODE (start_insn) == CODE_LABEL
8884 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
8885 && LABEL_NUSES (start_insn) == 1)
8886 {
8887 reverse = TRUE;
8888 seeking_return = 1;
8889 }
8890 else
8891 return;
8892 }
8893 else
8894 return;
8895 }
8896
8897 if (arm_ccfsm_state != 0 && !reverse)
8898 abort ();
8899 if (GET_CODE (insn) != JUMP_INSN)
8900 return;
8901
8902 /* This jump might be paralleled with a clobber of the condition codes;
8903 the jump should always come first. */
8904 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
8905 body = XVECEXP (body, 0, 0);
8906
8907 #if 0
8908 /* If this is a conditional return then we don't want to know */
8909 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
8910 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
8911 && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
8912 || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
8913 return;
8914 #endif
8915
8916 if (reverse
8917 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
8918 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
8919 {
8920 int insns_skipped;
8921 int fail = FALSE, succeed = FALSE;
8922 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
8923 int then_not_else = TRUE;
8924 rtx this_insn = start_insn, label = 0;
8925
8926 /* If the jump cannot be done with one instruction, we cannot
8927 conditionally execute the instruction in the inverse case. */
8928 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
8929 {
8930 jump_clobbers = 1;
8931 return;
8932 }
8933
8934 /* Register the insn jumped to. */
8935 if (reverse)
8936 {
8937 if (!seeking_return)
8938 label = XEXP (SET_SRC (body), 0);
8939 }
8940 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
8941 label = XEXP (XEXP (SET_SRC (body), 1), 0);
8942 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
8943 {
8944 label = XEXP (XEXP (SET_SRC (body), 2), 0);
8945 then_not_else = FALSE;
8946 }
8947 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
8948 seeking_return = 1;
8949 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
8950 {
8951 seeking_return = 1;
8952 then_not_else = FALSE;
8953 }
8954 else
8955 abort ();
8956
8957 /* See how many insns this branch skips, and what kind of insns. If all
8958 insns are okay, and the label or unconditional branch to the same
8959 label is not too far away, succeed. */
8960 for (insns_skipped = 0;
8961 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
8962 {
8963 rtx scanbody;
8964
8965 this_insn = next_nonnote_insn (this_insn);
8966 if (!this_insn)
8967 break;
8968
8969 switch (GET_CODE (this_insn))
8970 {
8971 case CODE_LABEL:
8972 /* Succeed if it is the target label, otherwise fail since
8973 control falls in from somewhere else. */
8974 if (this_insn == label)
8975 {
8976 if (jump_clobbers)
8977 {
8978 arm_ccfsm_state = 2;
8979 this_insn = next_nonnote_insn (this_insn);
8980 }
8981 else
8982 arm_ccfsm_state = 1;
8983 succeed = TRUE;
8984 }
8985 else
8986 fail = TRUE;
8987 break;
8988
8989 case BARRIER:
8990 /* Succeed if the following insn is the target label.
8991 Otherwise fail.
8992 If return insns are used then the last insn in a function
8993 will be a barrier. */
8994 this_insn = next_nonnote_insn (this_insn);
8995 if (this_insn && this_insn == label)
8996 {
8997 if (jump_clobbers)
8998 {
8999 arm_ccfsm_state = 2;
9000 this_insn = next_nonnote_insn (this_insn);
9001 }
9002 else
9003 arm_ccfsm_state = 1;
9004 succeed = TRUE;
9005 }
9006 else
9007 fail = TRUE;
9008 break;
9009
9010 case CALL_INSN:
9011 /* If using 32-bit addresses the cc is not preserved over
9012 calls. */
9013 if (TARGET_APCS_32)
9014 {
9015 /* Succeed if the following insn is the target label,
9016 or if the following two insns are a barrier and
9017 the target label. */
9018 this_insn = next_nonnote_insn (this_insn);
9019 if (this_insn && GET_CODE (this_insn) == BARRIER)
9020 this_insn = next_nonnote_insn (this_insn);
9021
9022 if (this_insn && this_insn == label
9023 && insns_skipped < max_insns_skipped)
9024 {
9025 if (jump_clobbers)
9026 {
9027 arm_ccfsm_state = 2;
9028 this_insn = next_nonnote_insn (this_insn);
9029 }
9030 else
9031 arm_ccfsm_state = 1;
9032 succeed = TRUE;
9033 }
9034 else
9035 fail = TRUE;
9036 }
9037 break;
9038
9039 case JUMP_INSN:
9040 /* If this is an unconditional branch to the same label, succeed.
9041 If it is to another label, do nothing. If it is conditional,
9042 fail. */
9043 /* XXX Probably, the tests for SET and the PC are unnecessary. */
9044
9045 scanbody = PATTERN (this_insn);
9046 if (GET_CODE (scanbody) == SET
9047 && GET_CODE (SET_DEST (scanbody)) == PC)
9048 {
9049 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
9050 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
9051 {
9052 arm_ccfsm_state = 2;
9053 succeed = TRUE;
9054 }
9055 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
9056 fail = TRUE;
9057 }
9058 /* Fail if a conditional return is undesirable (e.g. on a
9059 StrongARM), but still allow this if optimizing for size. */
9060 else if (GET_CODE (scanbody) == RETURN
9061 && !use_return_insn (TRUE)
9062 && !optimize_size)
9063 fail = TRUE;
9064 else if (GET_CODE (scanbody) == RETURN
9065 && seeking_return)
9066 {
9067 arm_ccfsm_state = 2;
9068 succeed = TRUE;
9069 }
9070 else if (GET_CODE (scanbody) == PARALLEL)
9071 {
9072 switch (get_attr_conds (this_insn))
9073 {
9074 case CONDS_NOCOND:
9075 break;
9076 default:
9077 fail = TRUE;
9078 break;
9079 }
9080 }
9081 else
9082 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
9083
9084 break;
9085
9086 case INSN:
9087 /* Instructions using or affecting the condition codes make it
9088 fail. */
9089 scanbody = PATTERN (this_insn);
9090 if (!(GET_CODE (scanbody) == SET
9091 || GET_CODE (scanbody) == PARALLEL)
9092 || get_attr_conds (this_insn) != CONDS_NOCOND)
9093 fail = TRUE;
9094 break;
9095
9096 default:
9097 break;
9098 }
9099 }
9100 if (succeed)
9101 {
9102 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
9103 arm_target_label = CODE_LABEL_NUMBER (label);
9104 else if (seeking_return || arm_ccfsm_state == 2)
9105 {
9106 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
9107 {
9108 this_insn = next_nonnote_insn (this_insn);
9109 if (this_insn && (GET_CODE (this_insn) == BARRIER
9110 || GET_CODE (this_insn) == CODE_LABEL))
9111 abort ();
9112 }
9113 if (!this_insn)
9114 {
9115 /* Oh dear! We ran off the end; give up. */
9116 recog (PATTERN (insn), insn, NULL);
9117 arm_ccfsm_state = 0;
9118 arm_target_insn = NULL;
9119 return;
9120 }
9121 arm_target_insn = this_insn;
9122 }
9123 else
9124 abort ();
9125 if (jump_clobbers)
9126 {
9127 if (reverse)
9128 abort ();
9129 arm_current_cc =
9130 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
9131 0), 0), 1));
9132 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
9133 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9134 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
9135 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9136 }
9137 else
9138 {
9139 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
9140 what it was. */
9141 if (!reverse)
9142 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
9143 0));
9144 }
9145
9146 if (reverse || then_not_else)
9147 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
9148 }
9149
9150 /* Restore recog_data (getting the attributes of other insns can
9151 destroy this array, but final.c assumes that it remains intact
9152 across this call; since the insn has been recognized already we
9153 call recog direct). */
9154 recog (PATTERN (insn), insn, NULL);
9155 }
9156 }
9157
9158 /* Returns true if REGNO is a valid register
9159 for holding a quantity of type MODE. */
9160
9161 int
9162 arm_hard_regno_mode_ok (regno, mode)
9163 unsigned int regno;
9164 enum machine_mode mode;
9165 {
9166 if (GET_MODE_CLASS (mode) == MODE_CC)
9167 return regno == CC_REGNUM;
9168
9169 if (TARGET_THUMB)
9170 /* For the Thumb we only allow values bigger than SImode in
9171 registers 0 - 6, so that there is always a second low
9172 register available to hold the upper part of the value.
9173 We probably ought to ensure that the register is the
9174 start of an even numbered register pair. */
9175 return (NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
9176
9177 if (regno <= LAST_ARM_REGNUM)
9178 /* We allow any value to be stored in the general registers. */
9179 return 1;
9180
9181 if ( regno == FRAME_POINTER_REGNUM
9182 || regno == ARG_POINTER_REGNUM)
9183 /* We only allow integers in the fake hard registers. */
9184 return GET_MODE_CLASS (mode) == MODE_INT;
9185
9186 /* The only registers left are the FPU registers
9187 which we only allow to hold FP values. */
9188 return GET_MODE_CLASS (mode) == MODE_FLOAT
9189 && regno >= FIRST_ARM_FP_REGNUM
9190 && regno <= LAST_ARM_FP_REGNUM;
9191 }
9192
9193 int
9194 arm_regno_class (regno)
9195 int regno;
9196 {
9197 if (TARGET_THUMB)
9198 {
9199 if (regno == STACK_POINTER_REGNUM)
9200 return STACK_REG;
9201 if (regno == CC_REGNUM)
9202 return CC_REG;
9203 if (regno < 8)
9204 return LO_REGS;
9205 return HI_REGS;
9206 }
9207
9208 if ( regno <= LAST_ARM_REGNUM
9209 || regno == FRAME_POINTER_REGNUM
9210 || regno == ARG_POINTER_REGNUM)
9211 return GENERAL_REGS;
9212
9213 if (regno == CC_REGNUM)
9214 return NO_REGS;
9215
9216 return FPU_REGS;
9217 }
9218
9219 /* Handle a special case when computing the offset
9220 of an argument from the frame pointer. */
9221
9222 int
9223 arm_debugger_arg_offset (value, addr)
9224 int value;
9225 rtx addr;
9226 {
9227 rtx insn;
9228
9229 /* We are only interested if dbxout_parms() failed to compute the offset. */
9230 if (value != 0)
9231 return 0;
9232
9233 /* We can only cope with the case where the address is held in a register. */
9234 if (GET_CODE (addr) != REG)
9235 return 0;
9236
9237 /* If we are using the frame pointer to point at the argument, then
9238 an offset of 0 is correct. */
9239 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
9240 return 0;
9241
9242 /* If we are using the stack pointer to point at the
9243 argument, then an offset of 0 is correct. */
9244 if ((TARGET_THUMB || !frame_pointer_needed)
9245 && REGNO (addr) == SP_REGNUM)
9246 return 0;
9247
9248 /* Oh dear. The argument is pointed to by a register rather
9249 than being held in a register, or being stored at a known
9250 offset from the frame pointer. Since GDB only understands
9251 those two kinds of argument we must translate the address
9252 held in the register into an offset from the frame pointer.
9253 We do this by searching through the insns for the function
9254 looking to see where this register gets its value. If the
9255 register is initialised from the frame pointer plus an offset
9256 then we are in luck and we can continue, otherwise we give up.
9257
9258 This code is exercised by producing debugging information
9259 for a function with arguments like this:
9260
9261 double func (double a, double b, int c, double d) {return d;}
9262
9263 Without this code the stab for parameter 'd' will be set to
9264 an offset of 0 from the frame pointer, rather than 8. */
9265
9266 /* The if() statement says:
9267
9268 If the insn is a normal instruction
9269 and if the insn is setting the value in a register
9270 and if the register being set is the register holding the address of the argument
9271 and if the address is computed by an addition
9272 that involves adding to a register
9273 which is the frame pointer
9274 a constant integer
9275
9276 then... */
9277
9278 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9279 {
9280 if ( GET_CODE (insn) == INSN
9281 && GET_CODE (PATTERN (insn)) == SET
9282 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
9283 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
9284 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
9285 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
9286 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
9287 )
9288 {
9289 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
9290
9291 break;
9292 }
9293 }
9294
9295 if (value == 0)
9296 {
9297 debug_rtx (addr);
9298 warning ("unable to compute real location of stacked parameter");
9299 value = 8; /* XXX magic hack */
9300 }
9301
9302 return value;
9303 }
9304
9305 #define def_builtin(NAME, TYPE, CODE) \
9306 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL)
9307
9308 void
9309 arm_init_builtins ()
9310 {
9311 tree endlink = void_list_node;
9312 tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
9313 tree pchar_type_node = build_pointer_type (char_type_node);
9314
9315 tree int_ftype_int, void_ftype_pchar;
9316
9317 /* void func (char *) */
9318 void_ftype_pchar
9319 = build_function_type (void_type_node,
9320 tree_cons (NULL_TREE, pchar_type_node, endlink));
9321
9322 /* int func (int) */
9323 int_ftype_int
9324 = build_function_type (integer_type_node, int_endlink);
9325
9326 /* Initialize arm V5 builtins. */
9327 if (arm_arch5)
9328 def_builtin ("__builtin_clz", int_ftype_int, ARM_BUILTIN_CLZ);
9329 }
9330
9331 /* Expand an expression EXP that calls a built-in function,
9332 with result going to TARGET if that's convenient
9333 (and in mode MODE if that's convenient).
9334 SUBTARGET may be used as the target for computing one of EXP's operands.
9335 IGNORE is nonzero if the value is to be ignored. */
9336
9337 rtx
9338 arm_expand_builtin (exp, target, subtarget, mode, ignore)
9339 tree exp;
9340 rtx target;
9341 rtx subtarget ATTRIBUTE_UNUSED;
9342 enum machine_mode mode ATTRIBUTE_UNUSED;
9343 int ignore ATTRIBUTE_UNUSED;
9344 {
9345 enum insn_code icode;
9346 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9347 tree arglist = TREE_OPERAND (exp, 1);
9348 tree arg0;
9349 rtx op0, pat;
9350 enum machine_mode tmode, mode0;
9351 int fcode = DECL_FUNCTION_CODE (fndecl);
9352
9353 switch (fcode)
9354 {
9355 default:
9356 break;
9357
9358 case ARM_BUILTIN_CLZ:
9359 icode = CODE_FOR_clz;
9360 arg0 = TREE_VALUE (arglist);
9361 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
9362 tmode = insn_data[icode].operand[0].mode;
9363 mode0 = insn_data[icode].operand[1].mode;
9364
9365 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9366 op0 = copy_to_mode_reg (mode0, op0);
9367 if (target == 0
9368 || GET_MODE (target) != tmode
9369 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9370 target = gen_reg_rtx (tmode);
9371 pat = GEN_FCN (icode) (target, op0);
9372 if (! pat)
9373 return 0;
9374 emit_insn (pat);
9375 return target;
9376 }
9377
9378 /* @@@ Should really do something sensible here. */
9379 return NULL_RTX;
9380 }
9381 \f
9382 /* Recursively search through all of the blocks in a function
9383 checking to see if any of the variables created in that
9384 function match the RTX called 'orig'. If they do then
9385 replace them with the RTX called 'new'. */
9386
9387 static void
9388 replace_symbols_in_block (block, orig, new)
9389 tree block;
9390 rtx orig;
9391 rtx new;
9392 {
9393 for (; block; block = BLOCK_CHAIN (block))
9394 {
9395 tree sym;
9396
9397 if (!TREE_USED (block))
9398 continue;
9399
9400 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
9401 {
9402 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
9403 || DECL_IGNORED_P (sym)
9404 || TREE_CODE (sym) != VAR_DECL
9405 || DECL_EXTERNAL (sym)
9406 || !rtx_equal_p (DECL_RTL (sym), orig)
9407 )
9408 continue;
9409
9410 SET_DECL_RTL (sym, new);
9411 }
9412
9413 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
9414 }
9415 }
9416
9417 /* Return the number (counting from 0) of
9418 the least significant set bit in MASK. */
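/* MASK must be nonzero, or the loop below will not terminate.
   For example, the least significant set bit of 0x14 is bit 2,
   so 2 is returned for that mask. */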
9419
9420 #ifdef __GNUC__
9421 inline
9422 #endif
9423 static int
9424 number_of_first_bit_set (mask)
9425 int mask;
9426 {
9427 int bit;
9428
9429 for (bit = 0;
9430 (mask & (1 << bit)) == 0;
9431 ++bit)
9432 continue;
9433
9434 return bit;
9435 }
9436
9437 /* Generate code to return from a thumb function.
9438 If 'reg_containing_return_addr' is -1, then the return address is
9439 actually on the stack, at the stack pointer. */
9440 static void
9441 thumb_exit (f, reg_containing_return_addr, eh_ofs)
9442 FILE * f;
9443 int reg_containing_return_addr;
9444 rtx eh_ofs;
9445 {
9446 unsigned regs_available_for_popping;
9447 unsigned regs_to_pop;
9448 int pops_needed;
9449 unsigned available;
9450 unsigned required;
9451 int mode;
9452 int size;
9453 int restore_a4 = FALSE;
9454
9455 /* Compute the registers we need to pop. */
9456 regs_to_pop = 0;
9457 pops_needed = 0;
9458
9459 /* There is an assumption here that if eh_ofs is not NULL, the
9460 normal return address will have been pushed. */
9461 if (reg_containing_return_addr == -1 || eh_ofs)
9462 {
9463 /* When we are generating a return for __builtin_eh_return,
9464 reg_containing_return_addr must specify the return regno. */
9465 if (eh_ofs && reg_containing_return_addr == -1)
9466 abort ();
9467
9468 regs_to_pop |= 1 << LR_REGNUM;
9469 ++pops_needed;
9470 }
9471
9472 if (TARGET_BACKTRACE)
9473 {
9474 /* Restore the (ARM) frame pointer and stack pointer. */
9475 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
9476 pops_needed += 2;
9477 }
9478
9479 /* If there is nothing to pop then just emit the BX instruction and
9480 return. */
9481 if (pops_needed == 0)
9482 {
9483 if (eh_ofs)
9484 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9485
9486 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9487 return;
9488 }
9489 /* Otherwise if we are not supporting interworking and we have not created
9490 a backtrace structure and the function was not entered in ARM mode then
9491 just pop the return address straight into the PC. */
9492 else if (!TARGET_INTERWORK
9493 && !TARGET_BACKTRACE
9494 && !is_called_in_ARM_mode (current_function_decl))
9495 {
9496 if (eh_ofs)
9497 {
9498 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
9499 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9500 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9501 }
9502 else
9503 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
9504
9505 return;
9506 }
9507
9508 /* Find out how many of the (return) argument registers we can corrupt. */
9509 regs_available_for_popping = 0;
9510
9511 /* If returning via __builtin_eh_return, the bottom three registers
9512 all contain information needed for the return. */
9513 if (eh_ofs)
9514 size = 12;
9515 else
9516 {
9517 #ifdef RTX_CODE
9518 /* We can deduce the registers used from the function's
9519 return value. This is more reliable than examining
9520 regs_ever_live[] because that will be set if the register is
9521 ever used in the function, not just if the register is used
9522 to hold a return value. */
9523
9524 if (current_function_return_rtx != 0)
9525 mode = GET_MODE (current_function_return_rtx);
9526 else
9527 #endif
9528 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9529
9530 size = GET_MODE_SIZE (mode);
9531
9532 if (size == 0)
9533 {
9534 /* In a void function we can use any argument register.
9535 In a function that returns a structure on the stack
9536 we can use the second and third argument registers. */
9537 if (mode == VOIDmode)
9538 regs_available_for_popping =
9539 (1 << ARG_REGISTER (1))
9540 | (1 << ARG_REGISTER (2))
9541 | (1 << ARG_REGISTER (3));
9542 else
9543 regs_available_for_popping =
9544 (1 << ARG_REGISTER (2))
9545 | (1 << ARG_REGISTER (3));
9546 }
9547 else if (size <= 4)
9548 regs_available_for_popping =
9549 (1 << ARG_REGISTER (2))
9550 | (1 << ARG_REGISTER (3));
9551 else if (size <= 8)
9552 regs_available_for_popping =
9553 (1 << ARG_REGISTER (3));
9554 }
9555
9556 /* Match registers to be popped with registers into which we pop them. */
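/* Each iteration below clears the least significant set bit of both
   masks (x & -x isolates that bit), pairing off one register to be
   popped with one register it can be popped into. */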
9557 for (available = regs_available_for_popping,
9558 required = regs_to_pop;
9559 required != 0 && available != 0;
9560 available &= ~(available & - available),
9561 required &= ~(required & - required))
9562 -- pops_needed;
9563
9564 /* If we have any popping registers left over, remove them. */
9565 if (available > 0)
9566 regs_available_for_popping &= ~available;
9567
9568 /* Otherwise if we need another popping register we can use
9569 the fourth argument register. */
9570 else if (pops_needed)
9571 {
9572 /* If we have not found any free argument registers and
9573 reg a4 contains the return address, we must move it. */
9574 if (regs_available_for_popping == 0
9575 && reg_containing_return_addr == LAST_ARG_REGNUM)
9576 {
9577 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
9578 reg_containing_return_addr = LR_REGNUM;
9579 }
9580 else if (size > 12)
9581 {
9582 /* Register a4 is being used to hold part of the return value,
9583 but we have dire need of a free, low register. */
9584 restore_a4 = TRUE;
9585
9586 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
9587 }
9588
9589 if (reg_containing_return_addr != LAST_ARG_REGNUM)
9590 {
9591 /* The fourth argument register is available. */
9592 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
9593
9594 --pops_needed;
9595 }
9596 }
9597
9598 /* Pop as many registers as we can. */
9599 thumb_pushpop (f, regs_available_for_popping, FALSE);
9600
9601 /* Process the registers we popped. */
9602 if (reg_containing_return_addr == -1)
9603 {
9604 /* The return address was popped into the lowest numbered register. */
9605 regs_to_pop &= ~(1 << LR_REGNUM);
9606
9607 reg_containing_return_addr =
9608 number_of_first_bit_set (regs_available_for_popping);
9609
9610 /* Remove this register from the mask of available registers, so that
9611 the return address will not be corrupted by further pops. */
9612 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
9613 }
9614
9615 /* If we popped other registers then handle them here. */
9616 if (regs_available_for_popping)
9617 {
9618 int frame_pointer;
9619
9620 /* Work out which register currently contains the frame pointer. */
9621 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
9622
9623 /* Move it into the correct place. */
9624 asm_fprintf (f, "\tmov\t%r, %r\n",
9625 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
9626
9627 /* (Temporarily) remove it from the mask of popped registers. */
9628 regs_available_for_popping &= ~(1 << frame_pointer);
9629 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
9630
9631 if (regs_available_for_popping)
9632 {
9633 int stack_pointer;
9634
9635 /* We popped the stack pointer as well,
9636 find the register that contains it. */
9637 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
9638
9639 /* Move it into the stack register. */
9640 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
9641
9642 /* At this point we have popped all necessary registers, so
9643 do not worry about restoring regs_available_for_popping
9644 to its correct value:
9645
9646 assert (pops_needed == 0)
9647 assert (regs_available_for_popping == (1 << frame_pointer))
9648 assert (regs_to_pop == (1 << STACK_POINTER)) */
9649 }
9650 else
9651 {
9652 /* Since we have just moved the popped value into the frame
9653 pointer, the popping register is available for reuse, and
9654 we know that we still have the stack pointer left to pop. */
9655 regs_available_for_popping |= (1 << frame_pointer);
9656 }
9657 }
9658
9659 /* If we still have registers left on the stack, but we no longer have
9660 any registers into which we can pop them, then we must move the return
9661 address into the link register and make available the register that
9662 contained it. */
9663 if (regs_available_for_popping == 0 && pops_needed > 0)
9664 {
9665 regs_available_for_popping |= 1 << reg_containing_return_addr;
9666
9667 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
9668 reg_containing_return_addr);
9669
9670 reg_containing_return_addr = LR_REGNUM;
9671 }
9672
9673 /* If we have registers left on the stack then pop some more.
9674 We know that at most we will want to pop FP and SP. */
9675 if (pops_needed > 0)
9676 {
9677 int popped_into;
9678 int move_to;
9679
9680 thumb_pushpop (f, regs_available_for_popping, FALSE);
9681
9682 /* We have popped either FP or SP.
9683 Move whichever one it is into the correct register. */
9684 popped_into = number_of_first_bit_set (regs_available_for_popping);
9685 move_to = number_of_first_bit_set (regs_to_pop);
9686
9687 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
9688
9689 regs_to_pop &= ~(1 << move_to);
9690
9691 --pops_needed;
9692 }
9693
9694 /* If we still have not popped everything then we must have only
9695 had one register available to us and we are now popping the SP. */
9696 if (pops_needed > 0)
9697 {
9698 int popped_into;
9699
9700 thumb_pushpop (f, regs_available_for_popping, FALSE);
9701
9702 popped_into = number_of_first_bit_set (regs_available_for_popping);
9703
9704 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
9705 /*
9706 assert (regs_to_pop == (1 << STACK_POINTER))
9707 assert (pops_needed == 1)
9708 */
9709 }
9710
9711 /* If necessary restore the a4 register. */
9712 if (restore_a4)
9713 {
9714 if (reg_containing_return_addr != LR_REGNUM)
9715 {
9716 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
9717 reg_containing_return_addr = LR_REGNUM;
9718 }
9719
9720 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
9721 }
9722
9723 if (eh_ofs)
9724 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
9725
9726 /* Return to caller. */
9727 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
9728 }
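/* Illustrative sketch, not part of the compiler: the epilogue logic
   above steers popped values through whatever low registers happen to
   be free.  A minimal host-side model of the "pop into the lowest
   available register, then mov into place" idea, assuming the same
   lowest-set-bit convention as number_of_first_bit_set (), could look
   like this:  */
#if 0	/* Example only; never compiled into GCC.  */
#include <stdio.h>

/* Index of the lowest set bit in MASK, or -1 if MASK is empty.  */
static int
lowest_set_bit (unsigned int mask)
{
  int i;

  for (i = 0; i < 32; i++)
    if (mask & (1u << i))
      return i;
  return -1;
}

int
main (void)
{
  unsigned int regs_available = (1u << 0) | (1u << 3);	/* r0 and r3 free.  */
  int scratch = lowest_set_bit (regs_available);

  /* A popped value lands in the lowest free register first...  */
  printf ("pop\t{r%d}\n", scratch);
  /* ...and is then moved to its final home, e.g. the frame pointer.  */
  printf ("mov\tr7, r%d\n", scratch);
  return 0;
}
#endif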
9729
9730 /* Emit code to push or pop registers to or from the stack. */
9731
9732 static void
9733 thumb_pushpop (f, mask, push)
9734 FILE * f;
9735 int mask;
9736 int push;
9737 {
9738 int regno;
9739 int lo_mask = mask & 0xFF;
9740
9741 if (lo_mask == 0 && !push && (mask & (1 << 15)))
9742 {
9743 	      /* Special case.  Do not generate a POP PC statement here; do it in
9744 		 thumb_exit() instead.  */
9745 thumb_exit (f, -1, NULL_RTX);
9746 return;
9747 }
9748
9749 fprintf (f, "\t%s\t{", push ? "push" : "pop");
9750
9751 /* Look at the low registers first. */
9752 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
9753 {
9754 if (lo_mask & 1)
9755 {
9756 asm_fprintf (f, "%r", regno);
9757
9758 if ((lo_mask & ~1) != 0)
9759 fprintf (f, ", ");
9760 }
9761 }
9762
9763 if (push && (mask & (1 << LR_REGNUM)))
9764 {
9765 /* Catch pushing the LR. */
9766 if (mask & 0xFF)
9767 fprintf (f, ", ");
9768
9769 asm_fprintf (f, "%r", LR_REGNUM);
9770 }
9771 else if (!push && (mask & (1 << PC_REGNUM)))
9772 {
9773 /* Catch popping the PC. */
9774 if (TARGET_INTERWORK || TARGET_BACKTRACE)
9775 {
9776 	  /* The PC is never popped directly; instead
9777 	     it is popped into r3 and then BX is used.  */
9778 fprintf (f, "}\n");
9779
9780 thumb_exit (f, -1, NULL_RTX);
9781
9782 return;
9783 }
9784 else
9785 {
9786 if (mask & 0xFF)
9787 fprintf (f, ", ");
9788
9789 asm_fprintf (f, "%r", PC_REGNUM);
9790 }
9791 }
9792
9793 fprintf (f, "}\n");
9794 }
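/* For example (illustrative only), a push with a mask covering r4-r7
   and LR emits:

	push	{r4, r5, r6, r7, lr}

   while the matching epilogue mask with the PC bit set emits
   "pop {r4, r5, r6, r7, pc}" when a direct pop of the PC is
   permitted (no interworking or backtrace structure).  */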
9795 \f
9796 void
9797 thumb_final_prescan_insn (insn)
9798 rtx insn;
9799 {
9800 if (flag_print_asm_name)
9801 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
9802 INSN_ADDRESSES (INSN_UID (insn)));
9803 }
9804
9805 int
9806 thumb_shiftable_const (val)
9807 unsigned HOST_WIDE_INT val;
9808 {
9809 unsigned HOST_WIDE_INT mask = 0xff;
9810 int i;
9811
9812 if (val == 0) /* XXX */
9813 return 0;
9814
9815 for (i = 0; i < 25; i++)
9816 if ((val & (mask << i)) == val)
9817 return 1;
9818
9819 return 0;
9820 }
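/* Illustrative sketch, not part of the compiler: the predicate above
   accepts any constant that is an 8-bit value shifted left by 0-24
   bits, since such a constant can be materialized with a move plus a
   shift.  A self-contained host-side copy for experimentation:  */
#if 0	/* Example only; never compiled into GCC.  */
#include <assert.h>

/* Host-side copy of thumb_shiftable_const.  */
static int
shiftable_p (unsigned long val)
{
  unsigned long mask = 0xff;
  int i;

  if (val == 0)
    return 0;
  for (i = 0; i < 25; i++)
    if ((val & (mask << i)) == val)
      return 1;
  return 0;
}

int
main (void)
{
  assert (shiftable_p (0xff000000UL));	/* 0xFF << 24: accepted.  */
  assert (shiftable_p (0x1fe0UL));	/* 0xFF << 5: accepted.  */
  assert (!shiftable_p (0x101UL));	/* Bits 0 and 8 span 9 bits: rejected.  */
  return 0;
}
#endif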
9821
9822 /* Returns non-zero if the current function contains,
9823    or might contain, a far jump.  */
9824
9825 int
9826 thumb_far_jump_used_p (int in_prologue)
9827 {
9828 rtx insn;
9829
9830 /* This test is only important for leaf functions. */
9831 /* assert (!leaf_function_p ()); */
9832
9833 /* If we have already decided that far jumps may be used,
9834 do not bother checking again, and always return true even if
9835 it turns out that they are not being used. Once we have made
9836 the decision that far jumps are present (and that hence the link
9837 register will be pushed onto the stack) we cannot go back on it. */
9838 if (cfun->machine->far_jump_used)
9839 return 1;
9840
9841 /* If this function is not being called from the prologue/epilogue
9842 generation code then it must be being called from the
9843 INITIAL_ELIMINATION_OFFSET macro. */
9844 if (!in_prologue)
9845 {
9846 /* In this case we know that we are being asked about the elimination
9847 of the arg pointer register. If that register is not being used,
9848 then there are no arguments on the stack, and we do not have to
9849 worry that a far jump might force the prologue to push the link
9850 register, changing the stack offsets. In this case we can just
9851 return false, since the presence of far jumps in the function will
9852 not affect stack offsets.
9853
9854 If the arg pointer is live (or if it was live, but has now been
9855 eliminated and so set to dead) then we do have to test to see if
9856 the function might contain a far jump. This test can lead to some
9857 	 false negatives, since before reload is completed the length of
9858 branch instructions is not known, so gcc defaults to returning their
9859 longest length, which in turn sets the far jump attribute to true.
9860
9861 A false negative will not result in bad code being generated, but it
9862 will result in a needless push and pop of the link register. We
9863 hope that this does not occur too often. */
9864 if (regs_ever_live [ARG_POINTER_REGNUM])
9865 cfun->machine->arg_pointer_live = 1;
9866 else if (!cfun->machine->arg_pointer_live)
9867 return 0;
9868 }
9869
9870 /* Check to see if the function contains a branch
9871 insn with the far jump attribute set. */
9872 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9873 {
9874 if (GET_CODE (insn) == JUMP_INSN
9875 /* Ignore tablejump patterns. */
9876 && GET_CODE (PATTERN (insn)) != ADDR_VEC
9877 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
9878 && get_attr_far_jump (insn) == FAR_JUMP_YES
9879 )
9880 {
9881 	  /* Record the fact that we have decided that
9882 the function does use far jumps. */
9883 cfun->machine->far_jump_used = 1;
9884 return 1;
9885 }
9886 }
9887
9888 return 0;
9889 }
9890
9891 /* Return non-zero if FUNC must be entered in ARM mode. */
9892
9893 int
9894 is_called_in_ARM_mode (func)
9895 tree func;
9896 {
9897 if (TREE_CODE (func) != FUNCTION_DECL)
9898 abort ();
9899
9900   /* Ignore the problem of functions whose address is taken.  */
9901 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
9902 return TRUE;
9903
9904 #ifdef ARM_PE
9905 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
9906 #else
9907 return FALSE;
9908 #endif
9909 }
9910
9911 /* The bits which aren't usefully expanded as rtl. */
9912
9913 const char *
9914 thumb_unexpanded_epilogue ()
9915 {
9916 int regno;
9917 int live_regs_mask = 0;
9918 int high_regs_pushed = 0;
9919 int leaf_function = leaf_function_p ();
9920 int had_to_push_lr;
9921 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9922
9923 if (return_used_this_function)
9924 return "";
9925
9926 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
9927 if (regs_ever_live[regno] && !call_used_regs[regno]
9928 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
9929 live_regs_mask |= 1 << regno;
9930
9931 for (regno = 8; regno < 13; regno++)
9932 {
9933 if (regs_ever_live[regno] && !call_used_regs[regno]
9934 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
9935 high_regs_pushed++;
9936 }
9937
9938   /* The prologue may have pushed some high registers to use as
9939      work registers, e.g. the testsuite file:
9940      gcc/testsuite/gcc.c-torture/execute/complex-2.c
9941 compiles to produce:
9942 push {r4, r5, r6, r7, lr}
9943 mov r7, r9
9944 mov r6, r8
9945 push {r6, r7}
9946      as part of the prologue.  We have to undo that pushing here.  */
9947
9948 if (high_regs_pushed)
9949 {
9950 int mask = live_regs_mask;
9951 int next_hi_reg;
9952 int size;
9953 int mode;
9954
9955 #ifdef RTX_CODE
9956       /* If possible, deduce the registers used from the function's return value.
9957 	 This is more reliable than examining regs_ever_live[] because that
9958 will be set if the register is ever used in the function, not just if
9959 the register is used to hold a return value. */
9960
9961 if (current_function_return_rtx != 0)
9962 mode = GET_MODE (current_function_return_rtx);
9963 else
9964 #endif
9965 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9966
9967 size = GET_MODE_SIZE (mode);
9968
9969       /* Unless we are returning a type of size > 12 bytes, register r3 is
9970          available.  */
9971 if (size < 13)
9972 mask |= 1 << 3;
9973
9974 if (mask == 0)
9975 /* Oh dear! We have no low registers into which we can pop
9976 high registers! */
9977 internal_error
9978 ("no low registers available for popping high registers");
9979
9980 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
9981 if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]
9982 && !(TARGET_SINGLE_PIC_BASE && (next_hi_reg == arm_pic_register)))
9983 break;
9984
9985 while (high_regs_pushed)
9986 {
9987 /* Find lo register(s) into which the high register(s) can
9988 be popped. */
9989 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
9990 {
9991 if (mask & (1 << regno))
9992 high_regs_pushed--;
9993 if (high_regs_pushed == 0)
9994 break;
9995 }
9996
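	  /* (2 << regno) - 1 is a mask with bits 0..regno set, e.g.
	     regno == 2 gives 0x7, so this discards any low registers
	     above the last one we will pop into.  */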
9997 	  mask &= (2 << regno) - 1;	/* A no-op if regno == 8.  */
9998
9999 /* Pop the values into the low register(s). */
10000 thumb_pushpop (asm_out_file, mask, 0);
10001
10002 /* Move the value(s) into the high registers. */
10003 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10004 {
10005 if (mask & (1 << regno))
10006 {
10007 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
10008 regno);
10009
10010 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
10011 if (regs_ever_live[next_hi_reg]
10012 && !call_used_regs[next_hi_reg]
10013 && !(TARGET_SINGLE_PIC_BASE
10014 && (next_hi_reg == arm_pic_register)))
10015 break;
10016 }
10017 }
10018 }
10019 }
10020
10021 had_to_push_lr = (live_regs_mask || !leaf_function
10022 || thumb_far_jump_used_p (1));
10023
10024 if (TARGET_BACKTRACE
10025 && ((live_regs_mask & 0xFF) == 0)
10026 && regs_ever_live [LAST_ARG_REGNUM] != 0)
10027 {
10028 /* The stack backtrace structure creation code had to
10029 push R7 in order to get a work register, so we pop
10030 it now. */
10031 live_regs_mask |= (1 << LAST_LO_REGNUM);
10032 }
10033
10034 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
10035 {
10036 if (had_to_push_lr
10037 && !is_called_in_ARM_mode (current_function_decl)
10038 && !eh_ofs)
10039 live_regs_mask |= 1 << PC_REGNUM;
10040
10041 /* Either no argument registers were pushed or a backtrace
10042 structure was created which includes an adjusted stack
10043 pointer, so just pop everything. */
10044 if (live_regs_mask)
10045 thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
10046
10047 if (eh_ofs)
10048 thumb_exit (asm_out_file, 2, eh_ofs);
10049 /* We have either just popped the return address into the
10050 	 PC, or it was kept in LR for the entire function, or
10051 it is still on the stack because we do not want to
10052 return by doing a pop {pc}. */
10053 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
10054 thumb_exit (asm_out_file,
10055 (had_to_push_lr
10056 && is_called_in_ARM_mode (current_function_decl)) ?
10057 -1 : LR_REGNUM, NULL_RTX);
10058 }
10059 else
10060 {
10061 /* Pop everything but the return address. */
10062 live_regs_mask &= ~(1 << PC_REGNUM);
10063
10064 if (live_regs_mask)
10065 thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
10066
10067 if (had_to_push_lr)
10068 /* Get the return address into a temporary register. */
10069 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0);
10070
10071 /* Remove the argument registers that were pushed onto the stack. */
10072 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
10073 SP_REGNUM, SP_REGNUM,
10074 current_function_pretend_args_size);
10075
10076 if (eh_ofs)
10077 thumb_exit (asm_out_file, 2, eh_ofs);
10078 else
10079 thumb_exit (asm_out_file,
10080 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
10081 }
10082
10083 return "";
10084 }
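/* Illustrative note: Thumb POP cannot target r8-r12 directly, so the
   loop above pairs each saved high register with a free low register,
   pops into the low registers and then moves the values up.  For two
   saved high registers and free low registers r2/r3 the emitted
   sequence is conceptually:

	pop	{r2, r3}
	mov	r8, r2
	mov	r9, r3
*/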
10085
10086 /* Functions to save and restore machine-specific function data. */
10087
10088 static struct machine_function *
10089 arm_init_machine_status ()
10090 {
10091 struct machine_function *machine;
10092   machine = (struct machine_function *) ggc_alloc_cleared (sizeof (struct machine_function));
10093
10094 #if ARM_FT_UNKNOWN != 0
10095 machine->func_type = ARM_FT_UNKNOWN;
10096 #endif
10097 return machine;
10098 }
10099
10100 /* Return an RTX indicating where the return address to the
10101 calling function can be found. */
10102
10103 rtx
10104 arm_return_addr (count, frame)
10105 int count;
10106 rtx frame ATTRIBUTE_UNUSED;
10107 {
10108 if (count != 0)
10109 return NULL_RTX;
10110
10111 if (TARGET_APCS_32)
10112 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
10113 else
10114 {
10115 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
10116 GEN_INT (RETURN_ADDR_MASK26));
10117 return get_func_hard_reg_initial_val (cfun, lr);
10118 }
10119 }
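/* Illustrative note: in 26-bit APCS mode the link register carries the
   condition flags in its top bits alongside the return address, so the
   AND with RETURN_ADDR_MASK26 above strips them.  Assuming the mask
   covers bits 2-25, e.g. 0xF40001A4 & RETURN_ADDR_MASK26 ==
   0x000001A4.  */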
10120
10121 /* Do anything needed before RTL is emitted for each function. */
10122
10123 void
10124 arm_init_expanders ()
10125 {
10126 /* Arrange to initialize and mark the machine per-function status. */
10127 init_machine_status = arm_init_machine_status;
10128 }
10129
10130 /* Generate the rest of a function's prologue. */
10131
10132 void
10133 thumb_expand_prologue ()
10134 {
10135 HOST_WIDE_INT amount = (get_frame_size ()
10136 + current_function_outgoing_args_size);
10137 unsigned long func_type;
10138
10139 func_type = arm_current_func_type ();
10140
10141 /* Naked functions don't have prologues. */
10142 if (IS_NAKED (func_type))
10143 return;
10144
10145 if (IS_INTERRUPT (func_type))
10146 {
10147 error ("interrupt Service Routines cannot be coded in Thumb mode");
10148 return;
10149 }
10150
10151 if (frame_pointer_needed)
10152 emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
10153
10154 if (amount)
10155 {
10156 amount = ROUND_UP (amount);
10157
10158 if (amount < 512)
10159 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10160 GEN_INT (- amount)));
10161 else
10162 {
10163 int regno;
10164 rtx reg;
10165
10166 /* The stack decrement is too big for an immediate value in a single
10167 insn. In theory we could issue multiple subtracts, but after
10168 three of them it becomes more space efficient to place the full
10169 value in the constant pool and load into a register. (Also the
10170 ARM debugger really likes to see only one stack decrement per
10171 function). So instead we look for a scratch register into which
10172 we can load the decrement, and then we subtract this from the
10173 stack pointer. Unfortunately on the thumb the only available
10174 scratch registers are the argument registers, and we cannot use
10175 these as they may hold arguments to the function. Instead we
10176 attempt to locate a call preserved register which is used by this
10177 function. If we can find one, then we know that it will have
10178 been pushed at the start of the prologue and so we can corrupt
10179 it now. */
10180 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
10181 if (regs_ever_live[regno]
10182 && !call_used_regs[regno] /* Paranoia */
10183 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register))
10184 && !(frame_pointer_needed
10185 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
10186 break;
10187
10188 if (regno > LAST_LO_REGNUM) /* Very unlikely */
10189 {
10190 rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
10191
10192 	      /* Choose an arbitrary, non-argument low register.  */
10193 reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
10194
10195 /* Save it by copying it into a high, scratch register. */
10196 emit_insn (gen_movsi (spare, reg));
10197 /* Add a USE to stop propagate_one_insn() from barfing. */
10198 emit_insn (gen_prologue_use (spare));
10199
10200 /* Decrement the stack. */
10201 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
10202 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10203 reg));
10204
10205 /* Restore the low register's original value. */
10206 emit_insn (gen_movsi (reg, spare));
10207
10208 /* Emit a USE of the restored scratch register, so that flow
10209 analysis will not consider the restore redundant. The
10210 register won't be used again in this function and isn't
10211 restored by the epilogue. */
10212 emit_insn (gen_prologue_use (reg));
10213 }
10214 else
10215 {
10216 reg = gen_rtx (REG, SImode, regno);
10217
10218 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
10219 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10220 reg));
10221 }
10222 }
10223 }
10224
10225 if (current_function_profile || TARGET_NO_SCHED_PRO)
10226 emit_insn (gen_blockage ());
10227 }
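/* Illustrative note: for a frame too large for the immediate field,
   the path above conceptually produces (assuming r7 was the located
   call-saved scratch register and a 4096-byte frame):

	ldr	r7, =-4096	@ constant-pool load of the decrement
	add	sp, sp, r7

   rather than a chain of small "sub sp, #..." instructions.  */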
10228
10229 void
10230 thumb_expand_epilogue ()
10231 {
10232 HOST_WIDE_INT amount = (get_frame_size ()
10233 + current_function_outgoing_args_size);
10234
10235   /* Naked functions don't have epilogues.  */
10236 if (IS_NAKED (arm_current_func_type ()))
10237 return;
10238
10239 if (frame_pointer_needed)
10240 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
10241 else if (amount)
10242 {
10243 amount = ROUND_UP (amount);
10244
10245 if (amount < 512)
10246 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10247 GEN_INT (amount)));
10248 else
10249 {
10250 /* r3 is always free in the epilogue. */
10251 rtx reg = gen_rtx (REG, SImode, LAST_ARG_REGNUM);
10252
10253 emit_insn (gen_movsi (reg, GEN_INT (amount)));
10254 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
10255 }
10256 }
10257
10258 /* Emit a USE (stack_pointer_rtx), so that
10259 the stack adjustment will not be deleted. */
10260 emit_insn (gen_prologue_use (stack_pointer_rtx));
10261
10262 if (current_function_profile || TARGET_NO_SCHED_PRO)
10263 emit_insn (gen_blockage ());
10264 }
10265
10266 static void
10267 thumb_output_function_prologue (f, size)
10268 FILE * f;
10269 HOST_WIDE_INT size ATTRIBUTE_UNUSED;
10270 {
10271 int live_regs_mask = 0;
10272 int high_regs_pushed = 0;
10273 int regno;
10274
10275 if (IS_NAKED (arm_current_func_type ()))
10276 return;
10277
10278 if (is_called_in_ARM_mode (current_function_decl))
10279 {
10280 const char * name;
10281
10282 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
10283 abort ();
10284 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
10285 abort ();
10286 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
10287
10288 /* Generate code sequence to switch us into Thumb mode. */
10289 /* The .code 32 directive has already been emitted by
10290 ASM_DECLARE_FUNCTION_NAME. */
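      /* In ARM state the PC reads as the address of the current
	 instruction plus 8, and a BX to an address with bit zero set
	 switches the processor into Thumb state, so ORRing 1 into the
	 PC value below builds a Thumb-mode entry point just past these
	 two instructions.  */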
10291 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
10292 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
10293
10294 /* Generate a label, so that the debugger will notice the
10295 change in instruction sets. This label is also used by
10296 the assembler to bypass the ARM code when this function
10297 is called from a Thumb encoded function elsewhere in the
10298 same file. Hence the definition of STUB_NAME here must
10299 	 agree with the definition in gas/config/tc-arm.c.  */
10300
10301 #define STUB_NAME ".real_start_of"
10302
10303 asm_fprintf (f, "\t.code\t16\n");
10304 #ifdef ARM_PE
10305 if (arm_dllexport_name_p (name))
10306 name = arm_strip_name_encoding (name);
10307 #endif
10308 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
10309 asm_fprintf (f, "\t.thumb_func\n");
10310 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
10311 }
10312
10313 if (current_function_pretend_args_size)
10314 {
10315 if (cfun->machine->uses_anonymous_args)
10316 {
10317 int num_pushes;
10318
10319 asm_fprintf (f, "\tpush\t{");
10320
10321 num_pushes = NUM_INTS (current_function_pretend_args_size);
10322
10323 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
10324 regno <= LAST_ARG_REGNUM;
10325 regno++)
10326 asm_fprintf (f, "%r%s", regno,
10327 regno == LAST_ARG_REGNUM ? "" : ", ");
10328
10329 asm_fprintf (f, "}\n");
10330 }
10331 else
10332 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
10333 SP_REGNUM, SP_REGNUM,
10334 current_function_pretend_args_size);
10335 }
10336
10337 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
10338 if (regs_ever_live[regno] && !call_used_regs[regno]
10339 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
10340 live_regs_mask |= 1 << regno;
10341
10342 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
10343 live_regs_mask |= 1 << LR_REGNUM;
10344
10345 if (TARGET_BACKTRACE)
10346 {
10347 int offset;
10348 int work_register = 0;
10349 int wr;
10350
10351 /* We have been asked to create a stack backtrace structure.
10352 The code looks like this:
10353
10354 0 .align 2
10355 0 func:
10356 0 sub SP, #16 Reserve space for 4 registers.
10357 2 push {R7} Get a work register.
10358 4 add R7, SP, #20 Get the stack pointer before the push.
10359 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
10360 8 mov R7, PC Get hold of the start of this code plus 12.
10361 10 str R7, [SP, #16] Store it.
10362 12 mov R7, FP Get hold of the current frame pointer.
10363 14 str R7, [SP, #4] Store it.
10364 16 mov R7, LR Get hold of the current return address.
10365 18 str R7, [SP, #12] Store it.
10366 20 add R7, SP, #16 Point at the start of the backtrace structure.
10367 22 mov FP, R7 Put this value into the frame pointer. */
10368
10369 if ((live_regs_mask & 0xFF) == 0)
10370 {
10371 /* See if the a4 register is free. */
10372
10373 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
10374 work_register = LAST_ARG_REGNUM;
10375 	  else	/* We must push a register of our own.  */
10376 live_regs_mask |= (1 << LAST_LO_REGNUM);
10377 }
10378
10379 if (work_register == 0)
10380 {
10381 /* Select a register from the list that will be pushed to
10382 use as our work register. */
10383 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
10384 if ((1 << work_register) & live_regs_mask)
10385 break;
10386 }
10387
10388 asm_fprintf
10389 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
10390 SP_REGNUM, SP_REGNUM);
10391
10392 if (live_regs_mask)
10393 thumb_pushpop (f, live_regs_mask, 1);
10394
10395 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
10396 if (wr & live_regs_mask)
10397 offset += 4;
10398
10399 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
10400 offset + 16 + current_function_pretend_args_size);
10401
10402 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10403 offset + 4);
10404
10405 /* Make sure that the instruction fetching the PC is in the right place
10406 to calculate "start of backtrace creation code + 12". */
10407 if (live_regs_mask)
10408 {
10409 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
10410 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10411 offset + 12);
10412 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
10413 ARM_HARD_FRAME_POINTER_REGNUM);
10414 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10415 offset);
10416 }
10417 else
10418 {
10419 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
10420 ARM_HARD_FRAME_POINTER_REGNUM);
10421 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10422 offset);
10423 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
10424 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10425 offset + 12);
10426 }
10427
10428 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
10429 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
10430 offset + 8);
10431 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
10432 offset + 12);
10433 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
10434 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
10435 }
10436 else if (live_regs_mask)
10437 thumb_pushpop (f, live_regs_mask, 1);
10438
10439 for (regno = 8; regno < 13; regno++)
10440 {
10441 if (regs_ever_live[regno] && !call_used_regs[regno]
10442 && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
10443 high_regs_pushed++;
10444 }
10445
10446 if (high_regs_pushed)
10447 {
10448 int pushable_regs = 0;
10449 int mask = live_regs_mask & 0xff;
10450 int next_hi_reg;
10451
10452 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
10453 {
10454 if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]
10455 && !(TARGET_SINGLE_PIC_BASE
10456 && (next_hi_reg == arm_pic_register)))
10457 break;
10458 }
10459
10460 pushable_regs = mask;
10461
10462 if (pushable_regs == 0)
10463 {
10464 /* Desperation time -- this probably will never happen. */
10465 if (regs_ever_live[LAST_ARG_REGNUM]
10466 || !call_used_regs[LAST_ARG_REGNUM])
10467 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
10468 mask = 1 << LAST_ARG_REGNUM;
10469 }
10470
10471 while (high_regs_pushed > 0)
10472 {
10473 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
10474 {
10475 if (mask & (1 << regno))
10476 {
10477 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
10478
10479 high_regs_pushed--;
10480
10481 if (high_regs_pushed)
10482 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
10483 next_hi_reg--)
10484 {
10485 if (regs_ever_live[next_hi_reg]
10486 && !call_used_regs[next_hi_reg]
10487 && !(TARGET_SINGLE_PIC_BASE
10488 && (next_hi_reg == arm_pic_register)))
10489 break;
10490 }
10491 else
10492 {
10493 mask &= ~((1 << regno) - 1);
10494 break;
10495 }
10496 }
10497 }
10498
10499 thumb_pushpop (f, mask, 1);
10500 }
10501
10502 if (pushable_regs == 0
10503 && (regs_ever_live[LAST_ARG_REGNUM]
10504 || !call_used_regs[LAST_ARG_REGNUM]))
10505 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
10506 }
10507 }
10508
10509 /* Handle the case of a double word load into a low register from
10510 a computed memory address. The computed address may involve a
10511 register which is overwritten by the load. */
10512
10513 const char *
10514 thumb_load_double_from_address (operands)
10515 rtx *operands;
10516 {
10517 rtx addr;
10518 rtx base;
10519 rtx offset;
10520 rtx arg1;
10521 rtx arg2;
10522
10523 if (GET_CODE (operands[0]) != REG)
10524 abort ();
10525
10526 if (GET_CODE (operands[1]) != MEM)
10527 abort ();
10528
10529 /* Get the memory address. */
10530 addr = XEXP (operands[1], 0);
10531
10532 /* Work out how the memory address is computed. */
10533 switch (GET_CODE (addr))
10534 {
10535 case REG:
10536 operands[2] = gen_rtx (MEM, SImode,
10537 plus_constant (XEXP (operands[1], 0), 4));
10538
10539 if (REGNO (operands[0]) == REGNO (addr))
10540 {
10541 output_asm_insn ("ldr\t%H0, %2", operands);
10542 output_asm_insn ("ldr\t%0, %1", operands);
10543 }
10544 else
10545 {
10546 output_asm_insn ("ldr\t%0, %1", operands);
10547 output_asm_insn ("ldr\t%H0, %2", operands);
10548 }
10549 break;
10550
10551 case CONST:
10552 /* Compute <address> + 4 for the high order load. */
10553 operands[2] = gen_rtx (MEM, SImode,
10554 plus_constant (XEXP (operands[1], 0), 4));
10555
10556 output_asm_insn ("ldr\t%0, %1", operands);
10557 output_asm_insn ("ldr\t%H0, %2", operands);
10558 break;
10559
10560 case PLUS:
10561 arg1 = XEXP (addr, 0);
10562 arg2 = XEXP (addr, 1);
10563
10564 if (CONSTANT_P (arg1))
10565 base = arg2, offset = arg1;
10566 else
10567 base = arg1, offset = arg2;
10568
10569 if (GET_CODE (base) != REG)
10570 abort ();
10571
10572       /* Catch the case of <address> = <reg> + <reg>.  */
10573 if (GET_CODE (offset) == REG)
10574 {
10575 int reg_offset = REGNO (offset);
10576 int reg_base = REGNO (base);
10577 int reg_dest = REGNO (operands[0]);
10578
10579 /* Add the base and offset registers together into the
10580 higher destination register. */
10581 	  asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
10582 reg_dest + 1, reg_base, reg_offset);
10583
10584 /* Load the lower destination register from the address in
10585 the higher destination register. */
10586 	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
10587 reg_dest, reg_dest + 1);
10588
10589 /* Load the higher destination register from its own address
10590 plus 4. */
10591 	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
10592 reg_dest + 1, reg_dest + 1);
10593 }
10594 else
10595 {
10596 /* Compute <address> + 4 for the high order load. */
10597 operands[2] = gen_rtx (MEM, SImode,
10598 plus_constant (XEXP (operands[1], 0), 4));
10599
10600 /* If the computed address is held in the low order register
10601 then load the high order register first, otherwise always
10602 load the low order register first. */
10603 if (REGNO (operands[0]) == REGNO (base))
10604 {
10605 output_asm_insn ("ldr\t%H0, %2", operands);
10606 output_asm_insn ("ldr\t%0, %1", operands);
10607 }
10608 else
10609 {
10610 output_asm_insn ("ldr\t%0, %1", operands);
10611 output_asm_insn ("ldr\t%H0, %2", operands);
10612 }
10613 }
10614 break;
10615
10616 case LABEL_REF:
10617 /* With no registers to worry about we can just load the value
10618 directly. */
10619 operands[2] = gen_rtx (MEM, SImode,
10620 plus_constant (XEXP (operands[1], 0), 4));
10621
10622 output_asm_insn ("ldr\t%H0, %2", operands);
10623 output_asm_insn ("ldr\t%0, %1", operands);
10624 break;
10625
10626 default:
10627 abort ();
10628 break;
10629 }
10630
10631 return "";
10632 }
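/* Illustrative note: the load ordering above matters.  If the
   destination pair is r0/r1 and the base register is also r0, loading
   the low word first would clobber the base:

	ldr	r0, [r0]	@ r0 is overwritten here...
	ldr	r1, [r0, #4]	@ ...so this reads from the wrong address

   Loading the high word (r1) first leaves r0 intact for the second
   load, which is why the REG and PLUS cases compare REGNO (operands[0])
   against the base register.  */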
10633
10634
10635 const char *
10636 thumb_output_move_mem_multiple (n, operands)
10637 int n;
10638 rtx * operands;
10639 {
10640 rtx tmp;
10641
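  /* LDM/STM transfer the lowest-numbered register to or from the
     lowest address, and the assembler expects the register list in
     ascending order, so the swaps below are a small bubble sort of
     the scratch registers.  */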
10642 switch (n)
10643 {
10644 case 2:
10645 if (REGNO (operands[4]) > REGNO (operands[5]))
10646 {
10647 tmp = operands[4];
10648 operands[4] = operands[5];
10649 operands[5] = tmp;
10650 }
10651 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
10652 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
10653 break;
10654
10655 case 3:
10656 if (REGNO (operands[4]) > REGNO (operands[5]))
10657 {
10658 tmp = operands[4];
10659 operands[4] = operands[5];
10660 operands[5] = tmp;
10661 }
10662 if (REGNO (operands[5]) > REGNO (operands[6]))
10663 {
10664 tmp = operands[5];
10665 operands[5] = operands[6];
10666 operands[6] = tmp;
10667 }
10668 if (REGNO (operands[4]) > REGNO (operands[5]))
10669 {
10670 tmp = operands[4];
10671 operands[4] = operands[5];
10672 operands[5] = tmp;
10673 }
10674
10675 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
10676 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
10677 break;
10678
10679 default:
10680 abort ();
10681 }
10682
10683 return "";
10684 }
10685
10686 /* Routines for generating rtl. */
10687
10688 void
10689 thumb_expand_movstrqi (operands)
10690 rtx * operands;
10691 {
10692 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
10693 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
10694 HOST_WIDE_INT len = INTVAL (operands[2]);
10695 HOST_WIDE_INT offset = 0;
10696
10697 while (len >= 12)
10698 {
10699 emit_insn (gen_movmem12b (out, in, out, in));
10700 len -= 12;
10701 }
10702
10703 if (len >= 8)
10704 {
10705 emit_insn (gen_movmem8b (out, in, out, in));
10706 len -= 8;
10707 }
10708
10709 if (len >= 4)
10710 {
10711 rtx reg = gen_reg_rtx (SImode);
10712 emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
10713 emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
10714 len -= 4;
10715 offset += 4;
10716 }
10717
10718 if (len >= 2)
10719 {
10720 rtx reg = gen_reg_rtx (HImode);
10721 emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
10722 plus_constant (in, offset))));
10723 emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
10724 reg));
10725 len -= 2;
10726 offset += 2;
10727 }
10728
10729 if (len)
10730 {
10731 rtx reg = gen_reg_rtx (QImode);
10732 emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
10733 plus_constant (in, offset))));
10734 emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
10735 reg));
10736 }
10737 }
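/* Illustrative sketch, not part of the compiler: the chunking above
   mirrors a simple host-side copy loop -- 12- and 8-byte ldmia/stmia
   blocks first, then a word, a half-word and a single byte for the
   tail:  */
#if 0	/* Example only; never compiled into GCC.  */
#include <string.h>

static void
copy_like_movstrqi (char *out, const char *in, long len)
{
  long offset = 0;

  while (len >= 12)	/* movmem12b  */
    { memcpy (out + offset, in + offset, 12); offset += 12; len -= 12; }
  if (len >= 8)		/* movmem8b  */
    { memcpy (out + offset, in + offset, 8); offset += 8; len -= 8; }
  if (len >= 4)		/* one SImode move  */
    { memcpy (out + offset, in + offset, 4); offset += 4; len -= 4; }
  if (len >= 2)		/* one HImode move  */
    { memcpy (out + offset, in + offset, 2); offset += 2; len -= 2; }
  if (len)		/* one QImode move  */
    memcpy (out + offset, in + offset, 1);
}
#endif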
10738
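/* Predicate for the second operand of a Thumb compare: any register,
   or a constant that fits the 8-bit unsigned immediate field of the
   Thumb CMP instruction (0-255).  */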
10739 int
10740 thumb_cmp_operand (op, mode)
10741 rtx op;
10742 enum machine_mode mode;
10743 {
10744 return ((GET_CODE (op) == CONST_INT
10745 && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
10746 || register_operand (op, mode));
10747 }
10748
10749 static const char *
10750 thumb_condition_code (x, invert)
10751 rtx x;
10752 int invert;
10753 {
10754 static const char * const conds[] =
10755 {
10756 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
10757 "hi", "ls", "ge", "lt", "gt", "le"
10758 };
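  /* The table pairs each condition with its logical inverse at the
     adjacent index (eq/ne, cs/cc, ...), so conds[val ^ 1] is the
     inverted condition and INVERT below reduces to an XOR.  */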
10759 int val;
10760
10761 switch (GET_CODE (x))
10762 {
10763 case EQ: val = 0; break;
10764 case NE: val = 1; break;
10765 case GEU: val = 2; break;
10766 case LTU: val = 3; break;
10767 case GTU: val = 8; break;
10768 case LEU: val = 9; break;
10769 case GE: val = 10; break;
10770 case LT: val = 11; break;
10771 case GT: val = 12; break;
10772 case LE: val = 13; break;
10773 default:
10774 abort ();
10775 }
10776
10777 return conds[val ^ invert];
10778 }
10779
10780 /* Handle storing a half-word to memory during reload. */
10781
10782 void
10783 thumb_reload_out_hi (operands)
10784 rtx * operands;
10785 {
10786 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
10787 }
10788
10789 /* Handle loading a half-word from memory during reload.  */
10790
10791 void
10792 thumb_reload_in_hi (operands)
10793 rtx * operands ATTRIBUTE_UNUSED;
10794 {
10795 abort ();
10796 }
10797
10798 /* Return the length of a function name prefix
10799 that starts with the character 'c'. */
10800
10801 static int
10802 arm_get_strip_length (char c)
10803 {
10804 switch (c)
10805 {
10806 ARM_NAME_ENCODING_LENGTHS
10807 default: return 0;
10808 }
10809 }
10810
10811 /* Return a pointer to a function's name with any
10812 and all prefix encodings stripped from it. */
10813
10814 const char *
10815 arm_strip_name_encoding (const char * name)
10816 {
10817 int skip;
10818
10819 while ((skip = arm_get_strip_length (* name)))
10820 name += skip;
10821
10822 return name;
10823 }
10824
10825 rtx aof_pic_label;
10826
10827 #ifdef AOF_ASSEMBLER
10828 /* Special functions only needed when producing AOF syntax assembler. */
10829
10830 struct pic_chain
10831 {
10832 struct pic_chain * next;
10833 const char * symname;
10834 };
10835
10836 static struct pic_chain * aof_pic_chain = NULL;
10837
10838 rtx
10839 aof_pic_entry (x)
10840 rtx x;
10841 {
10842 struct pic_chain ** chainp;
10843 int offset;
10844
10845 if (aof_pic_label == NULL_RTX)
10846 {
10847 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
10848 }
10849
10850 for (offset = 0, chainp = &aof_pic_chain; *chainp;
10851 offset += 4, chainp = &(*chainp)->next)
10852 if ((*chainp)->symname == XSTR (x, 0))
10853 return plus_constant (aof_pic_label, offset);
10854
10855 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
10856 (*chainp)->next = NULL;
10857 (*chainp)->symname = XSTR (x, 0);
10858 return plus_constant (aof_pic_label, offset);
10859 }
10860
10861 void
10862 aof_dump_pic_table (f)
10863 FILE * f;
10864 {
10865 struct pic_chain * chain;
10866
10867 if (aof_pic_chain == NULL)
10868 return;
10869
10870 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
10871 PIC_OFFSET_TABLE_REGNUM,
10872 PIC_OFFSET_TABLE_REGNUM);
10873 fputs ("|x$adcons|\n", f);
10874
10875 for (chain = aof_pic_chain; chain; chain = chain->next)
10876 {
10877 fputs ("\tDCD\t", f);
10878 assemble_name (f, chain->symname);
10879 fputs ("\n", f);
10880 }
10881 }
10882
10883 int arm_text_section_count = 1;
10884
10885 char *
10886 aof_text_section ()
10887 {
10888 static char buf[100];
10889 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
10890 arm_text_section_count++);
10891 if (flag_pic)
10892 strcat (buf, ", PIC, REENTRANT");
10893 return buf;
10894 }
10895
10896 static int arm_data_section_count = 1;
10897
10898 char *
10899 aof_data_section ()
10900 {
10901 static char buf[100];
10902 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
10903 return buf;
10904 }
10905
10906 /* The AOF assembler is religiously strict about declarations of
10907 imported and exported symbols, so that it is impossible to declare
10908 a function as imported near the beginning of the file, and then to
10909 export it later on. It is, however, possible to delay the decision
10910 until all the functions in the file have been compiled. To get
10911 around this, we maintain a list of the imports and exports, and
10912 delete from it any that are subsequently defined. At the end of
10913 compilation we spit the remainder of the list out before the END
10914 directive. */
10915
10916 struct import
10917 {
10918 struct import * next;
10919 const char * name;
10920 };
10921
10922 static struct import * imports_list = NULL;
10923
10924 void
10925 aof_add_import (name)
10926 const char * name;
10927 {
10928 struct import * new;
10929
10930 for (new = imports_list; new; new = new->next)
10931 if (new->name == name)
10932 return;
10933
10934 new = (struct import *) xmalloc (sizeof (struct import));
10935 new->next = imports_list;
10936 imports_list = new;
10937 new->name = name;
10938 }
10939
10940 void
10941 aof_delete_import (name)
10942 const char * name;
10943 {
10944 struct import ** old;
10945
10946 for (old = &imports_list; *old; old = & (*old)->next)
10947 {
10948 if ((*old)->name == name)
10949 {
10950 *old = (*old)->next;
10951 return;
10952 }
10953 }
10954 }
10955
10956 int arm_main_function = 0;
10957
10958 void
10959 aof_dump_imports (f)
10960 FILE * f;
10961 {
10962 /* The AOF assembler needs this to cause the startup code to be extracted
10963      from the library.  Bringing in __main causes the whole thing to work
10964 automagically. */
10965 if (arm_main_function)
10966 {
10967 text_section ();
10968 fputs ("\tIMPORT __main\n", f);
10969 fputs ("\tDCD __main\n", f);
10970 }
10971
10972 /* Now dump the remaining imports. */
10973 while (imports_list)
10974 {
10975 fprintf (f, "\tIMPORT\t");
10976 assemble_name (f, imports_list->name);
10977 fputc ('\n', f);
10978 imports_list = imports_list->next;
10979 }
10980 }
10981 #endif /* AOF_ASSEMBLER */
10982
10983 #ifdef OBJECT_FORMAT_ELF
10984 /* Switch to an arbitrary section NAME with attributes as specified
10985 by FLAGS. ALIGN specifies any known alignment requirements for
10986 the section; 0 if the default should be used.
10987
10988 Differs from the default elf version only in the prefix character
10989 used before the section type. */
10990
10991 static void
10992 arm_elf_asm_named_section (name, flags)
10993 const char *name;
10994 unsigned int flags;
10995 {
10996 char flagchars[8], *f = flagchars;
10997 const char *type;
10998
10999 if (!(flags & SECTION_DEBUG))
11000 *f++ = 'a';
11001 if (flags & SECTION_WRITE)
11002 *f++ = 'w';
11003 if (flags & SECTION_CODE)
11004 *f++ = 'x';
11005 if (flags & SECTION_SMALL)
11006 *f++ = 's';
11007 if (flags & SECTION_MERGE)
11008 *f++ = 'M';
11009 if (flags & SECTION_STRINGS)
11010 *f++ = 'S';
11011 *f = '\0';
11012
11013 if (flags & SECTION_BSS)
11014 type = "nobits";
11015 else
11016 type = "progbits";
11017
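  /* For example (illustrative, hypothetical section name): a writable
     data section ".mydata" with no entry size would be emitted as:

	.section	.mydata,"aw",%progbits
  */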
11018 if (flags & SECTION_ENTSIZE)
11019 fprintf (asm_out_file, "\t.section\t%s,\"%s\",%%%s,%d\n",
11020 name, flagchars, type, flags & SECTION_ENTSIZE);
11021 else
11022 fprintf (asm_out_file, "\t.section\t%s,\"%s\",%%%s\n",
11023 name, flagchars, type);
11024 }
11025 #endif
11026
11027 #ifndef ARM_PE
11028 /* Symbols in the text segment can be accessed without indirecting via the
11029 constant pool; it may take an extra binary operation, but this is still
11030 faster than indirecting via memory. Don't do this when not optimizing,
11031    since we won't be calculating all of the offsets necessary to do this
11032 simplification. */
11033
11034 static void
11035 arm_encode_section_info (decl, first)
11036 tree decl;
11037 int first;
11038 {
11039 /* This doesn't work with AOF syntax, since the string table may be in
11040 a different AREA. */
11041 #ifndef AOF_ASSEMBLER
11042 if (optimize > 0 && TREE_CONSTANT (decl)
11043 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
11044 {
11045 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
11046 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
11047 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
11048 }
11049 #endif
11050
11051 /* If we are referencing a function that is weak then encode a long call
11052      flag in the function name, otherwise if the function is static
11053      or known to be defined in this file then encode a short call flag.  */
11054 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
11055 {
11056 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
11057 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
11058 else if (! TREE_PUBLIC (decl))
11059 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
11060 }
11061 }
11062 #endif /* !ARM_PE */