/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
\f
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
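
/* For example, "-mfixed-range=fr4-fr31" (an illustrative option string,
   matching the kernel-mode use case mentioned above) marks fr4 through
   fr31 as fixed and call-used; assuming that range covers every FP
   register GCC allocates, the trailing check then sets
   MASK_DISABLE_FPREGS as well.  */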

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{

  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
\f
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
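
/* A concrete illustration of the check above: ldil fills the upper 21
   bits of a 32-bit word, so the low 11 bits of IVAL must be zero, and
   the (HOST_WIDE_INT) -1 << 31 part of the mask requires every bit from
   31 up to be uniform so the value survives sign extension from 32 to
   64 bits.  E.g., 0x12345000 is accepted, while a positive 64-bit
   0x80000000 is rejected because it would change sign when truncated
   to 32 bits.  */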

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
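
/* Worked example of the test above: x = 0x1b0 (binary 1'1011'0000)
   gives lsb_mask = 0x10 and t = ((0x1b0 >> 4) + 0x10) & ~0xf = 0x20,
   a power of two, so 0x1b0 is accepted -- it is the 5-bit value 0b11011
   deposited at bit 4.  By contrast, x = 0x101 gives t = 0x11, which has
   two bits set, so it is rejected.  */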

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
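
/* In both predicates, mask += mask & -mask adds the least significant
   set bit to the mask; when the set bits form one contiguous run, the
   carry ripples through the run and leaves at most one bit set, which
   the final power-of-two test accepts.  E.g., pa_ior_mask_p (0x78)
   computes 0x78 + 0x8 = 0x80 and succeeds, while pa_ior_mask_p (0x88)
   computes 0x88 + 0x8 = 0x90 and fails because two separate runs
   remain.  pa_and_mask_p applies the same test to the complement of
   the mask.  */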
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have a label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
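
/* A worked example of the transformation above, using the MODE_INT mask
   0x3fff: for memory (X + 0x5432), the low bits 0x1432 are below the
   halfway point 0x2000, so we round down to Y = 0x4000 and emit
   Z = X + 0x4000; memory (Z + 0x1432).  The residual displacement 0x1432
   fits in 14 bits, and every offset in 0x2000..0x5fff maps to the same
   Z, which is what lets CSE share it between nearby references.  */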

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (pa_tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine cannot
         handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* Compute the cost of an address.  On the HPPA, addresses formed from a
   REG, a REG+CONST PLUS, or a LO_SUM are cheapest (cost 1); a HIGH costs
   2; and everything else, including symbolic constants and PIC
   references, is most expensive (cost 4).

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
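
/* For instance, with the quadratic scaling above, a DImode multiply on
   a 32-bit target has factor = 8 / 4 = 2, so it is costed at
   2 * 2 = 4 times the base multiply cost: 4 * COSTS_N_INSNS (8) when
   hardware FP multiplies are available, 4 * COSTS_N_INSNS (20)
   otherwise.  */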

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
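  /* For example, (mem (plus (reg) (const_int 0x4000))) cannot be used
     directly by an FP load or store (0x4000 is an illustrative
     displacement that does not fit in 14 bits), so the branches below
     first form the out-of-range address in SCRATCH_REG and then use the
     scratch register as the memory address.  */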
1663 if (scratch_reg
1664 && fp_reg_operand (operand0, mode)
1665 && (MEM_P (operand1)
1666 || (GET_CODE (operand1) == SUBREG
1667 && MEM_P (XEXP (operand1, 0))))
1668 && !floating_point_store_memory_operand (operand1, mode))
1669 {
1670 if (GET_CODE (operand1) == SUBREG)
1671 operand1 = XEXP (operand1, 0);
1672
1673 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1674 it in WORD_MODE regardless of what mode it was originally given
1675 to us. */
1676 scratch_reg = force_mode (word_mode, scratch_reg);
1677
1678 /* D might not fit in 14 bits either; for such cases load D into
1679 scratch reg. */
1680 if (reg_plus_base_memory_operand (operand1, mode)
1681 && !(TARGET_PA_20
1682 && !TARGET_ELF32
1683 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1684 {
1685 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1686 emit_move_insn (scratch_reg,
1687 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1688 Pmode,
1689 XEXP (XEXP (operand1, 0), 0),
1690 scratch_reg));
1691 }
1692 else
1693 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1694 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1695 replace_equiv_address (operand1, scratch_reg)));
1696 return 1;
1697 }
1698 else if (scratch_reg
1699 && fp_reg_operand (operand1, mode)
1700 && (MEM_P (operand0)
1701 || (GET_CODE (operand0) == SUBREG
1702 && MEM_P (XEXP (operand0, 0))))
1703 && !floating_point_store_memory_operand (operand0, mode))
1704 {
1705 if (GET_CODE (operand0) == SUBREG)
1706 operand0 = XEXP (operand0, 0);
1707
1708 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1709 it in WORD_MODE regardless of what mode it was originally given
1710 to us. */
1711 scratch_reg = force_mode (word_mode, scratch_reg);
1712
1713 /* D might not fit in 14 bits either; for such cases load D into
1714 scratch reg. */
1715 if (reg_plus_base_memory_operand (operand0, mode)
1716 && !(TARGET_PA_20
1717 && !TARGET_ELF32
1718 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1719 {
1720 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1721 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1722 0)),
1723 Pmode,
1724 XEXP (XEXP (operand0, 0),
1725 0),
1726 scratch_reg));
1727 }
1728 else
1729 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1730 emit_insn (gen_rtx_SET (VOIDmode,
1731 replace_equiv_address (operand0, scratch_reg),
1732 operand1));
1733 return 1;
1734 }
1735 /* Handle secondary reloads for loads of FP registers from constant
1736 expressions by forcing the constant into memory. For the most part,
1737 this is only necessary for SImode and DImode.
1738
1739 Use scratch_reg to hold the address of the memory location. */
1740 else if (scratch_reg
1741 && CONSTANT_P (operand1)
1742 && fp_reg_operand (operand0, mode))
1743 {
1744 rtx const_mem, xoperands[2];
1745
1746 if (operand1 == CONST0_RTX (mode))
1747 {
1748 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1749 return 1;
1750 }
1751
1752 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1753 it in WORD_MODE regardless of what mode it was originally given
1754 to us. */
1755 scratch_reg = force_mode (word_mode, scratch_reg);
1756
1757 /* Force the constant into memory and put the address of the
1758 memory location into scratch_reg. */
1759 const_mem = force_const_mem (mode, operand1);
1760 xoperands[0] = scratch_reg;
1761 xoperands[1] = XEXP (const_mem, 0);
1762 pa_emit_move_sequence (xoperands, Pmode, 0);
1763
1764 /* Now load the destination register. */
1765 emit_insn (gen_rtx_SET (mode, operand0,
1766 replace_equiv_address (const_mem, scratch_reg)));
1767 return 1;
1768 }
1769 /* Handle secondary reloads for SAR. These occur when trying to load
1770 the SAR from memory or a constant. */
1771 else if (scratch_reg
1772 && GET_CODE (operand0) == REG
1773 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1774 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1775 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1776 {
1777 /* D might not fit in 14 bits either; for such cases load D into
1778 scratch reg. */
1779 if (GET_CODE (operand1) == MEM
1780 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1781 {
1782 /* We are reloading the address into the scratch register, so we
1783 want to make sure the scratch register is a full register. */
1784 scratch_reg = force_mode (word_mode, scratch_reg);
1785
1786 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1787 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1788 0)),
1789 Pmode,
1790 XEXP (XEXP (operand1, 0),
1791 0),
1792 scratch_reg));
1793
1794 /* Now we are going to load the scratch register from memory;
1795 we want to load it in the same width as the original MEM,
1796 which must be the same as the width of the ultimate destination,
1797 OPERAND0. */
1798 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1799
1800 emit_move_insn (scratch_reg,
1801 replace_equiv_address (operand1, scratch_reg));
1802 }
1803 else
1804 {
1805 /* We want to load the scratch register using the same mode as
1806 the ultimate destination. */
1807 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1808
1809 emit_move_insn (scratch_reg, operand1);
1810 }
1811
1812 /* And emit the insn to set the ultimate destination. We know that
1813 the scratch register has the same mode as the destination at this
1814 point. */
1815 emit_move_insn (operand0, scratch_reg);
1816 return 1;
1817 }
1818 /* Handle the most common case: storing into a register. */
1819 else if (register_operand (operand0, mode))
1820 {
1821 /* Legitimize TLS symbol references. This happens for references
1822 that aren't legitimate constants. */
1823 if (PA_SYMBOL_REF_TLS_P (operand1))
1824 operand1 = legitimize_tls_address (operand1);
1825
1826 if (register_operand (operand1, mode)
1827 || (GET_CODE (operand1) == CONST_INT
1828 && pa_cint_ok_for_move (INTVAL (operand1)))
1829 || (operand1 == CONST0_RTX (mode))
1830 || (GET_CODE (operand1) == HIGH
1831 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1832 /* Only `general_operands' can come here, so MEM is ok. */
1833 || GET_CODE (operand1) == MEM)
1834 {
1835 /* Various sets are created during RTL generation which don't
1836 have the REG_POINTER flag correctly set. After the CSE pass,
1837 instruction recognition can fail if we don't consistently
1838 set this flag when performing register copies. This should
1839 also improve the opportunities for creating insns that use
1840 unscaled indexing. */
1841 if (REG_P (operand0) && REG_P (operand1))
1842 {
1843 if (REG_POINTER (operand1)
1844 && !REG_POINTER (operand0)
1845 && !HARD_REGISTER_P (operand0))
1846 copy_reg_pointer (operand0, operand1);
1847 }
1848
1849 /* When MEMs are broken out, the REG_POINTER flag doesn't
1850 get set. In some cases, we can set the REG_POINTER flag
1851 from the declaration for the MEM. */
1852 if (REG_P (operand0)
1853 && GET_CODE (operand1) == MEM
1854 && !REG_POINTER (operand0))
1855 {
1856 tree decl = MEM_EXPR (operand1);
1857
1858 /* Set the register pointer flag and register alignment
1859 if the declaration for this memory reference is a
1860 pointer type. */
1861 if (decl)
1862 {
1863 tree type;
1864
1865 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1866 tree operand 1. */
1867 if (TREE_CODE (decl) == COMPONENT_REF)
1868 decl = TREE_OPERAND (decl, 1);
1869
1870 type = TREE_TYPE (decl);
1871 type = strip_array_types (type);
1872
1873 if (POINTER_TYPE_P (type))
1874 {
1875 int align;
1876
1877 type = TREE_TYPE (type);
1878 /* Using TYPE_ALIGN_OK is rather conservative as
1879 only the Ada front end actually sets it. */
1880 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1881 : BITS_PER_UNIT);
1882 mark_reg_pointer (operand0, align);
1883 }
1884 }
1885 }
1886
1887 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1888 return 1;
1889 }
1890 }
1891 else if (GET_CODE (operand0) == MEM)
1892 {
1893 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1894 && !(reload_in_progress || reload_completed))
1895 {
1896 rtx temp = gen_reg_rtx (DFmode);
1897
1898 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1899 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1900 return 1;
1901 }
1902 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1903 {
1904 /* Run this case quickly. */
1905 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1906 return 1;
1907 }
1908 if (! (reload_in_progress || reload_completed))
1909 {
1910 operands[0] = validize_mem (operand0);
1911 operands[1] = operand1 = force_reg (mode, operand1);
1912 }
1913 }
1914
1915 /* Simplify the source if we need to.
1916 Note we do have to handle function labels here, even though we do
1917 not consider them legitimate constants. Loop optimizations can
1918 call emit_move_xxx with one as a source. */
1919 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1920 || (GET_CODE (operand1) == HIGH
1921 && symbolic_operand (XEXP (operand1, 0), mode))
1922 || function_label_operand (operand1, VOIDmode)
1923 || pa_tls_referenced_p (operand1))
1924 {
1925 int ishighonly = 0;
1926
1927 if (GET_CODE (operand1) == HIGH)
1928 {
1929 ishighonly = 1;
1930 operand1 = XEXP (operand1, 0);
1931 }
1932 if (symbolic_operand (operand1, mode))
1933 {
1934 /* Argh. The assembler and linker can't handle arithmetic
1935 involving plabels.
1936
1937 So we force the plabel into memory, load operand0 from
1938 the memory location, then add in the constant part. */
1939 if ((GET_CODE (operand1) == CONST
1940 && GET_CODE (XEXP (operand1, 0)) == PLUS
1941 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1942 VOIDmode))
1943 || function_label_operand (operand1, VOIDmode))
1944 {
1945 rtx temp, const_part;
1946
1947 /* Figure out what (if any) scratch register to use. */
1948 if (reload_in_progress || reload_completed)
1949 {
1950 scratch_reg = scratch_reg ? scratch_reg : operand0;
1951 /* SCRATCH_REG will hold an address and maybe the actual
1952 data. We want it in WORD_MODE regardless of what mode it
1953 was originally given to us. */
1954 scratch_reg = force_mode (word_mode, scratch_reg);
1955 }
1956 else if (flag_pic)
1957 scratch_reg = gen_reg_rtx (Pmode);
1958
1959 if (GET_CODE (operand1) == CONST)
1960 {
1961 /* Save away the constant part of the expression. */
1962 const_part = XEXP (XEXP (operand1, 0), 1);
1963 gcc_assert (GET_CODE (const_part) == CONST_INT);
1964
1965 /* Force the function label into memory. */
1966 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1967 }
1968 else
1969 {
1970 /* No constant part. */
1971 const_part = NULL_RTX;
1972
1973 /* Force the function label into memory. */
1974 temp = force_const_mem (mode, operand1);
1975 }
1976
1977
1978 /* Get the address of the memory location. PIC-ify it if
1979 necessary. */
1980 temp = XEXP (temp, 0);
1981 if (flag_pic)
1982 temp = legitimize_pic_address (temp, mode, scratch_reg);
1983
1984 /* Put the address of the memory location into our destination
1985 register. */
1986 operands[1] = temp;
1987 pa_emit_move_sequence (operands, mode, scratch_reg);
1988
1989 /* Now load from the memory location into our destination
1990 register. */
1991 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1992 pa_emit_move_sequence (operands, mode, scratch_reg);
1993
1994 /* And add back in the constant part. */
1995 if (const_part != NULL_RTX)
1996 expand_inc (operand0, const_part);
1997
1998 return 1;
1999 }
2000
2001 if (flag_pic)
2002 {
2003 rtx temp;
2004
2005 if (reload_in_progress || reload_completed)
2006 {
2007 temp = scratch_reg ? scratch_reg : operand0;
2008 /* TEMP will hold an address and maybe the actual
2009 data. We want it in WORD_MODE regardless of what mode it
2010 was originally given to us. */
2011 temp = force_mode (word_mode, temp);
2012 }
2013 else
2014 temp = gen_reg_rtx (Pmode);
2015
2016 /* (const (plus (symbol) (const_int))) must be forced to
2017 memory during/after reload if the const_int will not fit
2018 in 14 bits. */
2019 if (GET_CODE (operand1) == CONST
2020 && GET_CODE (XEXP (operand1, 0)) == PLUS
2021 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2022 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2023 && (reload_completed || reload_in_progress)
2024 && flag_pic)
2025 {
2026 rtx const_mem = force_const_mem (mode, operand1);
2027 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2028 mode, temp);
2029 operands[1] = replace_equiv_address (const_mem, operands[1]);
2030 pa_emit_move_sequence (operands, mode, temp);
2031 }
2032 else
2033 {
2034 operands[1] = legitimize_pic_address (operand1, mode, temp);
2035 if (REG_P (operand0) && REG_P (operands[1]))
2036 copy_reg_pointer (operand0, operands[1]);
2037 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2038 }
2039 }
2040 /* On the HPPA, references to data space are supposed to use dp,
2041 register 27, but showing it in the RTL inhibits various cse
2042 and loop optimizations. */
2043 else
2044 {
2045 rtx temp, set;
2046
2047 if (reload_in_progress || reload_completed)
2048 {
2049 temp = scratch_reg ? scratch_reg : operand0;
2050 /* TEMP will hold an address and maybe the actual
2051 data. We want it in WORD_MODE regardless of what mode it
2052 was originally given to us. */
2053 temp = force_mode (word_mode, temp);
2054 }
2055 else
2056 temp = gen_reg_rtx (mode);
2057
2058 /* Loading a SYMBOL_REF into a register makes that register
2059 safe to be used as the base in an indexed address.
2060
2061 Don't mark hard registers though. That loses. */
2062 if (GET_CODE (operand0) == REG
2063 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2064 mark_reg_pointer (operand0, BITS_PER_UNIT);
2065 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2066 mark_reg_pointer (temp, BITS_PER_UNIT);
2067
2068 if (ishighonly)
2069 set = gen_rtx_SET (mode, operand0, temp);
2070 else
2071 set = gen_rtx_SET (VOIDmode,
2072 operand0,
2073 gen_rtx_LO_SUM (mode, temp, operand1));
2074
2075 emit_insn (gen_rtx_SET (VOIDmode,
2076 temp,
2077 gen_rtx_HIGH (mode, operand1)));
2078 emit_insn (set);
2079
2080 }
2081 return 1;
2082 }
2083 else if (pa_tls_referenced_p (operand1))
2084 {
2085 rtx tmp = operand1;
2086 rtx addend = NULL;
2087
2088 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2089 {
2090 addend = XEXP (XEXP (tmp, 0), 1);
2091 tmp = XEXP (XEXP (tmp, 0), 0);
2092 }
2093
2094 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2095 tmp = legitimize_tls_address (tmp);
2096 if (addend)
2097 {
2098 tmp = gen_rtx_PLUS (mode, tmp, addend);
2099 tmp = force_operand (tmp, operands[0]);
2100 }
2101 operands[1] = tmp;
2102 }
2103 else if (GET_CODE (operand1) != CONST_INT
2104 || !pa_cint_ok_for_move (INTVAL (operand1)))
2105 {
2106 rtx insn, temp;
2107 rtx op1 = operand1;
2108 HOST_WIDE_INT value = 0;
2109 HOST_WIDE_INT insv = 0;
2110 int insert = 0;
2111
2112 if (GET_CODE (operand1) == CONST_INT)
2113 value = INTVAL (operand1);
2114
2115 if (TARGET_64BIT
2116 && GET_CODE (operand1) == CONST_INT
2117 && HOST_BITS_PER_WIDE_INT > 32
2118 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2119 {
2120 HOST_WIDE_INT nval;
2121
2122 /* Extract the low order 32 bits of the value and sign extend.
2123 If the new value is the same as the original value, we can
2124 use the original value as-is. If the new value is
2125 different, we use it and insert the most significant 32 bits
2126 of the original value into the final result. */
2127 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2128 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
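/* Illustrative example of the xor/subtract sign extension above:
   for VALUE = 0x1ffffffff the masked low part is 0xffffffff; xoring
   with 1<<31 gives 0x7fffffff and subtracting 1<<31 yields NVAL = -1.
   Since VALUE != NVAL, the upper bits INSV = 0x1 are inserted into
   the final result separately below. */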
2129 if (value != nval)
2130 {
2131 #if HOST_BITS_PER_WIDE_INT > 32
2132 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2133 #endif
2134 insert = 1;
2135 value = nval;
2136 operand1 = GEN_INT (nval);
2137 }
2138 }
2139
2140 if (reload_in_progress || reload_completed)
2141 temp = scratch_reg ? scratch_reg : operand0;
2142 else
2143 temp = gen_reg_rtx (mode);
2144
2145 /* We don't directly split DImode constants on 32-bit targets
2146 because PLUS uses an 11-bit immediate and the insn sequence
2147 generated is not as efficient as the one using HIGH/LO_SUM. */
2148 if (GET_CODE (operand1) == CONST_INT
2149 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2150 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2151 && !insert)
2152 {
2153 /* Directly break constant into high and low parts. This
2154 provides better optimization opportunities because various
2155 passes recognize constants split with PLUS but not LO_SUM.
2156 We use a 14-bit signed low part except when the addition
2157 of 0x4000 to the high part might change the sign of the
2158 high part. */
2159 HOST_WIDE_INT low = value & 0x3fff;
2160 HOST_WIDE_INT high = value & ~ 0x3fff;
2161
2162 if (low >= 0x2000)
2163 {
2164 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2165 high += 0x2000;
2166 else
2167 high += 0x4000;
2168 }
2169
2170 low = value - high;
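/* Illustrative example: for VALUE = 0x12345, LOW = 0x2345 and
   HIGH = 0x10000. Since LOW >= 0x2000, HIGH is bumped to 0x14000
   and LOW becomes 0x12345 - 0x14000 = -7355, which still fits in
   14 signed bits. */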
2171
2172 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2173 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2174 }
2175 else
2176 {
2177 emit_insn (gen_rtx_SET (VOIDmode, temp,
2178 gen_rtx_HIGH (mode, operand1)));
2179 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2180 }
2181
2182 insn = emit_move_insn (operands[0], operands[1]);
2183
2184 /* Now insert the most significant 32 bits of the value
2185 into the register. When we don't have a second register
2186 available, it could take up to nine instructions to load
2187 a 64-bit integer constant. Prior to reload, we force
2188 constants that would take more than three instructions
2189 to load to the constant pool. During and after reload,
2190 we have to handle all possible values. */
2191 if (insert)
2192 {
2193 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2194 register and the value to be inserted is outside the
2195 range that can be loaded with three depdi instructions. */
2196 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2197 {
2198 operand1 = GEN_INT (insv);
2199
2200 emit_insn (gen_rtx_SET (VOIDmode, temp,
2201 gen_rtx_HIGH (mode, operand1)));
2202 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2203 if (mode == DImode)
2204 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2205 const0_rtx, temp));
2206 else
2207 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2208 const0_rtx, temp));
2209 }
2210 else
2211 {
2212 int len = 5, pos = 27;
2213
2214 /* Insert the bits using the depdi instruction. */
2215 while (pos >= 0)
2216 {
2217 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2218 HOST_WIDE_INT sign = v5 < 0;
2219
2220 /* Left extend the insertion. */
2221 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2222 while (pos > 0 && (insv & 1) == sign)
2223 {
2224 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2225 len += 1;
2226 pos -= 1;
2227 }
2228
2229 if (mode == DImode)
2230 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2231 GEN_INT (pos), GEN_INT (v5)));
2232 else
2233 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2234 GEN_INT (pos), GEN_INT (v5)));
2235
2236 len = pos > 0 && pos < 5 ? pos : 5;
2237 pos -= len;
2238 }
2239 }
2240 }
2241
2242 set_unique_reg_note (insn, REG_EQUAL, op1);
2243
2244 return 1;
2245 }
2246 }
2247 /* Now have insn-emit do whatever it normally does. */
2248 return 0;
2249 }
2250
2251 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2252 it will need a link/runtime reloc). */
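/* For example, a static initializer such as &foo + 4 is represented
   as a POINTER_PLUS_EXPR whose first operand is an ADDR_EXPR, so this
   function returns 1 for it. (foo is just an illustrative name.) */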
2253
2254 int
2255 pa_reloc_needed (tree exp)
2256 {
2257 int reloc = 0;
2258
2259 switch (TREE_CODE (exp))
2260 {
2261 case ADDR_EXPR:
2262 return 1;
2263
2264 case POINTER_PLUS_EXPR:
2265 case PLUS_EXPR:
2266 case MINUS_EXPR:
2267 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2268 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2269 break;
2270
2271 CASE_CONVERT:
2272 case NON_LVALUE_EXPR:
2273 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2274 break;
2275
2276 case CONSTRUCTOR:
2277 {
2278 tree value;
2279 unsigned HOST_WIDE_INT ix;
2280
2281 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2282 if (value)
2283 reloc |= pa_reloc_needed (value);
2284 }
2285 break;
2286
2287 case ERROR_MARK:
2288 break;
2289
2290 default:
2291 break;
2292 }
2293 return reloc;
2294 }
2295
2296 \f
2297 /* Return the best assembler insn template
2298 for moving operands[1] into operands[0] as a fullword. */
2299 const char *
2300 pa_singlemove_string (rtx *operands)
2301 {
2302 HOST_WIDE_INT intval;
2303
2304 if (GET_CODE (operands[0]) == MEM)
2305 return "stw %r1,%0";
2306 if (GET_CODE (operands[1]) == MEM)
2307 return "ldw %1,%0";
2308 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2309 {
2310 long i;
2311 REAL_VALUE_TYPE d;
2312
2313 gcc_assert (GET_MODE (operands[1]) == SFmode);
2314
2315 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2316 bit pattern. */
2317 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2318 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2319
2320 operands[1] = GEN_INT (i);
2321 /* Fall through to CONST_INT case. */
2322 }
2323 if (GET_CODE (operands[1]) == CONST_INT)
2324 {
2325 intval = INTVAL (operands[1]);
2326
2327 if (VAL_14_BITS_P (intval))
2328 return "ldi %1,%0";
2329 else if ((intval & 0x7ff) == 0)
2330 return "ldil L'%1,%0";
2331 else if (pa_zdepi_cint_p (intval))
2332 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2333 else
2334 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2335 }
2336 return "copy %1,%0";
2337 }
2338 \f
2339
2340 /* Compute position (in OP[1]) and width (in OP[2])
2341 useful for copying IMM to a register using the zdepi
2342 instructions. Store the immediate value to insert in OP[0]. */
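/* Illustrative example: for IMM = 0x38000 (bits 15-17 set), the scan
   below finds LSB = 15 and a remaining field of 7, whose bit 4 is
   clear, so OP[0] = 7, OP[1] = 31 - 15 = 16 and OP[2] = 4; depositing
   the 4-bit value 7 at that position recreates 0x38000. */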
2343 static void
2344 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2345 {
2346 int lsb, len;
2347
2348 /* Find the least significant set bit in IMM. */
2349 for (lsb = 0; lsb < 32; lsb++)
2350 {
2351 if ((imm & 1) != 0)
2352 break;
2353 imm >>= 1;
2354 }
2355
2356 /* Choose variants based on *sign* of the 5-bit field. */
2357 if ((imm & 0x10) == 0)
2358 len = (lsb <= 28) ? 4 : 32 - lsb;
2359 else
2360 {
2361 /* Find the width of the bitstring in IMM. */
2362 for (len = 5; len < 32 - lsb; len++)
2363 {
2364 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2365 break;
2366 }
2367
2368 /* Sign extend IMM as a 5-bit value. */
2369 imm = (imm & 0xf) - 0x10;
2370 }
2371
2372 op[0] = imm;
2373 op[1] = 31 - lsb;
2374 op[2] = len;
2375 }
2376
2377 /* Compute position (in OP[1]) and width (in OP[2])
2378 useful for copying IMM to a register using the depdi,z
2379 instructions. Store the immediate value to insert in OP[0]. */
2380
2381 static void
2382 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2383 {
2384 int lsb, len, maxlen;
2385
2386 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2387
2388 /* Find the least significant set bit in IMM. */
2389 for (lsb = 0; lsb < maxlen; lsb++)
2390 {
2391 if ((imm & 1) != 0)
2392 break;
2393 imm >>= 1;
2394 }
2395
2396 /* Choose variants based on *sign* of the 5-bit field. */
2397 if ((imm & 0x10) == 0)
2398 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2399 else
2400 {
2401 /* Find the width of the bitstring in IMM. */
2402 for (len = 5; len < maxlen - lsb; len++)
2403 {
2404 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2405 break;
2406 }
2407
2408 /* Extend length if host is narrow and IMM is negative. */
2409 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2410 len += 32;
2411
2412 /* Sign extend IMM as a 5-bit value. */
2413 imm = (imm & 0xf) - 0x10;
2414 }
2415
2416 op[0] = imm;
2417 op[1] = 63 - lsb;
2418 op[2] = len;
2419 }
2420
2421 /* Output assembler code to perform a doubleword move insn
2422 with operands OPERANDS. */
2423
2424 const char *
2425 pa_output_move_double (rtx *operands)
2426 {
2427 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2428 rtx latehalf[2];
2429 rtx addreg0 = 0, addreg1 = 0;
2430
2431 /* First classify both operands. */
2432
2433 if (REG_P (operands[0]))
2434 optype0 = REGOP;
2435 else if (offsettable_memref_p (operands[0]))
2436 optype0 = OFFSOP;
2437 else if (GET_CODE (operands[0]) == MEM)
2438 optype0 = MEMOP;
2439 else
2440 optype0 = RNDOP;
2441
2442 if (REG_P (operands[1]))
2443 optype1 = REGOP;
2444 else if (CONSTANT_P (operands[1]))
2445 optype1 = CNSTOP;
2446 else if (offsettable_memref_p (operands[1]))
2447 optype1 = OFFSOP;
2448 else if (GET_CODE (operands[1]) == MEM)
2449 optype1 = MEMOP;
2450 else
2451 optype1 = RNDOP;
2452
2453 /* Check for cases that the operand constraints are not
2454 supposed to allow. */
2455 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2456
2457 /* Handle copies between general and floating registers. */
2458
2459 if (optype0 == REGOP && optype1 == REGOP
2460 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2461 {
2462 if (FP_REG_P (operands[0]))
2463 {
2464 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2465 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2466 return "{fldds|fldd} -16(%%sp),%0";
2467 }
2468 else
2469 {
2470 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2471 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2472 return "{ldws|ldw} -12(%%sp),%R0";
2473 }
2474 }
2475
2476 /* Handle auto decrementing and incrementing loads and stores
2477 specifically, since the structure of the function doesn't work
2478 for them without major modification. Do it better when we teach
2479 this port about the general inc/dec addressing of the PA.
2480 (This was written by tege. Chide him if it doesn't work.) */
2481
2482 if (optype0 == MEMOP)
2483 {
2484 /* We have to output the address syntax ourselves, since print_operand
2485 doesn't deal with the addresses we want to use. Fix this later. */
2486
2487 rtx addr = XEXP (operands[0], 0);
2488 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2489 {
2490 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2491
2492 operands[0] = XEXP (addr, 0);
2493 gcc_assert (GET_CODE (operands[1]) == REG
2494 && GET_CODE (operands[0]) == REG);
2495
2496 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2497
2498 /* No overlap between high target register and address
2499 register. (We do this in a non-obvious way to
2500 save a register file writeback) */
2501 if (GET_CODE (addr) == POST_INC)
2502 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2503 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2504 }
2505 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2506 {
2507 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2508
2509 operands[0] = XEXP (addr, 0);
2510 gcc_assert (GET_CODE (operands[1]) == REG
2511 && GET_CODE (operands[0]) == REG);
2512
2513 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2514 /* No overlap between high target register and address
2515 register. (We do this in a non-obvious way to save a
2516 register file writeback) */
2517 if (GET_CODE (addr) == PRE_INC)
2518 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2519 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2520 }
2521 }
2522 if (optype1 == MEMOP)
2523 {
2524 /* We have to output the address syntax ourselves, since print_operand
2525 doesn't deal with the addresses we want to use. Fix this later. */
2526
2527 rtx addr = XEXP (operands[1], 0);
2528 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2529 {
2530 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2531
2532 operands[1] = XEXP (addr, 0);
2533 gcc_assert (GET_CODE (operands[0]) == REG
2534 && GET_CODE (operands[1]) == REG);
2535
2536 if (!reg_overlap_mentioned_p (high_reg, addr))
2537 {
2538 /* No overlap between high target register and address
2539 register. (We do this in a non-obvious way to
2540 save a register file writeback) */
2541 if (GET_CODE (addr) == POST_INC)
2542 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2543 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2544 }
2545 else
2546 {
2547 /* This is an undefined situation. We should load into the
2548 address register *and* update that register. Probably
2549 we don't need to handle this at all. */
2550 if (GET_CODE (addr) == POST_INC)
2551 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2552 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2553 }
2554 }
2555 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2556 {
2557 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2558
2559 operands[1] = XEXP (addr, 0);
2560 gcc_assert (GET_CODE (operands[0]) == REG
2561 && GET_CODE (operands[1]) == REG);
2562
2563 if (!reg_overlap_mentioned_p (high_reg, addr))
2564 {
2565 /* No overlap between high target register and address
2566 register. (We do this in a non-obvious way to
2567 save a register file writeback) */
2568 if (GET_CODE (addr) == PRE_INC)
2569 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2570 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2571 }
2572 else
2573 {
2574 /* This is an undefined situation. We should load into the
2575 address register *and* update that register. Probably
2576 we don't need to handle this at all. */
2577 if (GET_CODE (addr) == PRE_INC)
2578 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2579 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2580 }
2581 }
2582 else if (GET_CODE (addr) == PLUS
2583 && GET_CODE (XEXP (addr, 0)) == MULT)
2584 {
2585 rtx xoperands[4];
2586 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2587
2588 if (!reg_overlap_mentioned_p (high_reg, addr))
2589 {
2590 xoperands[0] = high_reg;
2591 xoperands[1] = XEXP (addr, 1);
2592 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2593 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2594 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2595 xoperands);
2596 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2597 }
2598 else
2599 {
2600 xoperands[0] = high_reg;
2601 xoperands[1] = XEXP (addr, 1);
2602 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2603 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2604 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2605 xoperands);
2606 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2607 }
2608 }
2609 }
2610
2611 /* If an operand is an unoffsettable memory ref, find a register
2612 we can increment temporarily to make it refer to the second word. */
2613
2614 if (optype0 == MEMOP)
2615 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2616
2617 if (optype1 == MEMOP)
2618 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2619
2620 /* Ok, we can do one word at a time.
2621 Normally we do the low-numbered word first.
2622
2623 In either case, set up in LATEHALF the operands to use
2624 for the high-numbered word and in some cases alter the
2625 operands in OPERANDS to be suitable for the low-numbered word. */
2626
2627 if (optype0 == REGOP)
2628 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2629 else if (optype0 == OFFSOP)
2630 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2631 else
2632 latehalf[0] = operands[0];
2633
2634 if (optype1 == REGOP)
2635 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2636 else if (optype1 == OFFSOP)
2637 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2638 else if (optype1 == CNSTOP)
2639 split_double (operands[1], &operands[1], &latehalf[1]);
2640 else
2641 latehalf[1] = operands[1];
2642
2643 /* If the first move would clobber the source of the second one,
2644 do them in the other order.
2645
2646 This can happen in two cases:
2647
2648 mem -> register where the first half of the destination register
2649 is the same register used in the memory's address. Reload
2650 can create such insns.
2651
2652 mem in this case will be either register indirect or register
2653 indirect plus a valid offset.
2654
2655 register -> register move where REGNO(dst) == REGNO(src) + 1;
2656 someone (Tim/Tege?) claimed this can happen for parameter loads.
2657
2658 Handle mem -> register case first. */
2659 if (optype0 == REGOP
2660 && (optype1 == MEMOP || optype1 == OFFSOP)
2661 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2662 operands[1], 0))
2663 {
2664 /* Do the late half first. */
2665 if (addreg1)
2666 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2667 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2668
2669 /* Then clobber. */
2670 if (addreg1)
2671 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2672 return pa_singlemove_string (operands);
2673 }
2674
2675 /* Now handle register -> register case. */
2676 if (optype0 == REGOP && optype1 == REGOP
2677 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2678 {
2679 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2680 return pa_singlemove_string (operands);
2681 }
2682
2683 /* Normal case: do the two words, low-numbered first. */
2684
2685 output_asm_insn (pa_singlemove_string (operands), operands);
2686
2687 /* Make any unoffsettable addresses point at high-numbered word. */
2688 if (addreg0)
2689 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2690 if (addreg1)
2691 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2692
2693 /* Do that word. */
2694 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2695
2696 /* Undo the adds we just did. */
2697 if (addreg0)
2698 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2699 if (addreg1)
2700 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2701
2702 return "";
2703 }
2704 \f
2705 const char *
2706 pa_output_fp_move_double (rtx *operands)
2707 {
2708 if (FP_REG_P (operands[0]))
2709 {
2710 if (FP_REG_P (operands[1])
2711 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2712 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2713 else
2714 output_asm_insn ("fldd%F1 %1,%0", operands);
2715 }
2716 else if (FP_REG_P (operands[1]))
2717 {
2718 output_asm_insn ("fstd%F0 %1,%0", operands);
2719 }
2720 else
2721 {
2722 rtx xoperands[2];
2723
2724 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2725
2726 /* This is a pain. You have to be prepared to deal with an
2727 arbitrary address here including pre/post increment/decrement.
2728
2729 So avoid this in the MD. */
2730 gcc_assert (GET_CODE (operands[0]) == REG);
2731
2732 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2733 xoperands[0] = operands[0];
2734 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2735 }
2736 return "";
2737 }
2738 \f
2739 /* Return a REG that occurs in ADDR with coefficient 1.
2740 ADDR can be effectively incremented by incrementing REG. */
2741
2742 static rtx
2743 find_addr_reg (rtx addr)
2744 {
2745 while (GET_CODE (addr) == PLUS)
2746 {
2747 if (GET_CODE (XEXP (addr, 0)) == REG)
2748 addr = XEXP (addr, 0);
2749 else if (GET_CODE (XEXP (addr, 1)) == REG)
2750 addr = XEXP (addr, 1);
2751 else if (CONSTANT_P (XEXP (addr, 0)))
2752 addr = XEXP (addr, 1);
2753 else if (CONSTANT_P (XEXP (addr, 1)))
2754 addr = XEXP (addr, 0);
2755 else
2756 gcc_unreachable ();
2757 }
2758 gcc_assert (GET_CODE (addr) == REG);
2759 return addr;
2760 }
2761
2762 /* Emit code to perform a block move.
2763
2764 OPERANDS[0] is the destination pointer as a REG, clobbered.
2765 OPERANDS[1] is the source pointer as a REG, clobbered.
2766 OPERANDS[2] is a register for temporary storage.
2767 OPERANDS[3] is a register for temporary storage.
2768 OPERANDS[4] is the size as a CONST_INT.
2769 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2770 OPERANDS[6] is another temporary register. */
2771
2772 const char *
2773 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2774 {
2775 int align = INTVAL (operands[5]);
2776 unsigned long n_bytes = INTVAL (operands[4]);
2777
2778 /* We can't move more than a word at a time because the PA
2779 has no integer move insns longer than a word. (Could use fp mem ops?) */
2780 if (align > (TARGET_64BIT ? 8 : 4))
2781 align = (TARGET_64BIT ? 8 : 4);
2782
2783 /* Note that we know each loop below will execute at least twice
2784 (else we would have open-coded the copy). */
2785 switch (align)
2786 {
2787 case 8:
2788 /* Pre-adjust the loop counter. */
2789 operands[4] = GEN_INT (n_bytes - 16);
2790 output_asm_insn ("ldi %4,%2", operands);
2791
2792 /* Copying loop. */
2793 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2794 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2795 output_asm_insn ("std,ma %3,8(%0)", operands);
2796 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2797 output_asm_insn ("std,ma %6,8(%0)", operands);
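/* Illustrative trace of the loop above: for n_bytes = 48 the counter
   starts at 32; each pass copies 16 bytes and addib,>= branches while
   the decremented counter is still >= 0, so the body runs three times
   and copies all 48 bytes. */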
2798
2799 /* Handle the residual. There could be up to 15 bytes of
2800 residual to copy! */
2801 if (n_bytes % 16 != 0)
2802 {
2803 operands[4] = GEN_INT (n_bytes % 8);
2804 if (n_bytes % 16 >= 8)
2805 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2806 if (n_bytes % 8 != 0)
2807 output_asm_insn ("ldd 0(%1),%6", operands);
2808 if (n_bytes % 16 >= 8)
2809 output_asm_insn ("std,ma %3,8(%0)", operands);
2810 if (n_bytes % 8 != 0)
2811 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2812 }
2813 return "";
2814
2815 case 4:
2816 /* Pre-adjust the loop counter. */
2817 operands[4] = GEN_INT (n_bytes - 8);
2818 output_asm_insn ("ldi %4,%2", operands);
2819
2820 /* Copying loop. */
2821 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2822 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2823 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2824 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2825 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2826
2827 /* Handle the residual. There could be up to 7 bytes of
2828 residual to copy! */
2829 if (n_bytes % 8 != 0)
2830 {
2831 operands[4] = GEN_INT (n_bytes % 4);
2832 if (n_bytes % 8 >= 4)
2833 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2834 if (n_bytes % 4 != 0)
2835 output_asm_insn ("ldw 0(%1),%6", operands);
2836 if (n_bytes % 8 >= 4)
2837 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2838 if (n_bytes % 4 != 0)
2839 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2840 }
2841 return "";
2842
2843 case 2:
2844 /* Pre-adjust the loop counter. */
2845 operands[4] = GEN_INT (n_bytes - 4);
2846 output_asm_insn ("ldi %4,%2", operands);
2847
2848 /* Copying loop. */
2849 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2850 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2851 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2852 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2853 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2854
2855 /* Handle the residual. */
2856 if (n_bytes % 4 != 0)
2857 {
2858 if (n_bytes % 4 >= 2)
2859 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2860 if (n_bytes % 2 != 0)
2861 output_asm_insn ("ldb 0(%1),%6", operands);
2862 if (n_bytes % 4 >= 2)
2863 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2864 if (n_bytes % 2 != 0)
2865 output_asm_insn ("stb %6,0(%0)", operands);
2866 }
2867 return "";
2868
2869 case 1:
2870 /* Pre-adjust the loop counter. */
2871 operands[4] = GEN_INT (n_bytes - 2);
2872 output_asm_insn ("ldi %4,%2", operands);
2873
2874 /* Copying loop. */
2875 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2876 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2877 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2878 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2879 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2880
2881 /* Handle the residual. */
2882 if (n_bytes % 2 != 0)
2883 {
2884 output_asm_insn ("ldb 0(%1),%3", operands);
2885 output_asm_insn ("stb %3,0(%0)", operands);
2886 }
2887 return "";
2888
2889 default:
2890 gcc_unreachable ();
2891 }
2892 }
2893
2894 /* Count the number of insns necessary to handle this block move.
2895
2896 Basic structure is the same as pa_output_block_move, except that we
2897 count insns rather than emit them. */
2898
2899 static int
2900 compute_movmem_length (rtx insn)
2901 {
2902 rtx pat = PATTERN (insn);
2903 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2904 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2905 unsigned int n_insns = 0;
2906
2907 /* We can't move more than a word at a time because the PA
2908 has no integer move insns longer than a word. (Could use fp mem ops?) */
2909 if (align > (TARGET_64BIT ? 8 : 4))
2910 align = (TARGET_64BIT ? 8 : 4);
2911
2912 /* The basic copying loop. */
2913 n_insns = 6;
2914
2915 /* Residuals. */
2916 if (n_bytes % (2 * align) != 0)
2917 {
2918 if ((n_bytes % (2 * align)) >= align)
2919 n_insns += 2;
2920
2921 if ((n_bytes % align) != 0)
2922 n_insns += 2;
2923 }
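/* For example, with align = 4 and n_bytes = 11 the residual is
   11 % 8 = 3: no extra full-word copy (3 < 4), but a partial-word
   copy is needed (11 % 4 != 0), giving 6 + 2 = 8 insns, i.e. a
   length of 32 bytes. */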
2924
2925 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2926 return n_insns * 4;
2927 }
2928
2929 /* Emit code to perform a block clear.
2930
2931 OPERANDS[0] is the destination pointer as a REG, clobbered.
2932 OPERANDS[1] is a register for temporary storage.
2933 OPERANDS[2] is the size as a CONST_INT.
2934 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2935
2936 const char *
2937 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2938 {
2939 int align = INTVAL (operands[3]);
2940 unsigned long n_bytes = INTVAL (operands[2]);
2941
2942 /* We can't clear more than a word at a time because the PA
2943 has no integer move insns longer than a word. */
2944 if (align > (TARGET_64BIT ? 8 : 4))
2945 align = (TARGET_64BIT ? 8 : 4);
2946
2947 /* Note that we know each loop below will execute at least twice
2948 (else we would have open-coded the clear). */
2949 switch (align)
2950 {
2951 case 8:
2952 /* Pre-adjust the loop counter. */
2953 operands[2] = GEN_INT (n_bytes - 16);
2954 output_asm_insn ("ldi %2,%1", operands);
2955
2956 /* Loop. */
2957 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2958 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2959 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2960
2961 /* Handle the residual. There could be up to 15 bytes of
2962 residual to clear! */
2963 if (n_bytes % 16 != 0)
2964 {
2965 operands[2] = GEN_INT (n_bytes % 8);
2966 if (n_bytes % 16 >= 8)
2967 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2968 if (n_bytes % 8 != 0)
2969 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2970 }
2971 return "";
2972
2973 case 4:
2974 /* Pre-adjust the loop counter. */
2975 operands[2] = GEN_INT (n_bytes - 8);
2976 output_asm_insn ("ldi %2,%1", operands);
2977
2978 /* Loop. */
2979 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2980 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2981 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2982
2983 /* Handle the residual. There could be up to 7 bytes of
2984 residual to clear! */
2985 if (n_bytes % 8 != 0)
2986 {
2987 operands[2] = GEN_INT (n_bytes % 4);
2988 if (n_bytes % 8 >= 4)
2989 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2990 if (n_bytes % 4 != 0)
2991 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2992 }
2993 return "";
2994
2995 case 2:
2996 /* Pre-adjust the loop counter. */
2997 operands[2] = GEN_INT (n_bytes - 4);
2998 output_asm_insn ("ldi %2,%1", operands);
2999
3000 /* Loop. */
3001 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3002 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3003 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3004
3005 /* Handle the residual. */
3006 if (n_bytes % 4 != 0)
3007 {
3008 if (n_bytes % 4 >= 2)
3009 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3010 if (n_bytes % 2 != 0)
3011 output_asm_insn ("stb %%r0,0(%0)", operands);
3012 }
3013 return "";
3014
3015 case 1:
3016 /* Pre-adjust the loop counter. */
3017 operands[2] = GEN_INT (n_bytes - 2);
3018 output_asm_insn ("ldi %2,%1", operands);
3019
3020 /* Loop. */
3021 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3022 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3023 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3024
3025 /* Handle the residual. */
3026 if (n_bytes % 2 != 0)
3027 output_asm_insn ("stb %%r0,0(%0)", operands);
3028
3029 return "";
3030
3031 default:
3032 gcc_unreachable ();
3033 }
3034 }
3035
3036 /* Count the number of insns necessary to handle this block clear.
3037
3038 Basic structure is the same as pa_output_block_clear, except that we
3039 count insns rather than emit them. */
3040
3041 static int
3042 compute_clrmem_length (rtx insn)
3043 {
3044 rtx pat = PATTERN (insn);
3045 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3046 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3047 unsigned int n_insns = 0;
3048
3049 /* We can't clear more than a word at a time because the PA
3050 has no longer integer move insns. */
3051 if (align > (TARGET_64BIT ? 8 : 4))
3052 align = (TARGET_64BIT ? 8 : 4);
3053
3054 /* The basic loop. */
3055 n_insns = 4;
3056
3057 /* Residuals. */
3058 if (n_bytes % (2 * align) != 0)
3059 {
3060 if ((n_bytes % (2 * align)) >= align)
3061 n_insns++;
3062
3063 if ((n_bytes % align) != 0)
3064 n_insns++;
3065 }
3066
3067 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3068 return n_insns * 4;
3069 }
3070 \f
3071
3072 const char *
3073 pa_output_and (rtx *operands)
3074 {
3075 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3076 {
3077 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3078 int ls0, ls1, ms0, p, len;
3079
3080 for (ls0 = 0; ls0 < 32; ls0++)
3081 if ((mask & (1 << ls0)) == 0)
3082 break;
3083
3084 for (ls1 = ls0; ls1 < 32; ls1++)
3085 if ((mask & (1 << ls1)) != 0)
3086 break;
3087
3088 for (ms0 = ls1; ms0 < 32; ms0++)
3089 if ((mask & (1 << ms0)) == 0)
3090 break;
3091
3092 gcc_assert (ms0 == 32);
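/* At this point the mask is known to be a block of low-order ones,
   optionally followed by one field of zeros and then ones up to bit 31.
   E.g. MASK = 0xffff00ff yields ls0 = 8 and ls1 = 16, so the zeros at
   bits 8-15 are cleared with the depwi case below. */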
3093
3094 if (ls1 == 32)
3095 {
3096 len = ls0;
3097
3098 gcc_assert (len);
3099
3100 operands[2] = GEN_INT (len);
3101 return "{extru|extrw,u} %1,31,%2,%0";
3102 }
3103 else
3104 {
3105 /* We could use this `depi' for the case above as well, but `depi'
3106 requires one more register file access than an `extru'. */
3107
3108 p = 31 - ls0;
3109 len = ls1 - ls0;
3110
3111 operands[2] = GEN_INT (p);
3112 operands[3] = GEN_INT (len);
3113 return "{depi|depwi} 0,%2,%3,%0";
3114 }
3115 }
3116 else
3117 return "and %1,%2,%0";
3118 }
3119
3120 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3121 storing the result in operands[0]. */
3122 const char *
3123 pa_output_64bit_and (rtx *operands)
3124 {
3125 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3126 {
3127 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3128 int ls0, ls1, ms0, p, len;
3129
3130 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3131 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3132 break;
3133
3134 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3135 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3136 break;
3137
3138 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3139 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3140 break;
3141
3142 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3143
3144 if (ls1 == HOST_BITS_PER_WIDE_INT)
3145 {
3146 len = ls0;
3147
3148 gcc_assert (len);
3149
3150 operands[2] = GEN_INT (len);
3151 return "extrd,u %1,63,%2,%0";
3152 }
3153 else
3154 {
3155 /* We could use this `depdi' for the case above as well, but `depdi'
3156 requires one more register file access than an `extrd,u'. */
3157
3158 p = 63 - ls0;
3159 len = ls1 - ls0;
3160
3161 operands[2] = GEN_INT (p);
3162 operands[3] = GEN_INT (len);
3163 return "depdi 0,%2,%3,%0";
3164 }
3165 }
3166 else
3167 return "and %1,%2,%0";
3168 }
3169
3170 const char *
3171 pa_output_ior (rtx *operands)
3172 {
3173 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3174 int bs0, bs1, p, len;
3175
3176 if (INTVAL (operands[2]) == 0)
3177 return "copy %1,%0";
3178
3179 for (bs0 = 0; bs0 < 32; bs0++)
3180 if ((mask & (1 << bs0)) != 0)
3181 break;
3182
3183 for (bs1 = bs0; bs1 < 32; bs1++)
3184 if ((mask & (1 << bs1)) == 0)
3185 break;
3186
3187 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
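/* The assertion checks that the mask is a single contiguous run of
   ones. E.g. MASK = 0x00ff0000 gives bs0 = 16 and bs1 = 24, so the
   run is set with one depwi -1 of length 8 at position 31 - 16 = 15. */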
3188
3189 p = 31 - bs0;
3190 len = bs1 - bs0;
3191
3192 operands[2] = GEN_INT (p);
3193 operands[3] = GEN_INT (len);
3194 return "{depi|depwi} -1,%2,%3,%0";
3195 }
3196
3197 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3198 storing the result in operands[0]. */
3199 const char *
3200 pa_output_64bit_ior (rtx *operands)
3201 {
3202 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3203 int bs0, bs1, p, len;
3204
3205 if (INTVAL (operands[2]) == 0)
3206 return "copy %1,%0";
3207
3208 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3209 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3210 break;
3211
3212 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3213 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3214 break;
3215
3216 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3217 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3218
3219 p = 63 - bs0;
3220 len = bs1 - bs0;
3221
3222 operands[2] = GEN_INT (p);
3223 operands[3] = GEN_INT (len);
3224 return "depdi -1,%2,%3,%0";
3225 }
3226 \f
3227 /* Target hook for assembling integer objects. This code handles
3228 aligned SI and DI integers specially since function references
3229 must be preceded by P%. */
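/* E.g. an aligned word-sized reference to a function foo is emitted
   as "\t.word\tP%foo" (".dword" when the word size is 8 bytes); foo
   is just an illustrative name. */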
3230
3231 static bool
3232 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3233 {
3234 if (size == UNITS_PER_WORD
3235 && aligned_p
3236 && function_label_operand (x, VOIDmode))
3237 {
3238 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3239 output_addr_const (asm_out_file, x);
3240 fputc ('\n', asm_out_file);
3241 return true;
3242 }
3243 return default_assemble_integer (x, size, aligned_p);
3244 }
3245 \f
3246 /* Output an ascii string. */
3247 void
3248 pa_output_ascii (FILE *file, const char *p, int size)
3249 {
3250 int i;
3251 int chars_output;
3252 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3253
3254 /* The HP assembler can only take strings of 256 characters at one
3255 time. This is a limitation on input line length, *not* the
3256 length of the string. Sigh. Even worse, it seems that the
3257 restriction is in number of input characters (see \xnn &
3258 \whatever). So we have to do this very carefully. */
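/* For example, the 4-byte input consisting of the letters a and b,
   a double quote and a newline is emitted inside the directive as
   ab\"\x0a: the quote is backslash-escaped and the unprintable
   newline becomes a two-digit hex escape. */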
3259
3260 fputs ("\t.STRING \"", file);
3261
3262 chars_output = 0;
3263 for (i = 0; i < size; i += 4)
3264 {
3265 int co = 0;
3266 int io = 0;
3267 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3268 {
3269 register unsigned int c = (unsigned char) p[i + io];
3270
3271 if (c == '\"' || c == '\\')
3272 partial_output[co++] = '\\';
3273 if (c >= ' ' && c < 0177)
3274 partial_output[co++] = c;
3275 else
3276 {
3277 unsigned int hexd;
3278 partial_output[co++] = '\\';
3279 partial_output[co++] = 'x';
3280 hexd = c / 16 - 0 + '0';
3281 if (hexd > '9')
3282 hexd -= '9' - 'a' + 1;
3283 partial_output[co++] = hexd;
3284 hexd = c % 16 - 0 + '0';
3285 if (hexd > '9')
3286 hexd -= '9' - 'a' + 1;
3287 partial_output[co++] = hexd;
3288 }
3289 }
3290 if (chars_output + co > 243)
3291 {
3292 fputs ("\"\n\t.STRING \"", file);
3293 chars_output = 0;
3294 }
3295 fwrite (partial_output, 1, (size_t) co, file);
3296 chars_output += co;
3297 co = 0;
3298 }
3299 fputs ("\"\n", file);
3300 }
3301
3302 /* Try to rewrite floating point comparisons & branches to avoid
3303 useless add,tr insns.
3304
3305 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3306 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3307 first attempt to remove useless add,tr insns. It is zero
3308 for the second pass as reorg sometimes leaves bogus REG_DEAD
3309 notes lying around.
3310
3311 When CHECK_NOTES is zero we can only eliminate add,tr insns
3312 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3313 instructions. */
3314 static void
3315 remove_useless_addtr_insns (int check_notes)
3316 {
3317 rtx insn;
3318 static int pass = 0;
3319
3320 /* This is fairly cheap, so always run it when optimizing. */
3321 if (optimize > 0)
3322 {
3323 int fcmp_count = 0;
3324 int fbranch_count = 0;
3325
3326 /* Walk all the insns in this function looking for fcmp & fbranch
3327 instructions. Keep track of how many of each we find. */
3328 for (insn = get_insns (); insn; insn = next_insn (insn))
3329 {
3330 rtx tmp;
3331
3332 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3333 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3334 continue;
3335
3336 tmp = PATTERN (insn);
3337
3338 /* It must be a set. */
3339 if (GET_CODE (tmp) != SET)
3340 continue;
3341
3342 /* If the destination is CCFP, then we've found an fcmp insn. */
3343 tmp = SET_DEST (tmp);
3344 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3345 {
3346 fcmp_count++;
3347 continue;
3348 }
3349
3350 tmp = PATTERN (insn);
3351 /* If this is an fbranch instruction, bump the fbranch counter. */
3352 if (GET_CODE (tmp) == SET
3353 && SET_DEST (tmp) == pc_rtx
3354 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3355 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3356 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3357 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3358 {
3359 fbranch_count++;
3360 continue;
3361 }
3362 }
3363
3364
3365 /* Find all floating point compare + branch insns. If possible,
3366 reverse the comparison & the branch to avoid add,tr insns. */
3367 for (insn = get_insns (); insn; insn = next_insn (insn))
3368 {
3369 rtx tmp, next;
3370
3371 /* Ignore anything that isn't an INSN. */
3372 if (! NONJUMP_INSN_P (insn))
3373 continue;
3374
3375 tmp = PATTERN (insn);
3376
3377 /* It must be a set. */
3378 if (GET_CODE (tmp) != SET)
3379 continue;
3380
3381 /* The destination must be CCFP, which is register zero. */
3382 tmp = SET_DEST (tmp);
3383 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3384 continue;
3385
3386 /* INSN should be a set of CCFP.
3387
3388 See if the result of this insn is used in a reversed FP
3389 conditional branch. If so, reverse our condition and
3390 the branch. Doing so avoids useless add,tr insns. */
3391 next = next_insn (insn);
3392 while (next)
3393 {
3394 /* Jumps, calls and labels stop our search. */
3395 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3396 break;
3397
3398 /* As does another fcmp insn. */
3399 if (NONJUMP_INSN_P (next)
3400 && GET_CODE (PATTERN (next)) == SET
3401 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3402 && REGNO (SET_DEST (PATTERN (next))) == 0)
3403 break;
3404
3405 next = next_insn (next);
3406 }
3407
3408 /* Is NEXT a branch insn? */
3409 if (next && JUMP_P (next))
3410 {
3411 rtx pattern = PATTERN (next);
3412
3413 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3414 and CCFP dies, then reverse our conditional and the branch
3415 to avoid the add,tr. */
3416 if (GET_CODE (pattern) == SET
3417 && SET_DEST (pattern) == pc_rtx
3418 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3419 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3420 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3421 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3422 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3423 && (fcmp_count == fbranch_count
3424 || (check_notes
3425 && find_regno_note (next, REG_DEAD, 0))))
3426 {
3427 /* Reverse the branch. */
3428 tmp = XEXP (SET_SRC (pattern), 1);
3429 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3430 XEXP (SET_SRC (pattern), 2) = tmp;
3431 INSN_CODE (next) = -1;
3432
3433 /* Reverse our condition. */
3434 tmp = PATTERN (insn);
3435 PUT_CODE (XEXP (tmp, 1),
3436 (reverse_condition_maybe_unordered
3437 (GET_CODE (XEXP (tmp, 1)))));
3438 }
3439 }
3440 }
3441 }
3442
3443 pass = !pass;
3444
3445 }
3446 \f
3447 /* You may have trouble believing this, but this is the 32 bit HP-PA
3448 stack layout. Wow.
3449
3450 Offset Contents
3451
3452 Variable arguments (optional; any number may be allocated)
3453
3454 SP-(4*(N+9)) arg word N
3455 : :
3456 SP-56 arg word 5
3457 SP-52 arg word 4
3458
3459 Fixed arguments (must be allocated; may remain unused)
3460
3461 SP-48 arg word 3
3462 SP-44 arg word 2
3463 SP-40 arg word 1
3464 SP-36 arg word 0
3465
3466 Frame Marker
3467
3468 SP-32 External Data Pointer (DP)
3469 SP-28 External sr4
3470 SP-24 External/stub RP (RP')
3471 SP-20 Current RP
3472 SP-16 Static Link
3473 SP-12 Clean up
3474 SP-8 Calling Stub RP (RP'')
3475 SP-4 Previous SP
3476
3477 Top of Frame
3478
3479 SP-0 Stack Pointer (points to next available address)
3480
3481 */
3482
3483 /* This function saves registers as follows. Registers marked with ' are
3484 this function's registers (as opposed to the previous function's).
3485 If a frame_pointer isn't needed, r4 is saved as a general register;
3486 the space for the frame pointer is still allocated, though, to keep
3487 things simple.
3488
3489
3490 Top of Frame
3491
3492 SP (FP') Previous FP
3493 SP + 4 Alignment filler (sigh)
3494 SP + 8 Space for locals reserved here.
3495 .
3496 .
3497 .
3498 SP + n All call saved registers used.
3499 .
3500 .
3501 .
3502 SP + o All call saved fp registers used.
3503 .
3504 .
3505 .
3506 SP + p (SP') points to next available address.
3507
3508 */
3509
3510 /* Global variables set by output_function_prologue(). */
3511 /* Size of frame. Need to know this to emit return insns from
3512 leaf procedures. */
3513 static HOST_WIDE_INT actual_fsize, local_fsize;
3514 static int save_fregs;
3515
3516 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3517 Handle case where DISP > 8k by using the add_high_const patterns.
3518
3519 Note that in the DISP > 8k case, we will leave the high part of the address
3520 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
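/* A hedged sketch of the DISP > 8k case on 32-bit targets (register
   numbers illustrative): storing %r3 at 20000(%r30) comes out roughly as

	addil L'20000,%r30	; %r1 = %r30 + left portion of 20000
	stw %r3,R'20000(%r1)	; store using the right portion

   which is why the high part of the address is left in %r1. */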
3521
3522 static void
3523 store_reg (int reg, HOST_WIDE_INT disp, int base)
3524 {
3525 rtx insn, dest, src, basereg;
3526
3527 src = gen_rtx_REG (word_mode, reg);
3528 basereg = gen_rtx_REG (Pmode, base);
3529 if (VAL_14_BITS_P (disp))
3530 {
3531 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3532 insn = emit_move_insn (dest, src);
3533 }
3534 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3535 {
3536 rtx delta = GEN_INT (disp);
3537 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3538
3539 emit_move_insn (tmpreg, delta);
3540 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3541 if (DO_FRAME_NOTES)
3542 {
3543 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3544 gen_rtx_SET (VOIDmode, tmpreg,
3545 gen_rtx_PLUS (Pmode, basereg, delta)));
3546 RTX_FRAME_RELATED_P (insn) = 1;
3547 }
3548 dest = gen_rtx_MEM (word_mode, tmpreg);
3549 insn = emit_move_insn (dest, src);
3550 }
3551 else
3552 {
3553 rtx delta = GEN_INT (disp);
3554 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3555 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3556
3557 emit_move_insn (tmpreg, high);
3558 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3559 insn = emit_move_insn (dest, src);
3560 if (DO_FRAME_NOTES)
3561 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3562 gen_rtx_SET (VOIDmode,
3563 gen_rtx_MEM (word_mode,
3564 gen_rtx_PLUS (word_mode,
3565 basereg,
3566 delta)),
3567 src));
3568 }
3569
3570 if (DO_FRAME_NOTES)
3571 RTX_FRAME_RELATED_P (insn) = 1;
3572 }
3573
3574 /* Emit RTL to store REG at the memory location specified by BASE and then
3575 add MOD to BASE. MOD must be <= 8k. */
3576
3577 static void
3578 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3579 {
3580 rtx insn, basereg, srcreg, delta;
3581
3582 gcc_assert (VAL_14_BITS_P (mod));
3583
3584 basereg = gen_rtx_REG (Pmode, base);
3585 srcreg = gen_rtx_REG (word_mode, reg);
3586 delta = GEN_INT (mod);
3587
3588 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3589 if (DO_FRAME_NOTES)
3590 {
3591 RTX_FRAME_RELATED_P (insn) = 1;
3592
3593 /* RTX_FRAME_RELATED_P must be set on each frame related set
3594 in a parallel with more than one element. */
3595 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3596 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3597 }
3598 }
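/* Worked example (illustrative): store_reg_modify (STACK_POINTER_REGNUM,
   1, 64) emits one stwm-style post-modify store that writes %r1 to *sp
   and then advances sp by 64, allocating frame space and saving a
   register in a single instruction.  */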
3599
3600 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3601 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3602 whether to add a frame note or not.
3603
3604 In the DISP > 8k case, we leave the high part of the address in %r1.
3605 There is code in pa_expand_{prologue,epilogue} that knows about this. */
3606
3607 static void
3608 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3609 {
3610 rtx insn;
3611
3612 if (VAL_14_BITS_P (disp))
3613 {
3614 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3615 plus_constant (Pmode,
3616 gen_rtx_REG (Pmode, base), disp));
3617 }
3618 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3619 {
3620 rtx basereg = gen_rtx_REG (Pmode, base);
3621 rtx delta = GEN_INT (disp);
3622 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3623
3624 emit_move_insn (tmpreg, delta);
3625 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3626 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3627 if (DO_FRAME_NOTES)
3628 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3629 gen_rtx_SET (VOIDmode, tmpreg,
3630 gen_rtx_PLUS (Pmode, basereg, delta)));
3631 }
3632 else
3633 {
3634 rtx basereg = gen_rtx_REG (Pmode, base);
3635 rtx delta = GEN_INT (disp);
3636 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3637
3638 emit_move_insn (tmpreg,
3639 gen_rtx_PLUS (Pmode, basereg,
3640 gen_rtx_HIGH (Pmode, delta)));
3641 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3642 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3643 }
3644
3645 if (DO_FRAME_NOTES && note)
3646 RTX_FRAME_RELATED_P (insn) = 1;
3647 }
3648
3649 HOST_WIDE_INT
3650 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3651 {
3652 int freg_saved = 0;
3653 int i, j;
3654
3655 /* The code in pa_expand_prologue and pa_expand_epilogue must
3656 be consistent with the rounding and size calculation done here.
3657 Change them at the same time. */
3658
3659 /* We do our own stack alignment. First, round the size of the
3660 stack locals up to a word boundary. */
3661 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
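/* For example, with UNITS_PER_WORD == 8 a 13-byte locals area
   rounds to (13 + 7) & ~7 == 16.  */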
3662
3663 /* Space for previous frame pointer + filler. If any frame is
3664 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3665 waste some space here for the sake of HP compatibility. The
3666 first slot is only used when the frame pointer is needed. */
3667 if (size || frame_pointer_needed)
3668 size += STARTING_FRAME_OFFSET;
3669
3670 /* If the current function calls __builtin_eh_return, then we need
3671 to allocate stack space for registers that will hold data for
3672 the exception handler. */
3673 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3674 {
3675 unsigned int i;
3676
3677 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3678 continue;
3679 size += i * UNITS_PER_WORD;
3680 }
3681
3682 /* Account for space used by the callee general register saves. */
3683 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3684 if (df_regs_ever_live_p (i))
3685 size += UNITS_PER_WORD;
3686
3687 /* Account for space used by the callee floating point register saves. */
3688 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3689 if (df_regs_ever_live_p (i)
3690 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3691 {
3692 freg_saved = 1;
3693
3694 /* We always save both halves of the FP register, so always
3695 increment the frame size by 8 bytes. */
3696 size += 8;
3697 }
3698
3699 /* If any of the floating registers are saved, account for the
3700 alignment needed for the floating point register save block. */
3701 if (freg_saved)
3702 {
3703 size = (size + 7) & ~7;
3704 if (fregs_live)
3705 *fregs_live = 1;
3706 }
3707
3708 /* The various ABIs include space for the outgoing parameters in the
3709 size of the current function's stack frame. We don't need to align
3710 for the outgoing arguments as their alignment is set by the final
3711 rounding for the frame as a whole. */
3712 size += crtl->outgoing_args_size;
3713
3714 /* Allocate space for the fixed frame marker. This space must be
3715 allocated for any function that makes calls or allocates
3716 stack space. */
3717 if (!crtl->is_leaf || size)
3718 size += TARGET_64BIT ? 48 : 32;
3719
3720 /* Finally, round to the preferred stack boundary. */
3721 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3722 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3723 }
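/* Illustrative walk-through of the computation above, under assumed
   32-bit parameters (UNITS_PER_WORD == 4, STARTING_FRAME_OFFSET == 8,
   a 32-byte frame marker and a 64-byte PREFERRED_STACK_BOUNDARY):
   20 bytes of locals round to 20; the frame pointer slot and filler
   add 8 (total 28); two live callee GRs add 8 (36); there are no FP
   saves; 16 bytes of outgoing arguments give 52; the frame marker
   gives 84; and the final rounding yields a 128-byte frame.  */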
3724
3725 /* Generate the assembly code for function entry. FILE is a stdio
3726 stream to output the code to. SIZE is an int: how many units of
3727 temporary storage to allocate.
3728
3729 Refer to the array `regs_ever_live' to determine which registers to
3730 save; `regs_ever_live[I]' is nonzero if register number I is ever
3731 used in the function. This function is responsible for knowing
3732 which registers should not be saved even if used. */
3733
3734 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3735 of memory. If any fpu reg is used in the function, we allocate
3736 such a block here, at the bottom of the frame, just in case it's needed.
3737
3738 If this function is a leaf procedure, then we may choose not
3739 to do a "save" insn. The decision about whether or not
3740 to do this is made in regclass.c. */
3741
3742 static void
3743 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3744 {
3745 /* The function's label and associated .PROC must never be
3746 separated and must be output *after* any profiling declarations
3747 to avoid changing spaces/subspaces within a procedure. */
3748 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3749 fputs ("\t.PROC\n", file);
3750
3751 /* pa_expand_prologue does the dirty work now. We just need
3752 to output the assembler directives which denote the start
3753 of a function. */
3754 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3755 if (crtl->is_leaf)
3756 fputs (",NO_CALLS", file);
3757 else
3758 fputs (",CALLS", file);
3759 if (rp_saved)
3760 fputs (",SAVE_RP", file);
3761
3762 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3763 at the beginning of the frame and that it is used as the frame
3764 pointer for the frame. We do this because our current frame
3765 layout doesn't conform to that specified in the HP runtime
3766 documentation and we need a way to indicate to programs such as
3767 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3768 isn't used by HP compilers but is supported by the assembler.
3769 However, SAVE_SP is supposed to indicate that the previous stack
3770 pointer has been saved in the frame marker. */
3771 if (frame_pointer_needed)
3772 fputs (",SAVE_SP", file);
3773
3774 /* Pass on information about the number of callee register saves
3775 performed in the prologue.
3776
3777 The compiler is supposed to pass the highest register number
3778 saved, the assembler then has to adjust that number before
3779 entering it into the unwind descriptor (to account for any
3780 caller saved registers with lower register numbers than the
3781 first callee saved register). */
3782 if (gr_saved)
3783 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3784
3785 if (fr_saved)
3786 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3787
3788 fputs ("\n\t.ENTRY\n", file);
3789
3790 remove_useless_addtr_insns (0);
3791 }
3792
3793 void
3794 pa_expand_prologue (void)
3795 {
3796 int merge_sp_adjust_with_store = 0;
3797 HOST_WIDE_INT size = get_frame_size ();
3798 HOST_WIDE_INT offset;
3799 int i;
3800 rtx insn, tmpreg;
3801
3802 gr_saved = 0;
3803 fr_saved = 0;
3804 save_fregs = 0;
3805
3806 /* Compute total size for frame pointer, filler, locals and rounding to
3807 the next word boundary. Similar code appears in pa_compute_frame_size
3808 and must be changed in tandem with this code. */
3809 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3810 if (local_fsize || frame_pointer_needed)
3811 local_fsize += STARTING_FRAME_OFFSET;
3812
3813 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3814 if (flag_stack_usage_info)
3815 current_function_static_stack_size = actual_fsize;
3816
3817 /* Compute a few things we will use often. */
3818 tmpreg = gen_rtx_REG (word_mode, 1);
3819
3820 /* Save RP first. The calling conventions manual states RP will
3821 always be stored into the caller's frame at sp - 20 or sp - 16
3822 depending on which ABI is in use. */
3823 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3824 {
3825 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3826 rp_saved = true;
3827 }
3828 else
3829 rp_saved = false;
3830
3831 /* Allocate the local frame and set up the frame pointer if needed. */
3832 if (actual_fsize != 0)
3833 {
3834 if (frame_pointer_needed)
3835 {
3836 /* Copy the old frame pointer temporarily into %r1. Set up the
3837 new stack pointer, then store away the saved old frame pointer
3838 into the stack at sp and at the same time update the stack
3839 pointer by actual_fsize bytes. Two versions: the first
3840 handles small (<8k) frames, the second large (>=8k)
3841 frames. */
3842 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3843 if (DO_FRAME_NOTES)
3844 RTX_FRAME_RELATED_P (insn) = 1;
3845
3846 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3847 if (DO_FRAME_NOTES)
3848 RTX_FRAME_RELATED_P (insn) = 1;
3849
3850 if (VAL_14_BITS_P (actual_fsize))
3851 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3852 else
3853 {
3854 /* It is incorrect to store the saved frame pointer at *sp,
3855 then increment sp (writes beyond the current stack boundary).
3856
3857 So instead use stwm to store at *sp and post-increment the
3858 stack pointer as an atomic operation. Then increment sp to
3859 finish allocating the new frame. */
3860 HOST_WIDE_INT adjust1 = 8192 - 64;
3861 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3862
3863 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3864 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3865 adjust2, 1);
3866 }
3867
3868 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3869 we need to store the previous stack pointer (frame pointer)
3870 into the frame marker on targets that use the HP unwind
3871 library. This allows the HP unwind library to be used to
3872 unwind GCC frames. However, we are not fully compatible
3873 with the HP library because our frame layout differs from
3874 that specified in the HP runtime specification.
3875
3876 We don't want a frame note on this instruction as the frame
3877 marker moves during dynamic stack allocation.
3878
3879 This instruction also serves as a blockage to prevent
3880 register spills from being scheduled before the stack
3881 pointer is raised. This is necessary as we store
3882 registers using the frame pointer as a base register,
3883 and the frame pointer is set before sp is raised. */
3884 if (TARGET_HPUX_UNWIND_LIBRARY)
3885 {
3886 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3887 GEN_INT (TARGET_64BIT ? -8 : -4));
3888
3889 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3890 hard_frame_pointer_rtx);
3891 }
3892 else
3893 emit_insn (gen_blockage ());
3894 }
3895 /* No frame pointer needed. */
3896 else
3897 {
3898 /* In some cases we can perform the first callee register save
3899 and allocate the stack frame at the same time. If so, just
3900 make a note of it and defer allocating the frame until saving
3901 the callee registers. */
3902 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3903 merge_sp_adjust_with_store = 1;
3904 /* Cannot optimize. Adjust the stack frame by actual_fsize
3905 bytes. */
3906 else
3907 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3908 actual_fsize, 1);
3909 }
3910 }
3911
3912 /* Normal register save.
3913
3914 Do not save the frame pointer in the frame_pointer_needed case. It
3915 was done earlier. */
3916 if (frame_pointer_needed)
3917 {
3918 offset = local_fsize;
3919
3920 /* Saving the EH return data registers in the frame is the simplest
3921 way to get the frame unwind information emitted. We put them
3922 just before the general registers. */
3923 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3924 {
3925 unsigned int i, regno;
3926
3927 for (i = 0; ; ++i)
3928 {
3929 regno = EH_RETURN_DATA_REGNO (i);
3930 if (regno == INVALID_REGNUM)
3931 break;
3932
3933 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3934 offset += UNITS_PER_WORD;
3935 }
3936 }
3937
3938 for (i = 18; i >= 4; i--)
3939 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3940 {
3941 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3942 offset += UNITS_PER_WORD;
3943 gr_saved++;
3944 }
3945 /* Account for %r3 which is saved in a special place. */
3946 gr_saved++;
3947 }
3948 /* No frame pointer needed. */
3949 else
3950 {
3951 offset = local_fsize - actual_fsize;
3952
3953 /* Saving the EH return data registers in the frame is the simplest
3954 way to get the frame unwind information emitted. */
3955 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3956 {
3957 unsigned int i, regno;
3958
3959 for (i = 0; ; ++i)
3960 {
3961 regno = EH_RETURN_DATA_REGNO (i);
3962 if (regno == INVALID_REGNUM)
3963 break;
3964
3965 /* If merge_sp_adjust_with_store is nonzero, then we can
3966 optimize the first save. */
3967 if (merge_sp_adjust_with_store)
3968 {
3969 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3970 merge_sp_adjust_with_store = 0;
3971 }
3972 else
3973 store_reg (regno, offset, STACK_POINTER_REGNUM);
3974 offset += UNITS_PER_WORD;
3975 }
3976 }
3977
3978 for (i = 18; i >= 3; i--)
3979 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3980 {
3981 /* If merge_sp_adjust_with_store is nonzero, then we can
3982 optimize the first GR save. */
3983 if (merge_sp_adjust_with_store)
3984 {
3985 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3986 merge_sp_adjust_with_store = 0;
3987 }
3988 else
3989 store_reg (i, offset, STACK_POINTER_REGNUM);
3990 offset += UNITS_PER_WORD;
3991 gr_saved++;
3992 }
3993
3994 /* If we wanted to merge the SP adjustment with a GR save, but we never
3995 did any GR saves, then just emit the adjustment here. */
3996 if (merge_sp_adjust_with_store)
3997 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3998 actual_fsize, 1);
3999 }
4000
4001 /* The hppa calling conventions say that %r19, the pic offset
4002 register, is saved at sp - 32 (in this function's frame)
4003 when generating PIC code. FIXME: What is the correct thing
4004 to do for functions which make no calls and allocate no
4005 frame? Do we need to allocate a frame, or can we just omit
4006 the save? For now we'll just omit the save.
4007
4008 We don't want a note on this insn as the frame marker can
4009 move if there is a dynamic stack allocation. */
4010 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4011 {
4012 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4013
4014 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4015
4016 }
4017
4018 /* Align pointer properly (doubleword boundary). */
4019 offset = (offset + 7) & ~7;
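/* The FP registers below are saved as 8-byte DFmode values, hence the
   8-byte alignment; e.g. an offset of 36 becomes 40.  */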
4020
4021 /* Floating point register store. */
4022 if (save_fregs)
4023 {
4024 rtx base;
4025
4026 /* First get the frame or stack pointer to the start of the FP register
4027 save area. */
4028 if (frame_pointer_needed)
4029 {
4030 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4031 base = hard_frame_pointer_rtx;
4032 }
4033 else
4034 {
4035 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4036 base = stack_pointer_rtx;
4037 }
4038
4039 /* Now actually save the FP registers. */
4040 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4041 {
4042 if (df_regs_ever_live_p (i)
4043 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4044 {
4045 rtx addr, insn, reg;
4046 addr = gen_rtx_MEM (DFmode,
4047 gen_rtx_POST_INC (word_mode, tmpreg));
4048 reg = gen_rtx_REG (DFmode, i);
4049 insn = emit_move_insn (addr, reg);
4050 if (DO_FRAME_NOTES)
4051 {
4052 RTX_FRAME_RELATED_P (insn) = 1;
4053 if (TARGET_64BIT)
4054 {
4055 rtx mem = gen_rtx_MEM (DFmode,
4056 plus_constant (Pmode, base,
4057 offset));
4058 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4059 gen_rtx_SET (VOIDmode, mem, reg));
4060 }
4061 else
4062 {
4063 rtx meml = gen_rtx_MEM (SFmode,
4064 plus_constant (Pmode, base,
4065 offset));
4066 rtx memr = gen_rtx_MEM (SFmode,
4067 plus_constant (Pmode, base,
4068 offset + 4));
4069 rtx regl = gen_rtx_REG (SFmode, i);
4070 rtx regr = gen_rtx_REG (SFmode, i + 1);
4071 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4072 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4073 rtvec vec;
4074
4075 RTX_FRAME_RELATED_P (setl) = 1;
4076 RTX_FRAME_RELATED_P (setr) = 1;
4077 vec = gen_rtvec (2, setl, setr);
4078 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4079 gen_rtx_SEQUENCE (VOIDmode, vec));
4080 }
4081 }
4082 offset += GET_MODE_SIZE (DFmode);
4083 fr_saved++;
4084 }
4085 }
4086 }
4087 }
4088
4089 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4090 Handle case where DISP > 8k by using the add_high_const patterns. */
4091
4092 static void
4093 load_reg (int reg, HOST_WIDE_INT disp, int base)
4094 {
4095 rtx dest = gen_rtx_REG (word_mode, reg);
4096 rtx basereg = gen_rtx_REG (Pmode, base);
4097 rtx src;
4098
4099 if (VAL_14_BITS_P (disp))
4100 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4101 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4102 {
4103 rtx delta = GEN_INT (disp);
4104 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4105
4106 emit_move_insn (tmpreg, delta);
4107 if (TARGET_DISABLE_INDEXING)
4108 {
4109 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4110 src = gen_rtx_MEM (word_mode, tmpreg);
4111 }
4112 else
4113 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4114 }
4115 else
4116 {
4117 rtx delta = GEN_INT (disp);
4118 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4119 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4120
4121 emit_move_insn (tmpreg, high);
4122 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4123 }
4124
4125 emit_move_insn (dest, src);
4126 }
4127
4128 /* Update the total code bytes output to the text section. */
4129
4130 static void
4131 update_total_code_bytes (unsigned int nbytes)
4132 {
4133 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4134 && !IN_NAMED_SECTION_P (cfun->decl))
4135 {
4136 unsigned int old_total = total_code_bytes;
4137
4138 total_code_bytes += nbytes;
4139
4140 /* Be prepared to handle overflows. */
4141 if (old_total > total_code_bytes)
4142 total_code_bytes = UINT_MAX;
4143 }
4144 }
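/* Example of the wraparound check above: if the running total is
   UINT_MAX - 4 and nbytes is 8, the unsigned addition wraps to 3;
   since old_total (UINT_MAX - 4) then exceeds the new total, the
   count is clamped to UINT_MAX.  */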
4145
4146 /* This function generates the assembly code for function exit.
4147 Args are as for output_function_prologue ().
4148
4149 The function epilogue should not depend on the current stack
4150 pointer! It should use the frame pointer only. This is mandatory
4151 because of alloca; we also take advantage of it to omit stack
4152 adjustments before returning. */
4153
4154 static void
4155 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4156 {
4157 rtx insn = get_last_insn ();
4158
4159 last_address = 0;
4160
4161 /* pa_expand_epilogue does the dirty work now. We just need
4162 to output the assembler directives which denote the end
4163 of a function.
4164
4165 To make debuggers happy, emit a nop if the epilogue was completely
4166 eliminated due to a volatile call as the last insn in the
4167 current function. That way the return address (in %r2) will
4168 always point to a valid instruction in the current function. */
4169
4170 /* Get the last real insn. */
4171 if (NOTE_P (insn))
4172 insn = prev_real_insn (insn);
4173
4174 /* If it is a sequence, then look inside. */
4175 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4176 insn = XVECEXP (PATTERN (insn), 0, 0);
4177
4178 /* If insn is a CALL_INSN, then it must be a call to a volatile
4179 function (otherwise there would be epilogue insns). */
4180 if (insn && CALL_P (insn))
4181 {
4182 fputs ("\tnop\n", file);
4183 last_address += 4;
4184 }
4185
4186 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4187
4188 if (TARGET_SOM && TARGET_GAS)
4189 {
4190 /* We are done with this subspace except possibly for some additional
4191 debug information. Forget that we are in this subspace to ensure
4192 that the next function is output in its own subspace. */
4193 in_section = NULL;
4194 cfun->machine->in_nsubspa = 2;
4195 }
4196
4197 /* Thunks do their own accounting. */
4198 if (cfun->is_thunk)
4199 return;
4200
4201 if (INSN_ADDRESSES_SET_P ())
4202 {
4203 insn = get_last_nonnote_insn ();
4204 last_address += INSN_ADDRESSES (INSN_UID (insn));
4205 if (INSN_P (insn))
4206 last_address += insn_default_length (insn);
4207 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4208 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4209 }
4210 else
4211 last_address = UINT_MAX;
4212
4213 /* Finally, update the total number of code bytes output so far. */
4214 update_total_code_bytes (last_address);
4215 }
4216
4217 void
4218 pa_expand_epilogue (void)
4219 {
4220 rtx tmpreg;
4221 HOST_WIDE_INT offset;
4222 HOST_WIDE_INT ret_off = 0;
4223 int i;
4224 int merge_sp_adjust_with_load = 0;
4225
4226 /* We will use this often. */
4227 tmpreg = gen_rtx_REG (word_mode, 1);
4228
4229 /* Try to restore RP early to avoid load/use interlocks when
4230 RP gets used in the return (bv) instruction. This appears to still
4231 be necessary even when we schedule the prologue and epilogue. */
4232 if (rp_saved)
4233 {
4234 ret_off = TARGET_64BIT ? -16 : -20;
4235 if (frame_pointer_needed)
4236 {
4237 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4238 ret_off = 0;
4239 }
4240 else
4241 {
4242 /* No frame pointer, and stack is smaller than 8k. */
4243 if (VAL_14_BITS_P (ret_off - actual_fsize))
4244 {
4245 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4246 ret_off = 0;
4247 }
4248 }
4249 }
4250
4251 /* General register restores. */
4252 if (frame_pointer_needed)
4253 {
4254 offset = local_fsize;
4255
4256 /* If the current function calls __builtin_eh_return, then we need
4257 to restore the saved EH data registers. */
4258 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4259 {
4260 unsigned int i, regno;
4261
4262 for (i = 0; ; ++i)
4263 {
4264 regno = EH_RETURN_DATA_REGNO (i);
4265 if (regno == INVALID_REGNUM)
4266 break;
4267
4268 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4269 offset += UNITS_PER_WORD;
4270 }
4271 }
4272
4273 for (i = 18; i >= 4; i--)
4274 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4275 {
4276 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4277 offset += UNITS_PER_WORD;
4278 }
4279 }
4280 else
4281 {
4282 offset = local_fsize - actual_fsize;
4283
4284 /* If the current function calls __builtin_eh_return, then we need
4285 to restore the saved EH data registers. */
4286 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4287 {
4288 unsigned int i, regno;
4289
4290 for (i = 0; ; ++i)
4291 {
4292 regno = EH_RETURN_DATA_REGNO (i);
4293 if (regno == INVALID_REGNUM)
4294 break;
4295
4296 /* Only for the first load.
4297 merge_sp_adjust_with_load holds the register load
4298 with which we will merge the sp adjustment. */
4299 if (merge_sp_adjust_with_load == 0
4300 && local_fsize == 0
4301 && VAL_14_BITS_P (-actual_fsize))
4302 merge_sp_adjust_with_load = regno;
4303 else
4304 load_reg (regno, offset, STACK_POINTER_REGNUM);
4305 offset += UNITS_PER_WORD;
4306 }
4307 }
4308
4309 for (i = 18; i >= 3; i--)
4310 {
4311 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4312 {
4313 /* Only for the first load.
4314 merge_sp_adjust_with_load holds the register load
4315 with which we will merge the sp adjustment. */
4316 if (merge_sp_adjust_with_load == 0
4317 && local_fsize == 0
4318 && VAL_14_BITS_P (-actual_fsize))
4319 merge_sp_adjust_with_load = i;
4320 else
4321 load_reg (i, offset, STACK_POINTER_REGNUM);
4322 offset += UNITS_PER_WORD;
4323 }
4324 }
4325 }
4326
4327 /* Align pointer properly (doubleword boundary). */
4328 offset = (offset + 7) & ~7;
4329
4330 /* FP register restores. */
4331 if (save_fregs)
4332 {
4333 /* Adjust the register to index off of. */
4334 if (frame_pointer_needed)
4335 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4336 else
4337 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4338
4339 /* Actually do the restores now. */
4340 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4341 if (df_regs_ever_live_p (i)
4342 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4343 {
4344 rtx src = gen_rtx_MEM (DFmode,
4345 gen_rtx_POST_INC (word_mode, tmpreg));
4346 rtx dest = gen_rtx_REG (DFmode, i);
4347 emit_move_insn (dest, src);
4348 }
4349 }
4350
4351 /* Emit a blockage insn here to keep these insns from being moved to
4352 an earlier spot in the epilogue, or into the main instruction stream.
4353
4354 This is necessary as we must not cut the stack back before all the
4355 restores are finished. */
4356 emit_insn (gen_blockage ());
4357
4358 /* Reset stack pointer (and possibly frame pointer). The stack
4359 pointer is initially set to fp + 64 to avoid a race condition. */
4360 if (frame_pointer_needed)
4361 {
4362 rtx delta = GEN_INT (-64);
4363
4364 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4365 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4366 stack_pointer_rtx, delta));
4367 }
4368 /* If we were deferring a callee register restore, do it now. */
4369 else if (merge_sp_adjust_with_load)
4370 {
4371 rtx delta = GEN_INT (-actual_fsize);
4372 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4373
4374 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4375 }
4376 else if (actual_fsize != 0)
4377 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4378 - actual_fsize, 0);
4379
4380 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4381 frame greater than 8k), do so now. */
4382 if (ret_off != 0)
4383 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4384
4385 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4386 {
4387 rtx sa = EH_RETURN_STACKADJ_RTX;
4388
4389 emit_insn (gen_blockage ());
4390 emit_insn (TARGET_64BIT
4391 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4392 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4393 }
4394 }
4395
4396 bool
4397 pa_can_use_return_insn (void)
4398 {
4399 if (!reload_completed)
4400 return false;
4401
4402 if (frame_pointer_needed)
4403 return false;
4404
4405 if (df_regs_ever_live_p (2))
4406 return false;
4407
4408 if (crtl->profile)
4409 return false;
4410
4411 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4412 }
4413
4414 rtx
4415 hppa_pic_save_rtx (void)
4416 {
4417 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4418 }
4419
4420 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4421 #define NO_DEFERRED_PROFILE_COUNTERS 0
4422 #endif
4423
4424
4425 /* Vector of funcdef numbers. */
4426 static vec<int> funcdef_nos;
4427
4428 /* Output deferred profile counters. */
4429 static void
4430 output_deferred_profile_counters (void)
4431 {
4432 unsigned int i;
4433 int align, n;
4434
4435 if (funcdef_nos.is_empty ())
4436 return;
4437
4438 switch_to_section (data_section);
4439 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4440 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4441
4442 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4443 {
4444 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4445 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4446 }
4447
4448 funcdef_nos.release ();
4449 }
4450
4451 void
4452 hppa_profile_hook (int label_no)
4453 {
4454 /* We use SImode for the address of the function in both 32 and
4455 64-bit code to avoid having to provide DImode versions of the
4456 lcla2 and load_offset_label_address insn patterns. */
4457 rtx reg = gen_reg_rtx (SImode);
4458 rtx label_rtx = gen_label_rtx ();
4459 rtx begin_label_rtx, call_insn;
4460 char begin_label_name[16];
4461
4462 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4463 label_no);
4464 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4465
4466 if (TARGET_64BIT)
4467 emit_move_insn (arg_pointer_rtx,
4468 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4469 GEN_INT (64)));
4470
4471 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4472
4473 /* The address of the function is loaded into %r25 with an instruction-
4474 relative sequence that avoids the use of relocations. The sequence
4475 is split so that the load_offset_label_address instruction can
4476 occupy the delay slot of the call to _mcount. */
4477 if (TARGET_PA_20)
4478 emit_insn (gen_lcla2 (reg, label_rtx));
4479 else
4480 emit_insn (gen_lcla1 (reg, label_rtx));
4481
4482 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4483 reg, begin_label_rtx, label_rtx));
4484
4485 #if !NO_DEFERRED_PROFILE_COUNTERS
4486 {
4487 rtx count_label_rtx, addr, r24;
4488 char count_label_name[16];
4489
4490 funcdef_nos.safe_push (label_no);
4491 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4492 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4493
4494 addr = force_reg (Pmode, count_label_rtx);
4495 r24 = gen_rtx_REG (Pmode, 24);
4496 emit_move_insn (r24, addr);
4497
4498 call_insn =
4499 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4500 gen_rtx_SYMBOL_REF (Pmode,
4501 "_mcount")),
4502 GEN_INT (TARGET_64BIT ? 24 : 12)));
4503
4504 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4505 }
4506 #else
4507
4508 call_insn =
4509 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4510 gen_rtx_SYMBOL_REF (Pmode,
4511 "_mcount")),
4512 GEN_INT (TARGET_64BIT ? 16 : 8)));
4513
4514 #endif
4515
4516 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4517 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4518
4519 /* Indicate the _mcount call cannot throw, nor will it execute a
4520 non-local goto. */
4521 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4522 }
4523
4524 /* Fetch the return address for the frame COUNT steps up from
4525 the current frame, after the prologue. FRAMEADDR is the
4526 frame pointer of the COUNT frame.
4527
4528 We want to ignore any export stub remnants here. To handle this,
4529 we examine the code at the return address, and if it is an export
4530 stub, we return a memory rtx for the stub return address stored
4531 at frame-24.
4532
4533 The value returned is used in two different ways:
4534
4535 1. To find a function's caller.
4536
4537 2. To change the return address for a function.
4538
4539 This function handles most instances of case 1; however, it will
4540 fail if there are two levels of stubs to execute on the return
4541 path. The only way I believe that can happen is if the return value
4542 needs a parameter relocation, which never happens for C code.
4543
4544 This function handles most instances of case 2; however, it will
4545 fail if we did not originally have stub code on the return path
4546 but will need stub code on the new return path. This can happen if
4547 the caller & callee are both in the main program, but the new
4548 return location is in a shared library. */
4549
4550 rtx
4551 pa_return_addr_rtx (int count, rtx frameaddr)
4552 {
4553 rtx label;
4554 rtx rp;
4555 rtx saved_rp;
4556 rtx ins;
4557
4558 /* The instruction stream at the return address of a PA1.X export stub is:
4559
4560 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4561 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4562 0x00011820 | stub+16: mtsp r1,sr0
4563 0xe0400002 | stub+20: be,n 0(sr0,rp)
4564
4565 0xe0400002 must be specified as -532676606 so that it won't be
4566 rejected as an invalid immediate operand on 64-bit hosts.
4567
4568 The instruction stream at the return address of a PA2.0 export stub is:
4569
4570 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4571 0xe840d002 | stub+12: bve,n (rp)
4572 */
4573
4574 HOST_WIDE_INT insns[4];
4575 int i, len;
4576
4577 if (count != 0)
4578 return NULL_RTX;
4579
4580 rp = get_hard_reg_initial_val (Pmode, 2);
4581
4582 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4583 return rp;
4584
4585 /* If there is no export stub then just use the value saved from
4586 the return pointer register. */
4587
4588 saved_rp = gen_reg_rtx (Pmode);
4589 emit_move_insn (saved_rp, rp);
4590
4591 /* Get pointer to the instruction stream. We have to mask out the
4592 privilege level from the two low order bits of the return address
4593 pointer here so that ins will point to the start of the first
4594 instruction that would have been executed if we returned. */
4595 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4596 label = gen_label_rtx ();
4597
4598 if (TARGET_PA_20)
4599 {
4600 insns[0] = 0x4bc23fd1;
4601 insns[1] = -398405630;
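/* -398405630 is 0xe840d002 (the bve,n (rp) word above), written
   in decimal for the same 64-bit-host reason as -532676606.  */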
4602 len = 2;
4603 }
4604 else
4605 {
4606 insns[0] = 0x4bc23fd1;
4607 insns[1] = 0x004010a1;
4608 insns[2] = 0x00011820;
4609 insns[3] = -532676606;
4610 len = 4;
4611 }
4612
4613 /* Check the instruction stream at the normal return address for the
4614 export stub. If it is an export stub, then our return address is
4615 really in -24[frameaddr]. */
4616
4617 for (i = 0; i < len; i++)
4618 {
4619 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4620 rtx op1 = GEN_INT (insns[i]);
4621 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4622 }
4623
4624 /* Here we know that our return address points to an export
4625 stub. We don't want to return the address of the export stub,
4626 but rather the return address of the export stub. That return
4627 address is stored at -24[frameaddr]. */
4628
4629 emit_move_insn (saved_rp,
4630 gen_rtx_MEM (Pmode,
4631 memory_address (Pmode,
4632 plus_constant (Pmode, frameaddr,
4633 -24))));
4634
4635 emit_label (label);
4636
4637 return saved_rp;
4638 }
4639
4640 void
4641 pa_emit_bcond_fp (rtx operands[])
4642 {
4643 enum rtx_code code = GET_CODE (operands[0]);
4644 rtx operand0 = operands[1];
4645 rtx operand1 = operands[2];
4646 rtx label = operands[3];
4647
4648 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4649 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4650
4651 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4652 gen_rtx_IF_THEN_ELSE (VOIDmode,
4653 gen_rtx_fmt_ee (NE,
4654 VOIDmode,
4655 gen_rtx_REG (CCFPmode, 0),
4656 const0_rtx),
4657 gen_rtx_LABEL_REF (VOIDmode, label),
4658 pc_rtx)));
4659
4660 }
4661
4662 /* Adjust the cost of a scheduling dependency. Return the new cost of
4663 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4664
4665 static int
4666 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4667 {
4668 enum attr_type attr_type;
4669
4670 /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4671 true dependencies, as they are described with bypasses now. */
4672 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4673 return cost;
4674
4675 if (! recog_memoized (insn))
4676 return 0;
4677
4678 attr_type = get_attr_type (insn);
4679
4680 switch (REG_NOTE_KIND (link))
4681 {
4682 case REG_DEP_ANTI:
4683 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4684 cycles later. */
4685
4686 if (attr_type == TYPE_FPLOAD)
4687 {
4688 rtx pat = PATTERN (insn);
4689 rtx dep_pat = PATTERN (dep_insn);
4690 if (GET_CODE (pat) == PARALLEL)
4691 {
4692 /* This happens for the fldXs,mb patterns. */
4693 pat = XVECEXP (pat, 0, 0);
4694 }
4695 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4696 /* If this happens, we have to extend this to schedule
4697 optimally. Return 0 for now. */
4698 return 0;
4699
4700 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4701 {
4702 if (! recog_memoized (dep_insn))
4703 return 0;
4704 switch (get_attr_type (dep_insn))
4705 {
4706 case TYPE_FPALU:
4707 case TYPE_FPMULSGL:
4708 case TYPE_FPMULDBL:
4709 case TYPE_FPDIVSGL:
4710 case TYPE_FPDIVDBL:
4711 case TYPE_FPSQRTSGL:
4712 case TYPE_FPSQRTDBL:
4713 /* A fpload can't be issued until one cycle before a
4714 preceding arithmetic operation has finished if
4715 the target of the fpload is any of the sources
4716 (or destination) of the arithmetic operation. */
4717 return insn_default_latency (dep_insn) - 1;
4718
4719 default:
4720 return 0;
4721 }
4722 }
4723 }
4724 else if (attr_type == TYPE_FPALU)
4725 {
4726 rtx pat = PATTERN (insn);
4727 rtx dep_pat = PATTERN (dep_insn);
4728 if (GET_CODE (pat) == PARALLEL)
4729 {
4730 /* This happens for the fldXs,mb patterns. */
4731 pat = XVECEXP (pat, 0, 0);
4732 }
4733 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4734 /* If this happens, we have to extend this to schedule
4735 optimally. Return 0 for now. */
4736 return 0;
4737
4738 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4739 {
4740 if (! recog_memoized (dep_insn))
4741 return 0;
4742 switch (get_attr_type (dep_insn))
4743 {
4744 case TYPE_FPDIVSGL:
4745 case TYPE_FPDIVDBL:
4746 case TYPE_FPSQRTSGL:
4747 case TYPE_FPSQRTDBL:
4748 /* An ALU flop can't be issued until two cycles before a
4749 preceding divide or sqrt operation has finished if
4750 the target of the ALU flop is any of the sources
4751 (or destination) of the divide or sqrt operation. */
4752 return insn_default_latency (dep_insn) - 2;
4753
4754 default:
4755 return 0;
4756 }
4757 }
4758 }
4759
4760 /* For other anti dependencies, the cost is 0. */
4761 return 0;
4762
4763 case REG_DEP_OUTPUT:
4764 /* Output dependency; DEP_INSN writes a register that INSN writes some
4765 cycles later. */
4766 if (attr_type == TYPE_FPLOAD)
4767 {
4768 rtx pat = PATTERN (insn);
4769 rtx dep_pat = PATTERN (dep_insn);
4770 if (GET_CODE (pat) == PARALLEL)
4771 {
4772 /* This happens for the fldXs,mb patterns. */
4773 pat = XVECEXP (pat, 0, 0);
4774 }
4775 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4776 /* If this happens, we have to extend this to schedule
4777 optimally. Return 0 for now. */
4778 return 0;
4779
4780 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4781 {
4782 if (! recog_memoized (dep_insn))
4783 return 0;
4784 switch (get_attr_type (dep_insn))
4785 {
4786 case TYPE_FPALU:
4787 case TYPE_FPMULSGL:
4788 case TYPE_FPMULDBL:
4789 case TYPE_FPDIVSGL:
4790 case TYPE_FPDIVDBL:
4791 case TYPE_FPSQRTSGL:
4792 case TYPE_FPSQRTDBL:
4793 /* A fpload can't be issued until one cycle before a
4794 preceding arithmetic operation has finished if
4795 the target of the fpload is the destination of the
4796 arithmetic operation.
4797
4798 Exception: For PA7100LC, PA7200 and PA7300, the cost
4799 is 3 cycles, unless they bundle together. We also
4800 pay the penalty if the second insn is a fpload. */
4801 return insn_default_latency (dep_insn) - 1;
4802
4803 default:
4804 return 0;
4805 }
4806 }
4807 }
4808 else if (attr_type == TYPE_FPALU)
4809 {
4810 rtx pat = PATTERN (insn);
4811 rtx dep_pat = PATTERN (dep_insn);
4812 if (GET_CODE (pat) == PARALLEL)
4813 {
4814 /* This happens for the fldXs,mb patterns. */
4815 pat = XVECEXP (pat, 0, 0);
4816 }
4817 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4818 /* If this happens, we have to extend this to schedule
4819 optimally. Return 0 for now. */
4820 return 0;
4821
4822 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4823 {
4824 if (! recog_memoized (dep_insn))
4825 return 0;
4826 switch (get_attr_type (dep_insn))
4827 {
4828 case TYPE_FPDIVSGL:
4829 case TYPE_FPDIVDBL:
4830 case TYPE_FPSQRTSGL:
4831 case TYPE_FPSQRTDBL:
4832 /* An ALU flop can't be issued until two cycles before a
4833 preceding divide or sqrt operation has finished if
4834 the target of the ALU flop is also the target of
4835 the divide or sqrt operation. */
4836 return insn_default_latency (dep_insn) - 2;
4837
4838 default:
4839 return 0;
4840 }
4841 }
4842 }
4843
4844 /* For other output dependencies, the cost is 0. */
4845 return 0;
4846
4847 default:
4848 gcc_unreachable ();
4849 }
4850 }
4851
4852 /* Adjust scheduling priorities. We use this to try and keep addil
4853 and the next use of %r1 close together. */
4854 static int
4855 pa_adjust_priority (rtx insn, int priority)
4856 {
4857 rtx set = single_set (insn);
4858 rtx src, dest;
4859 if (set)
4860 {
4861 src = SET_SRC (set);
4862 dest = SET_DEST (set);
4863 if (GET_CODE (src) == LO_SUM
4864 && symbolic_operand (XEXP (src, 1), VOIDmode)
4865 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4866 priority >>= 3;
4867
4868 else if (GET_CODE (src) == MEM
4869 && GET_CODE (XEXP (src, 0)) == LO_SUM
4870 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4871 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4872 priority >>= 1;
4873
4874 else if (GET_CODE (dest) == MEM
4875 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4876 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4877 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4878 priority >>= 3;
4879 }
4880 return priority;
4881 }
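/* Illustrative effect of the shifts above: a priority of 40 drops to 5
   for an addil-style LO_SUM set (>> 3) and to 20 for a dependent load
   (>> 1), so the addil and the use of %r1 tend to be scheduled back to
   back rather than being separated by unrelated insns.  */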
4882
4883 /* The 700 can only issue a single insn at a time.
4884 The 7XXX processors can issue two insns at a time.
4885 The 8000 can issue 4 insns at a time. */
4886 static int
4887 pa_issue_rate (void)
4888 {
4889 switch (pa_cpu)
4890 {
4891 case PROCESSOR_700: return 1;
4892 case PROCESSOR_7100: return 2;
4893 case PROCESSOR_7100LC: return 2;
4894 case PROCESSOR_7200: return 2;
4895 case PROCESSOR_7300: return 2;
4896 case PROCESSOR_8000: return 4;
4897
4898 default:
4899 gcc_unreachable ();
4900 }
4901 }
4902
4903
4904
4905 /* Return any length plus adjustment needed by INSN which already has
4906 its length computed as LENGTH. Return LENGTH if no adjustment is
4907 necessary.
4908
4909 Also compute the length of an inline block move here as it is too
4910 complicated to express as a length attribute in pa.md. */
4911 int
4912 pa_adjust_insn_length (rtx insn, int length)
4913 {
4914 rtx pat = PATTERN (insn);
4915
4916 /* If length is negative or undefined, provide initial length. */
4917 if ((unsigned int) length >= INT_MAX)
4918 {
4919 if (GET_CODE (pat) == SEQUENCE)
4920 insn = XVECEXP (pat, 0, 0);
4921
4922 switch (get_attr_type (insn))
4923 {
4924 case TYPE_MILLI:
4925 length = pa_attr_length_millicode_call (insn);
4926 break;
4927 case TYPE_CALL:
4928 length = pa_attr_length_call (insn, 0);
4929 break;
4930 case TYPE_SIBCALL:
4931 length = pa_attr_length_call (insn, 1);
4932 break;
4933 case TYPE_DYNCALL:
4934 length = pa_attr_length_indirect_call (insn);
4935 break;
4936 case TYPE_SH_FUNC_ADRS:
4937 length = pa_attr_length_millicode_call (insn) + 20;
4938 break;
4939 default:
4940 gcc_unreachable ();
4941 }
4942 }
4943
4944 /* Block move pattern. */
4945 if (NONJUMP_INSN_P (insn)
4946 && GET_CODE (pat) == PARALLEL
4947 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4948 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4949 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4950 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4951 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4952 length += compute_movmem_length (insn) - 4;
4953 /* Block clear pattern. */
4954 else if (NONJUMP_INSN_P (insn)
4955 && GET_CODE (pat) == PARALLEL
4956 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4957 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4958 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4959 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4960 length += compute_clrmem_length (insn) - 4;
4961 /* Conditional branch with an unfilled delay slot. */
4962 else if (JUMP_P (insn) && ! simplejump_p (insn))
4963 {
4964 /* Adjust a short backwards conditional with an unfilled delay slot. */
4965 if (GET_CODE (pat) == SET
4966 && length == 4
4967 && JUMP_LABEL (insn) != NULL_RTX
4968 && ! forward_branch_p (insn))
4969 length += 4;
4970 else if (GET_CODE (pat) == PARALLEL
4971 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4972 && length == 4)
4973 length += 4;
4974 /* Adjust dbra insn with short backwards conditional branch with
4975 unfilled delay slot -- only for the case where the counter is in a
4976 general register. */
4977 else if (GET_CODE (pat) == PARALLEL
4978 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4979 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4980 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4981 && length == 4
4982 && ! forward_branch_p (insn))
4983 length += 4;
4984 }
4985 return length;
4986 }
4987
4988 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4989
4990 static bool
4991 pa_print_operand_punct_valid_p (unsigned char code)
4992 {
4993 if (code == '@'
4994 || code == '#'
4995 || code == '*'
4996 || code == '^')
4997 return true;
4998
4999 return false;
5000 }
5001
5002 /* Print operand X (an rtx) in assembler syntax to file FILE.
5003 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5004 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5005
5006 void
5007 pa_print_operand (FILE *file, rtx x, int code)
5008 {
5009 switch (code)
5010 {
5011 case '#':
5012 /* Output a 'nop' if there's nothing for the delay slot. */
5013 if (dbr_sequence_length () == 0)
5014 fputs ("\n\tnop", file);
5015 return;
5016 case '*':
5017 /* Output a nullification completer if there's nothing for the
5018 delay slot or nullification is requested. */
5019 if (dbr_sequence_length () == 0
5020 || (final_sequence
5021 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5022 fputs (",n", file);
5023 return;
5024 case 'R':
5025 /* Print out the second register name of a register pair.
5026 I.e., R (6) => 7. */
5027 fputs (reg_names[REGNO (x) + 1], file);
5028 return;
5029 case 'r':
5030 /* A register or zero. */
5031 if (x == const0_rtx
5032 || (x == CONST0_RTX (DFmode))
5033 || (x == CONST0_RTX (SFmode)))
5034 {
5035 fputs ("%r0", file);
5036 return;
5037 }
5038 else
5039 break;
5040 case 'f':
5041 /* A register or zero (floating point). */
5042 if (x == const0_rtx
5043 || (x == CONST0_RTX (DFmode))
5044 || (x == CONST0_RTX (SFmode)))
5045 {
5046 fputs ("%fr0", file);
5047 return;
5048 }
5049 else
5050 break;
5051 case 'A':
5052 {
5053 rtx xoperands[2];
5054
5055 xoperands[0] = XEXP (XEXP (x, 0), 0);
5056 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5057 pa_output_global_address (file, xoperands[1], 0);
5058 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5059 return;
5060 }
5061
5062 case 'C': /* Plain (C)ondition */
5063 case 'X':
5064 switch (GET_CODE (x))
5065 {
5066 case EQ:
5067 fputs ("=", file); break;
5068 case NE:
5069 fputs ("<>", file); break;
5070 case GT:
5071 fputs (">", file); break;
5072 case GE:
5073 fputs (">=", file); break;
5074 case GEU:
5075 fputs (">>=", file); break;
5076 case GTU:
5077 fputs (">>", file); break;
5078 case LT:
5079 fputs ("<", file); break;
5080 case LE:
5081 fputs ("<=", file); break;
5082 case LEU:
5083 fputs ("<<=", file); break;
5084 case LTU:
5085 fputs ("<<", file); break;
5086 default:
5087 gcc_unreachable ();
5088 }
5089 return;
5090 case 'N': /* Condition, (N)egated */
5091 switch (GET_CODE (x))
5092 {
5093 case EQ:
5094 fputs ("<>", file); break;
5095 case NE:
5096 fputs ("=", file); break;
5097 case GT:
5098 fputs ("<=", file); break;
5099 case GE:
5100 fputs ("<", file); break;
5101 case GEU:
5102 fputs ("<<", file); break;
5103 case GTU:
5104 fputs ("<<=", file); break;
5105 case LT:
5106 fputs (">=", file); break;
5107 case LE:
5108 fputs (">", file); break;
5109 case LEU:
5110 fputs (">>", file); break;
5111 case LTU:
5112 fputs (">>=", file); break;
5113 default:
5114 gcc_unreachable ();
5115 }
5116 return;
5117 /* For floating point comparisons. Note that the output
5118 predicates are the complement of the desired mode. The
5119 conditions for GT, GE, LT, LE and LTGT cause an invalid
5120 operation exception if the result is unordered and this
5121 exception is enabled in the floating-point status register. */
5122 case 'Y':
5123 switch (GET_CODE (x))
5124 {
5125 case EQ:
5126 fputs ("!=", file); break;
5127 case NE:
5128 fputs ("=", file); break;
5129 case GT:
5130 fputs ("!>", file); break;
5131 case GE:
5132 fputs ("!>=", file); break;
5133 case LT:
5134 fputs ("!<", file); break;
5135 case LE:
5136 fputs ("!<=", file); break;
5137 case LTGT:
5138 fputs ("!<>", file); break;
5139 case UNLE:
5140 fputs ("!?<=", file); break;
5141 case UNLT:
5142 fputs ("!?<", file); break;
5143 case UNGE:
5144 fputs ("!?>=", file); break;
5145 case UNGT:
5146 fputs ("!?>", file); break;
5147 case UNEQ:
5148 fputs ("!?=", file); break;
5149 case UNORDERED:
5150 fputs ("!?", file); break;
5151 case ORDERED:
5152 fputs ("?", file); break;
5153 default:
5154 gcc_unreachable ();
5155 }
5156 return;
5157 case 'S': /* Condition, operands are (S)wapped. */
5158 switch (GET_CODE (x))
5159 {
5160 case EQ:
5161 fputs ("=", file); break;
5162 case NE:
5163 fputs ("<>", file); break;
5164 case GT:
5165 fputs ("<", file); break;
5166 case GE:
5167 fputs ("<=", file); break;
5168 case GEU:
5169 fputs ("<<=", file); break;
5170 case GTU:
5171 fputs ("<<", file); break;
5172 case LT:
5173 fputs (">", file); break;
5174 case LE:
5175 fputs (">=", file); break;
5176 case LEU:
5177 fputs (">>=", file); break;
5178 case LTU:
5179 fputs (">>", file); break;
5180 default:
5181 gcc_unreachable ();
5182 }
5183 return;
5184 case 'B': /* Condition, (B)oth swapped and negate. */
5185 switch (GET_CODE (x))
5186 {
5187 case EQ:
5188 fputs ("<>", file); break;
5189 case NE:
5190 fputs ("=", file); break;
5191 case GT:
5192 fputs (">=", file); break;
5193 case GE:
5194 fputs (">", file); break;
5195 case GEU:
5196 fputs (">>", file); break;
5197 case GTU:
5198 fputs (">>=", file); break;
5199 case LT:
5200 fputs ("<=", file); break;
5201 case LE:
5202 fputs ("<", file); break;
5203 case LEU:
5204 fputs ("<<", file); break;
5205 case LTU:
5206 fputs ("<<=", file); break;
5207 default:
5208 gcc_unreachable ();
5209 }
5210 return;
5211 case 'k':
5212 gcc_assert (GET_CODE (x) == CONST_INT);
5213 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5214 return;
5215 case 'Q':
5216 gcc_assert (GET_CODE (x) == CONST_INT);
5217 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5218 return;
5219 case 'L':
5220 gcc_assert (GET_CODE (x) == CONST_INT);
5221 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5222 return;
5223 case 'O':
5224 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5225 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5226 return;
5227 case 'p':
5228 gcc_assert (GET_CODE (x) == CONST_INT);
5229 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5230 return;
5231 case 'P':
5232 gcc_assert (GET_CODE (x) == CONST_INT);
5233 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5234 return;
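/* Worked examples for the shift-count codes above, assuming
   INTVAL (x) == 8: %Q prints 56, %L prints 24, %p prints 55 and
   %P prints 23.  */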
5235 case 'I':
5236 if (GET_CODE (x) == CONST_INT)
5237 fputs ("i", file);
5238 return;
5239 case 'M':
5240 case 'F':
5241 switch (GET_CODE (XEXP (x, 0)))
5242 {
5243 case PRE_DEC:
5244 case PRE_INC:
5245 if (ASSEMBLER_DIALECT == 0)
5246 fputs ("s,mb", file);
5247 else
5248 fputs (",mb", file);
5249 break;
5250 case POST_DEC:
5251 case POST_INC:
5252 if (ASSEMBLER_DIALECT == 0)
5253 fputs ("s,ma", file);
5254 else
5255 fputs (",ma", file);
5256 break;
5257 case PLUS:
5258 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5259 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5260 {
5261 if (ASSEMBLER_DIALECT == 0)
5262 fputs ("x", file);
5263 }
5264 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5265 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5266 {
5267 if (ASSEMBLER_DIALECT == 0)
5268 fputs ("x,s", file);
5269 else
5270 fputs (",s", file);
5271 }
5272 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5273 fputs ("s", file);
5274 break;
5275 default:
5276 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5277 fputs ("s", file);
5278 break;
5279 }
5280 return;
5281 case 'G':
5282 pa_output_global_address (file, x, 0);
5283 return;
5284 case 'H':
5285 pa_output_global_address (file, x, 1);
5286 return;
5287 case 0: /* Don't do anything special */
5288 break;
5289 case 'Z':
5290 {
5291 unsigned op[3];
5292 compute_zdepwi_operands (INTVAL (x), op);
5293 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5294 return;
5295 }
5296 case 'z':
5297 {
5298 unsigned op[3];
5299 compute_zdepdi_operands (INTVAL (x), op);
5300 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5301 return;
5302 }
5303 case 'c':
5304 /* We can get here from a .vtable_inherit due to our
5305 CONSTANT_ADDRESS_P rejecting perfectly good constant
5306 addresses. */
5307 break;
5308 default:
5309 gcc_unreachable ();
5310 }
5311 if (GET_CODE (x) == REG)
5312 {
5313 fputs (reg_names [REGNO (x)], file);
5314 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5315 {
5316 fputs ("R", file);
5317 return;
5318 }
5319 if (FP_REG_P (x)
5320 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5321 && (REGNO (x) & 1) == 0)
5322 fputs ("L", file);
5323 }
5324 else if (GET_CODE (x) == MEM)
5325 {
5326 int size = GET_MODE_SIZE (GET_MODE (x));
5327 rtx base = NULL_RTX;
5328 switch (GET_CODE (XEXP (x, 0)))
5329 {
5330 case PRE_DEC:
5331 case POST_DEC:
5332 base = XEXP (XEXP (x, 0), 0);
5333 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5334 break;
5335 case PRE_INC:
5336 case POST_INC:
5337 base = XEXP (XEXP (x, 0), 0);
5338 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5339 break;
5340 case PLUS:
5341 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5342 fprintf (file, "%s(%s)",
5343 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5344 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5345 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5346 fprintf (file, "%s(%s)",
5347 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5348 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5349 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5350 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5351 {
5352 /* Because the REG_POINTER flag can get lost during reload,
5353 pa_legitimate_address_p canonicalizes the order of the
5354 index and base registers in the combined move patterns. */
5355 rtx base = XEXP (XEXP (x, 0), 1);
5356 rtx index = XEXP (XEXP (x, 0), 0);
5357
5358 fprintf (file, "%s(%s)",
5359 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5360 }
5361 else
5362 output_address (XEXP (x, 0));
5363 break;
5364 default:
5365 output_address (XEXP (x, 0));
5366 break;
5367 }
5368 }
5369 else
5370 output_addr_const (file, x);
5371 }
5372
5373 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5374
5375 void
5376 pa_output_global_address (FILE *file, rtx x, int round_constant)
5377 {
5378
5379 /* Imagine (high (const (plus ...))). */
5380 if (GET_CODE (x) == HIGH)
5381 x = XEXP (x, 0);
5382
5383 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5384 output_addr_const (file, x);
5385 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5386 {
5387 output_addr_const (file, x);
5388 fputs ("-$global$", file);
5389 }
5390 else if (GET_CODE (x) == CONST)
5391 {
5392 const char *sep = "";
5393 int offset = 0; /* assembler wants -$global$ at end */
5394 rtx base = NULL_RTX;
5395
5396 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5397 {
5398 case SYMBOL_REF:
5399 base = XEXP (XEXP (x, 0), 0);
5400 output_addr_const (file, base);
5401 break;
5402 case CONST_INT:
5403 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5404 break;
5405 default:
5406 gcc_unreachable ();
5407 }
5408
5409 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5410 {
5411 case SYMBOL_REF:
5412 base = XEXP (XEXP (x, 0), 1);
5413 output_addr_const (file, base);
5414 break;
5415 case CONST_INT:
5416 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5417 break;
5418 default:
5419 gcc_unreachable ();
5420 }
5421
5422 /* How bogus. The compiler is apparently responsible for
5423 rounding the constant if it uses an LR field selector.
5424
5425 The linker and/or assembler seem a better place since
5426 they have to do this kind of thing already.
5427
5428 If we fail to do this, HP's optimizing linker may eliminate
5429 an addil, but not update the ldw/stw/ldo instruction that
5430 uses the result of the addil. */
5431 if (round_constant)
5432 offset = ((offset + 0x1000) & ~0x1fff);
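/* E.g. an offset of 0x2345 becomes (0x2345 + 0x1000) & ~0x1fff
   == 0x2000, i.e. the nearest multiple of 8k.  */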
5433
5434 switch (GET_CODE (XEXP (x, 0)))
5435 {
5436 case PLUS:
5437 if (offset < 0)
5438 {
5439 offset = -offset;
5440 sep = "-";
5441 }
5442 else
5443 sep = "+";
5444 break;
5445
5446 case MINUS:
5447 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5448 sep = "-";
5449 break;
5450
5451 default:
5452 gcc_unreachable ();
5453 }
5454
5455 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5456 fputs ("-$global$", file);
5457 if (offset)
5458 fprintf (file, "%s%d", sep, offset);
5459 }
5460 else
5461 output_addr_const (file, x);
5462 }
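/* A hypothetical example of the CONST case above: given
   (const (plus (symbol_ref "foo") (const_int 8))) with foo writable
   and PIC disabled, the output is "foo-$global$+8"; if foo is
   read-only, just "foo+8". */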
5463
5464 /* Output boilerplate text to appear at the beginning of the file.
5465 There are several possible versions. */
5466 #define aputs(x) fputs(x, asm_out_file)
5467 static inline void
5468 pa_file_start_level (void)
5469 {
5470 if (TARGET_64BIT)
5471 aputs ("\t.LEVEL 2.0w\n");
5472 else if (TARGET_PA_20)
5473 aputs ("\t.LEVEL 2.0\n");
5474 else if (TARGET_PA_11)
5475 aputs ("\t.LEVEL 1.1\n");
5476 else
5477 aputs ("\t.LEVEL 1.0\n");
5478 }
5479
5480 static inline void
5481 pa_file_start_space (int sortspace)
5482 {
5483 aputs ("\t.SPACE $PRIVATE$");
5484 if (sortspace)
5485 aputs (",SORT=16");
5486 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5487 if (flag_tm)
5488 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5489 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5490 "\n\t.SPACE $TEXT$");
5491 if (sortspace)
5492 aputs (",SORT=8");
5493 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5494 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5495 }
5496
5497 static inline void
5498 pa_file_start_file (int want_version)
5499 {
5500 if (write_symbols != NO_DEBUG)
5501 {
5502 output_file_directive (asm_out_file, main_input_filename);
5503 if (want_version)
5504 aputs ("\t.version\t\"01.01\"\n");
5505 }
5506 }
5507
5508 static inline void
5509 pa_file_start_mcount (const char *aswhat)
5510 {
5511 if (profile_flag)
5512 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5513 }
5514
5515 static void
5516 pa_elf_file_start (void)
5517 {
5518 pa_file_start_level ();
5519 pa_file_start_mcount ("ENTRY");
5520 pa_file_start_file (0);
5521 }
5522
5523 static void
5524 pa_som_file_start (void)
5525 {
5526 pa_file_start_level ();
5527 pa_file_start_space (0);
5528 aputs ("\t.IMPORT $global$,DATA\n"
5529 "\t.IMPORT $$dyncall,MILLICODE\n");
5530 pa_file_start_mcount ("CODE");
5531 pa_file_start_file (0);
5532 }
5533
5534 static void
5535 pa_linux_file_start (void)
5536 {
5537 pa_file_start_file (1);
5538 pa_file_start_level ();
5539 pa_file_start_mcount ("CODE");
5540 }
5541
5542 static void
5543 pa_hpux64_gas_file_start (void)
5544 {
5545 pa_file_start_level ();
5546 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5547 if (profile_flag)
5548 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5549 #endif
5550 pa_file_start_file (1);
5551 }
5552
5553 static void
5554 pa_hpux64_hpas_file_start (void)
5555 {
5556 pa_file_start_level ();
5557 pa_file_start_space (1);
5558 pa_file_start_mcount ("CODE");
5559 pa_file_start_file (0);
5560 }
5561 #undef aputs
5562
5563 /* Search the deferred plabel list for SYMBOL and return its internal
5564 label. If an entry for SYMBOL is not found, a new entry is created. */
5565
5566 rtx
5567 pa_get_deferred_plabel (rtx symbol)
5568 {
5569 const char *fname = XSTR (symbol, 0);
5570 size_t i;
5571
5572 /* See if we have already put this function on the list of deferred
5573 plabels. This list is generally small, so a linear search is not
5574 too ugly. If it proves too slow, replace it with something faster. */
5575 for (i = 0; i < n_deferred_plabels; i++)
5576 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5577 break;
5578
5579 /* If the deferred plabel list is empty, or this entry was not found
5580 on the list, create a new entry on the list. */
5581 if (deferred_plabels == NULL || i == n_deferred_plabels)
5582 {
5583 tree id;
5584
5585 if (deferred_plabels == 0)
5586 deferred_plabels = ggc_alloc<deferred_plabel> ();
5587 else
5588 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5589 deferred_plabels,
5590 n_deferred_plabels + 1);
5591
5592 i = n_deferred_plabels++;
5593 deferred_plabels[i].internal_label = gen_label_rtx ();
5594 deferred_plabels[i].symbol = symbol;
5595
5596 /* Gross. We have just implicitly taken the address of this
5597 function. Mark it in the same manner as assemble_name. */
5598 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5599 if (id)
5600 mark_referenced (id);
5601 }
5602
5603 return deferred_plabels[i].internal_label;
5604 }
5605
5606 static void
5607 output_deferred_plabels (void)
5608 {
5609 size_t i;
5610
5611 /* If we have some deferred plabels, then we need to switch into the
5612 data or readonly data section, and align it to a 4 byte boundary
5613 before outputting the deferred plabels. */
5614 if (n_deferred_plabels)
5615 {
5616 switch_to_section (flag_pic ? data_section : readonly_data_section);
5617 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5618 }
5619
5620 /* Now output the deferred plabels. */
5621 for (i = 0; i < n_deferred_plabels; i++)
5622 {
5623 targetm.asm_out.internal_label (asm_out_file, "L",
5624 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5625 assemble_integer (deferred_plabels[i].symbol,
5626 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5627 }
5628 }
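/* Each entry emitted above looks roughly like (directive spelling
   varies with the target assembler and word size):

	L$0042:
		.word function_symbol

   where L$0042 is a made-up internal label; the 64-bit runtime uses
   an 8-byte slot instead. */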
5629
5630 /* Initialize optabs to point to emulation routines. */
5631
5632 static void
5633 pa_init_libfuncs (void)
5634 {
5635 if (HPUX_LONG_DOUBLE_LIBRARY)
5636 {
5637 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5638 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5639 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5640 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5641 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5642 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5643 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5644 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5645 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5646
5647 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5648 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5649 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5650 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5651 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5652 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5653 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5654
5655 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5656 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5657 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5658 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5659
5660 set_conv_libfunc (sfix_optab, SImode, TFmode,
5661 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5662 : "_U_Qfcnvfxt_quad_to_sgl");
5663 set_conv_libfunc (sfix_optab, DImode, TFmode,
5664 "_U_Qfcnvfxt_quad_to_dbl");
5665 set_conv_libfunc (ufix_optab, SImode, TFmode,
5666 "_U_Qfcnvfxt_quad_to_usgl");
5667 set_conv_libfunc (ufix_optab, DImode, TFmode,
5668 "_U_Qfcnvfxt_quad_to_udbl");
5669
5670 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5671 "_U_Qfcnvxf_sgl_to_quad");
5672 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5673 "_U_Qfcnvxf_dbl_to_quad");
5674 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5675 "_U_Qfcnvxf_usgl_to_quad");
5676 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5677 "_U_Qfcnvxf_udbl_to_quad");
5678 }
5679
5680 if (TARGET_SYNC_LIBCALL)
5681 init_sync_libfuncs (UNITS_PER_WORD);
5682 }
5683
5684 /* HP's millicode routines mean something special to the assembler.
5685 Keep track of which ones we have used. */
5686
5687 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5688 static void import_milli (enum millicodes);
5689 static char imported[(int) end1000];
5690 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5691 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5692 #define MILLI_START 10
5693
5694 static void
5695 import_milli (enum millicodes code)
5696 {
5697 char str[sizeof (import_string)];
5698
5699 if (!imported[(int) code])
5700 {
5701 imported[(int) code] = 1;
5702 strcpy (str, import_string);
5703 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5704 output_asm_insn (str, 0);
5705 }
5706 }
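/* For example, the first call of import_milli (mulI) copies "mulI"
   over the "...." placeholder at MILLI_START and emits

	.IMPORT $$mulI,MILLICODE

   while later calls for the same code emit nothing. */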
5707
5708 /* The register constraints have put the operands and return value in
5709 the proper registers. */
5710
5711 const char *
5712 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5713 {
5714 import_milli (mulI);
5715 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5716 }
5717
5718 /* Emit the rtl for doing a division by a constant. */
5719
5720 /* Do magic division millicodes exist for this value? */
5721 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
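/* That is, magic millicode routines are assumed to exist for the
   divisors 3, 5, 6, 7, 9, 10, 12, 14 and 15 ($$divI_3, $$divU_3,
   and so on). */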
5722
5723 /* We'll use an array to keep track of the magic millicodes and
5724 whether or not we've used them already. [n][0] is signed, [n][1] is
5725 unsigned. */
5726
5727 static int div_milli[16][2];
5728
5729 int
5730 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5731 {
5732 if (GET_CODE (operands[2]) == CONST_INT
5733 && INTVAL (operands[2]) > 0
5734 && INTVAL (operands[2]) < 16
5735 && pa_magic_milli[INTVAL (operands[2])])
5736 {
5737 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5738
5739 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5740 emit
5741 (gen_rtx_PARALLEL
5742 (VOIDmode,
5743 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5744 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5745 SImode,
5746 gen_rtx_REG (SImode, 26),
5747 operands[2])),
5748 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5749 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5750 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5751 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5752 gen_rtx_CLOBBER (VOIDmode, ret))));
5753 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5754 return 1;
5755 }
5756 return 0;
5757 }
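/* A sketch of the millicode convention encoded above: the dividend
   is copied to %r26, the quotient comes back in %r29, and %r25, %r26
   and the return-address register (%r31, or %r2 in the 64-bit
   runtime) are clobbered across the call. */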
5758
5759 const char *
5760 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5761 {
5762 int divisor;
5763
5764 /* If the divisor is a constant, try to use one of the special
5765 opcodes. */
5766 if (GET_CODE (operands[0]) == CONST_INT)
5767 {
5768 static char buf[100];
5769 divisor = INTVAL (operands[0]);
5770 if (!div_milli[divisor][unsignedp])
5771 {
5772 div_milli[divisor][unsignedp] = 1;
5773 if (unsignedp)
5774 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5775 else
5776 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5777 }
5778 if (unsignedp)
5779 {
5780 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5781 INTVAL (operands[0]));
5782 return pa_output_millicode_call (insn,
5783 gen_rtx_SYMBOL_REF (SImode, buf));
5784 }
5785 else
5786 {
5787 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5788 INTVAL (operands[0]));
5789 return pa_output_millicode_call (insn,
5790 gen_rtx_SYMBOL_REF (SImode, buf));
5791 }
5792 }
5793 /* Divisor isn't a special constant. */
5794 else
5795 {
5796 if (unsignedp)
5797 {
5798 import_milli (divU);
5799 return pa_output_millicode_call (insn,
5800 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5801 }
5802 else
5803 {
5804 import_milli (divI);
5805 return pa_output_millicode_call (insn,
5806 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5807 }
5808 }
5809 }
5810
5811 /* Output a $$rem millicode to do mod. */
5812
5813 const char *
5814 pa_output_mod_insn (int unsignedp, rtx insn)
5815 {
5816 if (unsignedp)
5817 {
5818 import_milli (remU);
5819 return pa_output_millicode_call (insn,
5820 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5821 }
5822 else
5823 {
5824 import_milli (remI);
5825 return pa_output_millicode_call (insn,
5826 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5827 }
5828 }
5829
5830 void
5831 pa_output_arg_descriptor (rtx call_insn)
5832 {
5833 const char *arg_regs[4];
5834 enum machine_mode arg_mode;
5835 rtx link;
5836 int i, output_flag = 0;
5837 int regno;
5838
5839 /* We neither need nor want argument location descriptors for the
5840 64-bit runtime environment or the ELF32 environment. */
5841 if (TARGET_64BIT || TARGET_ELF32)
5842 return;
5843
5844 for (i = 0; i < 4; i++)
5845 arg_regs[i] = 0;
5846
5847 /* Specify explicitly that no argument relocations should take place
5848 if using the portable runtime calling conventions. */
5849 if (TARGET_PORTABLE_RUNTIME)
5850 {
5851 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5852 asm_out_file);
5853 return;
5854 }
5855
5856 gcc_assert (CALL_P (call_insn));
5857 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5858 link; link = XEXP (link, 1))
5859 {
5860 rtx use = XEXP (link, 0);
5861
5862 if (! (GET_CODE (use) == USE
5863 && GET_CODE (XEXP (use, 0)) == REG
5864 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5865 continue;
5866
5867 arg_mode = GET_MODE (XEXP (use, 0));
5868 regno = REGNO (XEXP (use, 0));
5869 if (regno >= 23 && regno <= 26)
5870 {
5871 arg_regs[26 - regno] = "GR";
5872 if (arg_mode == DImode)
5873 arg_regs[25 - regno] = "GR";
5874 }
5875 else if (regno >= 32 && regno <= 39)
5876 {
5877 if (arg_mode == SFmode)
5878 arg_regs[(regno - 32) / 2] = "FR";
5879 else
5880 {
5881 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5882 arg_regs[(regno - 34) / 2] = "FR";
5883 arg_regs[(regno - 34) / 2 + 1] = "FU";
5884 #else
5885 arg_regs[(regno - 34) / 2] = "FU";
5886 arg_regs[(regno - 34) / 2 + 1] = "FR";
5887 #endif
5888 }
5889 }
5890 }
5891 fputs ("\t.CALL ", asm_out_file);
5892 for (i = 0; i < 4; i++)
5893 {
5894 if (arg_regs[i])
5895 {
5896 if (output_flag++)
5897 fputc (',', asm_out_file);
5898 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5899 }
5900 }
5901 fputc ('\n', asm_out_file);
5902 }
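/* A hypothetical example: a call whose only register argument is an
   int in %r26 produces

	.CALL ARGW0=GR

   and a DImode argument in %r25/%r26 would mark both ARGW0 and
   ARGW1, giving ".CALL ARGW0=GR,ARGW1=GR". */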
5903 \f
5904 /* Inform reload about cases where moving X with a mode MODE to or from
5905 a register in RCLASS requires an extra scratch or immediate register.
5906 Return the class needed for the immediate register. */
5907
5908 static reg_class_t
5909 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5910 enum machine_mode mode, secondary_reload_info *sri)
5911 {
5912 int regno;
5913 enum reg_class rclass = (enum reg_class) rclass_i;
5914
5915 /* Handle the easy stuff first. */
5916 if (rclass == R1_REGS)
5917 return NO_REGS;
5918
5919 if (REG_P (x))
5920 {
5921 regno = REGNO (x);
5922 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5923 return NO_REGS;
5924 }
5925 else
5926 regno = -1;
5927
5928 /* If we have something like (mem (mem (...))), we can safely assume the
5929 inner MEM will end up in a general register after reloading, so there's
5930 no need for a secondary reload. */
5931 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5932 return NO_REGS;
5933
5934 /* Trying to load a constant into a FP register during PIC code
5935 generation requires %r1 as a scratch register. For float modes,
5936 the only legitimate constant is CONST0_RTX. However, there are
5937 a few patterns that accept constant double operands. */
5938 if (flag_pic
5939 && FP_REG_CLASS_P (rclass)
5940 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5941 {
5942 switch (mode)
5943 {
5944 case SImode:
5945 sri->icode = CODE_FOR_reload_insi_r1;
5946 break;
5947
5948 case DImode:
5949 sri->icode = CODE_FOR_reload_indi_r1;
5950 break;
5951
5952 case SFmode:
5953 sri->icode = CODE_FOR_reload_insf_r1;
5954 break;
5955
5956 case DFmode:
5957 sri->icode = CODE_FOR_reload_indf_r1;
5958 break;
5959
5960 default:
5961 gcc_unreachable ();
5962 }
5963 return NO_REGS;
5964 }
5965
5966 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5967 register when we're generating PIC code or when the operand isn't
5968 readonly. */
5969 if (pa_symbolic_expression_p (x))
5970 {
5971 if (GET_CODE (x) == HIGH)
5972 x = XEXP (x, 0);
5973
5974 if (flag_pic || !read_only_operand (x, VOIDmode))
5975 {
5976 switch (mode)
5977 {
5978 case SImode:
5979 sri->icode = CODE_FOR_reload_insi_r1;
5980 break;
5981
5982 case DImode:
5983 sri->icode = CODE_FOR_reload_indi_r1;
5984 break;
5985
5986 default:
5987 gcc_unreachable ();
5988 }
5989 return NO_REGS;
5990 }
5991 }
5992
5993 /* Profiling showed the PA port spends about 1.3% of its compilation
5994 time in true_regnum from calls inside pa_secondary_reload_class. */
5995 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5996 regno = true_regnum (x);
5997
5998 /* Handle reloads for floating point loads and stores. */
5999 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6000 && FP_REG_CLASS_P (rclass))
6001 {
6002 if (MEM_P (x))
6003 {
6004 x = XEXP (x, 0);
6005
6006 /* We don't need an intermediate for indexed and LO_SUM DLT
6007 memory addresses. When INT14_OK_STRICT is true, it might
6008 appear that we could directly allow register indirect
6009 memory addresses. However, this doesn't work because we
6010 don't support SUBREGs in floating-point register copies
6011 and reload doesn't tell us when it's going to use a SUBREG. */
6012 if (IS_INDEX_ADDR_P (x)
6013 || IS_LO_SUM_DLT_ADDR_P (x))
6014 return NO_REGS;
6015
6016 /* Request intermediate general register. */
6017 return GENERAL_REGS;
6018 }
6019
6020 /* Request a secondary reload with a general scratch register
6021 for everything else. ??? Could symbolic operands be handled
6022 directly when generating non-pic PA 2.0 code? */
6023 sri->icode = (in_p
6024 ? direct_optab_handler (reload_in_optab, mode)
6025 : direct_optab_handler (reload_out_optab, mode));
6026 return NO_REGS;
6027 }
6028
6029 /* A SAR<->FP register copy requires an intermediate general register
6030 and secondary memory. We need a secondary reload with a general
6031 scratch register for spills. */
6032 if (rclass == SHIFT_REGS)
6033 {
6034 /* Handle spill. */
6035 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6036 {
6037 sri->icode = (in_p
6038 ? direct_optab_handler (reload_in_optab, mode)
6039 : direct_optab_handler (reload_out_optab, mode));
6040 return NO_REGS;
6041 }
6042
6043 /* Handle FP copy. */
6044 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6045 return GENERAL_REGS;
6046 }
6047
6048 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6049 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6050 && FP_REG_CLASS_P (rclass))
6051 return GENERAL_REGS;
6052
6053 return NO_REGS;
6054 }
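/* For example (illustrative), reloading (high (symbol_ref "foo"))
   in SImode under PIC selects CODE_FOR_reload_insi_r1 above so %r1
   can be used as a scratch, while a floating-point load from an
   indexed address returns NO_REGS and needs no secondary reload. */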
6055
6056 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6057 is only marked as live on entry by df-scan when it is a fixed
6058 register. It isn't a fixed register in the 64-bit runtime,
6059 so we need to mark it here. */
6060
6061 static void
6062 pa_extra_live_on_entry (bitmap regs)
6063 {
6064 if (TARGET_64BIT)
6065 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6066 }
6067
6068 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6069 to prevent it from being deleted. */
6070
6071 rtx
6072 pa_eh_return_handler_rtx (void)
6073 {
6074 rtx tmp;
6075
6076 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6077 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6078 tmp = gen_rtx_MEM (word_mode, tmp);
6079 tmp->volatil = 1;
6080 return tmp;
6081 }
6082
6083 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6084 by invisible reference. As a GCC extension, we also pass anything
6085 with a zero or variable size by reference.
6086
6087 The 64-bit runtime does not describe passing any types by invisible
6088 reference. The internals of GCC can't currently handle passing
6089 empty structures, and zero or variable length arrays when they are
6090 not passed entirely on the stack or by reference. Thus, as a GCC
6091 extension, we pass these types by reference. The HP compiler doesn't
6092 support these types, so hopefully there shouldn't be any compatibility
6093 issues. This may have to be revisited when HP releases a C99 compiler
6094 or updates the ABI. */
6095
6096 static bool
6097 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6098 enum machine_mode mode, const_tree type,
6099 bool named ATTRIBUTE_UNUSED)
6100 {
6101 HOST_WIDE_INT size;
6102
6103 if (type)
6104 size = int_size_in_bytes (type);
6105 else
6106 size = GET_MODE_SIZE (mode);
6107
6108 if (TARGET_64BIT)
6109 return size <= 0;
6110 else
6111 return size <= 0 || size > 8;
6112 }
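/* Worked examples of the above: in the 32-bit runtime a 12-byte
   struct (size > 8) and a zero-sized struct are both passed by
   reference while a 4-byte int is not; in the 64-bit runtime only
   the zero and variable size cases (size <= 0) go by reference. */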
6113
6114 enum direction
6115 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6116 {
6117 if (mode == BLKmode
6118 || (TARGET_64BIT
6119 && type
6120 && (AGGREGATE_TYPE_P (type)
6121 || TREE_CODE (type) == COMPLEX_TYPE
6122 || TREE_CODE (type) == VECTOR_TYPE)))
6123 {
6124 /* Return none if justification is not required. */
6125 if (type
6126 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6127 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6128 return none;
6129
6130 /* The directions set here are ignored when a BLKmode argument larger
6131 than a word is placed in a register. Different code is used for
6132 the stack and registers. This makes it difficult to have a
6133 consistent data representation for both the stack and registers.
6134 For both runtimes, the justification and padding for arguments on
6135 the stack and in registers should be identical. */
6136 if (TARGET_64BIT)
6137 /* The 64-bit runtime specifies left justification for aggregates. */
6138 return upward;
6139 else
6140 /* The 32-bit runtime architecture specifies right justification.
6141 When the argument is passed on the stack, the argument is padded
6142 with garbage on the left. The HP compiler pads with zeros. */
6143 return downward;
6144 }
6145
6146 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6147 return downward;
6148 else
6149 return none;
6150 }
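/* E.g., a 3-byte BLKmode struct in the 32-bit runtime is padded
   downward (right-justified in its word), while the 64-bit runtime
   would pad the same aggregate upward. */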
6151
6152 \f
6153 /* Do what is necessary for `va_start'. We look at the current function
6154 to determine if stdargs or varargs is used and fill in an initial
6155 va_list. A pointer to this constructor is returned. */
6156
6157 static rtx
6158 hppa_builtin_saveregs (void)
6159 {
6160 rtx offset, dest;
6161 tree fntype = TREE_TYPE (current_function_decl);
6162 int argadj = ((!stdarg_p (fntype))
6163 ? UNITS_PER_WORD : 0);
6164
6165 if (argadj)
6166 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6167 else
6168 offset = crtl->args.arg_offset_rtx;
6169
6170 if (TARGET_64BIT)
6171 {
6172 int i, off;
6173
6174 /* Adjust for varargs/stdarg differences. */
6175 if (argadj)
6176 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6177 else
6178 offset = crtl->args.arg_offset_rtx;
6179
6180 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6181 from the incoming arg pointer and growing to larger addresses. */
6182 for (i = 26, off = -64; i >= 19; i--, off += 8)
6183 emit_move_insn (gen_rtx_MEM (word_mode,
6184 plus_constant (Pmode,
6185 arg_pointer_rtx, off)),
6186 gen_rtx_REG (word_mode, i));
6187
6188 /* The incoming args pointer points just beyond the flushback area;
6189 normally this is not a serious concern. However, when we are doing
6190 varargs/stdargs we want to make the arg pointer point to the start
6191 of the incoming argument area. */
6192 emit_move_insn (virtual_incoming_args_rtx,
6193 plus_constant (Pmode, arg_pointer_rtx, -64));
6194
6195 /* Now return a pointer to the first anonymous argument. */
6196 return copy_to_reg (expand_binop (Pmode, add_optab,
6197 virtual_incoming_args_rtx,
6198 offset, 0, 0, OPTAB_LIB_WIDEN));
6199 }
6200
6201 /* Store general registers on the stack. */
6202 dest = gen_rtx_MEM (BLKmode,
6203 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6204 -16));
6205 set_mem_alias_set (dest, get_varargs_alias_set ());
6206 set_mem_align (dest, BITS_PER_WORD);
6207 move_block_from_reg (23, dest, 4);
6208
6209 /* move_block_from_reg will emit code to store the argument registers
6210 individually as scalar stores.
6211
6212 However, other insns may later load from the same addresses for
6213 a structure load (passing a struct to a varargs routine).
6214
6215 The alias code assumes that such aliasing can never happen, so we
6216 have to keep memory referencing insns from moving up beyond the
6217 last argument register store. So we emit a blockage insn here. */
6218 emit_insn (gen_blockage ());
6219
6220 return copy_to_reg (expand_binop (Pmode, add_optab,
6221 crtl->args.internal_arg_pointer,
6222 offset, 0, 0, OPTAB_LIB_WIDEN));
6223 }
6224
6225 static void
6226 hppa_va_start (tree valist, rtx nextarg)
6227 {
6228 nextarg = expand_builtin_saveregs ();
6229 std_expand_builtin_va_start (valist, nextarg);
6230 }
6231
6232 static tree
6233 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6234 gimple_seq *post_p)
6235 {
6236 if (TARGET_64BIT)
6237 {
6238 /* Args grow upward. We can use the generic routines. */
6239 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6240 }
6241 else /* !TARGET_64BIT */
6242 {
6243 tree ptr = build_pointer_type (type);
6244 tree valist_type;
6245 tree t, u;
6246 unsigned int size, ofs;
6247 bool indirect;
6248
6249 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6250 if (indirect)
6251 {
6252 type = ptr;
6253 ptr = build_pointer_type (type);
6254 }
6255 size = int_size_in_bytes (type);
6256 valist_type = TREE_TYPE (valist);
6257
6258 /* Args grow down. Not handled by generic routines. */
6259
6260 u = fold_convert (sizetype, size_in_bytes (type));
6261 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6262 t = fold_build_pointer_plus (valist, u);
6263
6264 /* Align to 4 or 8 byte boundary depending on argument size. */
6265
6266 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6267 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6268 t = fold_convert (valist_type, t);
6269
6270 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6271
6272 ofs = (8 - size) % 4;
6273 if (ofs != 0)
6274 t = fold_build_pointer_plus_hwi (t, ofs);
6275
6276 t = fold_convert (ptr, t);
6277 t = build_va_arg_indirect_ref (t);
6278
6279 if (indirect)
6280 t = build_va_arg_indirect_ref (t);
6281
6282 return t;
6283 }
6284 }
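/* A worked example of the 32-bit arithmetic above: fetching a char
   (size 1) decrements the pointer by 1, rounds it down to a 4-byte
   boundary, and adds ofs = (8 - 1) % 4 = 3, so the value is read
   right-justified from its argument word; a double (size 8) is
   decremented by 8 and aligned to an 8-byte boundary with no
   offset. */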
6285
6286 /* True if MODE is valid for the target. By "valid", we mean able to
6287 be manipulated in non-trivial ways. In particular, this means all
6288 the arithmetic is supported.
6289
6290 Currently, TImode is not valid as the HP 64-bit runtime documentation
6291 doesn't specify the alignment and calling conventions for this type.
6292 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6293 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6294
6295 static bool
6296 pa_scalar_mode_supported_p (enum machine_mode mode)
6297 {
6298 int precision = GET_MODE_PRECISION (mode);
6299
6300 switch (GET_MODE_CLASS (mode))
6301 {
6302 case MODE_PARTIAL_INT:
6303 case MODE_INT:
6304 if (precision == CHAR_TYPE_SIZE)
6305 return true;
6306 if (precision == SHORT_TYPE_SIZE)
6307 return true;
6308 if (precision == INT_TYPE_SIZE)
6309 return true;
6310 if (precision == LONG_TYPE_SIZE)
6311 return true;
6312 if (precision == LONG_LONG_TYPE_SIZE)
6313 return true;
6314 return false;
6315
6316 case MODE_FLOAT:
6317 if (precision == FLOAT_TYPE_SIZE)
6318 return true;
6319 if (precision == DOUBLE_TYPE_SIZE)
6320 return true;
6321 if (precision == LONG_DOUBLE_TYPE_SIZE)
6322 return true;
6323 return false;
6324
6325 case MODE_DECIMAL_FLOAT:
6326 return false;
6327
6328 default:
6329 gcc_unreachable ();
6330 }
6331 }
6332
6333 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6334 it branches into the delay slot. Otherwise, return FALSE. */
6335
6336 static bool
6337 branch_to_delay_slot_p (rtx insn)
6338 {
6339 rtx jump_insn;
6340
6341 if (dbr_sequence_length ())
6342 return FALSE;
6343
6344 jump_insn = next_active_insn (JUMP_LABEL (insn));
6345 while (insn)
6346 {
6347 insn = next_active_insn (insn);
6348 if (jump_insn == insn)
6349 return TRUE;
6350
6351 /* We can't rely on the length of asms. So, we return FALSE when
6352 the branch is followed by an asm. */
6353 if (!insn
6354 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6355 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6356 || get_attr_length (insn) > 0)
6357 break;
6358 }
6359
6360 return FALSE;
6361 }
6362
6363 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6364
6365 This occurs when INSN has an unfilled delay slot and is followed
6366 by an asm. Disaster can occur if the asm is empty and the jump
6367 branches into the delay slot. So, we add a nop in the delay slot
6368 when this occurs. */
6369
6370 static bool
6371 branch_needs_nop_p (rtx insn)
6372 {
6373 rtx jump_insn;
6374
6375 if (dbr_sequence_length ())
6376 return FALSE;
6377
6378 jump_insn = next_active_insn (JUMP_LABEL (insn));
6379 while (insn)
6380 {
6381 insn = next_active_insn (insn);
6382 if (!insn || jump_insn == insn)
6383 return TRUE;
6384
6385 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6386 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6387 && get_attr_length (insn) > 0)
6388 break;
6389 }
6390
6391 return FALSE;
6392 }
6393
6394 /* Return TRUE if INSN, a forward jump insn, can use nullification
6395 to skip the following instruction. This avoids an extra cycle due
6396 to a mis-predicted branch when we fall through. */
6397
6398 static bool
6399 use_skip_p (rtx insn)
6400 {
6401 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6402
6403 while (insn)
6404 {
6405 insn = next_active_insn (insn);
6406
6407 /* We can't rely on the length of asms, so we can't skip asms. */
6408 if (!insn
6409 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6410 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6411 break;
6412 if (get_attr_length (insn) == 4
6413 && jump_insn == next_active_insn (insn))
6414 return TRUE;
6415 if (get_attr_length (insn) > 0)
6416 break;
6417 }
6418
6419 return FALSE;
6420 }
6421
6422 /* This routine handles all the normal conditional branch sequences we
6423 might need to generate. It handles compare immediate vs compare
6424 register, nullification of delay slots, varying length branches,
6425 negated branches, and all combinations of the above. It returns the
6426 output appropriate to emit the branch corresponding to all given
6427 parameters. */
6428
6429 const char *
6430 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6431 {
6432 static char buf[100];
6433 bool useskip;
6434 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6435 int length = get_attr_length (insn);
6436 int xdelay;
6437
6438 /* A conditional branch to the following instruction (e.g. the delay slot)
6439 is asking for a disaster. This can happen when not optimizing and
6440 when jump optimization fails.
6441
6442 While it is usually safe to emit nothing, this can fail if the
6443 preceding instruction is a nullified branch with an empty delay
6444 slot and the same branch target as this branch. We could check
6445 for this but jump optimization should eliminate nop jumps. It
6446 is always safe to emit a nop. */
6447 if (branch_to_delay_slot_p (insn))
6448 return "nop";
6449
6450 /* The doubleword form of the cmpib instruction doesn't have the LEU
6451 and GTU conditions while the cmpb instruction does. Since we accept
6452 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6453 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6454 operands[2] = gen_rtx_REG (DImode, 0);
6455 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6456 operands[1] = gen_rtx_REG (DImode, 0);
6457
6458 /* If this is a long branch with its delay slot unfilled, set `nullify'
6459 as it can nullify the delay slot and save a nop. */
6460 if (length == 8 && dbr_sequence_length () == 0)
6461 nullify = 1;
6462
6463 /* If this is a short forward conditional branch which did not get
6464 its delay slot filled, the delay slot can still be nullified. */
6465 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6466 nullify = forward_branch_p (insn);
6467
6468 /* A forward branch over a single nullified insn can be done with a
6469 comclr instruction. This avoids a single cycle penalty due to a
6470 mis-predicted branch if we fall through (branch not taken). */
6471 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6472
6473 switch (length)
6474 {
6475 /* All short conditional branches except backwards with an unfilled
6476 delay slot. */
6477 case 4:
6478 if (useskip)
6479 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6480 else
6481 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6482 if (GET_MODE (operands[1]) == DImode)
6483 strcat (buf, "*");
6484 if (negated)
6485 strcat (buf, "%B3");
6486 else
6487 strcat (buf, "%S3");
6488 if (useskip)
6489 strcat (buf, " %2,%r1,%%r0");
6490 else if (nullify)
6491 {
6492 if (branch_needs_nop_p (insn))
6493 strcat (buf, ",n %2,%r1,%0%#");
6494 else
6495 strcat (buf, ",n %2,%r1,%0");
6496 }
6497 else
6498 strcat (buf, " %2,%r1,%0");
6499 break;
6500
6501 /* All long conditionals. Note a short backward branch with an
6502 unfilled delay slot is treated just like a long backward branch
6503 with an unfilled delay slot. */
6504 case 8:
6505 /* Handle weird backwards branch with a filled delay slot
6506 which is nullified. */
6507 if (dbr_sequence_length () != 0
6508 && ! forward_branch_p (insn)
6509 && nullify)
6510 {
6511 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6512 if (GET_MODE (operands[1]) == DImode)
6513 strcat (buf, "*");
6514 if (negated)
6515 strcat (buf, "%S3");
6516 else
6517 strcat (buf, "%B3");
6518 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6519 }
6520 /* Handle short backwards branch with an unfilled delay slot.
6521 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6522 taken and untaken branches. */
6523 else if (dbr_sequence_length () == 0
6524 && ! forward_branch_p (insn)
6525 && INSN_ADDRESSES_SET_P ()
6526 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6527 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6528 {
6529 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6530 if (GET_MODE (operands[1]) == DImode)
6531 strcat (buf, "*");
6532 if (negated)
6533 strcat (buf, "%B3 %2,%r1,%0%#");
6534 else
6535 strcat (buf, "%S3 %2,%r1,%0%#");
6536 }
6537 else
6538 {
6539 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6540 if (GET_MODE (operands[1]) == DImode)
6541 strcat (buf, "*");
6542 if (negated)
6543 strcat (buf, "%S3");
6544 else
6545 strcat (buf, "%B3");
6546 if (nullify)
6547 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6548 else
6549 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6550 }
6551 break;
6552
6553 default:
6554 /* The reversed conditional branch must branch over one additional
6555 instruction if the delay slot is filled and needs to be extracted
6556 by pa_output_lbranch. If the delay slot is empty or this is a
6557 nullified forward branch, the instruction after the reversed
6558 condition branch must be nullified. */
6559 if (dbr_sequence_length () == 0
6560 || (nullify && forward_branch_p (insn)))
6561 {
6562 nullify = 1;
6563 xdelay = 0;
6564 operands[4] = GEN_INT (length);
6565 }
6566 else
6567 {
6568 xdelay = 1;
6569 operands[4] = GEN_INT (length + 4);
6570 }
6571
6572 /* Create a reversed conditional branch which branches around
6573 the following insns. */
6574 if (GET_MODE (operands[1]) != DImode)
6575 {
6576 if (nullify)
6577 {
6578 if (negated)
6579 strcpy (buf,
6580 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6581 else
6582 strcpy (buf,
6583 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6584 }
6585 else
6586 {
6587 if (negated)
6588 strcpy (buf,
6589 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6590 else
6591 strcpy (buf,
6592 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6593 }
6594 }
6595 else
6596 {
6597 if (nullify)
6598 {
6599 if (negated)
6600 strcpy (buf,
6601 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6602 else
6603 strcpy (buf,
6604 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6605 }
6606 else
6607 {
6608 if (negated)
6609 strcpy (buf,
6610 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6611 else
6612 strcpy (buf,
6613 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6614 }
6615 }
6616
6617 output_asm_insn (buf, operands);
6618 return pa_output_lbranch (operands[0], insn, xdelay);
6619 }
6620 return buf;
6621 }
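/* Illustrative outputs from the above (PA 2.0 mnemonics, made-up
   operands and label): the 4-byte form is a single
   "cmpb,<cond> %r5,%r4,target"; the usual 8-byte form is
   "cmpclr,<reversed cond> %r5,%r4,%r0" followed by "b target", where
   the cmpclr nullifies the branch when it should not be taken;
   longer forms branch around a pa_output_lbranch sequence. */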
6622
6623 /* This routine handles output of long unconditional branches that
6624 exceed the maximum range of a simple branch instruction. Since
6625 we don't have a register available for the branch, we save register
6626 %r1 in the frame marker, load the branch destination DEST into %r1,
6627 execute the branch, and restore %r1 in the delay slot of the branch.
6628
6629 Since long branches may have an insn in the delay slot and the
6630 delay slot is used to restore %r1, we in general need to extract
6631 this insn and execute it before the branch. However, to facilitate
6632 use of this function by conditional branches, we also provide an
6633 option to not extract the delay insn so that it will be emitted
6634 after the long branch. So, if there is an insn in the delay slot,
6635 it is extracted if XDELAY is nonzero.
6636
6637 The lengths of the various long-branch sequences are 20, 16 and 24
6638 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6639
6640 const char *
6641 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6642 {
6643 rtx xoperands[2];
6644
6645 xoperands[0] = dest;
6646
6647 /* First, free up the delay slot. */
6648 if (xdelay && dbr_sequence_length () != 0)
6649 {
6650 /* We can't handle a jump in the delay slot. */
6651 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6652
6653 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6654 optimize, 0, NULL);
6655
6656 /* Now delete the delay insn. */
6657 SET_INSN_DELETED (NEXT_INSN (insn));
6658 }
6659
6660 /* Output an insn to save %r1. The runtime documentation doesn't
6661 specify whether the "Clean Up" slot in the caller's frame can
6662 be clobbered by the callee. It isn't copied by HP's builtin
6663 alloca, so this suggests that it can be clobbered if necessary.
6664 The "Static Link" location is copied by HP builtin alloca, so
6665 we avoid using it. Using the cleanup slot might be a problem
6666 if we have to interoperate with languages that pass cleanup
6667 information. However, it should be possible to handle these
6668 situations with GCC's asm feature.
6669
6670 The "Current RP" slot is reserved for the called procedure, so
6671 we try to use it when we don't have a frame of our own. It's
6672 rather unlikely that we won't have a frame when we need to emit
6673 a very long branch.
6674
6675 Really the way to go long term is a register scavenger; go to
6676 the target of the jump and find a register which we can use
6677 as a scratch to hold the value in %r1. Then, we wouldn't have
6678 to free up the delay slot or clobber a slot that may be needed
6679 for other purposes. */
6680 if (TARGET_64BIT)
6681 {
6682 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6683 /* Use the return pointer slot in the frame marker. */
6684 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6685 else
6686 /* Use the slot at -40 in the frame marker since HP builtin
6687 alloca doesn't copy it. */
6688 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6689 }
6690 else
6691 {
6692 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6693 /* Use the return pointer slot in the frame marker. */
6694 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6695 else
6696 /* Use the "Clean Up" slot in the frame marker. In GCC,
6697 the only other use of this location is for copying a
6698 floating point double argument from a floating-point
6699 register to two general registers. The copy is done
6700 as an "atomic" operation when outputting a call, so it
6701 won't interfere with our using the location here. */
6702 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6703 }
6704
6705 if (TARGET_PORTABLE_RUNTIME)
6706 {
6707 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6708 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6709 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6710 }
6711 else if (flag_pic)
6712 {
6713 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6714 if (TARGET_SOM || !TARGET_GAS)
6715 {
6716 xoperands[1] = gen_label_rtx ();
6717 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6718 targetm.asm_out.internal_label (asm_out_file, "L",
6719 CODE_LABEL_NUMBER (xoperands[1]));
6720 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6721 }
6722 else
6723 {
6724 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6725 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6726 }
6727 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6728 }
6729 else
6730 /* Now output a very long branch to the original target. */
6731 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6732
6733 /* Now restore the value of %r1 in the delay slot. */
6734 if (TARGET_64BIT)
6735 {
6736 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6737 return "ldd -16(%%r30),%%r1";
6738 else
6739 return "ldd -40(%%r30),%%r1";
6740 }
6741 else
6742 {
6743 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6744 return "ldw -20(%%r30),%%r1";
6745 else
6746 return "ldw -12(%%r30),%%r1";
6747 }
6748 }
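/* One illustrative variant of the above, assuming a frame is present
   (so the "Clean Up" slot is used) and non-PIC code, with "target"
   standing for the destination label:

	stw %r1,-12(%r30)
	ldil L'target,%r1
	be R'target(%sr4,%r1)
	ldw -12(%r30),%r1

   which accounts for the 16-byte length quoted above; the portable
   runtime and PIC variants come to 20 and 24 bytes. */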
6749
6750 /* This routine handles all the branch-on-bit conditional branch sequences we
6751 might need to generate. It handles nullification of delay slots,
6752 varying length branches, negated branches and all combinations of the
6753 above. It returns the appropriate output template to emit the branch. */
6754
6755 const char *
6756 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6757 {
6758 static char buf[100];
6759 bool useskip;
6760 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6761 int length = get_attr_length (insn);
6762 int xdelay;
6763
6764 /* A conditional branch to the following instruction (e.g. the delay slot) is
6765 asking for a disaster. I do not think this can happen as this pattern
6766 is only used when optimizing; jump optimization should eliminate the
6767 jump. But be prepared just in case. */
6768
6769 if (branch_to_delay_slot_p (insn))
6770 return "nop";
6771
6772 /* If this is a long branch with its delay slot unfilled, set `nullify'
6773 as it can nullify the delay slot and save a nop. */
6774 if (length == 8 && dbr_sequence_length () == 0)
6775 nullify = 1;
6776
6777 /* If this is a short forward conditional branch which did not get
6778 its delay slot filled, the delay slot can still be nullified. */
6779 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6780 nullify = forward_branch_p (insn);
6781
6782 /* A forward branch over a single nullified insn can be done with an
6783 extrs instruction. This avoids a single cycle penalty due to a
6784 mis-predicted branch if we fall through (branch not taken). */
6785 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6786
6787 switch (length)
6788 {
6789
6790 /* All short conditional branches except backwards with an unfilled
6791 delay slot. */
6792 case 4:
6793 if (useskip)
6794 strcpy (buf, "{extrs,|extrw,s,}");
6795 else
6796 strcpy (buf, "bb,");
6797 if (useskip && GET_MODE (operands[0]) == DImode)
6798 strcpy (buf, "extrd,s,*");
6799 else if (GET_MODE (operands[0]) == DImode)
6800 strcpy (buf, "bb,*");
6801 if ((which == 0 && negated)
6802 || (which == 1 && ! negated))
6803 strcat (buf, ">=");
6804 else
6805 strcat (buf, "<");
6806 if (useskip)
6807 strcat (buf, " %0,%1,1,%%r0");
6808 else if (nullify && negated)
6809 {
6810 if (branch_needs_nop_p (insn))
6811 strcat (buf, ",n %0,%1,%3%#");
6812 else
6813 strcat (buf, ",n %0,%1,%3");
6814 }
6815 else if (nullify && ! negated)
6816 {
6817 if (branch_needs_nop_p (insn))
6818 strcat (buf, ",n %0,%1,%2%#");
6819 else
6820 strcat (buf, ",n %0,%1,%2");
6821 }
6822 else if (! nullify && negated)
6823 strcat (buf, " %0,%1,%3");
6824 else if (! nullify && ! negated)
6825 strcat (buf, " %0,%1,%2");
6826 break;
6827
6828 /* All long conditionals. Note a short backward branch with an
6829 unfilled delay slot is treated just like a long backward branch
6830 with an unfilled delay slot. */
6831 case 8:
6832 /* Handle weird backwards branch with a filled delay slot
6833 which is nullified. */
6834 if (dbr_sequence_length () != 0
6835 && ! forward_branch_p (insn)
6836 && nullify)
6837 {
6838 strcpy (buf, "bb,");
6839 if (GET_MODE (operands[0]) == DImode)
6840 strcat (buf, "*");
6841 if ((which == 0 && negated)
6842 || (which == 1 && ! negated))
6843 strcat (buf, "<");
6844 else
6845 strcat (buf, ">=");
6846 if (negated)
6847 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6848 else
6849 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6850 }
6851 /* Handle short backwards branch with an unfilled delay slot.
6852 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6853 taken and untaken branches. */
6854 else if (dbr_sequence_length () == 0
6855 && ! forward_branch_p (insn)
6856 && INSN_ADDRESSES_SET_P ()
6857 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6858 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6859 {
6860 strcpy (buf, "bb,");
6861 if (GET_MODE (operands[0]) == DImode)
6862 strcat (buf, "*");
6863 if ((which == 0 && negated)
6864 || (which == 1 && ! negated))
6865 strcat (buf, ">=");
6866 else
6867 strcat (buf, "<");
6868 if (negated)
6869 strcat (buf, " %0,%1,%3%#");
6870 else
6871 strcat (buf, " %0,%1,%2%#");
6872 }
6873 else
6874 {
6875 if (GET_MODE (operands[0]) == DImode)
6876 strcpy (buf, "extrd,s,*");
6877 else
6878 strcpy (buf, "{extrs,|extrw,s,}");
6879 if ((which == 0 && negated)
6880 || (which == 1 && ! negated))
6881 strcat (buf, "<");
6882 else
6883 strcat (buf, ">=");
6884 if (nullify && negated)
6885 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6886 else if (nullify && ! negated)
6887 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6888 else if (negated)
6889 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6890 else
6891 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6892 }
6893 break;
6894
6895 default:
6896 /* The reversed conditional branch must branch over one additional
6897 instruction if the delay slot is filled and needs to be extracted
6898 by pa_output_lbranch. If the delay slot is empty or this is a
6899 nullified forward branch, the instruction after the reversed
6900 condition branch must be nullified. */
6901 if (dbr_sequence_length () == 0
6902 || (nullify && forward_branch_p (insn)))
6903 {
6904 nullify = 1;
6905 xdelay = 0;
6906 operands[4] = GEN_INT (length);
6907 }
6908 else
6909 {
6910 xdelay = 1;
6911 operands[4] = GEN_INT (length + 4);
6912 }
6913
6914 if (GET_MODE (operands[0]) == DImode)
6915 strcpy (buf, "bb,*");
6916 else
6917 strcpy (buf, "bb,");
6918 if ((which == 0 && negated)
6919 || (which == 1 && !negated))
6920 strcat (buf, "<");
6921 else
6922 strcat (buf, ">=");
6923 if (nullify)
6924 strcat (buf, ",n %0,%1,.+%4");
6925 else
6926 strcat (buf, " %0,%1,.+%4");
6927 output_asm_insn (buf, operands);
6928 return pa_output_lbranch (negated ? operands[3] : operands[2],
6929 insn, xdelay);
6930 }
6931 return buf;
6932 }
6933
6934 /* This routine handles all the branch-on-variable-bit conditional branch
6935 sequences we might need to generate. It handles nullification of delay
6936 slots, varying length branches, negated branches and all combinations
6937 of the above. It returns the appropriate output template to emit the
6938 branch. */
6939
6940 const char *
6941 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6942 int which)
6943 {
6944 static char buf[100];
6945 bool useskip;
6946 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6947 int length = get_attr_length (insn);
6948 int xdelay;
6949
6950 /* A conditional branch to the following instruction (e.g. the delay slot) is
6951 asking for a disaster. I do not think this can happen as this pattern
6952 is only used when optimizing; jump optimization should eliminate the
6953 jump. But be prepared just in case. */
6954
6955 if (branch_to_delay_slot_p (insn))
6956 return "nop";
6957
6958 /* If this is a long branch with its delay slot unfilled, set `nullify'
6959 as it can nullify the delay slot and save a nop. */
6960 if (length == 8 && dbr_sequence_length () == 0)
6961 nullify = 1;
6962
6963 /* If this is a short forward conditional branch which did not get
6964 its delay slot filled, the delay slot can still be nullified. */
6965 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6966 nullify = forward_branch_p (insn);
6967
6968 /* A forward branch over a single nullified insn can be done with an
6969 extrs instruction. This avoids a single cycle penalty due to a
6970 mis-predicted branch if we fall through (branch not taken). */
6971 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6972
6973 switch (length)
6974 {
6975
6976 /* All short conditional branches except backwards with an unfilled
6977 delay slot. */
6978 case 4:
6979 if (useskip)
6980 strcpy (buf, "{vextrs,|extrw,s,}");
6981 else
6982 strcpy (buf, "{bvb,|bb,}");
6983 if (useskip && GET_MODE (operands[0]) == DImode)
6984 strcpy (buf, "extrd,s,*");
6985 else if (GET_MODE (operands[0]) == DImode)
6986 strcpy (buf, "bb,*");
6987 if ((which == 0 && negated)
6988 || (which == 1 && ! negated))
6989 strcat (buf, ">=");
6990 else
6991 strcat (buf, "<");
6992 if (useskip)
6993 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6994 else if (nullify && negated)
6995 {
6996 if (branch_needs_nop_p (insn))
6997 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6998 else
6999 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7000 }
7001 else if (nullify && ! negated)
7002 {
7003 if (branch_needs_nop_p (insn))
7004 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7005 else
7006 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7007 }
7008 else if (! nullify && negated)
7009 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7010 else if (! nullify && ! negated)
7011 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7012 break;
7013
7014 /* All long conditionals. Note a short backward branch with an
7015 unfilled delay slot is treated just like a long backward branch
7016 with an unfilled delay slot. */
7017 case 8:
7018 /* Handle weird backwards branch with a filled delay slot
7019 which is nullified. */
7020 if (dbr_sequence_length () != 0
7021 && ! forward_branch_p (insn)
7022 && nullify)
7023 {
7024 strcpy (buf, "{bvb,|bb,}");
7025 if (GET_MODE (operands[0]) == DImode)
7026 strcat (buf, "*");
7027 if ((which == 0 && negated)
7028 || (which == 1 && ! negated))
7029 strcat (buf, "<");
7030 else
7031 strcat (buf, ">=");
7032 if (negated)
7033 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7034 else
7035 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7036 }
7037 /* Handle short backwards branch with an unfilled delay slot.
7038 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7039 taken and untaken branches. */
7040 else if (dbr_sequence_length () == 0
7041 && ! forward_branch_p (insn)
7042 && INSN_ADDRESSES_SET_P ()
7043 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7044 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7045 {
7046 strcpy (buf, "{bvb,|bb,}");
7047 if (GET_MODE (operands[0]) == DImode)
7048 strcat (buf, "*");
7049 if ((which == 0 && negated)
7050 || (which == 1 && ! negated))
7051 strcat (buf, ">=");
7052 else
7053 strcat (buf, "<");
7054 if (negated)
7055 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7056 else
7057 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7058 }
7059 else
7060 {
7061 strcpy (buf, "{vextrs,|extrw,s,}");
7062 if (GET_MODE (operands[0]) == DImode)
7063 strcpy (buf, "extrd,s,*");
7064 if ((which == 0 && negated)
7065 || (which == 1 && ! negated))
7066 strcat (buf, "<");
7067 else
7068 strcat (buf, ">=");
7069 if (nullify && negated)
7070 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7071 else if (nullify && ! negated)
7072 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7073 else if (negated)
7074 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7075 else
7076 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7077 }
7078 break;
7079
7080 default:
7081 /* The reversed conditional branch must branch over one additional
7082 instruction if the delay slot is filled and needs to be extracted
7083 by pa_output_lbranch. If the delay slot is empty or this is a
7084 nullified forward branch, the instruction after the reversed
7085 condition branch must be nullified. */
7086 if (dbr_sequence_length () == 0
7087 || (nullify && forward_branch_p (insn)))
7088 {
7089 nullify = 1;
7090 xdelay = 0;
7091 operands[4] = GEN_INT (length);
7092 }
7093 else
7094 {
7095 xdelay = 1;
7096 operands[4] = GEN_INT (length + 4);
7097 }
7098
7099 if (GET_MODE (operands[0]) == DImode)
7100 strcpy (buf, "bb,*");
7101 else
7102 strcpy (buf, "{bvb,|bb,}");
7103 if ((which == 0 && negated)
7104 || (which == 1 && !negated))
7105 strcat (buf, "<");
7106 else
7107 strcat (buf, ">=");
7108 if (nullify)
7109 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7110 else
7111 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7112 output_asm_insn (buf, operands);
7113 return pa_output_lbranch (negated ? operands[3] : operands[2],
7114 insn, xdelay);
7115 }
7116 return buf;
7117 }
7118
7119 /* Return the output template for emitting a dbra type insn.
7120
7121 Note it may perform some output operations on its own before
7122 returning the final output string. */
7123 const char *
7124 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7125 {
7126 int length = get_attr_length (insn);
7127
7128 /* A conditional branch to the following instruction (e.g. the delay slot) is
7129 asking for a disaster. Be prepared! */
7130
7131 if (branch_to_delay_slot_p (insn))
7132 {
7133 if (which_alternative == 0)
7134 return "ldo %1(%0),%0";
7135 else if (which_alternative == 1)
7136 {
7137 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7138 output_asm_insn ("ldw -16(%%r30),%4", operands);
7139 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7140 return "{fldws|fldw} -16(%%r30),%0";
7141 }
7142 else
7143 {
7144 output_asm_insn ("ldw %0,%4", operands);
7145 return "ldo %1(%4),%4\n\tstw %4,%0";
7146 }
7147 }
7148
7149 if (which_alternative == 0)
7150 {
7151 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7152 int xdelay;
7153
7154 /* If this is a long branch with its delay slot unfilled, set `nullify'
7155 as it can nullify the delay slot and save a nop. */
7156 if (length == 8 && dbr_sequence_length () == 0)
7157 nullify = 1;
7158
7159 /* If this is a short forward conditional branch which did not get
7160 its delay slot filled, the delay slot can still be nullified. */
7161 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7162 nullify = forward_branch_p (insn);
7163
7164 switch (length)
7165 {
7166 case 4:
7167 if (nullify)
7168 {
7169 if (branch_needs_nop_p (insn))
7170 return "addib,%C2,n %1,%0,%3%#";
7171 else
7172 return "addib,%C2,n %1,%0,%3";
7173 }
7174 else
7175 return "addib,%C2 %1,%0,%3";
7176
7177 case 8:
7178 /* Handle weird backwards branch with a filled delay slot
7179 which is nullified. */
7180 if (dbr_sequence_length () != 0
7181 && ! forward_branch_p (insn)
7182 && nullify)
7183 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7184 /* Handle short backwards branch with an unfilled delay slot.
7185 Using an addb;nop rather than addi;bl saves 1 cycle for both
7186 taken and untaken branches. */
7187 else if (dbr_sequence_length () == 0
7188 && ! forward_branch_p (insn)
7189 && INSN_ADDRESSES_SET_P ()
7190 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7191 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7192 return "addib,%C2 %1,%0,%3%#";
7193
7194 /* Handle normal cases. */
7195 if (nullify)
7196 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7197 else
7198 return "addi,%N2 %1,%0,%0\n\tb %3";
7199
7200 default:
7201 /* The reversed conditional branch must branch over one additional
7202 instruction if the delay slot is filled and needs to be extracted
7203 by pa_output_lbranch. If the delay slot is empty or this is a
7204 nullified forward branch, the instruction after the reversed
7205 condition branch must be nullified. */
7206 if (dbr_sequence_length () == 0
7207 || (nullify && forward_branch_p (insn)))
7208 {
7209 nullify = 1;
7210 xdelay = 0;
7211 operands[4] = GEN_INT (length);
7212 }
7213 else
7214 {
7215 xdelay = 1;
7216 operands[4] = GEN_INT (length + 4);
7217 }
7218
7219 if (nullify)
7220 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7221 else
7222 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7223
7224 return pa_output_lbranch (operands[3], insn, xdelay);
7225 }
7226
7227 }
7228 /* Deal with gross reload from FP register case. */
7229 else if (which_alternative == 1)
7230 {
7231 /* Move loop counter from FP register to MEM then into a GR,
7232 increment the GR, store the GR into MEM, and finally reload
7233 the FP register from MEM from within the branch's delay slot. */
7234 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7235 operands);
7236 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7237 if (length == 24)
7238 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7239 else if (length == 28)
7240 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7241 else
7242 {
7243 operands[5] = GEN_INT (length - 16);
7244 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7245 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7246 return pa_output_lbranch (operands[3], insn, 0);
7247 }
7248 }
7249 /* Deal with gross reload from memory case. */
7250 else
7251 {
7252 /* Reload the loop counter from memory; the store back to memory
7253 happens in the branch's delay slot. */
7254 output_asm_insn ("ldw %0,%4", operands);
7255 if (length == 12)
7256 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7257 else if (length == 16)
7258 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7259 else
7260 {
7261 operands[5] = GEN_INT (length - 4);
7262 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7263 return pa_output_lbranch (operands[3], insn, 0);
7264 }
7265 }
7266 }
7267
7268 /* Return the output template for emitting a movb type insn.
7269
7270 Note it may perform some output operations on its own before
7271 returning the final output string. */
7272 const char *
7273 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7274 int reverse_comparison)
7275 {
7276 int length = get_attr_length (insn);
7277
7278 /* A conditional branch to the following instruction (i.e., the delay slot) is
7279 asking for a disaster. Be prepared! */
7280
7281 if (branch_to_delay_slot_p (insn))
7282 {
7283 if (which_alternative == 0)
7284 return "copy %1,%0";
7285 else if (which_alternative == 1)
7286 {
7287 output_asm_insn ("stw %1,-16(%%r30)", operands);
7288 return "{fldws|fldw} -16(%%r30),%0";
7289 }
7290 else if (which_alternative == 2)
7291 return "stw %1,%0";
7292 else
7293 return "mtsar %r1";
7294 }
7295
7296 /* Support the second variant. */
7297 if (reverse_comparison)
7298 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7299
7300 if (which_alternative == 0)
7301 {
7302 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7303 int xdelay;
7304
7305 /* If this is a long branch with its delay slot unfilled, set `nullify'
7306 as it can nullify the delay slot and save a nop. */
7307 if (length == 8 && dbr_sequence_length () == 0)
7308 nullify = 1;
7309
7310 /* If this is a short forward conditional branch which did not get
7311 its delay slot filled, the delay slot can still be nullified. */
7312 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7313 nullify = forward_branch_p (insn);
7314
7315 switch (length)
7316 {
7317 case 4:
7318 if (nullify)
7319 {
7320 if (branch_needs_nop_p (insn))
7321 return "movb,%C2,n %1,%0,%3%#";
7322 else
7323 return "movb,%C2,n %1,%0,%3";
7324 }
7325 else
7326 return "movb,%C2 %1,%0,%3";
7327
7328 case 8:
7329 /* Handle weird backwards branch with a filled delay slot
7330 which is nullified. */
7331 if (dbr_sequence_length () != 0
7332 && ! forward_branch_p (insn)
7333 && nullify)
7334 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7335
7336 /* Handle short backwards branch with an unfilled delay slot.
7337 Using a movb;nop rather than or;bl saves 1 cycle for both
7338 taken and untaken branches. */
7339 else if (dbr_sequence_length () == 0
7340 && ! forward_branch_p (insn)
7341 && INSN_ADDRESSES_SET_P ()
7342 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7343 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7344 return "movb,%C2 %1,%0,%3%#";
7345 /* Handle normal cases. */
7346 if (nullify)
7347 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7348 else
7349 return "or,%N2 %1,%%r0,%0\n\tb %3";
7350
7351 default:
7352 /* The reversed conditional branch must branch over one additional
7353 instruction if the delay slot is filled and needs to be extracted
7354 by pa_output_lbranch. If the delay slot is empty or this is a
7355 nullified forward branch, the instruction after the reversed
7356 conditional branch must be nullified. */
7357 if (dbr_sequence_length () == 0
7358 || (nullify && forward_branch_p (insn)))
7359 {
7360 nullify = 1;
7361 xdelay = 0;
7362 operands[4] = GEN_INT (length);
7363 }
7364 else
7365 {
7366 xdelay = 1;
7367 operands[4] = GEN_INT (length + 4);
7368 }
7369
7370 if (nullify)
7371 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7372 else
7373 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7374
7375 return pa_output_lbranch (operands[3], insn, xdelay);
7376 }
7377 }
7378 /* Deal with gross reload for FP destination register case. */
7379 else if (which_alternative == 1)
7380 {
7381 /* Move source register to MEM, perform the branch test, then
7382 finally load the FP register from MEM from within the branch's
7383 delay slot. */
7384 output_asm_insn ("stw %1,-16(%%r30)", operands);
7385 if (length == 12)
7386 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7387 else if (length == 16)
7388 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7389 else
7390 {
7391 operands[4] = GEN_INT (length - 4);
7392 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7393 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7394 return pa_output_lbranch (operands[3], insn, 0);
7395 }
7396 }
7397 /* Deal with gross reload from memory case. */
7398 else if (which_alternative == 2)
7399 {
7400 /* Reload the loop counter from memory; the store back to memory
7401 happens in the branch's delay slot. */
7402 if (length == 8)
7403 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7404 else if (length == 12)
7405 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7406 else
7407 {
7408 operands[4] = GEN_INT (length);
7409 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7410 operands);
7411 return pa_output_lbranch (operands[3], insn, 0);
7412 }
7413 }
7414 /* Handle SAR as a destination. */
7415 else
7416 {
7417 if (length == 8)
7418 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7419 else if (length == 12)
7420 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7421 else
7422 {
7423 operands[4] = GEN_INT (length);
7424 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7425 operands);
7426 return pa_output_lbranch (operands[3], insn, 0);
7427 }
7428 }
7429 }
7430
7431 /* Copy any FP arguments in INSN into integer registers. */
7432 static void
7433 copy_fp_args (rtx insn)
7434 {
7435 rtx link;
7436 rtx xoperands[2];
7437
7438 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7439 {
7440 int arg_mode, regno;
7441 rtx use = XEXP (link, 0);
7442
7443 if (! (GET_CODE (use) == USE
7444 && GET_CODE (XEXP (use, 0)) == REG
7445 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7446 continue;
7447
7448 arg_mode = GET_MODE (XEXP (use, 0));
7449 regno = REGNO (XEXP (use, 0));
7450
7451 /* Is it a floating point register? */
7452 if (regno >= 32 && regno <= 39)
7453 {
7454 /* Copy the FP register into an integer register via memory. */
7455 if (arg_mode == SFmode)
7456 {
7457 xoperands[0] = XEXP (use, 0);
7458 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7459 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7460 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7461 }
7462 else
7463 {
7464 xoperands[0] = XEXP (use, 0);
7465 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7466 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7467 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7468 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7469 }
7470 }
7471 }
7472 }
7473
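/* A minimal standalone sketch (illustrative only, not part of the
   original source) of the FP-to-GR argument register mapping used in
   copy_fp_args above.  Under the 32-bit runtime convention, FP argument
   registers %fr4-%fr7 (regnos 32-39, two regnos per FP register)
   correspond to the GR argument registers %r26-%r23; the formulas
   mirror the gen_rtx_REG calls above.  */
#if 0
static int
example_gr_for_fp_arg (int regno, int is_double)
{
  /* DFmode arguments start at regno 34; SFmode arguments step in
     half-register increments from regno 32.  */
  return is_double ? 25 - (regno - 34) / 2 : 26 - (regno - 32) / 2;
}
#endif
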
7474 /* Compute length of the FP argument copy sequence for INSN. */
7475 static int
7476 length_fp_args (rtx insn)
7477 {
7478 int length = 0;
7479 rtx link;
7480
7481 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7482 {
7483 int arg_mode, regno;
7484 rtx use = XEXP (link, 0);
7485
7486 if (! (GET_CODE (use) == USE
7487 && GET_CODE (XEXP (use, 0)) == REG
7488 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7489 continue;
7490
7491 arg_mode = GET_MODE (XEXP (use, 0));
7492 regno = REGNO (XEXP (use, 0));
7493
7494 /* Is it a floating point register? */
7495 if (regno >= 32 && regno <= 39)
7496 {
7497 if (arg_mode == SFmode)
7498 length += 8;
7499 else
7500 length += 12;
7501 }
7502 }
7503
7504 return length;
7505 }
7506
7507 /* Return the attribute length for the millicode call instruction INSN.
7508 The length must match the code generated by pa_output_millicode_call.
7509 We include the delay slot in the returned length as it is better to
7510 overestimate the length than to underestimate it. */
7511
7512 int
7513 pa_attr_length_millicode_call (rtx insn)
7514 {
7515 unsigned long distance = -1;
7516 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7517
7518 if (INSN_ADDRESSES_SET_P ())
7519 {
7520 distance = (total + insn_current_reference_address (insn));
7521 if (distance < total)
7522 distance = -1;
7523 }
7524
7525 if (TARGET_64BIT)
7526 {
7527 if (!TARGET_LONG_CALLS && distance < 7600000)
7528 return 8;
7529
7530 return 20;
7531 }
7532 else if (TARGET_PORTABLE_RUNTIME)
7533 return 24;
7534 else
7535 {
7536 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7537 return 8;
7538
7539 if (!flag_pic)
7540 return 12;
7541
7542 return 24;
7543 }
7544 }
7545
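/* The distance computation above relies on unsigned wraparound: when
   TOTAL + insn_current_reference_address overflows, the distance is
   unknown and is forced to the maximum value so that a long sequence
   is selected.  A standalone sketch of the idiom (illustrative only,
   not part of the original source):  */
#if 0
static unsigned long
example_branch_distance (unsigned long total, unsigned long ref)
{
  unsigned long distance = total + ref;

  /* If the sum wrapped around, assume the worst case.  */
  if (distance < total)
    distance = (unsigned long) -1;

  return distance;
}
#endif
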
7546 /* INSN is a function call. It may have an unconditional jump
7547 in its delay slot.
7548
7549 CALL_DEST is the routine we are calling. */
7550
7551 const char *
7552 pa_output_millicode_call (rtx insn, rtx call_dest)
7553 {
7554 int attr_length = get_attr_length (insn);
7555 int seq_length = dbr_sequence_length ();
7556 int distance;
7557 rtx seq_insn;
7558 rtx xoperands[3];
7559
7560 xoperands[0] = call_dest;
7561 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7562
7563 /* Handle the common case where we are sure that the branch will
7564 reach the beginning of the $CODE$ subspace. The within-reach
7565 form of the $$sh_func_adrs call has a length of 28. Because it
7566 has an attribute type of sh_func_adrs, it never has a nonzero
7567 sequence length (i.e., the delay slot is never filled). */
7568 if (!TARGET_LONG_CALLS
7569 && (attr_length == 8
7570 || (attr_length == 28
7571 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7572 {
7573 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7574 }
7575 else
7576 {
7577 if (TARGET_64BIT)
7578 {
7579 /* It might seem that one insn could be saved by accessing
7580 the millicode function using the linkage table. However,
7581 this doesn't work in shared libraries and other dynamically
7582 loaded objects. Using a pc-relative sequence also avoids
7583 problems related to the implicit use of the gp register. */
7584 output_asm_insn ("b,l .+8,%%r1", xoperands);
7585
7586 if (TARGET_GAS)
7587 {
7588 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7589 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7590 }
7591 else
7592 {
7593 xoperands[1] = gen_label_rtx ();
7594 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7595 targetm.asm_out.internal_label (asm_out_file, "L",
7596 CODE_LABEL_NUMBER (xoperands[1]));
7597 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7598 }
7599
7600 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7601 }
7602 else if (TARGET_PORTABLE_RUNTIME)
7603 {
7604 /* Pure portable runtime doesn't allow be/ble; we also don't
7605 have PIC support in the assembler/linker, so this sequence
7606 is needed. */
7607
7608 /* Get the address of our target into %r1. */
7609 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7610 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7611
7612 /* Get our return address into %r31. */
7613 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7614 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7615
7616 /* Jump to our target address in %r1. */
7617 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7618 }
7619 else if (!flag_pic)
7620 {
7621 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7622 if (TARGET_PA_20)
7623 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7624 else
7625 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7626 }
7627 else
7628 {
7629 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7630 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7631
7632 if (TARGET_SOM || !TARGET_GAS)
7633 {
7634 /* The HP assembler can generate relocations for the
7635 difference of two symbols. GAS can do this for a
7636 millicode symbol but not an arbitrary external
7637 symbol when generating SOM output. */
7638 xoperands[1] = gen_label_rtx ();
7639 targetm.asm_out.internal_label (asm_out_file, "L",
7640 CODE_LABEL_NUMBER (xoperands[1]));
7641 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7642 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7643 }
7644 else
7645 {
7646 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7647 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7648 xoperands);
7649 }
7650
7651 /* Jump to our target address in %r1. */
7652 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7653 }
7654 }
7655
7656 if (seq_length == 0)
7657 output_asm_insn ("nop", xoperands);
7658
7659 /* We are done if there isn't a jump in the delay slot. */
7660 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7661 return "";
7662
7663 /* This call has an unconditional jump in its delay slot. */
7664 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7665
7666 /* See if the return address can be adjusted. Use the containing
7667 sequence insn's address. */
7668 if (INSN_ADDRESSES_SET_P ())
7669 {
7670 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7671 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7672 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7673
7674 if (VAL_14_BITS_P (distance))
7675 {
7676 xoperands[1] = gen_label_rtx ();
7677 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7678 targetm.asm_out.internal_label (asm_out_file, "L",
7679 CODE_LABEL_NUMBER (xoperands[1]));
7680 }
7681 else
7682 /* ??? This branch may not reach its target. */
7683 output_asm_insn ("nop\n\tb,n %0", xoperands);
7684 }
7685 else
7686 /* ??? This branch may not reach its target. */
7687 output_asm_insn ("nop\n\tb,n %0", xoperands);
7688
7689 /* Delete the jump. */
7690 SET_INSN_DELETED (NEXT_INSN (insn));
7691
7692 return "";
7693 }
7694
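/* VAL_14_BITS_P (defined in pa.h) gates the `ldo' adjustment of the
   return address above: ldo has a 14-bit signed displacement, i.e.,
   -8192 through 8191 bytes.  A minimal sketch of the test, assuming
   two's-complement arithmetic (illustrative only, not part of the
   original source):  */
#if 0
static int
example_val_14_bits_p (long value)
{
  /* True iff VALUE fits in a signed 14-bit field.  */
  return value >= -8192 && value < 8192;
}
#endif
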
7695 /* Return the attribute length of the call instruction INSN. The SIBCALL
7696 flag indicates whether INSN is a regular call or a sibling call. The
7697 length returned must be at least as long as the code actually generated by
7698 pa_output_call. Since branch shortening is done before delay branch
7699 sequencing, there is no way to determine whether or not the delay
7700 slot will be filled during branch shortening. Even when the delay
7701 slot is filled, we may have to add a nop if the delay slot contains
7702 a branch that can't reach its target. Thus, we always have to include
7703 the delay slot in the length estimate. This used to be done in
7704 pa_adjust_insn_length but we do it here now as some sequences always
7705 fill the delay slot and we can save four bytes in the estimate for
7706 these sequences. */
7707
7708 int
7709 pa_attr_length_call (rtx insn, int sibcall)
7710 {
7711 int local_call;
7712 rtx call, call_dest;
7713 tree call_decl;
7714 int length = 0;
7715 rtx pat = PATTERN (insn);
7716 unsigned long distance = -1;
7717
7718 gcc_assert (CALL_P (insn));
7719
7720 if (INSN_ADDRESSES_SET_P ())
7721 {
7722 unsigned long total;
7723
7724 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7725 distance = (total + insn_current_reference_address (insn));
7726 if (distance < total)
7727 distance = -1;
7728 }
7729
7730 gcc_assert (GET_CODE (pat) == PARALLEL);
7731
7732 /* Get the call rtx. */
7733 call = XVECEXP (pat, 0, 0);
7734 if (GET_CODE (call) == SET)
7735 call = SET_SRC (call);
7736
7737 gcc_assert (GET_CODE (call) == CALL);
7738
7739 /* Determine if this is a local call. */
7740 call_dest = XEXP (XEXP (call, 0), 0);
7741 call_decl = SYMBOL_REF_DECL (call_dest);
7742 local_call = call_decl && targetm.binds_local_p (call_decl);
7743
7744 /* pc-relative branch. */
7745 if (!TARGET_LONG_CALLS
7746 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7747 || distance < MAX_PCREL17F_OFFSET))
7748 length += 8;
7749
7750 /* 64-bit plabel sequence. */
7751 else if (TARGET_64BIT && !local_call)
7752 length += sibcall ? 28 : 24;
7753
7754 /* non-pic long absolute branch sequence. */
7755 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7756 length += 12;
7757
7758 /* long pc-relative branch sequence. */
7759 else if (TARGET_LONG_PIC_SDIFF_CALL
7760 || (TARGET_GAS && !TARGET_SOM
7761 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7762 {
7763 length += 20;
7764
7765 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7766 length += 8;
7767 }
7768
7769 /* 32-bit plabel sequence. */
7770 else
7771 {
7772 length += 32;
7773
7774 if (TARGET_SOM)
7775 length += length_fp_args (insn);
7776
7777 if (flag_pic)
7778 length += 4;
7779
7780 if (!TARGET_PA_20)
7781 {
7782 if (!sibcall)
7783 length += 8;
7784
7785 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7786 length += 8;
7787 }
7788 }
7789
7790 return length;
7791 }
7792
7793 /* INSN is a function call. It may have an unconditional jump
7794 in its delay slot.
7795
7796 CALL_DEST is the routine we are calling. */
7797
7798 const char *
7799 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7800 {
7801 int delay_insn_deleted = 0;
7802 int delay_slot_filled = 0;
7803 int seq_length = dbr_sequence_length ();
7804 tree call_decl = SYMBOL_REF_DECL (call_dest);
7805 int local_call = call_decl && targetm.binds_local_p (call_decl);
7806 rtx xoperands[2];
7807
7808 xoperands[0] = call_dest;
7809
7810 /* Handle the common case where we're sure that the branch will reach
7811 the beginning of the "$CODE$" subspace. This is the beginning of
7812 the current function if we are in a named section. */
7813 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7814 {
7815 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7816 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7817 }
7818 else
7819 {
7820 if (TARGET_64BIT && !local_call)
7821 {
7822 /* ??? As far as I can tell, the HP linker doesn't support the
7823 long pc-relative sequence described in the 64-bit runtime
7824 architecture. So, we use a slightly longer indirect call. */
7825 xoperands[0] = pa_get_deferred_plabel (call_dest);
7826 xoperands[1] = gen_label_rtx ();
7827
7828 /* If this isn't a sibcall, we put the load of %r27 into the
7829 delay slot. We can't do this in a sibcall as we don't
7830 have a second call-clobbered scratch register available. */
7831 if (seq_length != 0
7832 && ! JUMP_P (NEXT_INSN (insn))
7833 && !sibcall)
7834 {
7835 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7836 optimize, 0, NULL);
7837
7838 /* Now delete the delay insn. */
7839 SET_INSN_DELETED (NEXT_INSN (insn));
7840 delay_insn_deleted = 1;
7841 }
7842
7843 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7844 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7845 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7846
7847 if (sibcall)
7848 {
7849 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7850 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7851 output_asm_insn ("bve (%%r1)", xoperands);
7852 }
7853 else
7854 {
7855 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7856 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7857 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7858 delay_slot_filled = 1;
7859 }
7860 }
7861 else
7862 {
7863 int indirect_call = 0;
7864
7865 /* Emit a long call. There are several different sequences
7866 of increasing length and complexity. In most cases,
7867 they don't allow an instruction in the delay slot. */
7868 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7869 && !TARGET_LONG_PIC_SDIFF_CALL
7870 && !(TARGET_GAS && !TARGET_SOM
7871 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7872 && !TARGET_64BIT)
7873 indirect_call = 1;
7874
7875 if (seq_length != 0
7876 && ! JUMP_P (NEXT_INSN (insn))
7877 && !sibcall
7878 && (!TARGET_PA_20
7879 || indirect_call
7880 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7881 {
7882 /* A non-jump insn in the delay slot. By definition we can
7883 emit this insn before the call (and in fact before argument
7884 relocation). */
7885 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7886 NULL);
7887
7888 /* Now delete the delay insn. */
7889 SET_INSN_DELETED (NEXT_INSN (insn));
7890 delay_insn_deleted = 1;
7891 }
7892
7893 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7894 {
7895 /* This is the best sequence for making long calls in
7896 non-pic code. Unfortunately, GNU ld doesn't provide
7897 the stub needed for external calls, and GAS's support
7898 for this with the SOM linker is buggy. It is safe
7899 to use this for local calls. */
7900 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7901 if (sibcall)
7902 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7903 else
7904 {
7905 if (TARGET_PA_20)
7906 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7907 xoperands);
7908 else
7909 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7910
7911 output_asm_insn ("copy %%r31,%%r2", xoperands);
7912 delay_slot_filled = 1;
7913 }
7914 }
7915 else
7916 {
7917 if (TARGET_LONG_PIC_SDIFF_CALL)
7918 {
7919 /* The HP assembler and linker can handle relocations
7920 for the difference of two symbols. The HP assembler
7921 recognizes the sequence as a pc-relative call and
7922 the linker provides stubs when needed. */
7923 xoperands[1] = gen_label_rtx ();
7924 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7925 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7926 targetm.asm_out.internal_label (asm_out_file, "L",
7927 CODE_LABEL_NUMBER (xoperands[1]));
7928 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7929 }
7930 else if (TARGET_GAS && !TARGET_SOM
7931 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7932 {
7933 /* GAS currently can't generate the relocations that
7934 are needed for the SOM linker under HP-UX using this
7935 sequence. The GNU linker doesn't generate the stubs
7936 that are needed for external calls on TARGET_ELF32
7937 with this sequence. For now, we have to use a
7938 longer plabel sequence when using GAS. */
7939 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7940 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7941 xoperands);
7942 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7943 xoperands);
7944 }
7945 else
7946 {
7947 /* Emit a long plabel-based call sequence. This is
7948 essentially an inline implementation of $$dyncall.
7949 We don't actually try to call $$dyncall as this is
7950 as difficult as calling the function itself. */
7951 xoperands[0] = pa_get_deferred_plabel (call_dest);
7952 xoperands[1] = gen_label_rtx ();
7953
7954 /* Since the call is indirect, FP arguments in registers
7955 need to be copied to the general registers. Then, the
7956 argument relocation stub will copy them back. */
7957 if (TARGET_SOM)
7958 copy_fp_args (insn);
7959
7960 if (flag_pic)
7961 {
7962 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7963 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7964 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7965 }
7966 else
7967 {
7968 output_asm_insn ("addil LR'%0-$global$,%%r27",
7969 xoperands);
7970 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7971 xoperands);
7972 }
7973
7974 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7975 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7976 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7977 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7978
7979 if (!sibcall && !TARGET_PA_20)
7980 {
7981 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7982 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7983 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7984 else
7985 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7986 }
7987 }
7988
7989 if (TARGET_PA_20)
7990 {
7991 if (sibcall)
7992 output_asm_insn ("bve (%%r1)", xoperands);
7993 else
7994 {
7995 if (indirect_call)
7996 {
7997 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7998 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7999 delay_slot_filled = 1;
8000 }
8001 else
8002 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8003 }
8004 }
8005 else
8006 {
8007 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8008 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8009 xoperands);
8010
8011 if (sibcall)
8012 {
8013 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8014 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8015 else
8016 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8017 }
8018 else
8019 {
8020 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8021 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8022 else
8023 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8024
8025 if (indirect_call)
8026 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8027 else
8028 output_asm_insn ("copy %%r31,%%r2", xoperands);
8029 delay_slot_filled = 1;
8030 }
8031 }
8032 }
8033 }
8034 }
8035
8036 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8037 output_asm_insn ("nop", xoperands);
8038
8039 /* We are done if there isn't a jump in the delay slot. */
8040 if (seq_length == 0
8041 || delay_insn_deleted
8042 || ! JUMP_P (NEXT_INSN (insn)))
8043 return "";
8044
8045 /* A sibcall should never have a branch in the delay slot. */
8046 gcc_assert (!sibcall);
8047
8048 /* This call has an unconditional jump in its delay slot. */
8049 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8050
8051 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8052 {
8053 /* See if the return address can be adjusted. Use the containing
8054 sequence insn's address. This would break the regular call/return
8055 relationship assumed by the table-based EH unwinder, so only do that
8056 if the call is not possibly throwing. */
8057 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
8058 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8059 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8060
8061 if (VAL_14_BITS_P (distance)
8062 && !(can_throw_internal (insn) || can_throw_external (insn)))
8063 {
8064 xoperands[1] = gen_label_rtx ();
8065 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8066 targetm.asm_out.internal_label (asm_out_file, "L",
8067 CODE_LABEL_NUMBER (xoperands[1]));
8068 }
8069 else
8070 output_asm_insn ("nop\n\tb,n %0", xoperands);
8071 }
8072 else
8073 output_asm_insn ("b,n %0", xoperands);
8074
8075 /* Delete the jump. */
8076 SET_INSN_DELETED (NEXT_INSN (insn));
8077
8078 return "";
8079 }
8080
8081 /* Return the attribute length of the indirect call instruction INSN.
8082 The length must match the code generated by pa_output_indirect_call.
8083 The returned length includes the delay slot. Currently, the delay
8084 slot of an indirect call sequence is not exposed and it is used by
8085 the sequence itself. */
8086
8087 int
8088 pa_attr_length_indirect_call (rtx insn)
8089 {
8090 unsigned long distance = -1;
8091 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8092
8093 if (INSN_ADDRESSES_SET_P ())
8094 {
8095 distance = (total + insn_current_reference_address (insn));
8096 if (distance < total)
8097 distance = -1;
8098 }
8099
8100 if (TARGET_64BIT)
8101 return 12;
8102
8103 if (TARGET_FAST_INDIRECT_CALLS
8104 || (!TARGET_LONG_CALLS
8105 && !TARGET_PORTABLE_RUNTIME
8106 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8107 || distance < MAX_PCREL17F_OFFSET)))
8108 return 8;
8109
8110 if (flag_pic)
8111 return 20;
8112
8113 if (TARGET_PORTABLE_RUNTIME)
8114 return 16;
8115
8116 /* Out of reach, can use ble. */
8117 return 12;
8118 }
8119
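/* For reference, each length returned above selects one of the
   sequences emitted by pa_output_indirect_call below: 8 is the direct
   branch to $$dyncall (or the fast ble used for kernels), 12 is the
   64-bit or ldil/ble absolute sequence, 16 is the portable runtime
   sequence, and 20 is the long PIC sequence.  */
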
8120 const char *
8121 pa_output_indirect_call (rtx insn, rtx call_dest)
8122 {
8123 rtx xoperands[1];
8124
8125 if (TARGET_64BIT)
8126 {
8127 xoperands[0] = call_dest;
8128 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8129 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8130 return "";
8131 }
8132
8133 /* First the special case for kernels, level 0 systems, etc. */
8134 if (TARGET_FAST_INDIRECT_CALLS)
8135 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8136
8137 /* Now the normal case -- we can reach $$dyncall directly or
8138 we're sure that we can get there via a long-branch stub.
8139
8140 No need to check target flags as the length uniquely identifies
8141 the remaining cases. */
8142 if (pa_attr_length_indirect_call (insn) == 8)
8143 {
8144 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8145 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8146 variant of the B,L instruction can't be used on the SOM target. */
8147 if (TARGET_PA_20 && !TARGET_SOM)
8148 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8149 else
8150 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8151 }
8152
8153 /* Long millicode call, but we are not generating PIC or portable runtime
8154 code. */
8155 if (pa_attr_length_indirect_call (insn) == 12)
8156 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8157
8158 /* Long millicode call for portable runtime. */
8159 if (pa_attr_length_indirect_call (insn) == 16)
8160 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8161
8162 /* We need a long PIC call to $$dyncall. */
8163 xoperands[0] = NULL_RTX;
8164 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8165 if (TARGET_SOM || !TARGET_GAS)
8166 {
8167 xoperands[0] = gen_label_rtx ();
8168 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8169 targetm.asm_out.internal_label (asm_out_file, "L",
8170 CODE_LABEL_NUMBER (xoperands[0]));
8171 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8172 }
8173 else
8174 {
8175 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8176 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8177 xoperands);
8178 }
8179 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8180 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8181 return "";
8182 }
8183
8184 /* In HPUX 8.0's shared library scheme, special relocations are needed
8185 for function labels if they might be passed to a function
8186 in a shared library (because shared libraries don't live in code
8187 space), and special magic is needed to construct their address. */
8188
8189 void
8190 pa_encode_label (rtx sym)
8191 {
8192 const char *str = XSTR (sym, 0);
8193 int len = strlen (str) + 1;
8194 char *newstr, *p;
8195
8196 p = newstr = XALLOCAVEC (char, len + 1);
8197 *p++ = '@';
8198 strcpy (p, str);
8199
8200 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8201 }
8202
8203 static void
8204 pa_encode_section_info (tree decl, rtx rtl, int first)
8205 {
8206 int old_referenced = 0;
8207
8208 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8209 old_referenced
8210 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8211
8212 default_encode_section_info (decl, rtl, first);
8213
8214 if (first && TEXT_SPACE_P (decl))
8215 {
8216 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8217 if (TREE_CODE (decl) == FUNCTION_DECL)
8218 pa_encode_label (XEXP (rtl, 0));
8219 }
8220 else if (old_referenced)
8221 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8222 }
8223
8224 /* This is roughly the inverse of pa_encode_section_info. */
8225
8226 static const char *
8227 pa_strip_name_encoding (const char *str)
8228 {
8229 str += (*str == '@');
8230 str += (*str == '*');
8231 return str;
8232 }
8233
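/* A short sketch of how the encodings compose (illustrative only, not
   part of the original source): pa_encode_label prepends '@' to a
   function label, a leading '*' marks a name that bypasses the user
   label prefix, and pa_strip_name_encoding removes at most one of
   each, in that order.  */
#if 0
static void
example_name_encoding_roundtrip (void)
{
  static const char encoded[] = "@foo"; /* as produced by pa_encode_label */
  const char *stripped = encoded;

  stripped += (*stripped == '@');       /* drop the function label marker */
  stripped += (*stripped == '*');       /* drop any user label prefix marker */
  /* STRIPPED now points at "foo".  */
}
#endif
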
8234 /* Returns 1 if OP is a function label involved in a simple addition
8235 with a constant. Used to keep certain patterns from matching
8236 during instruction combination. */
8237 int
8238 pa_is_function_label_plus_const (rtx op)
8239 {
8240 /* Strip off any CONST. */
8241 if (GET_CODE (op) == CONST)
8242 op = XEXP (op, 0);
8243
8244 return (GET_CODE (op) == PLUS
8245 && function_label_operand (XEXP (op, 0), VOIDmode)
8246 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8247 }
8248
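/* For illustration (not part of the original source), an RTX accepted
   by this predicate has the shape
   (const (plus (symbol_ref "@foo") (const_int 4))); a hypothetical
   constructor for such a value:  */
#if 0
static rtx
example_label_plus_const (rtx function_label)
{
  /* FUNCTION_LABEL is assumed to satisfy function_label_operand.  */
  return gen_rtx_CONST (Pmode,
			gen_rtx_PLUS (Pmode, function_label, GEN_INT (4)));
}
#endif
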
8249 /* Output assembly code for a thunk to FUNCTION. */
8250
8251 static void
8252 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8253 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8254 tree function)
8255 {
8256 static unsigned int current_thunk_number;
8257 int val_14 = VAL_14_BITS_P (delta);
8258 unsigned int old_last_address = last_address, nbytes = 0;
8259 char label[16];
8260 rtx xoperands[4];
8261
8262 xoperands[0] = XEXP (DECL_RTL (function), 0);
8263 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8264 xoperands[2] = GEN_INT (delta);
8265
8266 final_start_function (emit_barrier (), file, 1);
8267
8268 /* Output the thunk. We know that the function is in the same
8269 translation unit (i.e., the same space) as the thunk, and that
8270 thunks are output after their method. Thus, we don't need an
8271 external branch to reach the function. With SOM and GAS,
8272 functions and thunks are effectively in different sections.
8273 Thus, we can always use an IA-relative branch and the linker
8274 will add a long branch stub if necessary.
8275
8276 However, we have to be careful when generating PIC code on the
8277 SOM port to ensure that the sequence does not transfer to an
8278 import stub for the target function as this could clobber the
8279 return value saved at SP-24. This would also apply to the
8280 32-bit linux port if the multi-space model is implemented. */
8281 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8282 && !(flag_pic && TREE_PUBLIC (function))
8283 && (TARGET_GAS || last_address < 262132))
8284 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8285 && ((targetm_common.have_named_sections
8286 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8287 /* The GNU 64-bit linker has rather poor stub management.
8288 So, we use a long branch from thunks that aren't in
8289 the same section as the target function. */
8290 && ((!TARGET_64BIT
8291 && (DECL_SECTION_NAME (thunk_fndecl)
8292 != DECL_SECTION_NAME (function)))
8293 || ((DECL_SECTION_NAME (thunk_fndecl)
8294 == DECL_SECTION_NAME (function))
8295 && last_address < 262132)))
8296 || (targetm_common.have_named_sections
8297 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8298 && DECL_SECTION_NAME (function) == NULL
8299 && last_address < 262132)
8300 || (!targetm_common.have_named_sections
8301 && last_address < 262132))))
8302 {
8303 if (!val_14)
8304 output_asm_insn ("addil L'%2,%%r26", xoperands);
8305
8306 output_asm_insn ("b %0", xoperands);
8307
8308 if (val_14)
8309 {
8310 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8311 nbytes += 8;
8312 }
8313 else
8314 {
8315 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8316 nbytes += 12;
8317 }
8318 }
8319 else if (TARGET_64BIT)
8320 {
8321 /* We only have one call-clobbered scratch register, so we can't
8322 make use of the delay slot if delta doesn't fit in 14 bits. */
8323 if (!val_14)
8324 {
8325 output_asm_insn ("addil L'%2,%%r26", xoperands);
8326 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8327 }
8328
8329 output_asm_insn ("b,l .+8,%%r1", xoperands);
8330
8331 if (TARGET_GAS)
8332 {
8333 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8334 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8335 }
8336 else
8337 {
8338 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8339 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8340 }
8341
8342 if (val_14)
8343 {
8344 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8345 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8346 nbytes += 20;
8347 }
8348 else
8349 {
8350 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8351 nbytes += 24;
8352 }
8353 }
8354 else if (TARGET_PORTABLE_RUNTIME)
8355 {
8356 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8357 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8358
8359 if (!val_14)
8360 output_asm_insn ("addil L'%2,%%r26", xoperands);
8361
8362 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8363
8364 if (val_14)
8365 {
8366 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8367 nbytes += 16;
8368 }
8369 else
8370 {
8371 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8372 nbytes += 20;
8373 }
8374 }
8375 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8376 {
8377 /* The function is accessible from outside this module. The only
8378 way to avoid an import stub between the thunk and function is to
8379 call the function directly with an indirect sequence similar to
8380 that used by $$dyncall. This is possible because $$dyncall acts
8381 as the import stub in an indirect call. */
8382 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8383 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8384 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8385 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8386 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8387 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8388 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8389 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8390 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8391
8392 if (!val_14)
8393 {
8394 output_asm_insn ("addil L'%2,%%r26", xoperands);
8395 nbytes += 4;
8396 }
8397
8398 if (TARGET_PA_20)
8399 {
8400 output_asm_insn ("bve (%%r22)", xoperands);
8401 nbytes += 36;
8402 }
8403 else if (TARGET_NO_SPACE_REGS)
8404 {
8405 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8406 nbytes += 36;
8407 }
8408 else
8409 {
8410 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8411 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8412 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8413 nbytes += 44;
8414 }
8415
8416 if (val_14)
8417 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8418 else
8419 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8420 }
8421 else if (flag_pic)
8422 {
8423 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8424
8425 if (TARGET_SOM || !TARGET_GAS)
8426 {
8427 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8428 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8429 }
8430 else
8431 {
8432 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8433 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8434 }
8435
8436 if (!val_14)
8437 output_asm_insn ("addil L'%2,%%r26", xoperands);
8438
8439 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8440
8441 if (val_14)
8442 {
8443 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8444 nbytes += 20;
8445 }
8446 else
8447 {
8448 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8449 nbytes += 24;
8450 }
8451 }
8452 else
8453 {
8454 if (!val_14)
8455 output_asm_insn ("addil L'%2,%%r26", xoperands);
8456
8457 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8458 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8459
8460 if (val_14)
8461 {
8462 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8463 nbytes += 12;
8464 }
8465 else
8466 {
8467 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8468 nbytes += 16;
8469 }
8470 }
8471
8472 final_end_function ();
8473
8474 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8475 {
8476 switch_to_section (data_section);
8477 output_asm_insn (".align 4", xoperands);
8478 ASM_OUTPUT_LABEL (file, label);
8479 output_asm_insn (".word P'%0", xoperands);
8480 }
8481
8482 current_thunk_number++;
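/* Round the thunk size up to a multiple of the function alignment with
   the usual power-of-two idiom (x + a - 1) & ~(a - 1); e.g., with a
   4-byte boundary, 13 bytes round up to 16.  */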
8483 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8484 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8485 last_address += nbytes;
8486 if (old_last_address > last_address)
8487 last_address = UINT_MAX;
8488 update_total_code_bytes (nbytes);
8489 }
8490
8491 /* Only direct calls to static functions are allowed to be sibling (tail)
8492 call optimized.
8493
8494 This restriction is necessary because some linker generated stubs will
8495 store return pointers into `rp' in some cases which might clobber a
8496 live value already in `rp'.
8497
8498 In a sibcall the current function and the target function share stack
8499 space. Thus if the path to the current function and the path to the
8500 target function save a value in `rp', they save the value into the
8501 same stack slot, which has undesirable consequences.
8502
8503 Because of the deferred binding nature of shared libraries, any function
8504 with external scope could be in a different load module and thus require
8505 `rp' to be saved when calling that function. So sibcall optimizations
8506 can only be safe for static functions.
8507
8508 Note that GCC never needs return value relocations, so we don't have to
8509 worry about static calls with return value relocations (which require
8510 saving rp').
8511
8512 It is safe to perform a sibcall optimization when the target function
8513 will never return. */
8514 static bool
8515 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8516 {
8517 if (TARGET_PORTABLE_RUNTIME)
8518 return false;
8519
8520 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8521 single subspace mode and the call is not indirect. As far as I know,
8522 there is no operating system support for the multiple subspace mode.
8523 It might be possible to support indirect calls if we didn't use
8524 $$dyncall (see the indirect sequence generated in pa_output_call). */
8525 if (TARGET_ELF32)
8526 return (decl != NULL_TREE);
8527
8528 /* Sibcalls are not ok because the arg pointer register is not a fixed
8529 register. This prevents the sibcall optimization from occurring. In
8530 addition, there are problems with stub placement using GNU ld. This
8531 is because a normal sibcall branch uses a 17-bit relocation while
8532 a regular call branch uses a 22-bit relocation. As a result, more
8533 care needs to be taken in the placement of long-branch stubs. */
8534 if (TARGET_64BIT)
8535 return false;
8536
8537 /* Sibcalls are only ok within a translation unit. */
8538 return (decl && !TREE_PUBLIC (decl));
8539 }
8540
8541 /* ??? Addition is not commutative on the PA due to the weird implicit
8542 space register selection rules for memory addresses. Therefore, we
8543 don't consider a + b == b + a, as this might be inside a MEM. */
8544 static bool
8545 pa_commutative_p (const_rtx x, int outer_code)
8546 {
8547 return (COMMUTATIVE_P (x)
8548 && (TARGET_NO_SPACE_REGS
8549 || (outer_code != UNKNOWN && outer_code != MEM)
8550 || GET_CODE (x) != PLUS));
8551 }
8552
8553 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8554 use in fmpyadd instructions. */
8555 int
8556 pa_fmpyaddoperands (rtx *operands)
8557 {
8558 enum machine_mode mode = GET_MODE (operands[0]);
8559
8560 /* Must be a floating point mode. */
8561 if (mode != SFmode && mode != DFmode)
8562 return 0;
8563
8564 /* All modes must be the same. */
8565 if (! (mode == GET_MODE (operands[1])
8566 && mode == GET_MODE (operands[2])
8567 && mode == GET_MODE (operands[3])
8568 && mode == GET_MODE (operands[4])
8569 && mode == GET_MODE (operands[5])))
8570 return 0;
8571
8572 /* All operands must be registers. */
8573 if (! (GET_CODE (operands[1]) == REG
8574 && GET_CODE (operands[2]) == REG
8575 && GET_CODE (operands[3]) == REG
8576 && GET_CODE (operands[4]) == REG
8577 && GET_CODE (operands[5]) == REG))
8578 return 0;
8579
8580 /* Only 2 real operands to the addition. One of the input operands must
8581 be the same as the output operand. */
8582 if (! rtx_equal_p (operands[3], operands[4])
8583 && ! rtx_equal_p (operands[3], operands[5]))
8584 return 0;
8585
8586 /* Inout operand of add cannot conflict with any operands from multiply. */
8587 if (rtx_equal_p (operands[3], operands[0])
8588 || rtx_equal_p (operands[3], operands[1])
8589 || rtx_equal_p (operands[3], operands[2]))
8590 return 0;
8591
8592 /* The multiply cannot feed into the addition operands. */
8593 if (rtx_equal_p (operands[4], operands[0])
8594 || rtx_equal_p (operands[5], operands[0]))
8595 return 0;
8596
8597 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8598 if (mode == SFmode
8599 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8600 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8601 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8602 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8603 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8604 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8605 return 0;
8606
8607 /* Passed. Operands are suitable for fmpyadd. */
8608 return 1;
8609 }
8610
8611 #if !defined(USE_COLLECT2)
8612 static void
8613 pa_asm_out_constructor (rtx symbol, int priority)
8614 {
8615 if (!function_label_operand (symbol, VOIDmode))
8616 pa_encode_label (symbol);
8617
8618 #ifdef CTORS_SECTION_ASM_OP
8619 default_ctor_section_asm_out_constructor (symbol, priority);
8620 #else
8621 # ifdef TARGET_ASM_NAMED_SECTION
8622 default_named_section_asm_out_constructor (symbol, priority);
8623 # else
8624 default_stabs_asm_out_constructor (symbol, priority);
8625 # endif
8626 #endif
8627 }
8628
8629 static void
8630 pa_asm_out_destructor (rtx symbol, int priority)
8631 {
8632 if (!function_label_operand (symbol, VOIDmode))
8633 pa_encode_label (symbol);
8634
8635 #ifdef DTORS_SECTION_ASM_OP
8636 default_dtor_section_asm_out_destructor (symbol, priority);
8637 #else
8638 # ifdef TARGET_ASM_NAMED_SECTION
8639 default_named_section_asm_out_destructor (symbol, priority);
8640 # else
8641 default_stabs_asm_out_destructor (symbol, priority);
8642 # endif
8643 #endif
8644 }
8645 #endif
8646
8647 /* This function places uninitialized global data in the bss section.
8648 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8649 function on the SOM port to prevent uninitialized global data from
8650 being placed in the data section. */
8651
8652 void
8653 pa_asm_output_aligned_bss (FILE *stream,
8654 const char *name,
8655 unsigned HOST_WIDE_INT size,
8656 unsigned int align)
8657 {
8658 switch_to_section (bss_section);
8659 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8660
8661 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8662 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8663 #endif
8664
8665 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8666 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8667 #endif
8668
8669 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8670 ASM_OUTPUT_LABEL (stream, name);
8671 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8672 }
8673
8674 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8675 that doesn't allow the alignment of global common storage to be directly
8676 specified. The SOM linker aligns common storage based on the rounded
8677 value of the NUM_BYTES parameter in the .comm directive. It's not
8678 possible to use the .align directive as it doesn't affect the alignment
8679 of the label associated with a .comm directive. */
8680
8681 void
8682 pa_asm_output_aligned_common (FILE *stream,
8683 const char *name,
8684 unsigned HOST_WIDE_INT size,
8685 unsigned int align)
8686 {
8687 unsigned int max_common_align;
8688
8689 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8690 if (align > max_common_align)
8691 {
8692 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8693 "for global common data. Using %u",
8694 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8695 align = max_common_align;
8696 }
8697
8698 switch_to_section (bss_section);
8699
8700 assemble_name (stream, name);
8701 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8702 MAX (size, align / BITS_PER_UNIT));
8703 }
8704
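/* Worked example (illustrative only): a request for 1 byte of common
   storage with 64-bit (8-byte) alignment emits ".comm 8"; since the SOM
   linker derives the alignment from the rounded size, the directive
   uses MAX (size, align / BITS_PER_UNIT) in place of an explicit
   .align.  */
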
8705 /* We can't use .comm for local common storage as the SOM linker effectively
8706 treats the symbol as universal and uses the same storage for local symbols
8707 with the same name in different object files. The .block directive
8708 reserves an uninitialized block of storage. However, it's not common
8709 storage. Fortunately, GCC never requests common storage with the same
8710 name in any given translation unit. */
8711
8712 void
8713 pa_asm_output_aligned_local (FILE *stream,
8714 const char *name,
8715 unsigned HOST_WIDE_INT size,
8716 unsigned int align)
8717 {
8718 switch_to_section (bss_section);
8719 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8720
8721 #ifdef LOCAL_ASM_OP
8722 fprintf (stream, "%s", LOCAL_ASM_OP);
8723 assemble_name (stream, name);
8724 fprintf (stream, "\n");
8725 #endif
8726
8727 ASM_OUTPUT_LABEL (stream, name);
8728 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8729 }
8730
8731 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8732 use in fmpysub instructions. */
8733 int
8734 pa_fmpysuboperands (rtx *operands)
8735 {
8736 enum machine_mode mode = GET_MODE (operands[0]);
8737
8738 /* Must be a floating point mode. */
8739 if (mode != SFmode && mode != DFmode)
8740 return 0;
8741
8742 /* All modes must be the same. */
8743 if (! (mode == GET_MODE (operands[1])
8744 && mode == GET_MODE (operands[2])
8745 && mode == GET_MODE (operands[3])
8746 && mode == GET_MODE (operands[4])
8747 && mode == GET_MODE (operands[5])))
8748 return 0;
8749
8750 /* All operands must be registers. */
8751 if (! (GET_CODE (operands[1]) == REG
8752 && GET_CODE (operands[2]) == REG
8753 && GET_CODE (operands[3]) == REG
8754 && GET_CODE (operands[4]) == REG
8755 && GET_CODE (operands[5]) == REG))
8756 return 0;
8757
8758 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8759 operation, so operands[4] must be the same as operands[3]. */
8760 if (! rtx_equal_p (operands[3], operands[4]))
8761 return 0;
8762
8763 /* The multiply cannot feed into the subtraction. */
8764 if (rtx_equal_p (operands[5], operands[0]))
8765 return 0;
8766
8767 /* Inout operand of sub cannot conflict with any operands from multiply. */
8768 if (rtx_equal_p (operands[3], operands[0])
8769 || rtx_equal_p (operands[3], operands[1])
8770 || rtx_equal_p (operands[3], operands[2]))
8771 return 0;
8772
8773 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8774 if (mode == SFmode
8775 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8776 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8777 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8778 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8779 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8780 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8781 return 0;
8782
8783 /* Passed. Operands are suitable for fmpysub. */
8784 return 1;
8785 }
8786
8787 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8788 constants for shadd instructions. */
8789 int
8790 pa_shadd_constant_p (int val)
8791 {
8792 if (val == 2 || val == 4 || val == 8)
8793 return 1;
8794 else
8795 return 0;
8796 }
8797
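/* The constants 2, 4 and 8 correspond to the sh1add, sh2add and sh3add
   instructions, which shift the index left by 1, 2 or 3 bits before
   adding.  A sketch of the constant-to-shift mapping (illustrative
   only, not part of the original source):  */
#if 0
static int
example_shadd_shift_amount (int val)
{
  switch (val)
    {
    case 2: return 1;	/* sh1add: index * 2 */
    case 4: return 2;	/* sh2add: index * 4 */
    case 8: return 3;	/* sh3add: index * 8 */
    default: return -1;	/* not a valid shadd scale */
    }
}
#endif
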
8798 /* Return TRUE if INSN branches forward. */
8799
8800 static bool
8801 forward_branch_p (rtx insn)
8802 {
8803 rtx lab = JUMP_LABEL (insn);
8804
8805 /* The INSN must have a jump label. */
8806 gcc_assert (lab != NULL_RTX);
8807
8808 if (INSN_ADDRESSES_SET_P ())
8809 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8810
8811 while (insn)
8812 {
8813 if (insn == lab)
8814 return true;
8815 else
8816 insn = NEXT_INSN (insn);
8817 }
8818
8819 return false;
8820 }
8821
8822 /* Return 1 if INSN is in the delay slot of a call instruction. */
8823 int
8824 pa_jump_in_call_delay (rtx insn)
8825 {
8827 if (! JUMP_P (insn))
8828 return 0;
8829
8830 if (PREV_INSN (insn)
8831 && PREV_INSN (PREV_INSN (insn))
8832 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8833 {
8834 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8835
8836 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8837 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8839 }
8840 else
8841 return 0;
8842 }
8843
8844 /* Output an unconditional move and branch insn. */
8845
8846 const char *
8847 pa_output_parallel_movb (rtx *operands, rtx insn)
8848 {
8849 int length = get_attr_length (insn);
8850
8851 /* These are the cases in which we win. */
8852 if (length == 4)
8853 return "mov%I1b,tr %1,%0,%2";
8854
8855 /* None of the following cases win, but they don't lose either. */
8856 if (length == 8)
8857 {
8858 if (dbr_sequence_length () == 0)
8859 {
8860 /* Nothing in the delay slot, fake it by putting the combined
8861 insn (the copy or add) in the delay slot of a bl. */
8862 if (GET_CODE (operands[1]) == CONST_INT)
8863 return "b %2\n\tldi %1,%0";
8864 else
8865 return "b %2\n\tcopy %1,%0";
8866 }
8867 else
8868 {
8869 /* Something in the delay slot, but we've got a long branch. */
8870 if (GET_CODE (operands[1]) == CONST_INT)
8871 return "ldi %1,%0\n\tb %2";
8872 else
8873 return "copy %1,%0\n\tb %2";
8874 }
8875 }
8876
8877 if (GET_CODE (operands[1]) == CONST_INT)
8878 output_asm_insn ("ldi %1,%0", operands);
8879 else
8880 output_asm_insn ("copy %1,%0", operands);
8881 return pa_output_lbranch (operands[2], insn, 1);
8882 }
8883
8884 /* Output an unconditional add and branch insn. */
8885
8886 const char *
8887 pa_output_parallel_addb (rtx *operands, rtx insn)
8888 {
8889 int length = get_attr_length (insn);
8890
8891 /* To make life easy we want operands[0] to be the shared input/output
8892 operand and operands[1] to be the read-only operand. */
8893 if (operands[0] == operands[1])
8894 operands[1] = operands[2];
8895
8896 /* These are the cases in which we win. */
8897 if (length == 4)
8898 return "add%I1b,tr %1,%0,%3";
8899
8900 /* None of the following cases win, but they don't lose either. */
8901 if (length == 8)
8902 {
8903 if (dbr_sequence_length () == 0)
8904 /* Nothing in the delay slot, fake it by putting the combined
8905 insn (the copy or add) in the delay slot of a bl. */
8906 return "b %3\n\tadd%I1 %1,%0,%0";
8907 else
8908 /* Something in the delay slot, but we've got a long branch. */
8909 return "add%I1 %1,%0,%0\n\tb %3";
8910 }
8911
8912 output_asm_insn ("add%I1 %1,%0,%0", operands);
8913 return pa_output_lbranch (operands[3], insn, 1);
8914 }
8915
8916 /* Return nonzero if INSN (a jump insn) immediately follows a call
8917 to a named function. This is used to avoid filling the delay slot
8918 of the jump since it can usually be eliminated by modifying RP in
8919 the delay slot of the call. */
8920
8921 int
8922 pa_following_call (rtx insn)
8923 {
8924 if (! TARGET_JUMP_IN_DELAY)
8925 return 0;
8926
8927 /* Find the previous real insn, skipping NOTEs. */
8928 insn = PREV_INSN (insn);
8929 while (insn && NOTE_P (insn))
8930 insn = PREV_INSN (insn);
8931
8932 /* Check for CALL_INSNs and millicode calls. */
8933 if (insn
8934 && ((CALL_P (insn)
8935 && get_attr_type (insn) != TYPE_DYNCALL)
8936 || (NONJUMP_INSN_P (insn)
8937 && GET_CODE (PATTERN (insn)) != SEQUENCE
8938 && GET_CODE (PATTERN (insn)) != USE
8939 && GET_CODE (PATTERN (insn)) != CLOBBER
8940 && get_attr_type (insn) == TYPE_MILLI)))
8941 return 1;
8942
8943 return 0;
8944 }
8945
8946 /* We use this hook to perform a PA-specific optimization which is difficult
8947 to do in earlier passes.
8948
8949 We surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8950 insns. Those insns mark where we should emit .begin_brtab and
8951 .end_brtab directives when using GAS. This allows for better link
8952 time optimizations. */
8953
8954 static void
8955 pa_reorg (void)
8956 {
8957 rtx insn;
8958
8959 remove_useless_addtr_insns (1);
8960
8961 if (pa_cpu < PROCESSOR_8000)
8962 pa_combine_instructions ();
8963
8964 /* Still need brtab marker insns. FIXME: the presence of these
8965 markers disables output of the branch table to readonly memory,
8966 and any alignment directives that might be needed. Possibly,
8967 the begin_brtab insn should be output before the label for the
8968 table. This doesn't matter at the moment since the tables are
8969 always output in the text section. */
8970 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8971 {
8972 /* Find an ADDR_VEC insn. */
8973 if (! JUMP_TABLE_DATA_P (insn))
8974 continue;
8975
8976 /* Now generate markers for the beginning and end of the
8977 branch table. */
8978 emit_insn_before (gen_begin_brtab (), insn);
8979 emit_insn_after (gen_end_brtab (), insn);
8980 }
8981 }
8982
8983 /* The PA has a number of odd instructions which can perform multiple
8984 tasks at once. On first-generation PA machines (PA1.0 and PA1.1)
8985 it may be profitable to combine two instructions into one instruction
8986 with two outputs. It's not profitable on PA2.0 machines because
8987 two outputs would take two slots in the reorder buffers.
8988
8989 This routine finds instructions which can be combined and combines
8990 them. We only support some of the potential combinations, and we
8991 only try common ways to find suitable instructions.
8992
8993 * addb can add two registers or a register and a small integer
8994 and jump to a nearby (+-8k) location. Normally the jump to the
8995 nearby location is conditional on the result of the add, but by
8996 using the "true" condition we can make the jump unconditional.
8997 Thus addb can perform two independent operations in one insn.
8998
8999 * movb is similar to addb in that it can perform a reg->reg
9000 or small immediate->reg copy and jump to a nearby (+-8k) location.
9001
9002 * fmpyadd and fmpysub can perform a FP multiply and either an
9003 FP add or FP sub if the operands of the multiply and add/sub are
9004 independent (there are other minor restrictions). Note both
9005 the fmpy and fadd/fsub can in theory move to better spots according
9006 to data dependencies, but for now we require the fmpy stay at a
9007 fixed location.
9008
9009 * Many of the memory operations can perform pre & post updates
9010 of index registers. GCC's pre/post increment/decrement addressing
9011 is far too simple to take advantage of all the possibilities. This
9012 pass may not be suitable since those insns may not be independent.
9013
9014 * comclr can compare two integer registers, or a register and a
9015 small constant, nullify the following instruction and zero some
9016 other register. This is more difficult to use as it's harder to find an insn which
9017 will generate a comclr than finding something like an unconditional
9018 branch. (conditional moves & long branches create comclr insns).
9019
9020 * Most arithmetic operations can conditionally skip the next
9021 instruction. They can be viewed as "perform this operation
9022 and conditionally jump to this nearby location" (where nearby
9023 is an insn away). These are difficult to use due to the
9024 branch length restrictions. */
9025
9026 static void
9027 pa_combine_instructions (void)
9028 {
9029 rtx anchor, new_rtx;
9030
9031 /* This can get expensive since the basic algorithm is
9032 O(n^2) or worse. Only do it for -O2 or higher
9033 levels of optimization. */
9034 if (optimize < 2)
9035 return;
9036
9037 /* Walk down the list of insns looking for "anchor" insns which
9038 may be combined with "floating" insns. As the name implies,
9039 "anchor" instructions don't move, while "floating" insns may
9040 move around. */
9041 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9042 new_rtx = make_insn_raw (new_rtx);
9043
9044 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9045 {
9046 enum attr_pa_combine_type anchor_attr;
9047 enum attr_pa_combine_type floater_attr;
9048
9049 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9050 Also ignore any special USE insns. */
9051 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9052 || GET_CODE (PATTERN (anchor)) == USE
9053 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9054 continue;
9055
9056 anchor_attr = get_attr_pa_combine_type (anchor);
9057 /* See if anchor is an insn suitable for combination. */
9058 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9059 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9060 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9061 && ! forward_branch_p (anchor)))
9062 {
9063 rtx floater;
9064
9065 for (floater = PREV_INSN (anchor);
9066 floater;
9067 floater = PREV_INSN (floater))
9068 {
9069 if (NOTE_P (floater)
9070 || (NONJUMP_INSN_P (floater)
9071 && (GET_CODE (PATTERN (floater)) == USE
9072 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9073 continue;
9074
9075 /* Anything except a regular INSN will stop our search. */
9076 if (! NONJUMP_INSN_P (floater))
9077 {
9078 floater = NULL_RTX;
9079 break;
9080 }
9081
9082 /* See if FLOATER is suitable for combination with the
9083 anchor. */
9084 floater_attr = get_attr_pa_combine_type (floater);
9085 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9086 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9087 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9088 && floater_attr == PA_COMBINE_TYPE_FMPY))
9089 {
9090 /* If ANCHOR and FLOATER can be combined, then we're
9091 done with this pass. */
9092 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9093 SET_DEST (PATTERN (floater)),
9094 XEXP (SET_SRC (PATTERN (floater)), 0),
9095 XEXP (SET_SRC (PATTERN (floater)), 1)))
9096 break;
9097 }
9098
9099 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9100 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9101 {
9102 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9103 {
9104 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9105 SET_DEST (PATTERN (floater)),
9106 XEXP (SET_SRC (PATTERN (floater)), 0),
9107 XEXP (SET_SRC (PATTERN (floater)), 1)))
9108 break;
9109 }
9110 else
9111 {
9112 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9113 SET_DEST (PATTERN (floater)),
9114 SET_SRC (PATTERN (floater)),
9115 SET_SRC (PATTERN (floater))))
9116 break;
9117 }
9118 }
9119 }
9120
9121 /* If we didn't find anything on the backwards scan try forwards. */
9122 if (!floater
9123 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9124 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9125 {
9126 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9127 {
9128 if (NOTE_P (floater)
9129 || (NONJUMP_INSN_P (floater)
9130 && (GET_CODE (PATTERN (floater)) == USE
9131 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9133 continue;
9134
9135 /* Anything except a regular INSN will stop our search. */
9136 if (! NONJUMP_INSN_P (floater))
9137 {
9138 floater = NULL_RTX;
9139 break;
9140 }
9141
9142 /* See if FLOATER is suitable for combination with the
9143 anchor. */
9144 floater_attr = get_attr_pa_combine_type (floater);
9145 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9146 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9147 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9148 && floater_attr == PA_COMBINE_TYPE_FMPY))
9149 {
9150 /* If ANCHOR and FLOATER can be combined, then we're
9151 done with this pass. */
9152 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9153 SET_DEST (PATTERN (floater)),
9154 XEXP (SET_SRC (PATTERN (floater)),
9155 0),
9156 XEXP (SET_SRC (PATTERN (floater)),
9157 1)))
9158 break;
9159 }
9160 }
9161 }
9162
9163 /* FLOATER will be nonzero if we found a suitable floating
9164 insn for combination with ANCHOR. */
9165 if (floater
9166 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9167 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9168 {
9169 /* Emit the new instruction and delete the old anchor. */
9170 emit_insn_before (gen_rtx_PARALLEL
9171 (VOIDmode,
9172 gen_rtvec (2, PATTERN (anchor),
9173 PATTERN (floater))),
9174 anchor);
9175
9176 SET_INSN_DELETED (anchor);
9177
9178 /* Emit a special USE insn for FLOATER, then delete
9179 the floating insn. */
9180 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9181 delete_insn (floater);
9182
9183 continue;
9184 }
9185 else if (floater
9186 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9187 {
9188 rtx temp;
9189 /* Emit the new jump instruction and delete the old anchor. */
9190 temp
9191 = emit_jump_insn_before (gen_rtx_PARALLEL
9192 (VOIDmode,
9193 gen_rtvec (2, PATTERN (anchor),
9194 PATTERN (floater))),
9195 anchor);
9196
9197 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9198 SET_INSN_DELETED (anchor);
9199
9200 /* Emit a special USE insn for FLOATER, then delete
9201 the floating insn. */
9202 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9203 delete_insn (floater);
9204 continue;
9205 }
9206 }
9207 }
9208 }
9209
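/* For concreteness, the combined insn built by the pass above has a
   shape along the following lines (register numbers are purely
   illustrative):

     (parallel [(set (reg:DF f6) (mult:DF (reg:DF f4) (reg:DF f5)))
                (set (reg:DF f9) (plus:DF (reg:DF f9) (reg:DF f7)))])

   i.e. the patterns of the anchor and the floater side by side.  If
   this PARALLEL is recognized by one of the two-output
   fmpyadd/fmpysub patterns in pa.md, it is emitted as a single
   instruction.  */
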
9210 static int
9211 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9212 rtx src1, rtx src2)
9213 {
9214 int insn_code_number;
9215 rtx start, end;
9216
9217 /* Create a PARALLEL with the patterns of ANCHOR and
9218 FLOATER, try to recognize it, then test constraints
9219 for the resulting pattern.
9220
9221 If the pattern doesn't match or the constraints
9222 aren't met keep searching for a suitable floater
9223 insn. */
9224 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9225 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9226 INSN_CODE (new_rtx) = -1;
9227 insn_code_number = recog_memoized (new_rtx);
9228 if (insn_code_number < 0
9229 || (extract_insn (new_rtx), ! constrain_operands (1)))
9230 return 0;
9231
9232 if (reversed)
9233 {
9234 start = anchor;
9235 end = floater;
9236 }
9237 else
9238 {
9239 start = floater;
9240 end = anchor;
9241 }
9242
9243 /* There are up to three operands to consider: one
9244 output and two inputs.
9245
9246 The output must not be used between FLOATER & ANCHOR
9247 exclusive. The inputs must not be set between
9248 FLOATER and ANCHOR exclusive. */
9249
9250 if (reg_used_between_p (dest, start, end))
9251 return 0;
9252
9253 if (reg_set_between_p (src1, start, end))
9254 return 0;
9255
9256 if (reg_set_between_p (src2, start, end))
9257 return 0;
9258
9259 /* If we get here, then everything is good. */
9260 return 1;
9261 }
9262
9263 /* Return nonzero if references for INSN are delayed.
9264
9265 Millicode insns are actually function calls with some special
9266 constraints on arguments and register usage.
9267
9268 Millicode calls always expect their arguments in the integer argument
9269 registers, and always return their result in %r29 (ret1). They
9270 are expected to clobber their arguments, %r1, %r29, and the return
9271 pointer (which is %r31 on 32-bit and %r2 on 64-bit), and nothing else.
9272
9273 This function tells reorg that the references to arguments and
9274 millicode calls do not appear to happen until after the millicode call.
9275 This allows reorg to put insns which set the argument registers into the
9276 delay slot of the millicode call -- thus they act more like traditional
9277 CALL_INSNs.
9278
9279 Note we cannot consider side effects of the insn to be delayed because
9280 the branch and link insn will clobber the return pointer. If we happened
9281 to use the return pointer in the delay slot of the call, then we lose.
9282
9283 get_attr_type will try to recognize the given insn, so make sure to
9284 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9285 in particular. */
9286 int
9287 pa_insn_refs_are_delayed (rtx insn)
9288 {
9289 return ((NONJUMP_INSN_P (insn)
9290 && GET_CODE (PATTERN (insn)) != SEQUENCE
9291 && GET_CODE (PATTERN (insn)) != USE
9292 && GET_CODE (PATTERN (insn)) != CLOBBER
9293 && get_attr_type (insn) == TYPE_MILLI));
9294 }
9295
9296 /* Promote the return value, but not the arguments. */
9297
9298 static enum machine_mode
9299 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9300 enum machine_mode mode,
9301 int *punsignedp ATTRIBUTE_UNUSED,
9302 const_tree fntype ATTRIBUTE_UNUSED,
9303 int for_return)
9304 {
9305 if (for_return == 0)
9306 return mode;
9307 return promote_mode (type, mode, punsignedp);
9308 }
9309
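/* For example, for a function declared "short f (short x)" the
   return value is promoted and comes back in word_mode (SImode on
   the 32-bit port), while the argument X is left in HImode as far
   as this hook is concerned.  */
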
9310 /* On the HP-PA the value is found in register(s) 28(-29), unless
9311 the mode is SF or DF. Then the value is returned in fr4 (32).
9312
9313 This must perform the same promotions as PROMOTE_MODE, else promoting
9314 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9315
9316 Small structures must be returned in a PARALLEL on PA64 in order
9317 to match the HP Compiler ABI. */
9318
9319 static rtx
9320 pa_function_value (const_tree valtype,
9321 const_tree func ATTRIBUTE_UNUSED,
9322 bool outgoing ATTRIBUTE_UNUSED)
9323 {
9324 enum machine_mode valmode;
9325
9326 if (AGGREGATE_TYPE_P (valtype)
9327 || TREE_CODE (valtype) == COMPLEX_TYPE
9328 || TREE_CODE (valtype) == VECTOR_TYPE)
9329 {
9330 if (TARGET_64BIT)
9331 {
9332 /* Aggregates with a size less than or equal to 128 bits are
9333 returned in GR 28(-29). They are left justified. The pad
9334 bits are undefined. Larger aggregates are returned in
9335 memory. */
9336 rtx loc[2];
9337 int i, offset = 0;
9338 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9339
9340 for (i = 0; i < ub; i++)
9341 {
9342 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9343 gen_rtx_REG (DImode, 28 + i),
9344 GEN_INT (offset));
9345 offset += 8;
9346 }
9347
9348 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9349 }
9350 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9351 {
9352 /* Aggregates 5 to 8 bytes in size are returned in general
9353 registers r28-r29 in the same manner as other non
9354 floating-point objects. The data is right-justified and
9355 zero-extended to 64 bits. This is opposite to the normal
9356 justification used on big endian targets and requires
9357 special treatment. */
9358 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9359 gen_rtx_REG (DImode, 28), const0_rtx);
9360 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9361 }
9362 }
9363
9364 if ((INTEGRAL_TYPE_P (valtype)
9365 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9366 || POINTER_TYPE_P (valtype))
9367 valmode = word_mode;
9368 else
9369 valmode = TYPE_MODE (valtype);
9370
9371 if (TREE_CODE (valtype) == REAL_TYPE
9372 && !AGGREGATE_TYPE_P (valtype)
9373 && TYPE_MODE (valtype) != TFmode
9374 && !TARGET_SOFT_FLOAT)
9375 return gen_rtx_REG (valmode, 32);
9376
9377 return gen_rtx_REG (valmode, 28);
9378 }
9379
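/* Two illustrative cases for pa_function_value:

   - a 12-byte struct on PA64 is returned left justified in a
     PARALLEL of (reg:DI 28) at offset 0 and (reg:DI 29) at
     offset 8;

   - a double on the 32-bit port with hardware FP is returned in
     (reg:DF 32), i.e. %fr4.  */
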
9380 /* Implement the TARGET_LIBCALL_VALUE hook. */
9381
9382 static rtx
9383 pa_libcall_value (enum machine_mode mode,
9384 const_rtx fun ATTRIBUTE_UNUSED)
9385 {
9386 if (! TARGET_SOFT_FLOAT
9387 && (mode == SFmode || mode == DFmode))
9388 return gen_rtx_REG (mode, 32);
9389 else
9390 return gen_rtx_REG (mode, 28);
9391 }
9392
9393 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9394
9395 static bool
9396 pa_function_value_regno_p (const unsigned int regno)
9397 {
9398 if (regno == 28
9399 || (! TARGET_SOFT_FLOAT && regno == 32))
9400 return true;
9401
9402 return false;
9403 }
9404
9405 /* Update the data in CUM to advance over an argument
9406 of mode MODE and data type TYPE.
9407 (TYPE is null for libcalls where that information may not be available.) */
9408
9409 static void
9410 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9411 const_tree type, bool named ATTRIBUTE_UNUSED)
9412 {
9413 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9414 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9415
9416 cum->nargs_prototype--;
9417 cum->words += (arg_size
9418 + ((cum->words & 01)
9419 && type != NULL_TREE
9420 && arg_size > 1));
9421 }
9422
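/* A worked example of the advance above on the 32-bit port: with
   cum->words == 3, passing a double (arg_size == 2) inserts a pad
   word to reach an even slot, so cum->words becomes 3 + 2 + 1 = 6.  */
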
9423 /* Return the location of a parameter that is passed in a register or NULL
9424 if the parameter has any component that is passed in memory.
9425
9426 This is new code and will be pushed into the net sources after
9427 further testing.
9428
9429 ??? We might want to restructure this so that it looks more like other
9430 ports. */
9431 static rtx
9432 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9433 const_tree type, bool named ATTRIBUTE_UNUSED)
9434 {
9435 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9436 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9437 int alignment = 0;
9438 int arg_size;
9439 int fpr_reg_base;
9440 int gpr_reg_base;
9441 rtx retval;
9442
9443 if (mode == VOIDmode)
9444 return NULL_RTX;
9445
9446 arg_size = FUNCTION_ARG_SIZE (mode, type);
9447
9448 /* If this arg would be passed partially or totally on the stack, then
9449 this routine should return zero. pa_arg_partial_bytes will
9450 handle arguments which are split between regs and stack slots if
9451 the ABI mandates split arguments. */
9452 if (!TARGET_64BIT)
9453 {
9454 /* The 32-bit ABI does not split arguments. */
9455 if (cum->words + arg_size > max_arg_words)
9456 return NULL_RTX;
9457 }
9458 else
9459 {
9460 if (arg_size > 1)
9461 alignment = cum->words & 1;
9462 if (cum->words + alignment >= max_arg_words)
9463 return NULL_RTX;
9464 }
9465
9466 /* The 32-bit and 64-bit ABIs are rather different,
9467 particularly in their handling of FP registers. We might
9468 be able to cleverly share code between them, but I'm not
9469 going to bother in the hope that splitting them up results
9470 in code that is more easily understood. */
9471
9472 if (TARGET_64BIT)
9473 {
9474 /* Advance the base registers to their current locations.
9475
9476 Remember, gprs grow towards smaller register numbers while
9477 fprs grow to higher register numbers. Also remember that
9478 although FP regs are 32-bit addressable, we pretend that
9479 the registers are 64-bits wide. */
9480 gpr_reg_base = 26 - cum->words;
9481 fpr_reg_base = 32 + cum->words;
9482
9483 /* Arguments wider than one word and small aggregates need special
9484 treatment. */
9485 if (arg_size > 1
9486 || mode == BLKmode
9487 || (type && (AGGREGATE_TYPE_P (type)
9488 || TREE_CODE (type) == COMPLEX_TYPE
9489 || TREE_CODE (type) == VECTOR_TYPE)))
9490 {
9491 /* Double-extended precision (80-bit), quad-precision (128-bit)
9492 and aggregates including complex numbers are aligned on
9493 128-bit boundaries. The first eight 64-bit argument slots
9494 are associated one-to-one, with general registers r26
9495 through r19, and also with floating-point registers fr4
9496 through fr11. Arguments larger than one word are always
9497 passed in general registers.
9498
9499 Using a PARALLEL with a word mode register results in left
9500 justified data on a big-endian target. */
9501
9502 rtx loc[8];
9503 int i, offset = 0, ub = arg_size;
9504
9505 /* Align the base register. */
9506 gpr_reg_base -= alignment;
9507
9508 ub = MIN (ub, max_arg_words - cum->words - alignment);
9509 for (i = 0; i < ub; i++)
9510 {
9511 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9512 gen_rtx_REG (DImode, gpr_reg_base),
9513 GEN_INT (offset));
9514 gpr_reg_base -= 1;
9515 offset += 8;
9516 }
9517
9518 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9519 }
9520 }
9521 else
9522 {
9523 /* If the argument is larger than a word, then we know precisely
9524 which registers we must use. */
9525 if (arg_size > 1)
9526 {
9527 if (cum->words)
9528 {
9529 gpr_reg_base = 23;
9530 fpr_reg_base = 38;
9531 }
9532 else
9533 {
9534 gpr_reg_base = 25;
9535 fpr_reg_base = 34;
9536 }
9537
9538 /* Structures 5 to 8 bytes in size are passed in the general
9539 registers in the same manner as other non floating-point
9540 objects. The data is right-justified and zero-extended
9541 to 64 bits. This is opposite to the normal justification
9542 used on big endian targets and requires special treatment.
9543 We now define BLOCK_REG_PADDING to pad these objects.
9544 Aggregates, complex and vector types are passed in the same
9545 manner as structures. */
9546 if (mode == BLKmode
9547 || (type && (AGGREGATE_TYPE_P (type)
9548 || TREE_CODE (type) == COMPLEX_TYPE
9549 || TREE_CODE (type) == VECTOR_TYPE)))
9550 {
9551 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9552 gen_rtx_REG (DImode, gpr_reg_base),
9553 const0_rtx);
9554 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9555 }
9556 }
9557 else
9558 {
9559 /* We have a single word (32 bits). A simple computation
9560 will get us the register #s we need. */
9561 gpr_reg_base = 26 - cum->words;
9562 fpr_reg_base = 32 + 2 * cum->words;
9563 }
9564 }
9565
9566 /* Determine if the argument needs to be passed in both general and
9567 floating point registers. */
9568 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9569 /* If we are doing soft-float with portable runtime, then there
9570 is no need to worry about FP regs. */
9571 && !TARGET_SOFT_FLOAT
9572 /* The parameter must be some kind of scalar float, else we just
9573 pass it in integer registers. */
9574 && GET_MODE_CLASS (mode) == MODE_FLOAT
9575 /* The target function must not have a prototype. */
9576 && cum->nargs_prototype <= 0
9577 /* libcalls do not need to pass items in both FP and general
9578 registers. */
9579 && type != NULL_TREE
9580 /* All this hair applies to "outgoing" args only. This includes
9581 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9582 && !cum->incoming)
9583 /* Also pass outgoing floating arguments in both registers in indirect
9584 calls with the 32-bit ABI and the HP assembler since there is no
9585 way to specify argument locations in static functions. */
9586 || (!TARGET_64BIT
9587 && !TARGET_GAS
9588 && !cum->incoming
9589 && cum->indirect
9590 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9591 {
9592 retval
9593 = gen_rtx_PARALLEL
9594 (mode,
9595 gen_rtvec (2,
9596 gen_rtx_EXPR_LIST (VOIDmode,
9597 gen_rtx_REG (mode, fpr_reg_base),
9598 const0_rtx),
9599 gen_rtx_EXPR_LIST (VOIDmode,
9600 gen_rtx_REG (mode, gpr_reg_base),
9601 const0_rtx)));
9602 }
9603 else
9604 {
9605 /* See if we should pass this parameter in a general register. */
9606 if (TARGET_SOFT_FLOAT
9607 /* Indirect calls in the normal 32-bit ABI require all arguments
9608 to be passed in general registers. */
9609 || (!TARGET_PORTABLE_RUNTIME
9610 && !TARGET_64BIT
9611 && !TARGET_ELF32
9612 && cum->indirect)
9613 /* If the parameter is not a scalar floating-point parameter,
9614 then it belongs in GPRs. */
9615 || GET_MODE_CLASS (mode) != MODE_FLOAT
9616 /* A structure with a single SFmode field belongs in a GPR. */
9617 || (type && AGGREGATE_TYPE_P (type)))
9618 retval = gen_rtx_REG (mode, gpr_reg_base);
9619 else
9620 retval = gen_rtx_REG (mode, fpr_reg_base);
9621 }
9622 return retval;
9623 }
9624
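/* Illustrative results on the 32-bit port, assuming a direct,
   prototyped call with hardware FP: the first int argument lands in
   (reg:SI 26), i.e. %r26, while a double passed as the first
   argument lands in (reg:DF 34) (fpr_reg_base == 34), i.e. %fr5.  */
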
9625 /* Arguments larger than one word are double word aligned. */
9626
9627 static unsigned int
9628 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9629 {
9630 bool singleword = (type
9631 ? (integer_zerop (TYPE_SIZE (type))
9632 || !TREE_CONSTANT (TYPE_SIZE (type))
9633 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9634 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9635
9636 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9637 }
9638
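/* For example, on the 32-bit port an int argument is aligned to
   PARM_BOUNDARY while a double (8 bytes > UNITS_PER_WORD) is
   aligned to MAX_PARM_BOUNDARY, i.e. to a double word.  */
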
9639 /* If this arg would be passed totally in registers or totally on the stack,
9640 then this routine should return zero. */
9641
9642 static int
9643 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9644 tree type, bool named ATTRIBUTE_UNUSED)
9645 {
9646 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9647 unsigned int max_arg_words = 8;
9648 unsigned int offset = 0;
9649
9650 if (!TARGET_64BIT)
9651 return 0;
9652
9653 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9654 offset = 1;
9655
9656 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9657 /* Arg fits fully into registers. */
9658 return 0;
9659 else if (cum->words + offset >= max_arg_words)
9660 /* Arg fully on the stack. */
9661 return 0;
9662 else
9663 /* Arg is split. */
9664 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9665 }
9666
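/* A worked example of the split case above on the 64-bit port:
   with cum->words == 6 and a 32-byte aggregate (4 words, no pad
   word since 6 is even), 6 + 4 > 8 and 6 < 8, so
   (8 - 6) * UNITS_PER_WORD = 16 bytes go in registers and the
   remainder goes on the stack.  */
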
9667
9668 /* A get_unnamed_section callback for switching to the text section.
9669
9670 This function is only used with SOM. Because we don't support
9671 named subspaces, we can only create a new subspace or switch back
9672 to the default text subspace. */
9673
9674 static void
9675 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9676 {
9677 gcc_assert (TARGET_SOM);
9678 if (TARGET_GAS)
9679 {
9680 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9681 {
9682 /* We only want to emit a .nsubspa directive once at the
9683 start of the function. */
9684 cfun->machine->in_nsubspa = 1;
9685
9686 /* Create a new subspace for the text. This provides
9687 better stub placement and one-only functions. */
9688 if (cfun->decl
9689 && DECL_ONE_ONLY (cfun->decl)
9690 && !DECL_WEAK (cfun->decl))
9691 {
9692 output_section_asm_op ("\t.SPACE $TEXT$\n"
9693 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9694 "ACCESS=44,SORT=24,COMDAT");
9695 return;
9696 }
9697 }
9698 else
9699 {
9700 /* There isn't a current function or the body of the current
9701 function has been completed. So, we are changing to the
9702 text section to output debugging information. Thus, we
9703 need to forget that we are in the text section so that
9704 varasm.c will call us when text_section is selected again. */
9705 gcc_assert (!cfun || !cfun->machine
9706 || cfun->machine->in_nsubspa == 2);
9707 in_section = NULL;
9708 }
9709 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9710 return;
9711 }
9712 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9713 }
9714
9715 /* A get_unnamed_section callback for switching to comdat data
9716 sections. This function is only used with SOM. */
9717
9718 static void
9719 som_output_comdat_data_section_asm_op (const void *data)
9720 {
9721 in_section = NULL;
9722 output_section_asm_op (data);
9723 }
9724
9725 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9726
9727 static void
9728 pa_som_asm_init_sections (void)
9729 {
9730 text_section
9731 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9732
9733 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9734 is not being generated. */
9735 som_readonly_data_section
9736 = get_unnamed_section (0, output_section_asm_op,
9737 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9738
9739 /* When secondary definitions are not supported, SOM makes readonly
9740 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9741 the comdat flag. */
9742 som_one_only_readonly_data_section
9743 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9744 "\t.SPACE $TEXT$\n"
9745 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9746 "ACCESS=0x2c,SORT=16,COMDAT");
9747
9749 /* When secondary definitions are not supported, SOM makes data one-only
9750 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9751 som_one_only_data_section
9752 = get_unnamed_section (SECTION_WRITE,
9753 som_output_comdat_data_section_asm_op,
9754 "\t.SPACE $PRIVATE$\n"
9755 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9756 "ACCESS=31,SORT=24,COMDAT");
9757
9758 if (flag_tm)
9759 som_tm_clone_table_section
9760 = get_unnamed_section (0, output_section_asm_op,
9761 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9762
9763 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9764 which reference data within the $TEXT$ space (for example constant
9765 strings in the $LIT$ subspace).
9766
9767 The assemblers (GAS and HP as) both have problems with handling
9768 the difference of two symbols which is the other correct way to
9769 reference constant data during PIC code generation.
9770
9771 So, there's no way to reference constant data which is in the
9772 $TEXT$ space during PIC generation. Instead place all constant
9773 data into the $PRIVATE$ subspace (this reduces sharing, but it
9774 works correctly). */
9775 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9776
9777 /* We must not have a reference to an external symbol defined in a
9778 shared library in a readonly section, else the SOM linker will
9779 complain.
9780
9781 So, we force exception information into the data section. */
9782 exception_section = data_section;
9783 }
9784
9785 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9786
9787 static section *
9788 pa_som_tm_clone_table_section (void)
9789 {
9790 return som_tm_clone_table_section;
9791 }
9792
9793 /* On hpux10, the linker will give an error if we have a reference
9794 in the read-only data section to a symbol defined in a shared
9795 library. Therefore, expressions that might require a reloc
9796 cannot be placed in the read-only data section. */
9797
9798 static section *
9799 pa_select_section (tree exp, int reloc,
9800 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9801 {
9802 if (TREE_CODE (exp) == VAR_DECL
9803 && TREE_READONLY (exp)
9804 && !TREE_THIS_VOLATILE (exp)
9805 && DECL_INITIAL (exp)
9806 && (DECL_INITIAL (exp) == error_mark_node
9807 || TREE_CONSTANT (DECL_INITIAL (exp)))
9808 && !reloc)
9809 {
9810 if (TARGET_SOM
9811 && DECL_ONE_ONLY (exp)
9812 && !DECL_WEAK (exp))
9813 return som_one_only_readonly_data_section;
9814 else
9815 return readonly_data_section;
9816 }
9817 else if (CONSTANT_CLASS_P (exp) && !reloc)
9818 return readonly_data_section;
9819 else if (TARGET_SOM
9820 && TREE_CODE (exp) == VAR_DECL
9821 && DECL_ONE_ONLY (exp)
9822 && !DECL_WEAK (exp))
9823 return som_one_only_data_section;
9824 else
9825 return data_section;
9826 }
9827
9828 static void
9829 pa_globalize_label (FILE *stream, const char *name)
9830 {
9831 /* We only handle DATA objects here, functions are globalized in
9832 ASM_DECLARE_FUNCTION_NAME. */
9833 if (! FUNCTION_NAME_P (name))
9834 {
9835 fputs ("\t.EXPORT ", stream);
9836 assemble_name (stream, name);
9837 fputs (",DATA\n", stream);
9838 }
9839 }
9840
9841 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9842
9843 static rtx
9844 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9845 int incoming ATTRIBUTE_UNUSED)
9846 {
9847 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9848 }
9849
9850 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9851
9852 bool
9853 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9854 {
9855 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9856 PA64 ABI says that objects larger than 128 bits are returned in memory.
9857 Note, int_size_in_bytes can return -1 if the size of the object is
9858 variable or larger than the maximum value that can be expressed as
9859 a HOST_WIDE_INT. It can also return zero for an empty type. The
9860 simplest way to handle variable and empty types is to return them in
9861 memory. This avoids problems in defining the boundaries of argument
9862 slots, allocating registers, etc. */
9863 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9864 || int_size_in_bytes (type) <= 0);
9865 }
9866
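/* For example, a 12-byte struct is returned in memory on the 32-bit
   port (12 > 8) but in registers on PA64 (12 <= 16), and a
   variable-sized type (int_size_in_bytes == -1) is always returned
   in memory.  */
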
9867 /* Structure to hold declaration and name of external symbols that are
9868 emitted by GCC. We generate a vector of these symbols and output them
9869 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9870 This avoids putting out names that are never really used. */
9871
9872 typedef struct GTY(()) extern_symbol
9873 {
9874 tree decl;
9875 const char *name;
9876 } extern_symbol;
9877
9878 /* Define gc'd vector type for extern_symbol. */
9879
9880 /* Vector of extern_symbol pointers. */
9881 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9882
9883 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9884 /* Mark DECL (name NAME) as an external reference (assembler output
9885 file FILE). This saves the names to output at the end of the file
9886 if actually referenced. */
9887
9888 void
9889 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9890 {
9891 gcc_assert (file == asm_out_file);
9892 extern_symbol p = {decl, name};
9893 vec_safe_push (extern_symbols, p);
9894 }
9895
9896 /* Output text required at the end of an assembler file.
9897 This includes deferred plabels and .import directives for
9898 all external symbols that were actually referenced. */
9899
9900 static void
9901 pa_hpux_file_end (void)
9902 {
9903 unsigned int i;
9904 extern_symbol *p;
9905
9906 if (!NO_DEFERRED_PROFILE_COUNTERS)
9907 output_deferred_profile_counters ();
9908
9909 output_deferred_plabels ();
9910
9911 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9912 {
9913 tree decl = p->decl;
9914
9915 if (!TREE_ASM_WRITTEN (decl)
9916 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9917 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9918 }
9919
9920 vec_free (extern_symbols);
9921 }
9922 #endif
9923
9924 /* Return true if a change from mode FROM to mode TO for a register
9925 in register class RCLASS is invalid. */
9926
9927 bool
9928 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9929 enum reg_class rclass)
9930 {
9931 if (from == to)
9932 return false;
9933
9934 /* Reject changes to/from complex and vector modes. */
9935 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9936 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9937 return true;
9938
9939 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9940 return false;
9941
9942 /* There is no way to load QImode or HImode values directly from
9943 memory. SImode loads to the FP registers are not zero extended.
9944 On the 64-bit target, this conflicts with the definition of
9945 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9946 with different sizes in the floating-point registers. */
9947 if (MAYBE_FP_REG_CLASS_P (rclass))
9948 return true;
9949
9950 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9951 in specific sets of registers. Thus, we cannot allow changing
9952 to a larger mode when it's larger than a word. */
9953 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9954 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9955 return true;
9956
9957 return false;
9958 }
9959
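/* For example, a size-changing subreg such as (subreg:SI (reg:DF))
   is rejected for the floating-point classes, while taking the low
   SImode word of a DImode general register remains valid on the
   32-bit port since SImode is no wider than a word.  */
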
9960 /* Returns TRUE if it is a good idea to tie two pseudo registers
9961 when one has mode MODE1 and one has mode MODE2.
9962 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9963 for any hard reg, then this must be FALSE for correct output.
9964
9965 We should return FALSE for QImode and HImode because these modes
9966 are not ok in the floating-point registers. However, this prevents
9967 tieing these modes to SImode and DImode in the general registers.
9968 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9969 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9970 in the floating-point registers. */
9971
9972 bool
9973 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9974 {
9975 /* Don't tie modes in different classes. */
9976 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9977 return false;
9978
9979 return true;
9980 }
9981
9982 \f
9983 /* Length in units of the trampoline instruction code. */
9984
9985 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9986
9987
9988 /* Output assembler code for a block containing the constant parts
9989 of a trampoline, leaving space for the variable parts.
9990
9991 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9992 and then branches to the specified routine.
9993
9994 This code template is copied from the text segment to a stack
9995 location and then patched by pa_trampoline_init to contain valid
9996 values before being entered as a subroutine.
9997
9998 It is best to keep this as small as possible to avoid having to
9999 flush multiple lines in the cache. */
10000
10001 static void
10002 pa_asm_trampoline_template (FILE *f)
10003 {
10004 if (!TARGET_64BIT)
10005 {
10006 fputs ("\tldw 36(%r22),%r21\n", f);
10007 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10008 if (ASSEMBLER_DIALECT == 0)
10009 fputs ("\tdepi 0,31,2,%r21\n", f);
10010 else
10011 fputs ("\tdepwi 0,31,2,%r21\n", f);
10012 fputs ("\tldw 4(%r21),%r19\n", f);
10013 fputs ("\tldw 0(%r21),%r21\n", f);
10014 if (TARGET_PA_20)
10015 {
10016 fputs ("\tbve (%r21)\n", f);
10017 fputs ("\tldw 40(%r22),%r29\n", f);
10018 fputs ("\t.word 0\n", f);
10019 fputs ("\t.word 0\n", f);
10020 }
10021 else
10022 {
10023 fputs ("\tldsid (%r21),%r1\n", f);
10024 fputs ("\tmtsp %r1,%sr0\n", f);
10025 fputs ("\tbe 0(%sr0,%r21)\n", f);
10026 fputs ("\tldw 40(%r22),%r29\n", f);
10027 }
10028 fputs ("\t.word 0\n", f);
10029 fputs ("\t.word 0\n", f);
10030 fputs ("\t.word 0\n", f);
10031 fputs ("\t.word 0\n", f);
10032 }
10033 else
10034 {
10035 fputs ("\t.dword 0\n", f);
10036 fputs ("\t.dword 0\n", f);
10037 fputs ("\t.dword 0\n", f);
10038 fputs ("\t.dword 0\n", f);
10039 fputs ("\tmfia %r31\n", f);
10040 fputs ("\tldd 24(%r31),%r1\n", f);
10041 fputs ("\tldd 24(%r1),%r27\n", f);
10042 fputs ("\tldd 16(%r1),%r1\n", f);
10043 fputs ("\tbve (%r1)\n", f);
10044 fputs ("\tldd 32(%r31),%r31\n", f);
10045 fputs ("\t.dword 0 ; fptr\n", f);
10046 fputs ("\t.dword 0 ; static link\n", f);
10047 }
10048 }
10049
10050 /* Emit RTL insns to initialize the variable parts of a trampoline.
10051 FNADDR is an RTX for the address of the function's pure code.
10052 CXT is an RTX for the static chain value for the function.
10053
10054 Move the function address to the trampoline template at offset 36.
10055 Move the static chain value to trampoline template at offset 40.
10056 Move the trampoline address to trampoline template at offset 44.
10057 Move r19 to trampoline template at offset 48. The latter two
10058 words create a plabel for the indirect call to the trampoline.
10059
10060 A similar sequence is used for the 64-bit port but the plabel is
10061 at the beginning of the trampoline.
10062
10063 Finally, the cache entries for the trampoline code are flushed.
10064 This is necessary to ensure that the trampoline instruction sequence
10065 is written to memory prior to any attempts at prefetching the code
10066 sequence. */
10067
10068 static void
10069 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10070 {
10071 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10072 rtx start_addr = gen_reg_rtx (Pmode);
10073 rtx end_addr = gen_reg_rtx (Pmode);
10074 rtx line_length = gen_reg_rtx (Pmode);
10075 rtx r_tramp, tmp;
10076
10077 emit_block_move (m_tramp, assemble_trampoline_template (),
10078 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10079 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10080
10081 if (!TARGET_64BIT)
10082 {
10083 tmp = adjust_address (m_tramp, Pmode, 36);
10084 emit_move_insn (tmp, fnaddr);
10085 tmp = adjust_address (m_tramp, Pmode, 40);
10086 emit_move_insn (tmp, chain_value);
10087
10088 /* Create a fat pointer for the trampoline. */
10089 tmp = adjust_address (m_tramp, Pmode, 44);
10090 emit_move_insn (tmp, r_tramp);
10091 tmp = adjust_address (m_tramp, Pmode, 48);
10092 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10093
10094 /* fdc and fic only use registers for the address to flush,
10095 they do not accept integer displacements. We align the
10096 start and end addresses to the beginning of their respective
10097 cache lines to minimize the number of lines flushed. */
10098 emit_insn (gen_andsi3 (start_addr, r_tramp,
10099 GEN_INT (-MIN_CACHELINE_SIZE)));
10100 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10101 TRAMPOLINE_CODE_SIZE-1));
10102 emit_insn (gen_andsi3 (end_addr, tmp,
10103 GEN_INT (-MIN_CACHELINE_SIZE)));
10104 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10105 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10106 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10107 gen_reg_rtx (Pmode),
10108 gen_reg_rtx (Pmode)));
10109 }
10110 else
10111 {
10112 tmp = adjust_address (m_tramp, Pmode, 56);
10113 emit_move_insn (tmp, fnaddr);
10114 tmp = adjust_address (m_tramp, Pmode, 64);
10115 emit_move_insn (tmp, chain_value);
10116
10117 /* Create a fat pointer for the trampoline. */
10118 tmp = adjust_address (m_tramp, Pmode, 16);
10119 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10120 r_tramp, 32)));
10121 tmp = adjust_address (m_tramp, Pmode, 24);
10122 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10123
10124 /* fdc and fic only use registers for the address to flush,
10125 they do not accept integer displacements. We align the
10126 start and end addresses to the beginning of their respective
10127 cache lines to minimize the number of lines flushed. */
10128 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10129 emit_insn (gen_anddi3 (start_addr, tmp,
10130 GEN_INT (-MIN_CACHELINE_SIZE)));
10131 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10132 TRAMPOLINE_CODE_SIZE - 1));
10133 emit_insn (gen_anddi3 (end_addr, tmp,
10134 GEN_INT (-MIN_CACHELINE_SIZE)));
10135 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10136 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10137 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10138 gen_reg_rtx (Pmode),
10139 gen_reg_rtx (Pmode)));
10140 }
10141
10142 #ifdef HAVE_ENABLE_EXECUTE_STACK
10143 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10144 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10145 #endif
10146 }
10147
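/* A worked example of the flush range above on the 32-bit PA 1.x
   port (TRAMPOLINE_CODE_SIZE == 40, MIN_CACHELINE_SIZE == 32), for
   a purely illustrative trampoline address of 0x7b001234:
   start_addr = 0x7b001234 & -32 = 0x7b001220 and
   end_addr = (0x7b001234 + 39) & -32 = 0x7b001240, so the flush
   loop touches the two cache lines holding the code template.  */
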
10148 /* Perform any machine-specific adjustment in the address of the trampoline.
10149 ADDR contains the address that was passed to pa_trampoline_init.
10150 Adjust the trampoline address to point to the plabel at offset 44. */
10151
10152 static rtx
10153 pa_trampoline_adjust_address (rtx addr)
10154 {
10155 if (!TARGET_64BIT)
10156 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10157 return addr;
10158 }
10159
10160 static rtx
10161 pa_delegitimize_address (rtx orig_x)
10162 {
10163 rtx x = delegitimize_mem_from_attrs (orig_x);
10164
10165 if (GET_CODE (x) == LO_SUM
10166 && GET_CODE (XEXP (x, 1)) == UNSPEC
10167 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10168 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10169 return x;
10170 }
10171 \f
10172 static rtx
10173 pa_internal_arg_pointer (void)
10174 {
10175 /* The argument pointer and the hard frame pointer are the same in
10176 the 32-bit runtime, so we don't need a copy. */
10177 if (TARGET_64BIT)
10178 return copy_to_reg (virtual_incoming_args_rtx);
10179 else
10180 return virtual_incoming_args_rtx;
10181 }
10182
10183 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10184 Frame pointer elimination is automatically handled. */
10185
10186 static bool
10187 pa_can_eliminate (const int from, const int to)
10188 {
10189 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10190 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10191 return false;
10192
10193 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10194 ? ! frame_pointer_needed
10195 : true);
10196 }
10197
10198 /* Define the offset between two registers, FROM to be eliminated and its
10199 replacement TO, at the start of a routine. */
10200 HOST_WIDE_INT
10201 pa_initial_elimination_offset (int from, int to)
10202 {
10203 HOST_WIDE_INT offset;
10204
10205 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10206 && to == STACK_POINTER_REGNUM)
10207 offset = -pa_compute_frame_size (get_frame_size (), 0);
10208 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10209 offset = 0;
10210 else
10211 gcc_unreachable ();
10212
10213 return offset;
10214 }
10215
10216 static void
10217 pa_conditional_register_usage (void)
10218 {
10219 int i;
10220
10221 if (!TARGET_64BIT && !TARGET_PA_11)
10222 {
10223 for (i = 56; i <= FP_REG_LAST; i++)
10224 fixed_regs[i] = call_used_regs[i] = 1;
10225 for (i = 33; i < 56; i += 2)
10226 fixed_regs[i] = call_used_regs[i] = 1;
10227 }
10228 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10229 {
10230 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10231 fixed_regs[i] = call_used_regs[i] = 1;
10232 }
10233 if (flag_pic)
10234 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10235 }
10236
10237 /* Target hook for c_mode_for_suffix. */
10238
10239 static enum machine_mode
10240 pa_c_mode_for_suffix (char suffix)
10241 {
10242 if (HPUX_LONG_DOUBLE_LIBRARY)
10243 {
10244 if (suffix == 'q')
10245 return TFmode;
10246 }
10247
10248 return VOIDmode;
10249 }
10250
10251 /* Target hook for function_section. */
10252
10253 static section *
10254 pa_function_section (tree decl, enum node_frequency freq,
10255 bool startup, bool exit)
10256 {
10257 /* Put functions in text section if target doesn't have named sections. */
10258 if (!targetm_common.have_named_sections)
10259 return text_section;
10260
10261 /* Force nested functions into the same section as the containing
10262 function. */
10263 if (decl
10264 && DECL_SECTION_NAME (decl) == NULL_TREE
10265 && DECL_CONTEXT (decl) != NULL_TREE
10266 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10267 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
10268 return function_section (DECL_CONTEXT (decl));
10269
10270 /* Otherwise, use the default function section. */
10271 return default_function_section (decl, freq, startup, exit);
10272 }
10273
10274 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10275
10276 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10277 that need more than three instructions to load prior to reload. This
10278 limit is somewhat arbitrary. It takes three instructions to load a
10279 CONST_INT from memory but two are memory accesses. It may be better
10280 to increase the allowed range for CONST_INTS. We may also be able
10281 to handle CONST_DOUBLES. */
10282
10283 static bool
10284 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10285 {
10286 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10287 return false;
10288
10289 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10290 return false;
10291
10292 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10293 legitimate constants. The other variants can't be handled by
10294 the move patterns after reload starts. */
10295 if (pa_tls_referenced_p (x))
10296 return false;
10297
10298 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10299 return false;
10300
10301 if (TARGET_64BIT
10302 && HOST_BITS_PER_WIDE_INT > 32
10303 && GET_CODE (x) == CONST_INT
10304 && !reload_in_progress
10305 && !reload_completed
10306 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10307 && !pa_cint_ok_for_move (INTVAL (x)))
10308 return false;
10309
10310 if (function_label_operand (x, mode))
10311 return false;
10312
10313 return true;
10314 }
10315
10316 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10317
10318 static unsigned int
10319 pa_section_type_flags (tree decl, const char *name, int reloc)
10320 {
10321 unsigned int flags;
10322
10323 flags = default_section_type_flags (decl, name, reloc);
10324
10325 /* Function labels are placed in the constant pool. This can
10326 cause a section conflict if decls are put in ".data.rel.ro"
10327 or ".data.rel.ro.local" using the __attribute__ construct. */
10328 if (strcmp (name, ".data.rel.ro") == 0
10329 || strcmp (name, ".data.rel.ro.local") == 0)
10330 flags |= SECTION_WRITE | SECTION_RELRO;
10331
10332 return flags;
10333 }
10334
10335 /* pa_legitimate_address_p recognizes an RTL expression that is a
10336 valid memory address for an instruction. The MODE argument is the
10337 machine mode for the MEM expression that wants to use this address.
10338
10339 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10340 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10341 available with floating point loads and stores, and integer loads.
10342 We get better code by allowing indexed addresses in the initial
10343 RTL generation.
10344
10345 The acceptance of indexed addresses as legitimate implies that we
10346 must provide patterns for doing indexed integer stores, or the move
10347 expanders must force the address of an indexed store to a register.
10348 We have adopted the latter approach.
10349
10350 Another function of pa_legitimate_address_p is to ensure that
10351 the base register is a valid pointer for indexed instructions.
10352 On targets that have non-equivalent space registers, we have to
10353 know at the time of assembler output which register in a REG+REG
10354 pair is the base register. The REG_POINTER flag is sometimes lost
10355 in reload and the following passes, so it can't be relied on during
10356 code generation. Thus, we either have to canonicalize the order
10357 of the registers in REG+REG indexed addresses, or treat REG+REG
10358 addresses separately and provide patterns for both permutations.
10359
10360 The latter approach requires several hundred additional lines of
10361 code in pa.md. The downside to canonicalizing is that a PLUS
10362 in the wrong order can't combine to make a scaled indexed
10363 memory operand. As we won't need to canonicalize the operands if
10364 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10365
10366 We initially break out scaled indexed addresses in canonical order
10367 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10368 scaled indexed addresses during RTL generation. However, fold_rtx
10369 has its own opinion on how the operands of a PLUS should be ordered.
10370 If one of the operands is equivalent to a constant, it will make
10371 that operand the second operand. As the base register is likely to
10372 be equivalent to a SYMBOL_REF, we have made it the second operand.
10373
10374 pa_legitimate_address_p accepts REG+REG as legitimate when the
10375 operands are in the order INDEX+BASE on targets with non-equivalent
10376 space registers, and in any order on targets with equivalent space
10377 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10378
10379 We treat a SYMBOL_REF as legitimate if it is part of the current
10380 function's constant-pool, because such addresses can actually be
10381 output as REG+SMALLINT. */
10382
10383 static bool
10384 pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
10385 {
10386 if ((REG_P (x)
10387 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10388 : REG_OK_FOR_BASE_P (x)))
10389 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10390 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10391 && REG_P (XEXP (x, 0))
10392 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10393 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10394 return true;
10395
10396 if (GET_CODE (x) == PLUS)
10397 {
10398 rtx base, index;
10399
10400 /* For REG+REG, the base register should be in XEXP (x, 1),
10401 so check it first. */
10402 if (REG_P (XEXP (x, 1))
10403 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10404 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10405 base = XEXP (x, 1), index = XEXP (x, 0);
10406 else if (REG_P (XEXP (x, 0))
10407 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10408 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10409 base = XEXP (x, 0), index = XEXP (x, 1);
10410 else
10411 return false;
10412
10413 if (GET_CODE (index) == CONST_INT)
10414 {
10415 if (INT_5_BITS (index))
10416 return true;
10417
10418 /* When INT14_OK_STRICT is false, a secondary reload is needed
10419 to adjust the displacement of SImode and DImode floating point
10420 instructions but this may fail when the register also needs
10421 reloading. So, we return false when STRICT is true. We
10422 also reject long displacements for float mode addresses since
10423 the majority of accesses will use floating point instructions
10424 that don't support 14-bit offsets. */
10425 if (!INT14_OK_STRICT
10426 && (strict || !(reload_in_progress || reload_completed))
10427 && mode != QImode
10428 && mode != HImode)
10429 return false;
10430
10431 return base14_operand (index, mode);
10432 }
10433
10434 if (!TARGET_DISABLE_INDEXING
10435 /* Only accept the "canonical" INDEX+BASE operand order
10436 on targets with non-equivalent space registers. */
10437 && (TARGET_NO_SPACE_REGS
10438 ? REG_P (index)
10439 : (base == XEXP (x, 1) && REG_P (index)
10440 && (reload_completed
10441 || (reload_in_progress && HARD_REGISTER_P (base))
10442 || REG_POINTER (base))
10443 && (reload_completed
10444 || (reload_in_progress && HARD_REGISTER_P (index))
10445 || !REG_POINTER (index))))
10446 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10447 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10448 : REG_OK_FOR_INDEX_P (index))
10449 && borx_reg_operand (base, Pmode)
10450 && borx_reg_operand (index, Pmode))
10451 return true;
10452
10453 if (!TARGET_DISABLE_INDEXING
10454 && GET_CODE (index) == MULT
10455 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10456 && REG_P (XEXP (index, 0))
10457 && GET_MODE (XEXP (index, 0)) == Pmode
10458 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10459 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10460 && GET_CODE (XEXP (index, 1)) == CONST_INT
10461 && INTVAL (XEXP (index, 1))
10462 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10463 && borx_reg_operand (base, Pmode))
10464 return true;
10465
10466 return false;
10467 }
10468
10469 if (GET_CODE (x) == LO_SUM)
10470 {
10471 rtx y = XEXP (x, 0);
10472
10473 if (GET_CODE (y) == SUBREG)
10474 y = SUBREG_REG (y);
10475
10476 if (REG_P (y)
10477 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10478 : REG_OK_FOR_BASE_P (y)))
10479 {
10480 /* Needed for -fPIC */
10481 if (mode == Pmode
10482 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10483 return true;
10484
10485 if (!INT14_OK_STRICT
10486 && (strict || !(reload_in_progress || reload_completed))
10487 && mode != QImode
10488 && mode != HImode)
10489 return false;
10490
10491 if (CONSTANT_P (XEXP (x, 1)))
10492 return true;
10493 }
10494 return false;
10495 }
10496
10497 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10498 return true;
10499
10500 return false;
10501 }
10502
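/* Some illustrative addresses accepted above, assuming suitable
   base and index registers:

     (plus (reg) (const_int 12))               short displacement
     (plus (reg index) (reg base))             unscaled index; the base
                                               must be the second operand
                                               when the space registers
                                               are not equivalent
     (plus (mult (reg) (const_int 8)) (reg))   scaled index for an
                                               8-byte mode
     (lo_sum (reg) (symbol_ref))               after hi/lo splitting  */
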
10503 /* Look for machine dependent ways to make the invalid address AD a
10504 valid address.
10505
10506 For the PA, transform:
10507
10508 memory(X + <large int>)
10509
10510 into:
10511
10512 if (<large int> & mask) >= (mask + 1) / 2
10513 Y = (<large int> & ~mask) + mask + 1 Round up.
10514 else
10515 Y = (<large int> & ~mask) Round down.
10516 Z = X + Y
10517 memory (Z + (<large int> - Y));
10518
10519 This makes reload inheritance and reload_cse work better since Z
10520 can be reused.
10521
10522 There may be more opportunities to improve code with this hook. */
10523
10524 rtx
10525 pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
10526 int opnum, int type,
10527 int ind_levels ATTRIBUTE_UNUSED)
10528 {
10529 long offset, newoffset, mask;
10530 rtx new_rtx, temp = NULL_RTX;
10531
10532 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10533 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10534
10535 if (optimize && GET_CODE (ad) == PLUS)
10536 temp = simplify_binary_operation (PLUS, Pmode,
10537 XEXP (ad, 0), XEXP (ad, 1));
10538
10539 new_rtx = temp ? temp : ad;
10540
10541 if (optimize
10542 && GET_CODE (new_rtx) == PLUS
10543 && GET_CODE (XEXP (new_rtx, 0)) == REG
10544 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10545 {
10546 offset = INTVAL (XEXP ((new_rtx), 1));
10547
10548 /* Choose rounding direction. Round up if we are >= halfway. */
10549 if ((offset & mask) >= ((mask + 1) / 2))
10550 newoffset = (offset & ~mask) + mask + 1;
10551 else
10552 newoffset = offset & ~mask;
10553
10554 /* Ensure that long displacements are aligned. */
10555 if (mask == 0x3fff
10556 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10557 || (TARGET_64BIT && (mode) == DImode)))
10558 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10559
10560 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10561 {
10562 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10563 GEN_INT (newoffset));
10564 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10565 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10566 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10567 opnum, (enum reload_type) type);
10568 return ad;
10569 }
10570 }
10571
10572 return NULL_RTX;
10573 }
10574
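/* A worked example of the transformation above: for a DFmode access
   at (plus (reg X) (const_int 0x1034)) with !INT14_OK_STRICT, the
   mask is 0x1f and 0x1034 & 0x1f == 0x14 >= 0x10, so we round up to
   Y = 0x1040.  The base X + 0x1040 is reloaded into a register Z
   and the access becomes memory (Z + -0xc), a displacement that
   fits the 5-bit field of the FP load and store instructions.  */
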
10575 #include "gt-pa.h"